Mirror of https://github.com/xtekky/gpt4free.git
Add Proxy Support and Create Provider to Readme
Add proxy support to many providers
commit c6b33e527c (parent 7a699c8ca0)
9 changed files with 103 additions and 80 deletions
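Across the async providers the change follows one pattern: `create_async_generator` gains a `proxy` keyword and forwards it to aiohttp's request call. Below is a minimal sketch of that pattern, modeled on the `ChatGpt` template further down in this diff; the class name and endpoint are placeholders, not code from this commit.

```py
from __future__ import annotations

from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt


class ExampleProvider(AsyncGeneratorProvider):
    # Hypothetical provider, only to illustrate how the proxy keyword is threaded through.
    url = "https://example.com"

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,  # the keyword this commit adds to many providers
        **kwargs
    ) -> AsyncResult:
        async with ClientSession() as session:
            data = {"prompt": format_prompt(messages)}
            # aiohttp accepts the proxy URL directly on the request
            async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response:
                response.raise_for_status()
                async for chunk in response.content:
                    if chunk:
                        yield chunk.decode()
```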
README.md (31 changed lines)
@@ -224,19 +224,15 @@ from g4f.Provider import (
     Bing,
     ChatBase,
     ChatgptAi,
     ChatgptLogin,
     CodeLinkAva,
     DeepAi,
     H2o,
     HuggingChat,
     Opchatgpts,
     OpenAssistant,
     OpenaiChat,
     Raycast,
     Theb,
     Vercel,
     Vitalentum,
     Wewordle,
     Ylokh,
     You,
     Yqcloud,
@@ -284,19 +280,18 @@ _providers = [
     g4f.Provider.Aichat,
     g4f.Provider.ChatBase,
     g4f.Provider.Bing,
     g4f.Provider.CodeLinkAva,
     g4f.Provider.DeepAi,
     g4f.Provider.GptGo,
     g4f.Provider.Wewordle,
     g4f.Provider.You,
     g4f.Provider.Yqcloud,
 ]

-async def run_provider(provider: g4f.Provider.AsyncProvider):
+async def run_provider(provider: g4f.Provider.BaseProvider):
     try:
-        response = await provider.create_async(
-            model=g4f.models.default.name,
+        response = await g4f.ChatCompletion.create_async(
+            model=g4f.models.default,
             messages=[{"role": "user", "content": "Hello"}],
+            provider=provider,
         )
         print(f"{provider.__name__}:", response)
     except Exception as e:
@@ -311,6 +306,22 @@ async def run_all():
 asyncio.run(run_all())
 ```

+##### Proxy Support:
+
+All providers support specifying a proxy in the create function.
+
+```py
+import g4f
+
+response = await g4f.ChatCompletion.create(
+    model=g4f.models.default,
+    messages=[{"role": "user", "content": "Hello"}],
+    proxy="http://host:port",
+    # or socks5://user:pass@host:port
+)
+print(f"Result:", response)
+```
+
 ### interference openai-proxy api (use with openai python package)

 #### run interference from pypi package:
@@ -528,7 +539,7 @@ Call in your terminal the "create_provider" script:
 $ python etc/tool/create_provider.py
 ```
 1. Enter your name for the new provider.
-2. Copy&Paste cURL command from your browser developer tools.
+2. Copy&Paste a cURL command from your browser developer tools.
 3. Let the AI create the provider for you.
 4. Customize the provider according to your needs.
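Combining the two README snippets in the hunks above, the proxy can also be pinned to a single provider; a small usage sketch, with the provider choice and proxy address picked arbitrarily:

```py
import g4f

# any provider patched in this commit works here; FreeGpt is just an example
response = await g4f.ChatCompletion.create_async(
    model=g4f.models.default,
    messages=[{"role": "user", "content": "Hello"}],
    provider=g4f.Provider.FreeGpt,
    proxy="http://host:port",
)
print(response)
```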
etc/tool/create_provider.py
@@ -12,23 +12,13 @@ def read_code(text):
     if match:
         return match.group("code")

-def read_result(result):
-    lines = []
-    for line in result.split("\n"):
-        if (line.startswith("```")):
-            break
-        if (line):
-            lines.append(line)
-    explanation = "\n".join(lines) if lines else ""
-    return explanation, read_code(result)
-
 def input_command():
     print("Enter/Paste the cURL command. Ctrl-D or Ctrl-Z ( windows ) to save it.")
     contents = []
     while True:
         try:
             line = input()
-        except:
+        except EOFError:
             break
         contents.append(line)
     return "\n".join(contents)
@@ -41,12 +31,12 @@ from __future__ import annotations

 from aiohttp import ClientSession

-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 from .helper import format_prompt


-class ChatgptDuo(AsyncGeneratorProvider):
+class ChatGpt(AsyncGeneratorProvider):
     url = "https://chat-gpt.com"
     supports_gpt_35_turbo = True
     working = True
@@ -55,9 +45,10 @@ class ChatgptDuo(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
+        proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         headers = {
             "authority": "chat-gpt.com",
             "accept": "application/json",
@@ -65,16 +56,16 @@ class ChatgptDuo(AsyncGeneratorProvider):
             "referer": f"{cls.url}/chat",
         }
         async with ClientSession(headers=headers) as session:
-            prompt = format_prompt(messages),
+            prompt = format_prompt(messages)
             data = {
                 "prompt": prompt,
-                "purpose": "ask",
+                "purpose": "",
             }
-            async with session.post(cls.url + "/api/chat", json=data) as response:
+            async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response:
                 response.raise_for_status()
-                async for stream in response.content:
-                    if stream:
-                        yield stream.decode()
+                async for chunk in response.content:
+                    if chunk:
+                        yield chunk.decode()
 """

 if not path.isfile(provider_path):
@@ -95,18 +86,23 @@ Replace "hello" with `format_prompt(messages)`.
 And replace "gpt-3.5-turbo" with `model`.
 """

 print("Create code...")
-response = g4f.ChatCompletion.create(
+response = []
+for chunk in g4f.ChatCompletion.create(
     model=g4f.models.gpt_35_long,
     messages=[{"role": "user", "content": prompt}],
     auth=True,
-    timeout=120,
-)
-print(response)
-explanation, code = read_result(response)
+    timeout=300,
+    stream=True
+):
+    response.append(chunk)
+    print(chunk, end="", flush=True)
+print()
+response = "".join(response)
+
+code = read_code(response)
 if code:
     with open(provider_path, "w") as file:
         file.write(code)
     print("Saved at:", provider_path)
     with open(f"g4f/Provider/__init__.py", "a") as file:
         file.write(f"\nfrom .{name} import {name}")
 else:
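For context on the dropped `read_result` helper: the surviving `read_code` extracts the fenced block from the model's reply via a named regex group. The exact pattern sits above this hunk and is not shown, so the regex below is an assumption, not the repository's code:

```py
import re

def read_code(text):
    # Assumed pattern: capture the body of a fenced ``` / ```py block as the named group "code".
    match = re.search(r"```(?:python|py|)\n(?P<code>[\S\s]+?)\n```", text)
    if match:
        return match.group("code")
```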
g4f/Provider/FreeGpt.py
@@ -2,7 +2,7 @@ from __future__ import annotations

 import time, hashlib, random

-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from ..requests import StreamSession
 from .base_provider import AsyncGeneratorProvider

@@ -20,11 +20,11 @@ class FreeGpt(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         timeout: int = 120,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         async with StreamSession(
             impersonate="chrome107",
             timeout=timeout,
Bard.py
@@ -6,7 +6,9 @@ import re

 from aiohttp import ClientSession

-from ..base_provider import AsyncProvider, format_prompt, get_cookies
+from ...typing import Messages
+from ..base_provider import AsyncProvider
+from ..helper import format_prompt, get_cookies


 class Bard(AsyncProvider):
@@ -19,25 +21,22 @@ class Bard(AsyncProvider):
     async def create_async(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         cookies: dict = None,
         **kwargs
     ) -> str:
         prompt = format_prompt(messages)
         if proxy and "://" not in proxy:
             proxy = f"http://{proxy}"
         if not cookies:
             cookies = get_cookies(".google.com")

         headers = {
             'authority': 'bard.google.com',
-            'origin': 'https://bard.google.com',
-            'referer': 'https://bard.google.com/',
+            'origin': cls.url,
+            'referer': f'{cls.url}/',
             'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
             'x-same-domain': '1',
         }

         async with ClientSession(
             cookies=cookies,
             headers=headers
@@ -67,7 +66,6 @@ class Bard(AsyncProvider):
             'lamda',
             'BardFrontendService'
         ])
-
         async with session.post(
             f'{cls.url}/_/BardChatUi/data/{intents}/StreamGenerate',
             data=data,
HuggingChat.py
@@ -4,8 +4,9 @@ import json, uuid

 from aiohttp import ClientSession

-from ...typing import AsyncGenerator
-from ..base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt, get_cookies


 class HuggingChat(AsyncGeneratorProvider):
@@ -18,12 +19,12 @@ class HuggingChat(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         stream: bool = True,
         proxy: str = None,
         cookies: dict = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         model = model if model else cls.model
         if proxy and "://" not in proxy:
             proxy = f"http://{proxy}"
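The unchanged tail of the HuggingChat hunk shows the scheme fallback a few providers apply before using the proxy value. Pulled out as a standalone helper it would look roughly like this; the function name is made up for illustration, since the providers simply inline the two lines:

```py
def normalize_proxy(proxy: str = None):
    # Hypothetical helper mirroring the inlined check: default to http:// when no scheme is given.
    if proxy and "://" not in proxy:
        proxy = f"http://{proxy}"
    return proxy


print(normalize_proxy("host:port"))           # -> http://host:port
print(normalize_proxy("socks5://host:1080"))  # left unchanged
```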
OpenAssistant.py
@@ -4,8 +4,9 @@ import json

 from aiohttp import ClientSession

-from ...typing import Any, AsyncGenerator
-from ..base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt, get_cookies


 class OpenAssistant(AsyncGeneratorProvider):
@@ -18,11 +19,11 @@ class OpenAssistant(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         cookies: dict = None,
-        **kwargs: Any
-    ) -> AsyncGenerator:
+        **kwargs
+    ) -> AsyncResult:
         if not cookies:
             cookies = get_cookies("open-assistant.io")

OpenaiChat.py
@@ -4,7 +4,7 @@ import uuid, json, time

 from ..base_provider import AsyncGeneratorProvider
 from ..helper import get_browser, get_cookies, format_prompt
-from ...typing import AsyncGenerator
+from ...typing import AsyncResult, Messages
 from ...requests import StreamSession

 class OpenaiChat(AsyncGeneratorProvider):
@@ -18,13 +18,13 @@ class OpenaiChat(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
+        timeout: int = 120,
         access_token: str = None,
         cookies: dict = None,
-        timeout: int = 30,
-        **kwargs: dict
-    ) -> AsyncGenerator:
+        **kwargs
+    ) -> AsyncResult:
         proxies = {"https": proxy}
         if not access_token:
             access_token = await cls.get_access_token(cookies, proxies)
@@ -32,7 +32,12 @@ class OpenaiChat(AsyncGeneratorProvider):
             "Accept": "text/event-stream",
             "Authorization": f"Bearer {access_token}",
         }
-        async with StreamSession(proxies=proxies, headers=headers, impersonate="chrome107", timeout=timeout) as session:
+        async with StreamSession(
+            proxies=proxies,
+            headers=headers,
+            impersonate="chrome107",
+            timeout=timeout
+        ) as session:
             messages = [
                 {
                     "id": str(uuid.uuid4()),
Raycast.py
@@ -4,7 +4,7 @@ import json

 import requests

-from ...typing import Any, CreateResult
+from ...typing import CreateResult, Messages
 from ..base_provider import BaseProvider

@@ -19,9 +19,10 @@ class Raycast(BaseProvider):
     @staticmethod
     def create_completion(
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         stream: bool,
-        **kwargs: Any,
+        proxy: str = None,
+        **kwargs,
     ) -> CreateResult:
         auth = kwargs.get('auth')
         headers = {
@@ -47,7 +48,13 @@ class Raycast(BaseProvider):
             "system_instruction": "markdown",
             "temperature": 0.5
         }
-        response = requests.post("https://backend.raycast.com/api/v1/ai/chat_completions", headers=headers, json=data, stream=True)
+        response = requests.post(
+            "https://backend.raycast.com/api/v1/ai/chat_completions",
+            headers=headers,
+            json=data,
+            stream=True,
+            proxies={"https": proxy}
+        )
         for token in response.iter_lines():
             if b'data: ' not in token:
                 continue
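Raycast (and Theb below) go through `requests` rather than aiohttp, so the proxy is supplied as a per-scheme mapping instead of a single keyword. A short sketch of that interface; the proxy address and target URL are placeholders:

```py
import requests

proxy = "http://host:port"  # placeholder proxy address
# requests selects the proxy by URL scheme; the endpoints patched in this commit are
# https, which is why only the "https" key is mapped.
response = requests.get("https://example.com", proxies={"https": proxy})
print(response.status_code)
```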
Theb.py
@@ -2,11 +2,11 @@ from __future__ import annotations

 import json
 import random

 import requests

-from ...typing import Any, CreateResult
+from ...typing import Any, CreateResult, Messages
 from ..base_provider import BaseProvider
+from ..helper import format_prompt


 class Theb(BaseProvider):
@@ -19,12 +19,11 @@ class Theb(BaseProvider):
     @staticmethod
     def create_completion(
         model: str,
-        messages: list[dict[str, str]],
-        stream: bool, **kwargs: Any) -> CreateResult:
-
-        conversation = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
-        conversation += "\nassistant: "
-
+        messages: Messages,
+        stream: bool,
+        proxy: str = None,
+        **kwargs
+    ) -> CreateResult:
         auth = kwargs.get("auth", {
             "bearer_token":"free",
             "org_id":"theb",
@@ -54,7 +53,7 @@ class Theb(BaseProvider):
         req_rand = random.randint(100000000, 9999999999)

         json_data: dict[str, Any] = {
-            "text" : conversation,
+            "text" : format_prompt(messages),
             "category" : "04f58f64a4aa4191a957b47290fee864",
             "model" : "ee8d4f29cb7047f78cbe84313ed6ace8",
             "model_params": {
@@ -67,8 +66,13 @@ class Theb(BaseProvider):
             }
         }

-        response = requests.post(f"https://beta.theb.ai/api/conversation?org_id={org_id}&req_rand={req_rand}",
-                                 headers=headers, json=json_data, stream=True)
+        response = requests.post(
+            f"https://beta.theb.ai/api/conversation?org_id={org_id}&req_rand={req_rand}",
+            headers=headers,
+            json=json_data,
+            stream=True,
+            proxies={"https": proxy}
+        )

         response.raise_for_status()
         content = ""