Add Proxy Support and Create Provider to Readme

Add proxy support to many providers
This commit is contained in:
Heiner Lohaus 2023-10-09 13:33:20 +02:00
parent 7a699c8ca0
commit c6b33e527c
9 changed files with 103 additions and 80 deletions

View file

@ -224,19 +224,15 @@ from g4f.Provider import (
Bing, Bing,
ChatBase, ChatBase,
ChatgptAi, ChatgptAi,
ChatgptLogin,
CodeLinkAva,
DeepAi, DeepAi,
H2o, H2o,
HuggingChat, HuggingChat,
Opchatgpts,
OpenAssistant, OpenAssistant,
OpenaiChat, OpenaiChat,
Raycast, Raycast,
Theb, Theb,
Vercel, Vercel,
Vitalentum, Vitalentum,
Wewordle,
Ylokh, Ylokh,
You, You,
Yqcloud, Yqcloud,
@ -284,19 +280,18 @@ _providers = [
g4f.Provider.Aichat, g4f.Provider.Aichat,
g4f.Provider.ChatBase, g4f.Provider.ChatBase,
g4f.Provider.Bing, g4f.Provider.Bing,
g4f.Provider.CodeLinkAva,
g4f.Provider.DeepAi, g4f.Provider.DeepAi,
g4f.Provider.GptGo, g4f.Provider.GptGo,
g4f.Provider.Wewordle,
g4f.Provider.You, g4f.Provider.You,
g4f.Provider.Yqcloud, g4f.Provider.Yqcloud,
] ]
async def run_provider(provider: g4f.Provider.AsyncProvider): async def run_provider(provider: g4f.Provider.BaseProvider):
try: try:
response = await provider.create_async( response = await g4f.ChatCompletion.create_async(
model=g4f.models.default.name, model=g4f.models.default,
messages=[{"role": "user", "content": "Hello"}], messages=[{"role": "user", "content": "Hello"}],
provider=provider,
) )
print(f"{provider.__name__}:", response) print(f"{provider.__name__}:", response)
except Exception as e: except Exception as e:
@ -311,6 +306,22 @@ async def run_all():
asyncio.run(run_all()) asyncio.run(run_all())
``` ```
##### Proxy Support:
All providers support specifying a proxy in the create function.
```py
import g4f
response = await g4f.ChatCompletion.create(
model=g4f.models.default,
messages=[{"role": "user", "content": "Hello"}],
proxy="http://host:port",
# or socks5://user:pass@host:port
)
print("Result:", response)
```
### interference openai-proxy api (use with openai python package) ### interference openai-proxy api (use with openai python package)
#### run interference from pypi package: #### run interference from pypi package:
@ -528,7 +539,7 @@ Call in your terminal the "create_provider" script:
$ python etc/tool/create_provider.py $ python etc/tool/create_provider.py
``` ```
1. Enter your name for the new provider. 1. Enter your name for the new provider.
2. Copy&Paste cURL command from your browser developer tools. 2. Copy&Paste a cURL command from your browser developer tools.
3. Let the AI create the provider for you. 3. Let the AI create the provider for you.
4. Customize the provider according to your needs. 4. Customize the provider according to your needs.

View file

@ -12,23 +12,13 @@ def read_code(text):
if match: if match:
return match.group("code") return match.group("code")
def read_result(result):
lines = []
for line in result.split("\n"):
if (line.startswith("```")):
break
if (line):
lines.append(line)
explanation = "\n".join(lines) if lines else ""
return explanation, read_code(result)
def input_command(): def input_command():
print("Enter/Paste the cURL command. Ctrl-D or Ctrl-Z ( windows ) to save it.") print("Enter/Paste the cURL command. Ctrl-D or Ctrl-Z ( windows ) to save it.")
contents = [] contents = []
while True: while True:
try: try:
line = input() line = input()
except: except EOFError:
break break
contents.append(line) contents.append(line)
return "\n".join(contents) return "\n".join(contents)
@ -41,12 +31,12 @@ from __future__ import annotations
from aiohttp import ClientSession from aiohttp import ClientSession
from ..typing import AsyncGenerator from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt from .helper import format_prompt
class ChatgptDuo(AsyncGeneratorProvider): class ChatGpt(AsyncGeneratorProvider):
url = "https://chat-gpt.com" url = "https://chat-gpt.com"
supports_gpt_35_turbo = True supports_gpt_35_turbo = True
working = True working = True
@ -55,9 +45,10 @@ class ChatgptDuo(AsyncGeneratorProvider):
async def create_async_generator( async def create_async_generator(
cls, cls,
model: str, model: str,
messages: list[dict[str, str]], messages: Messages,
proxy: str = None,
**kwargs **kwargs
) -> AsyncGenerator: ) -> AsyncResult:
headers = { headers = {
"authority": "chat-gpt.com", "authority": "chat-gpt.com",
"accept": "application/json", "accept": "application/json",
@ -65,16 +56,16 @@ class ChatgptDuo(AsyncGeneratorProvider):
"referer": f"{cls.url}/chat", "referer": f"{cls.url}/chat",
} }
async with ClientSession(headers=headers) as session: async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages), prompt = format_prompt(messages)
data = { data = {
"prompt": prompt, "prompt": prompt,
"purpose": "ask", "purpose": "",
} }
async with session.post(cls.url + "/api/chat", json=data) as response: async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response:
response.raise_for_status() response.raise_for_status()
async for stream in response.content: async for chunk in response.content:
if stream: if chunk:
yield stream.decode() yield chunk.decode()
""" """
if not path.isfile(provider_path): if not path.isfile(provider_path):
@ -95,18 +86,23 @@ Replace "hello" with `format_prompt(messages)`.
And replace "gpt-3.5-turbo" with `model`. And replace "gpt-3.5-turbo" with `model`.
""" """
print("Create code...") response = []
response = g4f.ChatCompletion.create( for chunk in g4f.ChatCompletion.create(
model=g4f.models.gpt_35_long, model=g4f.models.gpt_35_long,
messages=[{"role": "user", "content": prompt}], messages=[{"role": "user", "content": prompt}],
auth=True, timeout=300,
timeout=120, stream=True
) ):
print(response) response.append(chunk)
explanation, code = read_result(response) print(chunk, end="", flush=True)
print()
response = "".join(response)
code = read_code(response)
if code: if code:
with open(provider_path, "w") as file: with open(provider_path, "w") as file:
file.write(code) file.write(code)
print("Saved at:", provider_path)
with open(f"g4f/Provider/__init__.py", "a") as file: with open(f"g4f/Provider/__init__.py", "a") as file:
file.write(f"\nfrom .{name} import {name}") file.write(f"\nfrom .{name} import {name}")
else: else:

View file

@ -2,7 +2,7 @@ from __future__ import annotations
import time, hashlib, random import time, hashlib, random
from ..typing import AsyncGenerator from ..typing import AsyncResult, Messages
from ..requests import StreamSession from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider from .base_provider import AsyncGeneratorProvider
@ -20,11 +20,11 @@ class FreeGpt(AsyncGeneratorProvider):
async def create_async_generator( async def create_async_generator(
cls, cls,
model: str, model: str,
messages: list[dict[str, str]], messages: Messages,
proxy: str = None, proxy: str = None,
timeout: int = 120, timeout: int = 120,
**kwargs **kwargs
) -> AsyncGenerator: ) -> AsyncResult:
async with StreamSession( async with StreamSession(
impersonate="chrome107", impersonate="chrome107",
timeout=timeout, timeout=timeout,

View file

@ -6,7 +6,9 @@ import re
from aiohttp import ClientSession from aiohttp import ClientSession
from ..base_provider import AsyncProvider, format_prompt, get_cookies from ...typing import Messages
from ..base_provider import AsyncProvider
from ..helper import format_prompt, get_cookies
class Bard(AsyncProvider): class Bard(AsyncProvider):
@ -19,25 +21,22 @@ class Bard(AsyncProvider):
async def create_async( async def create_async(
cls, cls,
model: str, model: str,
messages: list[dict[str, str]], messages: Messages,
proxy: str = None, proxy: str = None,
cookies: dict = None, cookies: dict = None,
**kwargs **kwargs
) -> str: ) -> str:
prompt = format_prompt(messages) prompt = format_prompt(messages)
if proxy and "://" not in proxy:
proxy = f"http://{proxy}"
if not cookies: if not cookies:
cookies = get_cookies(".google.com") cookies = get_cookies(".google.com")
headers = { headers = {
'authority': 'bard.google.com', 'authority': 'bard.google.com',
'origin': 'https://bard.google.com', 'origin': cls.url,
'referer': 'https://bard.google.com/', 'referer': f'{cls.url}/',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36', 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
'x-same-domain': '1', 'x-same-domain': '1',
} }
async with ClientSession( async with ClientSession(
cookies=cookies, cookies=cookies,
headers=headers headers=headers
@ -67,7 +66,6 @@ class Bard(AsyncProvider):
'lamda', 'lamda',
'BardFrontendService' 'BardFrontendService'
]) ])
async with session.post( async with session.post(
f'{cls.url}/_/BardChatUi/data/{intents}/StreamGenerate', f'{cls.url}/_/BardChatUi/data/{intents}/StreamGenerate',
data=data, data=data,

View file

@ -4,8 +4,9 @@ import json, uuid
from aiohttp import ClientSession from aiohttp import ClientSession
from ...typing import AsyncGenerator from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, format_prompt, get_cookies from ..base_provider import AsyncGeneratorProvider
from ..helper import format_prompt, get_cookies
class HuggingChat(AsyncGeneratorProvider): class HuggingChat(AsyncGeneratorProvider):
@ -18,12 +19,12 @@ class HuggingChat(AsyncGeneratorProvider):
async def create_async_generator( async def create_async_generator(
cls, cls,
model: str, model: str,
messages: list[dict[str, str]], messages: Messages,
stream: bool = True, stream: bool = True,
proxy: str = None, proxy: str = None,
cookies: dict = None, cookies: dict = None,
**kwargs **kwargs
) -> AsyncGenerator: ) -> AsyncResult:
model = model if model else cls.model model = model if model else cls.model
if proxy and "://" not in proxy: if proxy and "://" not in proxy:
proxy = f"http://{proxy}" proxy = f"http://{proxy}"

View file

@ -4,8 +4,9 @@ import json
from aiohttp import ClientSession from aiohttp import ClientSession
from ...typing import Any, AsyncGenerator from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, format_prompt, get_cookies from ..base_provider import AsyncGeneratorProvider
from ..helper import format_prompt, get_cookies
class OpenAssistant(AsyncGeneratorProvider): class OpenAssistant(AsyncGeneratorProvider):
@ -18,11 +19,11 @@ class OpenAssistant(AsyncGeneratorProvider):
async def create_async_generator( async def create_async_generator(
cls, cls,
model: str, model: str,
messages: list[dict[str, str]], messages: Messages,
proxy: str = None, proxy: str = None,
cookies: dict = None, cookies: dict = None,
**kwargs: Any **kwargs
) -> AsyncGenerator: ) -> AsyncResult:
if not cookies: if not cookies:
cookies = get_cookies("open-assistant.io") cookies = get_cookies("open-assistant.io")

View file

@ -4,7 +4,7 @@ import uuid, json, time
from ..base_provider import AsyncGeneratorProvider from ..base_provider import AsyncGeneratorProvider
from ..helper import get_browser, get_cookies, format_prompt from ..helper import get_browser, get_cookies, format_prompt
from ...typing import AsyncGenerator from ...typing import AsyncResult, Messages
from ...requests import StreamSession from ...requests import StreamSession
class OpenaiChat(AsyncGeneratorProvider): class OpenaiChat(AsyncGeneratorProvider):
@ -18,13 +18,13 @@ class OpenaiChat(AsyncGeneratorProvider):
async def create_async_generator( async def create_async_generator(
cls, cls,
model: str, model: str,
messages: list[dict[str, str]], messages: Messages,
proxy: str = None, proxy: str = None,
timeout: int = 120,
access_token: str = None, access_token: str = None,
cookies: dict = None, cookies: dict = None,
timeout: int = 30, **kwargs
**kwargs: dict ) -> AsyncResult:
) -> AsyncGenerator:
proxies = {"https": proxy} proxies = {"https": proxy}
if not access_token: if not access_token:
access_token = await cls.get_access_token(cookies, proxies) access_token = await cls.get_access_token(cookies, proxies)
@ -32,7 +32,12 @@ class OpenaiChat(AsyncGeneratorProvider):
"Accept": "text/event-stream", "Accept": "text/event-stream",
"Authorization": f"Bearer {access_token}", "Authorization": f"Bearer {access_token}",
} }
async with StreamSession(proxies=proxies, headers=headers, impersonate="chrome107", timeout=timeout) as session: async with StreamSession(
proxies=proxies,
headers=headers,
impersonate="chrome107",
timeout=timeout
) as session:
messages = [ messages = [
{ {
"id": str(uuid.uuid4()), "id": str(uuid.uuid4()),

View file

@ -4,7 +4,7 @@ import json
import requests import requests
from ...typing import Any, CreateResult from ...typing import CreateResult, Messages
from ..base_provider import BaseProvider from ..base_provider import BaseProvider
@ -19,9 +19,10 @@ class Raycast(BaseProvider):
@staticmethod @staticmethod
def create_completion( def create_completion(
model: str, model: str,
messages: list[dict[str, str]], messages: Messages,
stream: bool, stream: bool,
**kwargs: Any, proxy: str = None,
**kwargs,
) -> CreateResult: ) -> CreateResult:
auth = kwargs.get('auth') auth = kwargs.get('auth')
headers = { headers = {
@ -47,7 +48,13 @@ class Raycast(BaseProvider):
"system_instruction": "markdown", "system_instruction": "markdown",
"temperature": 0.5 "temperature": 0.5
} }
response = requests.post("https://backend.raycast.com/api/v1/ai/chat_completions", headers=headers, json=data, stream=True) response = requests.post(
"https://backend.raycast.com/api/v1/ai/chat_completions",
headers=headers,
json=data,
stream=True,
proxies={"https": proxy}
)
for token in response.iter_lines(): for token in response.iter_lines():
if b'data: ' not in token: if b'data: ' not in token:
continue continue

View file

@ -2,11 +2,11 @@ from __future__ import annotations
import json import json
import random import random
import requests import requests
from ...typing import Any, CreateResult from ...typing import Any, CreateResult, Messages
from ..base_provider import BaseProvider from ..base_provider import BaseProvider
from ..helper import format_prompt
class Theb(BaseProvider): class Theb(BaseProvider):
@ -19,12 +19,11 @@ class Theb(BaseProvider):
@staticmethod @staticmethod
def create_completion( def create_completion(
model: str, model: str,
messages: list[dict[str, str]], messages: Messages,
stream: bool, **kwargs: Any) -> CreateResult: stream: bool,
proxy: str = None,
conversation = "\n".join(f"{message['role']}: {message['content']}" for message in messages) **kwargs
conversation += "\nassistant: " ) -> CreateResult:
auth = kwargs.get("auth", { auth = kwargs.get("auth", {
"bearer_token":"free", "bearer_token":"free",
"org_id":"theb", "org_id":"theb",
@ -54,7 +53,7 @@ class Theb(BaseProvider):
req_rand = random.randint(100000000, 9999999999) req_rand = random.randint(100000000, 9999999999)
json_data: dict[str, Any] = { json_data: dict[str, Any] = {
"text" : conversation, "text" : format_prompt(messages),
"category" : "04f58f64a4aa4191a957b47290fee864", "category" : "04f58f64a4aa4191a957b47290fee864",
"model" : "ee8d4f29cb7047f78cbe84313ed6ace8", "model" : "ee8d4f29cb7047f78cbe84313ed6ace8",
"model_params": { "model_params": {
@ -67,8 +66,13 @@ class Theb(BaseProvider):
} }
} }
response = requests.post(f"https://beta.theb.ai/api/conversation?org_id={org_id}&req_rand={req_rand}", response = requests.post(
headers=headers, json=json_data, stream=True) f"https://beta.theb.ai/api/conversation?org_id={org_id}&req_rand={req_rand}",
headers=headers,
json=json_data,
stream=True,
proxies={"https": proxy}
)
response.raise_for_status() response.raise_for_status()
content = "" content = ""