Add proxy support to all providers

commit e46b5fe043
parent 115c41c439
Author: Heiner Lohaus
Date: 2023-10-09 10:22:17 +02:00
29 changed files with 162 additions and 113 deletions
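For callers, the effect of this commit is that a `proxy` keyword passed to the public API is forwarded down to whichever provider handles the request. A minimal usage sketch (the proxy URL is a placeholder, not something this commit ships):

```python
import g4f

response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    proxy="http://127.0.0.1:8080",  # hypothetical local proxy; passed through **kwargs to the provider
)
print(response)
```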

View file

@@ -2,7 +2,7 @@ from __future__ import annotations
 import json
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from ..requests import StreamSession
 from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
@@ -16,12 +16,12 @@ class AItianhu(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         cookies: dict = None,
-        timeout: int = 30,
+        timeout: int = 120,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         if not cookies:
             cookies = get_cookies("www.aitianhu.com")
         data = {

View file

@@ -2,7 +2,7 @@ from __future__ import annotations
 import random, json
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from ..requests import StreamSession
 from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
@@ -20,13 +20,13 @@ class AItianhuSpace(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         domain: str = None,
         cookies: dict = None,
-        timeout: int = 30,
+        timeout: int = 120,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         if not model:
             model = "gpt-3.5-turbo"
         elif not model in domains:

View file

@@ -2,7 +2,7 @@ from __future__ import annotations
 from aiohttp import ClientSession
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
@@ -15,11 +15,10 @@ class Acytoo(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         async with ClientSession(
             headers=_create_header()
         ) as session:

View file

@@ -1,7 +1,7 @@
 from __future__ import annotations
 from aiohttp import ClientSession
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 class AiAsk(AsyncGeneratorProvider):
@@ -13,9 +13,10 @@ class AiAsk(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
+        proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         headers = {
             "accept": "application/json, text/plain, */*",
             "origin": cls.url,
@@ -33,7 +34,7 @@ class AiAsk(AsyncGeneratorProvider):
         }
         buffer = ""
         rate_limit = "您的免费额度不够使用这个模型啦,请点击右上角登录继续使用!"
-        async with session.post(f"{cls.url}/v1/chat/gpt/", json=data) as response:
+        async with session.post(f"{cls.url}/v1/chat/gpt/", json=data, proxy=proxy) as response:
             response.raise_for_status()
             async for chunk in response.content.iter_any():
                 buffer += chunk.decode()
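The aiohttp-based providers all follow the same pattern: accept `proxy: str = None` and hand it to each request call, because aiohttp takes the proxy per request rather than per session. A reduced sketch of the pattern (the function name and URL are illustrative, not part of the commit):

```python
from aiohttp import ClientSession

async def post_json(url: str, payload: dict, proxy: str = None) -> str:
    async with ClientSession() as session:
        # aiohttp accepts the proxy URL on each call: get(), post(), ws_connect().
        async with session.post(url, json=payload, proxy=proxy) as response:
            response.raise_for_status()
            return await response.text()
```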

View file

@@ -3,7 +3,7 @@ from __future__ import annotations
 import time
 import hashlib
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from ..requests import StreamSession
 from .base_provider import AsyncGeneratorProvider
@@ -17,11 +17,16 @@ class Aibn(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
-        timeout: int = 30,
+        messages: Messages,
+        proxy: str = None,
+        timeout: int = 120,
         **kwargs
-    ) -> AsyncGenerator:
-        async with StreamSession(impersonate="chrome107", timeout=timeout) as session:
+    ) -> AsyncResult:
+        async with StreamSession(
+            impersonate="chrome107",
+            proxies={"https": proxy},
+            timeout=timeout
+        ) as session:
             timestamp = int(time.time())
             data = {
                 "messages": messages,

View file

@@ -2,6 +2,7 @@ from __future__ import annotations
 from aiohttp import ClientSession
+from ..typing import Messages
 from .base_provider import AsyncProvider, format_prompt
@@ -13,7 +14,7 @@ class Aichat(AsyncProvider):
     @staticmethod
     async def create_async(
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         **kwargs
     ) -> str:

View file

@@ -7,7 +7,7 @@ import json
 from datetime import datetime
 from aiohttp import ClientSession
-from ..typing import SHA256, AsyncGenerator
+from ..typing import SHA256, AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
@@ -19,11 +19,11 @@ class Ails(AsyncGeneratorProvider):
     @staticmethod
     async def create_async_generator(
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         stream: bool,
         proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         headers = {
             "authority": "api.caipacity.com",
             "accept": "*/*",

View file

@@ -2,7 +2,7 @@ from __future__ import annotations
 from ..requests import StreamSession
 from .base_provider import AsyncGeneratorProvider
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 # to recreate this easily, send a post request to https://chat.aivvm.com/api/models
 models = {
@@ -26,11 +26,12 @@ class Aivvm(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         stream: bool,
-        timeout: int = 30,
+        proxy: str = None,
+        timeout: int = 120,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         if not model:
             model = "gpt-3.5-turbo"
         elif model not in models:
@@ -48,7 +49,7 @@ class Aivvm(AsyncGeneratorProvider):
             "Origin": cls.url,
             "Referer": f"{cls.url}/",
         }
-        async with StreamSession(impersonate="chrome107", headers=headers, timeout=timeout) as session:
+        async with StreamSession(
+            impersonate="chrome107",
+            headers=headers,
+            proxies={"https": proxy},
+            timeout=timeout
+        ) as session:
             async with session.post(f"{cls.url}/api/chat", json=json_data) as response:
                 response.raise_for_status()
                 async for chunk in response.iter_content():

View file

@@ -7,7 +7,7 @@ import os
 import uuid
 import urllib.parse
 from aiohttp import ClientSession, ClientTimeout
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 class Tones():
@@ -32,11 +32,12 @@ class Bing(AsyncGeneratorProvider):
     @staticmethod
     def create_async_generator(
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
+        proxy: str = None,
         cookies: dict = None,
         tone: str = Tones.creative,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         if len(messages) < 2:
             prompt = messages[0]["content"]
             context = None
@@ -46,9 +47,9 @@ class Bing(AsyncGeneratorProvider):
         if not cookies or "SRCHD" not in cookies:
             cookies = default_cookies
-        return stream_generate(prompt, tone, context, cookies)
+        return stream_generate(prompt, tone, context, proxy, cookies)
-def create_context(messages: list[dict[str, str]]):
+def create_context(messages: Messages):
     context = "".join(f"[{message['role']}](#message)\n{message['content']}\n\n" for message in messages)
     return context
@@ -59,10 +60,10 @@ class Conversation():
         self.clientId = clientId
         self.conversationSignature = conversationSignature
-async def create_conversation(session: ClientSession) -> Conversation:
+async def create_conversation(session: ClientSession, proxy: str = None) -> Conversation:
     url = 'https://www.bing.com/turing/conversation/create?bundleVersion=1.1150.3'
-    async with await session.get(url) as response:
+    async with await session.get(url, proxy=proxy) as response:
         data = await response.json()
         conversationId = data.get('conversationId')
@@ -80,7 +81,7 @@ async def list_conversations(session: ClientSession) -> list:
         response = await response.json()
         return response["chats"]
-async def delete_conversation(session: ClientSession, conversation: Conversation) -> list:
+async def delete_conversation(session: ClientSession, conversation: Conversation, proxy: str = None) -> list:
     url = "https://sydney.bing.com/sydney/DeleteSingleConversation"
     json = {
         "conversationId": conversation.conversationId,
@@ -89,7 +90,7 @@ async def delete_conversation(session: ClientSession, conversation: Conversation
         "source": "cib",
         "optionsSets": ["autosave"]
     }
-    async with session.post(url, json=json) as response:
+    async with session.post(url, json=json, proxy=proxy) as response:
         response = await response.json()
         return response["result"]["value"] == "Success"
@@ -239,20 +240,22 @@ def create_message(conversation: Conversation, prompt: str, tone: str, context:
 async def stream_generate(
         prompt: str,
         tone: str,
-        context: str=None,
-        cookies: dict=None,
+        context: str = None,
+        proxy: str = None,
+        cookies: dict = None
     ):
     async with ClientSession(
         timeout=ClientTimeout(total=900),
         cookies=cookies,
         headers=Defaults.headers,
     ) as session:
-        conversation = await create_conversation(session)
+        conversation = await create_conversation(session, proxy)
         try:
             async with session.ws_connect(
                 f'wss://sydney.bing.com/sydney/ChatHub',
                 autoping=False,
-                params={'sec_access_token': conversation.conversationSignature}
+                params={'sec_access_token': conversation.conversationSignature},
+                proxy=proxy
             ) as wss:
                 await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
@@ -297,4 +300,4 @@ async def stream_generate(
                     raise Exception(f"{result['value']}: {result['message']}")
                 return
         finally:
-            await delete_conversation(session, conversation)
+            await delete_conversation(session, conversation, proxy)
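Because the Bing provider builds its conversation through several module-level helpers, the proxy has to be threaded through each of them; giving every helper a `proxy: str = None` default keeps existing call sites valid. A compressed sketch of that threading (helper names are shortened for illustration):

```python
from aiohttp import ClientSession

async def open_conversation(session: ClientSession, proxy: str = None) -> dict:
    async with session.get("https://www.bing.com/turing/conversation/create", proxy=proxy) as response:
        return await response.json()

async def chat(proxy: str = None) -> None:
    async with ClientSession() as session:
        conversation = await open_conversation(session, proxy)  # would feed the chat payload in the real provider
        # The websocket upgrade accepts the same per-request proxy argument.
        async with session.ws_connect(
            "wss://sydney.bing.com/sydney/ChatHub",
            proxy=proxy,
        ) as wss:
            # Handshake; the real code serializes this via format_message().
            await wss.send_str('{"protocol": "json", "version": 1}\x1e')
```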

View file

@@ -2,7 +2,7 @@ from __future__ import annotations
 from aiohttp import ClientSession
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
@@ -16,9 +16,10 @@ class ChatBase(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
+        proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         if model == "gpt-4":
             chat_id = "quran---tafseer-saadi-pdf-wbgknt7zn"
         elif model == "gpt-3.5-turbo" or not model:
@@ -44,7 +45,7 @@ class ChatBase(AsyncGeneratorProvider):
                 "chatId": chat_id,
                 "conversationId": f"kcXpqEnqUie3dnJlsRi_O-{chat_id}"
             }
-            async with session.post("https://www.chatbase.co/api/fe/chat", json=data) as response:
+            async with session.post("https://www.chatbase.co/api/fe/chat", json=data, proxy=proxy) as response:
                 response.raise_for_status()
                 async for stream in response.content.iter_any():
                     yield stream.decode()

View file

@@ -1,6 +1,6 @@
 from __future__ import annotations
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from ..requests import StreamSession
 from .base_provider import AsyncGeneratorProvider
@@ -14,11 +14,12 @@ class ChatForAi(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
-        timeout: int = 30,
+        messages: Messages,
+        proxy: str = None,
+        timeout: int = 120,
         **kwargs
-    ) -> AsyncGenerator:
-        async with StreamSession(impersonate="chrome107", timeout=timeout) as session:
+    ) -> AsyncResult:
+        async with StreamSession(impersonate="chrome107", proxies={"https": proxy}, timeout=timeout) as session:
             prompt = messages[-1]["content"]
             data = {
                 "conversationId": "temp",

View file

@@ -3,7 +3,7 @@ from __future__ import annotations
 import json
 from aiohttp import ClientSession
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
@@ -16,9 +16,10 @@ class Chatgpt4Online(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
+        proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         async with ClientSession() as session:
             data = {
                 "botId": "default",
@@ -30,7 +31,7 @@ class Chatgpt4Online(AsyncGeneratorProvider):
                 "newMessage": messages[-1]["content"],
                 "stream": True
             }
-            async with session.post(cls.url + "/wp-json/mwai-ui/v1/chats/submit", json=data) as response:
+            async with session.post(cls.url + "/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
                 response.raise_for_status()
                 async for line in response.content:
                     if line.startswith(b"data: "):

View file

@@ -3,6 +3,7 @@ from __future__ import annotations
 import re
 from aiohttp import ClientSession
+from ..typing import Messages
 from .base_provider import AsyncProvider, format_prompt
@@ -18,7 +19,7 @@ class ChatgptAi(AsyncProvider):
     async def create_async(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         **kwargs
     ) -> str:

View file

@@ -2,8 +2,8 @@ from __future__ import annotations
 import time, json, re
 from aiohttp import ClientSession
-from typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 from .helper import format_prompt
@@ -16,10 +16,10 @@ class ChatgptDemo(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         headers = {
             "authority": "chat.chatgptdemo.net",
             "accept-language": "de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US",

View file

@@ -1,5 +1,6 @@
 from __future__ import annotations
+from ..typing import Messages
 from curl_cffi.requests import AsyncSession
 from .base_provider import AsyncProvider, format_prompt
@@ -13,9 +14,9 @@ class ChatgptDuo(AsyncProvider):
     async def create_async(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
-        timeout: int = 30,
+        timeout: int = 120,
         **kwargs
     ) -> str:
         async with AsyncSession(

View file

@@ -19,6 +19,7 @@ class ChatgptX(AsyncGeneratorProvider):
         cls,
         model: str,
         messages: Messages,
+        proxy: str = None,
         **kwargs
     ) -> AsyncResult:
         headers = {
@@ -32,7 +33,7 @@ class ChatgptX(AsyncGeneratorProvider):
             'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
         }
         async with ClientSession(headers=headers) as session:
-            async with session.get(f"{cls.url}/") as response:
+            async with session.get(f"{cls.url}/", proxy=proxy) as response:
                 response = await response.text()
                 result = re.search(r'<meta name="csrf-token" content="(.*?)"', response)
                 if result:
@@ -62,7 +63,7 @@ class ChatgptX(AsyncGeneratorProvider):
                 'x-csrf-token': csrf_token,
                 'x-requested-with': 'XMLHttpRequest'
             }
-            async with session.post(cls.url + '/sendchat', data=data, headers=headers) as response:
+            async with session.post(cls.url + '/sendchat', data=data, headers=headers, proxy=proxy) as response:
                 response.raise_for_status()
                 chat = await response.json()
                 if "response" not in chat or not chat["response"]:
@@ -82,7 +83,7 @@ class ChatgptX(AsyncGeneratorProvider):
                 "conversions_id": chat["conversions_id"],
                 "ass_conversions_id": chat["ass_conversions_id"],
             }
-            async with session.get(f'{cls.url}/chats_stream', params=data, headers=headers) as response:
+            async with session.get(f'{cls.url}/chats_stream', params=data, headers=headers, proxy=proxy) as response:
                 response.raise_for_status()
                 async for line in response.content:
                     if line.startswith(b"data: "):

View file

@@ -2,7 +2,7 @@ from __future__ import annotations
 from aiohttp import ClientSession
 from hashlib import sha256
-from typing import AsyncGenerator, Dict, List
+from ..typing import AsyncResult, Messages, Dict
 from .base_provider import AsyncGeneratorProvider
 from .helper import format_prompt
@@ -17,10 +17,10 @@ class Cromicle(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: List[Dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator[str, None]:
+    ) -> AsyncResult:
         async with ClientSession(
             headers=_create_header()
         ) as session:

View file

@@ -6,22 +6,22 @@ import random
 import hashlib
 from aiohttp import ClientSession
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 class DeepAi(AsyncGeneratorProvider):
-    url: str = "https://deepai.org"
+    url = "https://deepai.org"
     working = True
     supports_gpt_35_turbo = True
     @staticmethod
     async def create_async_generator(
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         token_js = """
 var agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'

View file

@@ -21,10 +21,15 @@ class FreeGpt(AsyncGeneratorProvider):
         cls,
         model: str,
         messages: list[dict[str, str]],
-        timeout: int = 30,
+        proxy: str = None,
+        timeout: int = 120,
         **kwargs
     ) -> AsyncGenerator:
-        async with StreamSession(impersonate="chrome107", timeout=timeout) as session:
+        async with StreamSession(
+            impersonate="chrome107",
+            timeout=timeout,
+            proxies={"https": proxy}
+        ) as session:
             prompt = messages[-1]["content"]
             timestamp = int(time.time())
             data = {

View file

@@ -2,8 +2,8 @@ from __future__ import annotations
 import secrets, time, json
 from aiohttp import ClientSession
-from typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 from .helper import format_prompt
@@ -18,9 +18,10 @@ class GPTalk(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
+        proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         if not model:
             model = "gpt-3.5-turbo"
         timestamp = int(time.time())
@@ -48,7 +49,7 @@ class GPTalk(AsyncGeneratorProvider):
                 "fingerprint": secrets.token_hex(16).zfill(32),
                 "platform": "fingerprint"
             }
-            async with session.post(cls.url + "/api/chatgpt/user/login", json=data) as response:
+            async with session.post(cls.url + "/api/chatgpt/user/login", json=data, proxy=proxy) as response:
                 response.raise_for_status()
                 cls._auth = (await response.json())["data"]
             data = {
@@ -68,11 +69,11 @@ class GPTalk(AsyncGeneratorProvider):
             headers = {
                 'authorization': f'Bearer {cls._auth["token"]}',
             }
-            async with session.post(cls.url + "/api/chatgpt/chatapi/text", json=data, headers=headers) as response:
+            async with session.post(cls.url + "/api/chatgpt/chatapi/text", json=data, headers=headers, proxy=proxy) as response:
                 response.raise_for_status()
                 token = (await response.json())["data"]["token"]
             last_message = ""
-            async with session.get(cls.url + "/api/chatgpt/chatapi/stream", params={"token": token}) as response:
+            async with session.get(cls.url + "/api/chatgpt/chatapi/stream", params={"token": token}, proxy=proxy) as response:
                 response.raise_for_status()
                 async for line in response.content:
                     if line.startswith(b"data: "):

View file

@@ -3,7 +3,7 @@ from __future__ import annotations
 from aiohttp import ClientSession
 import execjs, os, json
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 from .helper import format_prompt
@@ -16,9 +16,10 @@ class GptForLove(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
+        proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         if not model:
             model = "gpt-3.5-turbo"
         headers = {
@@ -47,7 +48,7 @@ class GptForLove(AsyncGeneratorProvider):
                 "secret": get_secret(),
                 **kwargs
             }
-            async with session.post("https://api.gptplus.one/chat-process", json=data) as response:
+            async with session.post("https://api.gptplus.one/chat-process", json=data, proxy=proxy) as response:
                 response.raise_for_status()
                 async for line in response.content:
                     try:

View file

@@ -18,7 +18,6 @@ class GptGo(AsyncGeneratorProvider):
         model: str,
         messages: list[dict[str, str]],
         proxy: str = None,
-        timeout: int = 30,
         **kwargs
     ) -> AsyncGenerator:
         headers = {
@@ -73,6 +72,7 @@ class GptGo(AsyncGeneratorProvider):
             ("model", "str"),
             ("messages", "list[dict[str, str]]"),
             ("stream", "bool"),
+            ("proxy", "str"),
             ("temperature", "float"),
         ]
         param = ", ".join([": ".join(p) for p in params])

View file

@@ -1,7 +1,7 @@
 from __future__ import annotations
 import secrets, json
 from aiohttp import ClientSession
-from typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 from .helper import format_prompt
@@ -14,9 +14,10 @@ class GptGod(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
+        proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         headers = {
             "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
             "Accept": "text/event-stream",
@@ -24,7 +25,7 @@ class GptGod(AsyncGeneratorProvider):
             "Accept-Encoding": "gzip, deflate, br",
             "Alt-Used": "gptgod.site",
             "Connection": "keep-alive",
-            "Referer": "https://gptgod.site/",
+            "Referer": f"{cls.url}/",
             "Sec-Fetch-Dest": "empty",
             "Sec-Fetch-Mode": "cors",
             "Sec-Fetch-Site": "same-origin",
@@ -37,7 +38,7 @@ class GptGod(AsyncGeneratorProvider):
                 "content": prompt,
                 "id": secrets.token_hex(16).zfill(32)
             }
-            async with session.get(f"{cls.url}/api/session/free/gpt3p5", params=data) as response:
+            async with session.get(f"{cls.url}/api/session/free/gpt3p5", params=data, proxy=proxy) as response:
                 response.raise_for_status()
                 event = None
                 async for line in response.content:

View file

@@ -4,7 +4,7 @@ import uuid
 from aiohttp import ClientSession
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 models = {
@@ -39,11 +39,11 @@ class Liaobots(AsyncGeneratorProvider):
     async def create_async_generator(
         cls,
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         auth: str = None,
         proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         model = model if model in models else "gpt-3.5-turbo"
         headers = {
             "authority": "liaobots.com",

View file

@@ -28,6 +28,7 @@ class Myshell(AsyncGeneratorProvider):
         cls,
         model: str,
         messages: list[dict[str, str]],
+        proxy: str = None,
         timeout: int = 90,
         **kwargs
     ) -> AsyncGenerator:
@@ -47,7 +48,8 @@ class Myshell(AsyncGeneratorProvider):
             async with session.ws_connect(
                 "wss://api.myshell.ai/ws/?EIO=4&transport=websocket",
                 autoping=False,
-                timeout=timeout
+                timeout=timeout,
+                proxy=proxy
             ) as wss:
                 # Send and receive hello message
                 await wss.receive_str()

View file

@@ -19,6 +19,7 @@ class Phind(AsyncGeneratorProvider):
         model: str,
         messages: list[dict[str, str]],
         proxy: str = None,
+        timeout: int = 120,
         **kwargs
     ) -> AsyncGenerator:
         chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
@@ -43,7 +44,12 @@ class Phind(AsyncGeneratorProvider):
             "Origin": cls.url,
             "Referer": f"{cls.url}/"
         }
-        async with StreamSession(headers=headers, timeout=(5, 180), proxies={"https": proxy}, impersonate="chrome107") as session:
+        async with StreamSession(
+            headers=headers,
+            timeout=(5, timeout),
+            proxies={"https": proxy},
+            impersonate="chrome107"
+        ) as session:
             async with session.post(f"{cls.url}/api/infer/answer", json=data) as response:
                 response.raise_for_status()
                 new_lines = 0
@@ -71,6 +77,7 @@ class Phind(AsyncGeneratorProvider):
             ("messages", "list[dict[str, str]]"),
             ("stream", "bool"),
             ("proxy", "str"),
+            ("timeout", "int"),
         ]
         param = ", ".join([": ".join(p) for p in params])
         return f"g4f.provider.{cls.__name__} supports: ({param})"

View file

@@ -2,7 +2,7 @@ from __future__ import annotations
 import json, base64, requests, execjs, random, uuid
-from ..typing import Any, TypedDict, CreateResult
+from ..typing import Messages, TypedDict, CreateResult
 from .base_provider import BaseProvider
 from abc import abstractmethod
@@ -17,8 +17,9 @@ class Vercel(BaseProvider):
     @abstractmethod
     def create_completion(
         model: str,
-        messages: list[dict[str, str]],
+        messages: Messages,
         stream: bool,
+        proxy: str = None,
         **kwargs
     ) -> CreateResult:
         if not model:
@@ -52,15 +53,18 @@ class Vercel(BaseProvider):
             'model' : model_info[model]['id'],
             'messages' : messages,
             'playgroundId': str(uuid.uuid4()),
-            'chatIndex' : 0} | model_info[model]['default_params']
+            'chatIndex' : 0,
+            **model_info[model]['default_params'],
+            **kwargs
+        }
         max_retries = kwargs.get('max_retries', 20)
         for i in range(max_retries):
             response = requests.post('https://sdk.vercel.ai/api/generate',
-                headers=headers, json=json_data, stream=True)
+                headers=headers, json=json_data, stream=True, proxies={"https": proxy})
             try:
                 response.raise_for_status()
-            except:
+            except Exception:
                 continue
             for token in response.iter_content(chunk_size=None):
                 yield token.decode()
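The change to `json_data` swaps the `|` dict-union (which needs Python 3.9+) for `**` unpacking and, more importantly, merges the caller's `**kwargs` last, so user-supplied options now override the model's default parameters. A small, self-contained illustration of the merge order (all values are made up):

```python
default_params = {"temperature": 0.7, "maximumLength": 1024}
kwargs = {"temperature": 0.2}  # caller override

json_data = {
    "model": "gpt-3.5-turbo",
    "chatIndex": 0,
    **default_params,  # provider defaults first
    **kwargs,          # later keys win, so the caller's temperature applies
}
assert json_data["temperature"] == 0.2
```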

View file

@@ -42,7 +42,7 @@ class Vitalentum(AsyncGeneratorProvider):
         async with ClientSession(
             headers=headers
         ) as session:
-            async with session.post(cls.url + "/api/converse-edge", json=data, proxy=proxy) as response:
+            async with session.post(f"{cls.url}/api/converse-edge", json=data, proxy=proxy) as response:
                 response.raise_for_status()
                 async for line in response.content:
                     line = line.decode()

View file

@@ -2,7 +2,7 @@ from __future__ import annotations
 from requests import get
 from g4f.models import Model, ModelUtils
 from .Provider import BaseProvider
-from .typing import CreateResult, Union
+from .typing import Messages, CreateResult, Union
 from .debug import logging
 version = '0.1.5.6'
@@ -27,19 +27,19 @@ def get_model_and_provider(model : Union[Model, str],
         if model in ModelUtils.convert:
             model = ModelUtils.convert[model]
         else:
-            raise Exception(f'The model: {model} does not exist')
+            raise ValueError(f'The model: {model} does not exist')
     if not provider:
         provider = model.best_provider
     if not provider:
-        raise Exception(f'No provider found for model: {model}')
+        raise RuntimeError(f'No provider found for model: {model}')
     if not provider.working:
-        raise Exception(f'{provider.__name__} is not working')
+        raise RuntimeError(f'{provider.__name__} is not working')
     if not provider.supports_stream and stream:
-        raise Exception(f'ValueError: {provider.__name__} does not support "stream" argument')
+        raise ValueError(f'{provider.__name__} does not support "stream" argument')
     if logging:
         print(f'Using {provider.__name__} provider')
@@ -48,17 +48,20 @@ def get_model_and_provider(model : Union[Model, str],
 class ChatCompletion:
     @staticmethod
-    def create(model: Union[Model, str],
-        messages : list[dict[str, str]],
+    def create(
+        model: Union[Model, str],
+        messages : Messages,
         provider : Union[type[BaseProvider], None] = None,
         stream : bool = False,
-        auth : Union[str, None] = None, **kwargs) -> Union[CreateResult, str]:
+        auth : Union[str, None] = None,
+        **kwargs
+    ) -> Union[CreateResult, str]:
         model, provider = get_model_and_provider(model, provider, stream)
         if provider.needs_auth and not auth:
-            raise Exception(
-                f'ValueError: {provider.__name__} requires authentication (use auth=\'cookie or token or jwt ...\' param)')
+            raise ValueError(
+                f'{provider.__name__} requires authentication (use auth=\'cookie or token or jwt ...\' param)')
         if provider.needs_auth:
             kwargs['auth'] = auth
@@ -69,10 +72,14 @@ class ChatCompletion:
     @staticmethod
     async def create_async(
         model: Union[Model, str],
-        messages: list[dict[str, str]],
+        messages: Messages,
         provider: Union[type[BaseProvider], None] = None,
+        stream: bool = False,
         **kwargs
     ) -> str:
+        if stream:
+            raise ValueError(f'"create_async" does not support "stream" argument')
         model, provider = get_model_and_provider(model, provider, False)
         return await provider.create_async(model.name, messages, **kwargs)
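At the API level the commit also tightens error handling: unknown models and unsupported arguments now raise `ValueError`, missing or broken providers raise `RuntimeError`, and `create_async` rejects `stream=True` up front. A hedged usage sketch (model, prompt, and proxy are placeholders):

```python
import asyncio
import g4f

async def main() -> None:
    try:
        answer = await g4f.ChatCompletion.create_async(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Ping"}],
            proxy="http://127.0.0.1:8080",  # hypothetical proxy, forwarded to the provider
        )
        print(answer)
    except ValueError as error:    # unknown model, missing auth, or stream requested
        print(f"bad request: {error}")
    except RuntimeError as error:  # no working provider for the model
        print(f"provider problem: {error}")

asyncio.run(main())
```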