Add AiAsk, Chatgpt4Online, ChatgptDemo

and ChatgptX providers
Fix Bing, Liaobots and ChatgptAi providers
Add "gpt_35_long" model and custom timeout
This commit is contained in:
Heiner Lohaus 2023-10-05 05:13:37 +02:00
parent 0bd5730bcd
commit 88d2cbff09
34 changed files with 717 additions and 177 deletions

View file

@ -4,7 +4,7 @@ import json
from ..typing import AsyncGenerator from ..typing import AsyncGenerator
from ..requests import StreamSession from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider, format_prompt from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
class AItianhu(AsyncGeneratorProvider): class AItianhu(AsyncGeneratorProvider):
@ -18,8 +18,12 @@ class AItianhu(AsyncGeneratorProvider):
model: str, model: str,
messages: list[dict[str, str]], messages: list[dict[str, str]],
proxy: str = None, proxy: str = None,
cookies: dict = None,
timeout: int = 30,
**kwargs **kwargs
) -> AsyncGenerator: ) -> AsyncGenerator:
if not cookies:
cookies = get_cookies("www.aitianhu.com")
data = { data = {
"prompt": format_prompt(messages), "prompt": format_prompt(messages),
"options": {}, "options": {},
@ -34,12 +38,19 @@ class AItianhu(AsyncGeneratorProvider):
"Origin": cls.url, "Origin": cls.url,
"Referer": f"{cls.url}/" "Referer": f"{cls.url}/"
} }
async with StreamSession(headers=headers, proxies={"https": proxy}, impersonate="chrome107", verify=False) as session: async with StreamSession(
headers=headers,
cookies=cookies,
timeout=timeout,
proxies={"https": proxy},
impersonate="chrome107",
verify=False
) as session:
async with session.post(f"{cls.url}/api/chat-process", json=data) as response: async with session.post(f"{cls.url}/api/chat-process", json=data) as response:
response.raise_for_status() response.raise_for_status()
async for line in response.iter_lines(): async for line in response.iter_lines():
if line == b"<script>": if line == b"<script>":
raise RuntimeError("Solve Challenge") raise RuntimeError("Solve challenge and pass cookies")
if b"platform's risk control" in line: if b"platform's risk control" in line:
raise RuntimeError("Platform's Risk Control") raise RuntimeError("Platform's Risk Control")
line = json.loads(line) line = json.loads(line)

View file

@ -4,11 +4,11 @@ import random, json
from ..typing import AsyncGenerator from ..typing import AsyncGenerator
from ..requests import StreamSession from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider, format_prompt from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
domains = { domains = {
"gpt-3.5-turbo": ".aitianhu.space", "gpt-3.5-turbo": "aitianhu.space",
"gpt-4": ".aitianhu.website", "gpt-4": "aitianhu.website",
} }
class AItianhuSpace(AsyncGeneratorProvider): class AItianhuSpace(AsyncGeneratorProvider):
@ -21,20 +21,31 @@ class AItianhuSpace(AsyncGeneratorProvider):
cls, cls,
model: str, model: str,
messages: list[dict[str, str]], messages: list[dict[str, str]],
stream: bool = True, proxy: str = None,
domain: str = None,
cookies: dict = None,
timeout: int = 30,
**kwargs **kwargs
) -> AsyncGenerator: ) -> AsyncGenerator:
if not model: if not model:
model = "gpt-3.5-turbo" model = "gpt-3.5-turbo"
elif not model in domains: elif not model in domains:
raise ValueError(f"Model are not supported: {model}") raise ValueError(f"Model are not supported: {model}")
if not domain:
chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
rand = ''.join(random.choice(chars) for _ in range(6))
domain = f"{rand}.{domains[model]}"
if not cookies:
cookies = get_cookies(domain)
chars = 'abcdefghijklmnopqrstuvwxyz0123456789' url = f'https://{domain}'
rand = ''.join(random.choice(chars) for _ in range(6)) async with StreamSession(
domain = domains[model] proxies={"https": proxy},
url = f'https://{rand}{domain}' cookies=cookies,
timeout=timeout,
async with StreamSession(impersonate="chrome110", verify=False) as session: impersonate="chrome110",
verify=False
) as session:
data = { data = {
"prompt": format_prompt(messages), "prompt": format_prompt(messages),
"options": {}, "options": {},
@ -53,7 +64,7 @@ class AItianhuSpace(AsyncGeneratorProvider):
response.raise_for_status() response.raise_for_status()
async for line in response.iter_lines(): async for line in response.iter_lines():
if line == b"<script>": if line == b"<script>":
raise RuntimeError("Solve Challenge") raise RuntimeError("Solve challenge and pass cookies and a fixed domain")
if b"platform's risk control" in line: if b"platform's risk control" in line:
raise RuntimeError("Platform's Risk Control") raise RuntimeError("Platform's Risk Control")
line = json.loads(line) line = json.loads(line)

43
g4f/Provider/AiAsk.py Normal file
View file

@ -0,0 +1,43 @@
from aiohttp import ClientSession
from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider
class AiAsk(AsyncGeneratorProvider):
    """Provider for the e.aiask.me chat endpoint (gpt-3.5-turbo compatible)."""
    url = "https://e.aiask.me"
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        timeout: int = 30,
        **kwargs
    ) -> AsyncGenerator:
        """Stream the chat completion as decoded text chunks.

        Raises:
            RuntimeError: when the service answers with its rate-limit banner.
        """
        # aiohttp's ClientSession expects a ClientTimeout object; passing the
        # bare int through would fail when the timeout is applied at request
        # time ('int' object has no attribute 'total').
        from aiohttp import ClientTimeout
        headers = {
            "accept": "application/json, text/plain, */*",
            "origin": cls.url,
            "referer": f"{cls.url}/chat",
        }
        async with ClientSession(headers=headers, timeout=ClientTimeout(total=timeout)) as session:
            data = {
                "continuous": True,
                "id": "fRMSQtuHl91A4De9cCvKD",
                "list": messages,
                "models": "0",
                "prompt": "",
                "temperature": kwargs.get("temperature", 0.5),
                "title": "",
            }
            buffer = ""
            # Rate-limit banner (Chinese): "Your free quota is not enough for
            # this model; please log in (top right) to continue."
            rate_limit = "您的免费额度不够使用这个模型啦,请点击右上角登录继续使用!"
            async with session.post(f"{cls.url}/v1/chat/gpt/", json=data) as response:
                response.raise_for_status()
                async for chunk in response.content.iter_any():
                    buffer += chunk.decode()
                    # Hold chunks back while they could still be a prefix of
                    # the rate-limit banner; flush as soon as they diverge.
                    if not rate_limit.startswith(buffer):
                        yield buffer
                        buffer = ""
                    elif buffer == rate_limit:
                        raise RuntimeError("Rate limit reached")

View file

@ -18,9 +18,10 @@ class Aibn(AsyncGeneratorProvider):
cls, cls,
model: str, model: str,
messages: list[dict[str, str]], messages: list[dict[str, str]],
timeout: int = 30,
**kwargs **kwargs
) -> AsyncGenerator: ) -> AsyncGenerator:
async with StreamSession(impersonate="chrome107") as session: async with StreamSession(impersonate="chrome107", timeout=timeout) as session:
timestamp = int(time.time()) timestamp = int(time.time())
data = { data = {
"messages": messages, "messages": messages,

View file

@ -15,6 +15,7 @@ class Aichat(AsyncProvider):
model: str, model: str,
messages: list[dict[str, str]], messages: list[dict[str, str]],
proxy: str = None, proxy: str = None,
timeout: int = 30,
**kwargs **kwargs
) -> str: ) -> str:
headers = { headers = {
@ -33,7 +34,7 @@ class Aichat(AsyncProvider):
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36", "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
} }
async with ClientSession( async with ClientSession(
headers=headers headers=headers, timeout=timeout
) as session: ) as session:
json_data = { json_data = {
"message": format_prompt(messages), "message": format_prompt(messages),

View file

@ -29,6 +29,7 @@ class Aivvm(AsyncGeneratorProvider):
model: str, model: str,
messages: list[dict[str, str]], messages: list[dict[str, str]],
stream: bool, stream: bool,
timeout: int = 30,
**kwargs **kwargs
) -> AsyncGenerator: ) -> AsyncGenerator:
if not model: if not model:
@ -43,7 +44,12 @@ class Aivvm(AsyncGeneratorProvider):
"prompt" : kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."), "prompt" : kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."),
"temperature" : kwargs.get("temperature", 0.7) "temperature" : kwargs.get("temperature", 0.7)
} }
async with StreamSession(impersonate="chrome107") as session: headers = {
"Accept": "*/*",
"Origin": cls.url,
"Referer": f"{cls.url}/",
}
async with StreamSession(impersonate="chrome107", headers=headers, timeout=timeout) as session:
async with session.post(f"{cls.url}/api/chat", json=json_data) as response: async with session.post(f"{cls.url}/api/chat", json=json_data) as response:
response.raise_for_status() response.raise_for_status()
async for chunk in response.iter_content(): async for chunk in response.iter_content():

View file

@ -4,6 +4,7 @@ import random
import uuid import uuid
import json import json
import os import os
import uuid
import urllib.parse import urllib.parse
from aiohttp import ClientSession, ClientTimeout from aiohttp import ClientSession, ClientTimeout
from ..typing import AsyncGenerator from ..typing import AsyncGenerator
@ -14,6 +15,15 @@ class Tones():
balanced = "Balanced" balanced = "Balanced"
precise = "Precise" precise = "Precise"
default_cookies = {
'SRCHD' : 'AF=NOFORM',
'PPLState' : '1',
'KievRPSSecAuth': '',
'SUID' : '',
'SRCHUSR' : '',
'SRCHHPGUSR' : '',
}
class Bing(AsyncGeneratorProvider): class Bing(AsyncGeneratorProvider):
url = "https://bing.com/chat" url = "https://bing.com/chat"
working = True working = True
@ -27,7 +37,6 @@ class Bing(AsyncGeneratorProvider):
tone: str = Tones.creative, tone: str = Tones.creative,
**kwargs **kwargs
) -> AsyncGenerator: ) -> AsyncGenerator:
if len(messages) < 2: if len(messages) < 2:
prompt = messages[0]["content"] prompt = messages[0]["content"]
context = None context = None
@ -36,14 +45,7 @@ class Bing(AsyncGeneratorProvider):
context = create_context(messages[:-1]) context = create_context(messages[:-1])
if not cookies or "SRCHD" not in cookies: if not cookies or "SRCHD" not in cookies:
cookies = { cookies = default_cookies
'SRCHD' : 'AF=NOFORM',
'PPLState' : '1',
'KievRPSSecAuth': '',
'SUID' : '',
'SRCHUSR' : '',
'SRCHHPGUSR' : '',
}
return stream_generate(prompt, tone, context, cookies) return stream_generate(prompt, tone, context, cookies)
def create_context(messages: list[dict[str, str]]): def create_context(messages: list[dict[str, str]]):
@ -58,51 +60,18 @@ class Conversation():
self.conversationSignature = conversationSignature self.conversationSignature = conversationSignature
async def create_conversation(session: ClientSession) -> Conversation: async def create_conversation(session: ClientSession) -> Conversation:
url = 'https://www.bing.com/turing/conversation/create?bundleVersion=1.1055.6' url = 'https://www.bing.com/turing/conversation/create'
headers = { async with await session.get(url) as response:
'authority': 'www.bing.com', data = await response.json()
'accept': 'application/json', conversationId = data.get('conversationId')
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', clientId = data.get('clientId')
'cache-control': 'no-cache', conversationSignature = response.headers.get('X-Sydney-Encryptedconversationsignature')
'pragma': 'no-cache',
'referer': 'https://www.bing.com/search?q=Bing+AI&showconv=1', if not conversationId or not clientId or not conversationSignature:
'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
'sec-ch-ua-arch': '"arm"',
'sec-ch-ua-bitness': '"64"',
'sec-ch-ua-full-version': '"117.0.5938.132"',
'sec-ch-ua-full-version-list': '"Google Chrome";v="117.0.5938.132", "Not;A=Brand";v="8.0.0.0", "Chromium";v="117.0.5938.132"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-model': '""',
'sec-ch-ua-platform': '"macOS"',
'sec-ch-ua-platform-version': '"14.0.0"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
'x-ms-client-request-id': str(uuid.uuid4()),
'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.12.0 OS/macOS',
}
async with await session.get(url, headers=headers) as response:
conversationSignature = response.headers.get('X-Sydney-EncryptedConversationSignature', '')
response = await response.json()
conversationId = response.get('conversationId')
clientId = response.get('clientId')
if not conversationId or not clientId:
raise Exception('Failed to create conversation.') raise Exception('Failed to create conversation.')
return Conversation(conversationId, clientId, conversationSignature) return Conversation(conversationId, clientId, conversationSignature)
async def retry_conversation(session: ClientSession) -> Conversation:
for _ in range(5):
try:
return await create_conversation(session)
except:
session.cookie_jar.clear()
return await create_conversation(session)
async def list_conversations(session: ClientSession) -> list: async def list_conversations(session: ClientSession) -> list:
url = "https://www.bing.com/turing/conversation/chats" url = "https://www.bing.com/turing/conversation/chats"
async with session.get(url) as response: async with session.get(url) as response:
@ -223,30 +192,34 @@ def format_message(msg: dict) -> str:
return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter
def create_message(conversation: Conversation, prompt: str, tone: str, context: str=None) -> str: def create_message(conversation: Conversation, prompt: str, tone: str, context: str=None) -> str:
request_id = str(uuid.uuid4())
struct = { struct = {
'arguments': [ 'arguments': [
{ {
'optionsSets': Defaults.optionsSets,
'source': 'cib', 'source': 'cib',
'optionsSets': Defaults.optionsSets,
'allowedMessageTypes': Defaults.allowedMessageTypes, 'allowedMessageTypes': Defaults.allowedMessageTypes,
'sliceIds': Defaults.sliceIds, 'sliceIds': Defaults.sliceIds,
'traceId': os.urandom(16).hex(), 'traceId': os.urandom(16).hex(),
'isStartOfSession': True, 'isStartOfSession': True,
'requestId': request_id,
'message': Defaults.location | { 'message': Defaults.location | {
'author': 'user', 'author': 'user',
'inputMethod': 'Keyboard', 'inputMethod': 'Keyboard',
'text': prompt, 'text': prompt,
'messageType': 'Chat' 'messageType': 'Chat',
'requestId': request_id,
'messageId': request_id,
}, },
'tone': tone, 'tone': tone,
'conversationSignature': conversation.conversationSignature, 'spokenTextMode': 'None',
'conversationId': conversation.conversationId,
'participant': { 'participant': {
'id': conversation.clientId 'id': conversation.clientId
}, },
'conversationId': conversation.conversationId
} }
], ],
'invocationId': '0', 'invocationId': '1',
'target': 'chat', 'target': 'chat',
'type': 4 'type': 4
} }
@ -272,16 +245,16 @@ async def stream_generate(
cookies=cookies, cookies=cookies,
headers=Defaults.headers, headers=Defaults.headers,
) as session: ) as session:
conversation = await retry_conversation(session) conversation = await create_conversation(session)
try: try:
async with session.ws_connect( async with session.ws_connect(
f'wss://sydney.bing.com/sydney/ChatHub?sec_access_token={urllib.parse.quote_plus(conversation.conversationSignature)}', f'wss://sydney.bing.com/sydney/ChatHub?sec_access_token={urllib.parse.quote_plus(conversation.conversationSignature)}',
autoping=False, autoping=False,
params={'sec_access_token': conversation.conversationSignature}
) as wss: ) as wss:
await wss.send_str(format_message({'protocol': 'json', 'version': 1})) await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
msg = await wss.receive(timeout=900) await wss.receive(timeout=900)
await wss.send_str(create_message(conversation, prompt, tone, context)) await wss.send_str(create_message(conversation, prompt, tone, context))
response_txt = '' response_txt = ''

View file

@ -17,9 +17,10 @@ class ChatForAi(AsyncGeneratorProvider):
cls, cls,
model: str, model: str,
messages: list[dict[str, str]], messages: list[dict[str, str]],
timeout: int = 30,
**kwargs **kwargs
) -> AsyncGenerator: ) -> AsyncGenerator:
async with StreamSession(impersonate="chrome107") as session: async with StreamSession(impersonate="chrome107", timeout=timeout) as session:
conversation_id = f"id_{int(time.time())}" conversation_id = f"id_{int(time.time())}"
prompt = messages[-1]["content"] prompt = messages[-1]["content"]
timestamp = int(time.time()) timestamp = int(time.time())

View file

@ -0,0 +1,39 @@
from __future__ import annotations
import json
from aiohttp import ClientSession
from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider
class Chatgpt4Online(AsyncGeneratorProvider):
    """Provider backed by the chatgpt4online.org WordPress AI-chat plugin."""
    url = "https://chatgpt4online.org"
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        """Yield live text fragments from the mwai-ui chat submit endpoint."""
        payload = {
            "botId": "default",
            "customId": None,
            "session": "N/A",
            "chatId": "",
            "contextId": 58,
            "messages": messages,
            "newMessage": messages[-1]["content"],
            "stream": True
        }
        endpoint = cls.url + "/wp-json/mwai-ui/v1/chats/submit"
        async with ClientSession() as session:
            async with session.post(endpoint, json=payload) as response:
                response.raise_for_status()
                # The endpoint emits server-sent events; only "live" events
                # carry completion text.
                async for raw in response.content:
                    if not raw.startswith(b"data: "):
                        continue
                    event = json.loads(raw[6:])
                    if event["type"] == "live":
                        yield event["data"]

View file

@ -1,28 +1,28 @@
from __future__ import annotations from __future__ import annotations
import re import re
import html
import json
from aiohttp import ClientSession from aiohttp import ClientSession
from ..typing import AsyncGenerator from .base_provider import AsyncProvider, format_prompt
from .base_provider import AsyncGeneratorProvider
class ChatgptAi(AsyncGeneratorProvider): class ChatgptAi(AsyncProvider):
url: str = "https://chatgpt.ai/" url: str = "https://chatgpt.ai/"
working = True working = True
supports_gpt_35_turbo = True supports_gpt_35_turbo = True
_system_data = None _nonce = None
_post_id = None
_bot_id = None
@classmethod @classmethod
async def create_async_generator( async def create_async(
cls, cls,
model: str, model: str,
messages: list[dict[str, str]], messages: list[dict[str, str]],
proxy: str = None, proxy: str = None,
timeout: int = 30,
**kwargs **kwargs
) -> AsyncGenerator: ) -> str:
headers = { headers = {
"authority" : "chatgpt.ai", "authority" : "chatgpt.ai",
"accept" : "*/*", "accept" : "*/*",
@ -40,36 +40,36 @@ class ChatgptAi(AsyncGeneratorProvider):
"user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36", "user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
} }
async with ClientSession( async with ClientSession(
headers=headers headers=headers, timeout=timeout
) as session: ) as session:
if not cls._system_data: if not cls._nonce:
async with session.get(cls.url, proxy=proxy) as response: async with session.get(cls.url, proxy=proxy) as response:
response.raise_for_status() response.raise_for_status()
match = re.findall(r"data-system='([^']+)'", await response.text()) text = await response.text()
if not match: result = re.search(r'data-nonce="(.*?)"', text)
raise RuntimeError("No system data") if result:
cls._system_data = json.loads(html.unescape(match[0])) cls._nonce = result.group(1)
result = re.search(r'data-post-id="(.*?)"', text)
if result:
cls._post_id = result.group(1)
result = re.search(r'data-bot-id="(.*?)"', text)
if result:
cls._bot_id = result.group(1)
if not cls._nonce or not cls._post_id or not cls._bot_id:
raise RuntimeError("Nonce, post-id or bot-id not found")
data = { data = {
"botId": cls._system_data["botId"], "_wpnonce": cls._nonce,
"clientId": "", "post_id": cls._post_id,
"contextId": cls._system_data["contextId"], "url": "https://chatgpt.ai",
"id": cls._system_data["id"], "action": "wpaicg_chat_shortcode_message",
"messages": messages[:-1], "message": format_prompt(messages),
"newMessage": messages[-1]["content"], "bot_id": cls._bot_id
"session": cls._system_data["sessionId"],
"stream": True
} }
async with session.post( async with session.post(
"https://chatgpt.ai/wp-json/mwai-ui/v1/chats/submit", "https://chatgpt.ai/wp-admin/admin-ajax.php",
proxy=proxy, proxy=proxy,
json=data data=data
) as response: ) as response:
response.raise_for_status() response.raise_for_status()
start = "data: " return (await response.json())["data"]
async for line in response.content:
line = line.decode('utf-8')
if line.startswith(start):
line = json.loads(line[len(start):-1])
if line["type"] == "live":
yield line["data"]

View file

@ -0,0 +1,62 @@
from __future__ import annotations
import time, json, re
from aiohttp import ClientSession
from typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt
class ChatgptDemo(AsyncGeneratorProvider):
    """Provider for chat.chatgptdemo.net's streaming chat API."""
    url = "https://chat.chatgptdemo.net"
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        timeout: int = 30,
        **kwargs
    ) -> AsyncGenerator:
        """Scrape a user id from the landing page, open a chat, then stream
        the completion as content deltas.

        Raises:
            RuntimeError: when no user id can be scraped or the new-chat
                endpoint returns no chat id.
        """
        # aiohttp's ClientSession expects a ClientTimeout object; a bare int
        # would break when the timeout is applied at request time.
        from aiohttp import ClientTimeout
        headers = {
            "authority": "chat.chatgptdemo.net",
            "accept-language": "de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US",
            "origin": "https://chat.chatgptdemo.net",
            "referer": "https://chat.chatgptdemo.net/",
            "sec-ch-ua": '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Linux"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
        }
        async with ClientSession(headers=headers, timeout=ClientTimeout(total=timeout)) as session:
            async with session.get(f"{cls.url}/", proxy=proxy) as response:
                response.raise_for_status()
                # Don't rebind `response` to the body text: keep the names apart.
                text = await response.text()
            result = re.search(r'<div id="USERID" style="display: none">(.*?)<\/div>', text)
            if not result:
                raise RuntimeError("No user id found")
            user_id = result.group(1)
            async with session.post(f"{cls.url}/new_chat", json={"user_id": user_id}, proxy=proxy) as response:
                response.raise_for_status()
                chat_id = (await response.json())["id_"]
            if not chat_id:
                raise RuntimeError("Could not create new chat")
            data = {
                "question": format_prompt(messages),
                "chat_id": chat_id,
                "timestamp": int(time.time()*1000),
            }
            async with session.post(f"{cls.url}/chat_api_stream", json=data, proxy=proxy) as response:
                response.raise_for_status()
                # SSE stream: each data line is a JSON chat-completion chunk;
                # [6:-1] strips the "data: " prefix and the trailing newline.
                async for line in response.content:
                    if line.startswith(b"data: "):
                        line = json.loads(line[6:-1])
                        chunk = line["choices"][0]["delta"].get("content")
                        if chunk:
                            yield chunk

View file

@ -14,9 +14,15 @@ class ChatgptDuo(AsyncProvider):
cls, cls,
model: str, model: str,
messages: list[dict[str, str]], messages: list[dict[str, str]],
proxy: str = None,
timeout: int = 30,
**kwargs **kwargs
) -> str: ) -> str:
async with AsyncSession(impersonate="chrome107") as session: async with AsyncSession(
impersonate="chrome107",
proxies={"https": proxy},
timeout=timeout
) as session:
prompt = format_prompt(messages), prompt = format_prompt(messages),
data = { data = {
"prompt": prompt, "prompt": prompt,

66
g4f/Provider/ChatgptX.py Normal file
View file

@ -0,0 +1,66 @@
import re
from aiohttp import ClientSession
from .base_provider import AsyncProvider
from .helper import format_prompt
class ChatgptX(AsyncProvider):
    """Provider for chatgptx.de (CSRF-protected /sendchat endpoint)."""
    url = "https://chatgptx.de"
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> str:
        """Scrape the CSRF token, chat id and user id from the landing page,
        then post the prompt and return the reply text.

        Raises:
            RuntimeError: when any of the three page tokens cannot be found,
                or when the API answers with an error payload.
        """
        headers = {
            'accept-language': 'de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US',
            'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': 'Linux',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
        }
        async with ClientSession(headers=headers) as session:
            async with session.get(f"{cls.url}/") as response:
                response.raise_for_status()
                text = await response.text()
            # Initialize up front: if any regex below misses, the guard must
            # raise RuntimeError, not trip over an unbound local (NameError).
            csrf_token = chat_id = user_id = None
            result = re.search(r'<meta name="csrf-token" content="(.*?)"', text)
            if result:
                csrf_token = result.group(1)
            result = re.search(r"openconversions\('(.*?)'\)", text)
            if result:
                chat_id = result.group(1)
            result = re.search(r'<input type="hidden" id="user_id" value="(.*?)"', text)
            if result:
                user_id = result.group(1)
            if not csrf_token or not chat_id or not user_id:
                raise RuntimeError("Missing csrf_token, chat_id or user_id")
            data = {
                '_token': csrf_token,
                'user_id': user_id,
                'chats_id': chat_id,
                'prompt': format_prompt(messages),
                'current_model': "gpt3"
            }
            headers = {
                'authority': 'chatgptx.de',
                'accept': 'application/json, text/javascript, */*; q=0.01',
                'origin': cls.url,
                'referer': f'{cls.url}/',
                'x-csrf-token': csrf_token,
                'x-requested-with': 'XMLHttpRequest'
            }
            async with session.post(cls.url + '/sendchat', data=data, headers=headers) as response:
                response.raise_for_status()
                data = await response.json()
                if "message" in data:
                    return data["message"]
                elif "messages" in data:
                    raise RuntimeError(f'Response: {data["messages"]}')

View file

@ -2,6 +2,8 @@ from __future__ import annotations
import json import json
import js2py import js2py
import random
import hashlib
from aiohttp import ClientSession from aiohttp import ClientSession
from ..typing import AsyncGenerator from ..typing import AsyncGenerator
@ -64,3 +66,12 @@ f = function () {
async for stream in response.content.iter_any(): async for stream in response.content.iter_any():
if stream: if stream:
yield stream.decode() yield stream.decode()
def get_api_key(user_agent: str) -> str:
    """Build a DeepAI "tryit" API key for the given user agent.

    Returns a string "tryit-<nonce>-<digest>" where <nonce> is a random
    integer and <digest> is a triple-nested, reversed MD5 of the user agent
    mixed with the nonce (mirrors the site's client-side JavaScript).
    """
    nonce = str(round(1E11 * random.random()))

    # Named _md5_rev so we don't shadow the builtin hash().
    def _md5_rev(data: str) -> str:
        # Reversed hex digest, exactly as the site's JS computes it.
        return hashlib.md5(data.encode()).hexdigest()[::-1]

    return f"tryit-{nonce}-" + _md5_rev(
        user_agent + _md5_rev(user_agent + _md5_rev(user_agent + nonce + "x"))
    )

View file

@ -21,9 +21,10 @@ class FreeGpt(AsyncGeneratorProvider):
cls, cls,
model: str, model: str,
messages: list[dict[str, str]], messages: list[dict[str, str]],
timeout: int = 30,
**kwargs **kwargs
) -> AsyncGenerator: ) -> AsyncGenerator:
async with StreamSession(impersonate="chrome107") as session: async with StreamSession(impersonate="chrome107", timeout=timeout) as session:
prompt = messages[-1]["content"] prompt = messages[-1]["content"]
timestamp = int(time.time()) timestamp = int(time.time())
data = { data = {

View file

@ -18,6 +18,7 @@ class GptGo(AsyncGeneratorProvider):
model: str, model: str,
messages: list[dict[str, str]], messages: list[dict[str, str]],
proxy: str = None, proxy: str = None,
timeout: int = 30,
**kwargs **kwargs
) -> AsyncGenerator: ) -> AsyncGenerator:
headers = { headers = {
@ -31,7 +32,7 @@ class GptGo(AsyncGeneratorProvider):
"Sec-Fetch-Site" : "same-origin", "Sec-Fetch-Site" : "same-origin",
} }
async with ClientSession( async with ClientSession(
headers=headers headers=headers, timeout=timeout
) as session: ) as session:
async with session.get( async with session.get(
"https://gptgo.ai/action_get_token.php", "https://gptgo.ai/action_get_token.php",

View file

@ -30,8 +30,8 @@ models = {
} }
class Liaobots(AsyncGeneratorProvider): class Liaobots(AsyncGeneratorProvider):
url = "https://liaobots.com" url = "https://liaobots.site"
working = False working = True
supports_gpt_35_turbo = True supports_gpt_35_turbo = True
supports_gpt_4 = True supports_gpt_4 = True
_auth_code = None _auth_code = None
@ -43,6 +43,7 @@ class Liaobots(AsyncGeneratorProvider):
messages: list[dict[str, str]], messages: list[dict[str, str]],
auth: str = None, auth: str = None,
proxy: str = None, proxy: str = None,
timeout: int = 30,
**kwargs **kwargs
) -> AsyncGenerator: ) -> AsyncGenerator:
model = model if model in models else "gpt-3.5-turbo" model = model if model in models else "gpt-3.5-turbo"
@ -54,13 +55,25 @@ class Liaobots(AsyncGeneratorProvider):
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36", "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
} }
async with ClientSession( async with ClientSession(
headers=headers headers=headers, timeout=timeout
) as session: ) as session:
auth_code = auth if isinstance(auth, str) else cls._auth_code cls._auth_code = auth if isinstance(auth, str) else cls._auth_code
if not auth_code: if not cls._auth_code:
async with session.post(cls.url + "/api/user", proxy=proxy, json={"authcode": ""}) as response: async with session.post(
"https://liaobots.work/recaptcha/api/login",
proxy=proxy,
data={"token": "abcdefghijklmnopqrst"},
verify_ssl=False
) as response:
response.raise_for_status() response.raise_for_status()
auth_code = cls._auth_code = json.loads(await response.text())["authCode"] async with session.post(
"https://liaobots.work/api/user",
proxy=proxy,
json={"authcode": ""},
verify_ssl=False
) as response:
response.raise_for_status()
cls._auth_code = (await response.json(content_type=None))["authCode"]
data = { data = {
"conversationId": str(uuid.uuid4()), "conversationId": str(uuid.uuid4()),
"model": models[model], "model": models[model],
@ -68,7 +81,13 @@ class Liaobots(AsyncGeneratorProvider):
"key": "", "key": "",
"prompt": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.", "prompt": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
} }
async with session.post(cls.url + "/api/chat", proxy=proxy, json=data, headers={"x-auth-code": auth_code}) as response: async with session.post(
"https://liaobots.work/api/chat",
proxy=proxy,
json=data,
headers={"x-auth-code": cls._auth_code},
verify_ssl=False
) as response:
response.raise_for_status() response.raise_for_status()
async for stream in response.content.iter_any(): async for stream in response.content.iter_any():
if stream: if stream:

View file

@ -28,6 +28,7 @@ class Myshell(AsyncGeneratorProvider):
cls, cls,
model: str, model: str,
messages: list[dict[str, str]], messages: list[dict[str, str]],
timeout: int = 90,
**kwargs **kwargs
) -> AsyncGenerator: ) -> AsyncGenerator:
if not model: if not model:
@ -46,7 +47,7 @@ class Myshell(AsyncGeneratorProvider):
async with session.ws_connect( async with session.ws_connect(
"wss://api.myshell.ai/ws/?EIO=4&transport=websocket", "wss://api.myshell.ai/ws/?EIO=4&transport=websocket",
autoping=False, autoping=False,
timeout=90 timeout=timeout
) as wss: ) as wss:
# Send and receive hello message # Send and receive hello message
await wss.receive_str() await wss.receive_str()

View file

@ -18,6 +18,7 @@ class Vitalentum(AsyncGeneratorProvider):
model: str, model: str,
messages: list[dict[str, str]], messages: list[dict[str, str]],
proxy: str = None, proxy: str = None,
timeout: int = 30,
**kwargs **kwargs
) -> AsyncGenerator: ) -> AsyncGenerator:
headers = { headers = {
@ -40,7 +41,7 @@ class Vitalentum(AsyncGeneratorProvider):
**kwargs **kwargs
} }
async with ClientSession( async with ClientSession(
headers=headers headers=headers, timeout=timeout
) as session: ) as session:
async with session.post(cls.url + "/api/converse-edge", json=data, proxy=proxy) as response: async with session.post(cls.url + "/api/converse-edge", json=data, proxy=proxy) as response:
response.raise_for_status() response.raise_for_status()

View file

@ -19,6 +19,7 @@ class Ylokh(AsyncGeneratorProvider):
messages: list[dict[str, str]], messages: list[dict[str, str]],
stream: bool = True, stream: bool = True,
proxy: str = None, proxy: str = None,
timeout: int = 30,
**kwargs **kwargs
) -> AsyncGenerator: ) -> AsyncGenerator:
model = model if model else "gpt-3.5-turbo" model = model if model else "gpt-3.5-turbo"
@ -39,7 +40,8 @@ class Ylokh(AsyncGeneratorProvider):
} }
async with StreamSession( async with StreamSession(
headers=headers, headers=headers,
proxies={"https": proxy} proxies={"https": proxy},
timeout=timeout
) as session: ) as session:
async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data) as response: async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data) as response:
response.raise_for_status() response.raise_for_status()

View file

@ -2,8 +2,7 @@ from __future__ import annotations
import json import json
from curl_cffi.requests import AsyncSession from ..requests import StreamSession
from ..typing import AsyncGenerator from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, format_prompt from .base_provider import AsyncGeneratorProvider, format_prompt
@ -12,7 +11,6 @@ class You(AsyncGeneratorProvider):
url = "https://you.com" url = "https://you.com"
working = True working = True
supports_gpt_35_turbo = True supports_gpt_35_turbo = True
supports_stream = False
@classmethod @classmethod
@ -21,20 +19,21 @@ class You(AsyncGeneratorProvider):
model: str, model: str,
messages: list[dict[str, str]], messages: list[dict[str, str]],
proxy: str = None, proxy: str = None,
timeout: int = 30,
**kwargs, **kwargs,
) -> AsyncGenerator: ) -> AsyncGenerator:
async with AsyncSession(proxies={"https": proxy}, impersonate="chrome107") as session: async with StreamSession(proxies={"https": proxy}, impersonate="chrome107", timeout=timeout) as session:
headers = { headers = {
"Accept": "text/event-stream", "Accept": "text/event-stream",
"Referer": "https://you.com/search?fromSearchBar=true&tbm=youchat", "Referer": "https://you.com/search?fromSearchBar=true&tbm=youchat",
} }
response = await session.get( async with session.get(
"https://you.com/api/streamingSearch", "https://you.com/api/streamingSearch",
params={"q": format_prompt(messages), "domain": "youchat", "chat": ""}, params={"q": format_prompt(messages), "domain": "youchat", "chat": ""},
headers=headers headers=headers
) ) as response:
response.raise_for_status() response.raise_for_status()
start = 'data: {"youChatToken": ' start = b'data: {"youChatToken": '
for line in response.text.splitlines(): async for line in response.iter_lines():
if line.startswith(start): if line.startswith(start):
yield json.loads(line[len(start): -1]) yield json.loads(line[len(start):-1])

View file

@ -16,10 +16,11 @@ class Yqcloud(AsyncGeneratorProvider):
model: str, model: str,
messages: list[dict[str, str]], messages: list[dict[str, str]],
proxy: str = None, proxy: str = None,
timeout: int = 30,
**kwargs, **kwargs,
) -> AsyncGenerator: ) -> AsyncGenerator:
async with ClientSession( async with ClientSession(
headers=_create_header() headers=_create_header(), timeout=timeout
) as session: ) as session:
payload = _create_payload(messages) payload = _create_payload(messages)
async with session.post("https://api.aichatos.cloud/api/generateStream", proxy=proxy, json=payload) as response: async with session.post("https://api.aichatos.cloud/api/generateStream", proxy=proxy, json=payload) as response:

View file

@ -1,34 +1,39 @@
from __future__ import annotations from __future__ import annotations
from .Acytoo import Acytoo from .Acytoo import Acytoo
from .Aibn import Aibn from .AiAsk import AiAsk
from .Aichat import Aichat from .Aibn import Aibn
from .Ails import Ails from .Aichat import Aichat
from .AItianhu import AItianhu from .Ails import Ails
from .AItianhuSpace import AItianhuSpace from .AItianhu import AItianhu
from .Aivvm import Aivvm from .AItianhuSpace import AItianhuSpace
from .Bing import Bing from .Aivvm import Aivvm
from .ChatBase import ChatBase from .Bing import Bing
from .ChatForAi import ChatForAi from .ChatBase import ChatBase
from .ChatgptAi import ChatgptAi from .ChatForAi import ChatForAi
from .ChatgptDuo import ChatgptDuo from .Chatgpt4Online import Chatgpt4Online
from .ChatgptLogin import ChatgptLogin from .ChatgptAi import ChatgptAi
from .DeepAi import DeepAi from .ChatgptDemo import ChatgptDemo
from .FreeGpt import FreeGpt from .ChatgptDuo import ChatgptDuo
from .GptGo import GptGo from .ChatgptLogin import ChatgptLogin
from .H2o import H2o from .ChatgptX import ChatgptX
from .Liaobots import Liaobots from .DeepAi import DeepAi
from .Myshell import Myshell from .FreeGpt import FreeGpt
from .Phind import Phind from .GptGo import GptGo
from .Vercel import Vercel from .H2o import H2o
from .Vitalentum import Vitalentum from .Liaobots import Liaobots
from .Ylokh import Ylokh from .Myshell import Myshell
from .You import You from .Phind import Phind
from .Yqcloud import Yqcloud from .Vercel import Vercel
from .Vitalentum import Vitalentum
from .Ylokh import Ylokh
from .You import You
from .Yqcloud import Yqcloud
from .base_provider import BaseProvider, AsyncProvider, AsyncGeneratorProvider from .base_provider import BaseProvider, AsyncProvider, AsyncGeneratorProvider
from .retry_provider import RetryProvider from .retry_provider import RetryProvider
from .deprecated import * from .deprecated import *
from .needs_auth import * from .needs_auth import *
from .unfinished import *
__all__ = [ __all__ = [
'BaseProvider', 'BaseProvider',
@ -36,6 +41,7 @@ __all__ = [
'AsyncGeneratorProvider', 'AsyncGeneratorProvider',
'RetryProvider', 'RetryProvider',
'Acytoo', 'Acytoo',
'AiAsk',
'Aibn', 'Aibn',
'Aichat', 'Aichat',
'Ails', 'Ails',
@ -47,9 +53,12 @@ __all__ = [
'Bing', 'Bing',
'ChatBase', 'ChatBase',
'ChatForAi', 'ChatForAi',
'Chatgpt4Online',
'ChatgptAi', 'ChatgptAi',
'ChatgptDemo',
'ChatgptDuo', 'ChatgptDuo',
'ChatgptLogin', 'ChatgptLogin',
'ChatgptX',
'CodeLinkAva', 'CodeLinkAva',
'DeepAi', 'DeepAi',
'DfeHub', 'DfeHub',
@ -80,4 +89,4 @@ __all__ = [
'FastGpt', 'FastGpt',
'Wuguokai', 'Wuguokai',
'V50' 'V50'
] ]

View file

@ -6,7 +6,6 @@ from .Forefront import Forefront
from .GetGpt import GetGpt from .GetGpt import GetGpt
from .Opchatgpts import Opchatgpts from .Opchatgpts import Opchatgpts
from .Lockchat import Lockchat from .Lockchat import Lockchat
from .PerplexityAi import PerplexityAi
from .Wewordle import Wewordle from .Wewordle import Wewordle
from .Equing import Equing from .Equing import Equing
from .Wuguokai import Wuguokai from .Wuguokai import Wuguokai

View file

@ -22,6 +22,7 @@ class OpenaiChat(AsyncGeneratorProvider):
proxy: str = None, proxy: str = None,
access_token: str = None, access_token: str = None,
cookies: dict = None, cookies: dict = None,
timeout: int = 30,
**kwargs: dict **kwargs: dict
) -> AsyncGenerator: ) -> AsyncGenerator:
proxies = {"https": proxy} proxies = {"https": proxy}
@ -31,7 +32,7 @@ class OpenaiChat(AsyncGeneratorProvider):
"Accept": "text/event-stream", "Accept": "text/event-stream",
"Authorization": f"Bearer {access_token}", "Authorization": f"Bearer {access_token}",
} }
async with StreamSession(proxies=proxies, headers=headers, impersonate="chrome107") as session: async with StreamSession(proxies=proxies, headers=headers, impersonate="chrome107", timeout=timeout) as session:
messages = [ messages = [
{ {
"id": str(uuid.uuid4()), "id": str(uuid.uuid4()),

View file

@ -4,6 +4,7 @@ import random
from ..typing import CreateResult from ..typing import CreateResult
from .base_provider import BaseProvider, AsyncProvider from .base_provider import BaseProvider, AsyncProvider
from ..debug import logging
class RetryProvider(AsyncProvider): class RetryProvider(AsyncProvider):
@ -41,6 +42,8 @@ class RetryProvider(AsyncProvider):
started = False started = False
for provider in providers: for provider in providers:
try: try:
if logging:
print(f"Using {provider.__name__} provider")
for token in provider.create_completion(model, messages, stream, **kwargs): for token in provider.create_completion(model, messages, stream, **kwargs):
yield token yield token
started = True started = True
@ -48,6 +51,8 @@ class RetryProvider(AsyncProvider):
return return
except Exception as e: except Exception as e:
self.exceptions[provider.__name__] = e self.exceptions[provider.__name__] = e
if logging:
print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
if started: if started:
break break
@ -59,7 +64,7 @@ class RetryProvider(AsyncProvider):
messages: list[dict[str, str]], messages: list[dict[str, str]],
**kwargs **kwargs
) -> str: ) -> str:
providers = [provider for provider in self.providers if issubclass(provider, AsyncProvider)] providers = [provider for provider in self.providers]
if self.shuffle: if self.shuffle:
random.shuffle(providers) random.shuffle(providers)
@ -69,6 +74,8 @@ class RetryProvider(AsyncProvider):
return await provider.create_async(model, messages, **kwargs) return await provider.create_async(model, messages, **kwargs)
except Exception as e: except Exception as e:
self.exceptions[provider.__name__] = e self.exceptions[provider.__name__] = e
if logging:
print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
self.raise_exceptions() self.raise_exceptions()

View file

@ -0,0 +1,44 @@
from __future__ import annotations
import json
from ...requests import StreamSession
from ...typing import AsyncGenerator
from ..base_provider import AsyncGeneratorProvider, format_prompt
class Komo(AsyncGeneratorProvider):
    """Provider for komo.ai's streaming search endpoint (Server-Sent Events)."""
    url = "https://komo.ai/api/ask"
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        """Yield response text chunks streamed from komo.ai.

        Args:
            model: Ignored; the endpoint exposes a single model.
            messages: Chat history, flattened into one prompt string.

        Yields:
            Decoded JSON payloads of "data:" lines that directly follow
            an "event: line" marker in the SSE stream.
        """
        async with StreamSession(impersonate="chrome107") as session:
            prompt = format_prompt(messages)
            data = {
                "query": prompt,
                "FLAG_URLEXTRACT": "false",
                "token": "",
                "FLAG_MODELA": "1",
            }
            headers = {
                'authority': 'komo.ai',
                'accept': 'text/event-stream',
                'cache-control': 'no-cache',
                'referer': 'https://komo.ai/',
            }
            async with session.get(cls.url, params=data, headers=headers) as response:
                response.raise_for_status()
                # Only "data:" lines immediately after an "event: line"
                # marker carry payload. Flag renamed from `next`, which
                # shadowed the builtin of the same name.
                expect_data = False
                async for line in response.iter_lines():
                    if line == b"event: line":
                        expect_data = True
                    elif expect_data and line.startswith(b"data: "):
                        yield json.loads(line[6:])
                        expect_data = False

View file

@ -0,0 +1,97 @@
from __future__ import annotations
import random, json
from datetime import datetime
from ...requests import StreamSession
from ...typing import AsyncGenerator
from ..base_provider import AsyncGeneratorProvider
class MikuChat(AsyncGeneratorProvider):
    """Provider for ai.okmiku.com, which proxies an OpenAI-style
    streaming chat-completions API hosted at api.catgpt.cc."""
    url = "https://ai.okmiku.com"
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        """Stream completion chunks for the given chat history.

        Args:
            model: Model name sent to the API; defaults to "gpt-3.5-turbo".
            messages: OpenAI-style message dicts, forwarded verbatim.

        Yields:
            Incremental content strings parsed from SSE "data:" lines.
        """
        if not model:
            model = "gpt-3.5-turbo"
        headers = {
            "authority": "api.catgpt.cc",
            "accept": "application/json",
            "origin": cls.url,
            "referer": f"{cls.url}/chat/",
            # The backend validates these headers; the values mimic the
            # site's own JavaScript client fingerprint.
            'x-app-version': 'undefined',
            'x-date': get_datetime(),
            'x-fingerprint': get_fingerprint(),
            'x-platform': 'web'
        }
        async with StreamSession(headers=headers, impersonate="chrome107") as session:
            data = {
                "model": model,
                "top_p": 0.8,
                "temperature": 0.5,
                "presence_penalty": 1,
                "frequency_penalty": 0,
                "max_tokens": 2000,
                "stream": True,
                "messages": messages,
            }
            async with session.post("https://api.catgpt.cc/ai/v1/chat/completions", json=data) as response:
                # Removed leftover debug `print(await response.text())`:
                # besides leaking responses to stdout, reading the body here
                # consumes the stream before iter_lines() can yield it.
                response.raise_for_status()
                async for line in response.iter_lines():
                    if line.startswith(b"data: "):
                        if line.startswith(b"data: [DONE]"):
                            # End-of-stream sentinel is not JSON; stop cleanly.
                            break
                        payload = json.loads(line[6:])
                        chunk = payload["choices"][0]["delta"].get("content")
                        if chunk:
                            yield chunk
def k(e: str, t: int) -> int:
    """Return a 32-bit hash of string ``e`` using seed ``t``.

    Port of the MurmurHash3-style mixing used by the site's JavaScript
    fingerprinting code. The operations are kept exactly as written so the
    generated fingerprint matches what the server expects.
    """
    # a: number of trailing chars after the last full 4-char block;
    # s: length of the part processed in 4-char blocks.
    a = len(e) & 3
    s = len(e) - a
    i = t  # running hash state (h1 in MurmurHash3 terms)
    c = 3432918353  # 0xcc9e2d51, MurmurHash3 mixing constant c1
    o = 461845907  # 0x1b873593, MurmurHash3 mixing constant c2
    n = 0
    r = 0
    # Main loop: consume four characters at a time as a little-endian
    # 32-bit word. The `(x & 65535) * m + (((x >> 16) * m & 65535) << 16)`
    # pattern emulates JavaScript's 32-bit integer multiply in Python.
    while n < s:
        r = (ord(e[n]) & 255) | ((ord(e[n + 1]) & 255) << 8) | ((ord(e[n + 2]) & 255) << 16) | ((ord(e[n + 3]) & 255) << 24)
        n += 4
        r = (r & 65535) * c + (((r >> 16) * c & 65535) << 16) & 4294967295
        # Rotate left 15 with no 32-bit mask; the stray high bits are
        # discarded by the masked multiply on the next line.
        r = (r << 15) | (r >> 17)
        r = (r & 65535) * o + (((r >> 16) * o & 65535) << 16) & 4294967295
        i ^= r
        i = (i << 13) | (i >> 19)
        l = (i & 65535) * 5 + (((i >> 16) * 5 & 65535) << 16) & 4294967295
        # Equivalent of h1 = h1 * 5 + 0xe6546b64 in 16-bit halves.
        i = (l & 65535) + 27492 + (((l >> 16) + 58964 & 65535) << 16)
    # Tail: fold the remaining 1-3 characters into r.
    # NOTE(review): unlike reference MurmurHash3, r is NOT reset to 0 before
    # the tail, the final mix runs even when a == 0, and the elif chain does
    # not reproduce the fall-through of a JS switch (for a >= 2 only one
    # byte is XORed) — presumably mirroring quirks of the original JS.
    # Do not "fix" without comparing outputs against the site's script.
    if a == 3:
        r ^= (ord(e[n + 2]) & 255) << 16
    elif a == 2:
        r ^= (ord(e[n + 1]) & 255) << 8
    elif a == 1:
        r ^= ord(e[n]) & 255
    r = (r & 65535) * c + (((r >> 16) * c & 65535) << 16) & 4294967295
    r = (r << 15) | (r >> 17)
    r = (r & 65535) * o + (((r >> 16) * o & 65535) << 16) & 4294967295
    i ^= r
    i ^= len(e)
    # Finalization ("fmix"): avalanche the state bits.
    i ^= i >> 16
    i = (i & 65535) * 2246822507 + (((i >> 16) * 2246822507 & 65535) << 16) & 4294967295  # 0x85ebca6b
    i ^= i >> 13
    i = (i & 65535) * 3266489909 + (((i >> 16) * 3266489909 & 65535) << 16) & 4294967295  # 0xc2b2ae35
    i ^= i >> 16
    return i & 0xFFFFFFFF
def get_fingerprint() -> str:
    """Return a random client fingerprint string, hashed the way the
    site's JavaScript does (seed 256)."""
    seed = str(int(random.random() * 100000))
    return str(k(seed, 256))
def get_datetime() -> str:
    """Return the current local time formatted as "YYYY-MM-DD HH:MM:SS"."""
    return f"{datetime.now():%Y-%m-%d %H:%M:%S}"

View file

@ -10,7 +10,6 @@ from ..base_provider import AsyncProvider, format_prompt, get_cookies
class PerplexityAi(AsyncProvider): class PerplexityAi(AsyncProvider):
url = "https://www.perplexity.ai" url = "https://www.perplexity.ai"
working = False
supports_gpt_35_turbo = True supports_gpt_35_turbo = True
_sources = [] _sources = []

View file

@ -0,0 +1,3 @@
from .MikuChat import MikuChat
from .PerplexityAi import PerplexityAi
from .Komo import Komo

View file

@ -1,12 +1,15 @@
from __future__ import annotations from __future__ import annotations
from g4f import models from g4f import models
from .Provider import BaseProvider, AsyncProvider from .Provider import BaseProvider
from .typing import Any, CreateResult, Union from .typing import CreateResult, Union
from .debug import logging
from requests import get from requests import get
logging = False logging = False
version = '0.1.4.8' version = '0.1.4.8'
def check_pypi_version(): def check_pypi_version():
try: try:
response = get(f"https://pypi.org/pypi/g4f/json").json() response = get(f"https://pypi.org/pypi/g4f/json").json()

1
g4f/debug.py Normal file
View file

@ -0,0 +1 @@
logging = False

View file

@ -3,27 +3,28 @@ from dataclasses import dataclass
from .typing import Union from .typing import Union
from .Provider import BaseProvider, RetryProvider from .Provider import BaseProvider, RetryProvider
from .Provider import ( from .Provider import (
AItianhuSpace,
ChatgptLogin, ChatgptLogin,
PerplexityAi, ChatgptDemo,
ChatgptDuo, ChatgptDuo,
Vitalentum,
ChatgptAi, ChatgptAi,
ChatForAi, ChatForAi,
ChatBase, ChatBase,
AItianhu, Liaobots,
Wewordle,
Yqcloud, Yqcloud,
Myshell, Myshell,
FreeGpt, FreeGpt,
Vercel, Vercel,
DeepAi, DeepAi,
Aichat, Aichat,
AiAsk,
Aivvm, Aivvm,
GptGo, GptGo,
Ylokh, Ylokh,
Bard, Bard,
Aibn, Aibn,
Bing, Bing,
You,
H2o, H2o,
) )
@ -33,19 +34,25 @@ class Model:
base_provider: str base_provider: str
best_provider: Union[type[BaseProvider], RetryProvider] = None best_provider: Union[type[BaseProvider], RetryProvider] = None
# Config for HuggingChat, OpenAssistant
# Works for Liaobots, H2o, OpenaiChat, Yqcloud, You
default = Model( default = Model(
name = "", name = "",
base_provider = "", base_provider = "",
best_provider = RetryProvider([ best_provider = RetryProvider([
Bing, # Not fully GPT 3 or 4 Bing, # Not fully GPT 3 or 4
PerplexityAi, # Adds references to sources
Wewordle, # Responds with markdown
Yqcloud, # Answers short questions in chinese Yqcloud, # Answers short questions in chinese
ChatBase, # Don't want to answer creatively ChatBase, # Don't want to answer creatively
ChatgptDuo, # Include search results ChatgptDuo, # Include search results
DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, AItianhuSpace, Aichat, Myshell, Aibn, ChatForAi, FreeGpt, Ylokh, Aibn, Aichat, Aivvm, ChatForAi, ChatgptAi, ChatgptLogin, DeepAi, FreeGpt, GptGo, Myshell, Ylokh,
])
)
# GPT-3.5 too, but all providers supports long responses and a custom timeouts
gpt_35_long = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider = RetryProvider([
AiAsk, Aibn, Aichat, Aivvm, ChatForAi, ChatgptAi, ChatgptDemo, ChatgptDuo,
FreeGpt, GptGo, Liaobots, Myshell, Vitalentum, Ylokh, You, Yqcloud
]) ])
) )
@ -54,7 +61,7 @@ gpt_35_turbo = Model(
name = 'gpt-3.5-turbo', name = 'gpt-3.5-turbo',
base_provider = 'openai', base_provider = 'openai',
best_provider = RetryProvider([ best_provider = RetryProvider([
DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn, ChatForAi, FreeGpt, Ylokh, Aibn, Aichat, Aivvm, ChatForAi, ChatgptAi, ChatgptLogin, DeepAi, FreeGpt, GptGo, Myshell, Ylokh,
]) ])
) )

114
tool/create_provider.py Normal file
View file

@ -0,0 +1,114 @@
import sys, re
from pathlib import Path
from os import path
sys.path.append(str(Path(__file__).parent.parent))
import g4f
def read_code(text):
    """Extract the first fenced code block from *text*.

    Accepts ```python, ```py or bare ``` fences. Returns the code without
    the fences, or None when no fenced block is present.
    """
    fence = re.search(r"```(python|py|)\n(?P<code>[\S\s]+?)\n```", text)
    return fence.group("code") if fence else None
def read_result(result):
    """Split an AI reply into ``(explanation, code)``.

    The explanation is every non-empty line preceding the first code
    fence, joined with newlines; the code is the first fenced block as
    returned by read_code() (None when absent).
    """
    explanation_lines = []
    for raw_line in result.split("\n"):
        if raw_line.startswith("```"):
            break
        if raw_line:
            explanation_lines.append(raw_line)
    # "".join of an empty list is already "", so no special-casing needed.
    return "\n".join(explanation_lines), read_code(result)
def input_command():
    """Read a multi-line cURL command from stdin.

    Lines are collected until end-of-file (Ctrl-D on Unix, Ctrl-Z + Enter
    on Windows).

    Returns:
        The pasted command as a single newline-joined string.
    """
    print("Enter/Paste the cURL command. Ctrl-D or Ctrl-Z ( windows ) to save it.")
    contents = []
    while True:
        try:
            line = input()
        except EOFError:
            # Narrowed from a bare `except`, which also swallowed
            # KeyboardInterrupt and made Ctrl-C unable to abort the script.
            break
        contents.append(line)
    return "\n".join(contents)
name = input("Name: ")
provider_path = f"g4f/Provider/{name}.py"
example = """
from __future__ import annotations
from aiohttp import ClientSession
from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt
class ChatgptDuo(AsyncGeneratorProvider):
url = "https://chat-gpt.com"
supports_gpt_35_turbo = True
working = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: list[dict[str, str]],
**kwargs
) -> AsyncGenerator:
headers = {
"authority": "chat-gpt.com",
"accept": "application/json",
"origin": cls.url,
"referer": f"{cls.url}/chat",
}
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages),
data = {
"prompt": prompt,
"purpose": "ask",
}
async with session.post(cls.url + "/api/chat", json=data) as response:
response.raise_for_status()
async for stream in response.content:
if stream:
yield stream.decode()
"""
if not path.isfile(provider_path):
command = input_command()
prompt = f"""
Create a provider from a cURL command. The command is:
```bash
{command}
```
A example for a provider:
```py
{example}
```
The name for the provider class:
{name}
Replace "hello" with `format_prompt(messages)`.
And replace "gpt-3.5-turbo" with `model`.
"""
print("Create code...")
response = g4f.ChatCompletion.create(
model=g4f.models.gpt_35_long,
messages=[{"role": "user", "content": prompt}],
auth=True,
timeout=120,
)
print(response)
explanation, code = read_result(response)
if code:
with open(provider_path, "w") as file:
file.write(code)
with open(f"g4f/Provider/__init__.py", "a") as file:
file.write(f"\nfrom .{name} import {name}")
else:
with open(provider_path, "r") as file:
code = file.read()