Mirror of https://github.com/xtekky/gpt4free.git (synced 2025-12-06 02:30:41 -08:00)
Add AiAsk, Chatgpt4Online, ChatgptDemo and ChatgptX providers
Fix the Bing, Liaobots and ChatgptAi providers
Add the "gpt_35_long" model and a custom timeout option

commit 88d2cbff09 (parent 0bd5730bcd)
34 changed files with 717 additions and 177 deletions
@ -4,7 +4,7 @@ import json
|
|||
|
||||
from ..typing import AsyncGenerator
|
||||
from ..requests import StreamSession
|
||||
from .base_provider import AsyncGeneratorProvider, format_prompt
|
||||
from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
|
||||
|
||||
|
||||
class AItianhu(AsyncGeneratorProvider):
|
||||
|
|
@ -18,8 +18,12 @@ class AItianhu(AsyncGeneratorProvider):
|
|||
model: str,
|
||||
messages: list[dict[str, str]],
|
||||
proxy: str = None,
|
||||
cookies: dict = None,
|
||||
timeout: int = 30,
|
||||
**kwargs
|
||||
) -> AsyncGenerator:
|
||||
if not cookies:
|
||||
cookies = get_cookies("www.aitianhu.com")
|
||||
data = {
|
||||
"prompt": format_prompt(messages),
|
||||
"options": {},
|
||||
|
|
@ -34,12 +38,19 @@ class AItianhu(AsyncGeneratorProvider):
|
|||
"Origin": cls.url,
|
||||
"Referer": f"{cls.url}/"
|
||||
}
|
||||
async with StreamSession(headers=headers, proxies={"https": proxy}, impersonate="chrome107", verify=False) as session:
|
||||
async with StreamSession(
|
||||
headers=headers,
|
||||
cookies=cookies,
|
||||
timeout=timeout,
|
||||
proxies={"https": proxy},
|
||||
impersonate="chrome107",
|
||||
verify=False
|
||||
) as session:
|
||||
async with session.post(f"{cls.url}/api/chat-process", json=data) as response:
|
||||
response.raise_for_status()
|
||||
async for line in response.iter_lines():
|
||||
if line == b"<script>":
|
||||
raise RuntimeError("Solve Challenge")
|
||||
raise RuntimeError("Solve challenge and pass cookies")
|
||||
if b"platform's risk control" in line:
|
||||
raise RuntimeError("Platform's Risk Control")
|
||||
line = json.loads(line)
|
||||
|
|
|
|||
|
|
@ -4,11 +4,11 @@ import random, json
|
|||
|
||||
from ..typing import AsyncGenerator
|
||||
from ..requests import StreamSession
|
||||
from .base_provider import AsyncGeneratorProvider, format_prompt
|
||||
from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
|
||||
|
||||
domains = {
|
||||
"gpt-3.5-turbo": ".aitianhu.space",
|
||||
"gpt-4": ".aitianhu.website",
|
||||
"gpt-3.5-turbo": "aitianhu.space",
|
||||
"gpt-4": "aitianhu.website",
|
||||
}
|
||||
|
||||
class AItianhuSpace(AsyncGeneratorProvider):
|
||||
|
|
@ -21,20 +21,31 @@ class AItianhuSpace(AsyncGeneratorProvider):
|
|||
cls,
|
||||
model: str,
|
||||
messages: list[dict[str, str]],
|
||||
stream: bool = True,
|
||||
proxy: str = None,
|
||||
domain: str = None,
|
||||
cookies: dict = None,
|
||||
timeout: int = 30,
|
||||
**kwargs
|
||||
) -> AsyncGenerator:
|
||||
if not model:
|
||||
model = "gpt-3.5-turbo"
|
||||
elif not model in domains:
|
||||
raise ValueError(f"Model are not supported: {model}")
|
||||
|
||||
if not domain:
|
||||
chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
|
||||
rand = ''.join(random.choice(chars) for _ in range(6))
|
||||
domain = domains[model]
|
||||
url = f'https://{rand}{domain}'
|
||||
domain = f"{rand}.{domains[model]}"
|
||||
if not cookies:
|
||||
cookies = get_cookies(domain)
|
||||
|
||||
async with StreamSession(impersonate="chrome110", verify=False) as session:
|
||||
url = f'https://{domain}'
|
||||
async with StreamSession(
|
||||
proxies={"https": proxy},
|
||||
cookies=cookies,
|
||||
timeout=timeout,
|
||||
impersonate="chrome110",
|
||||
verify=False
|
||||
) as session:
|
||||
data = {
|
||||
"prompt": format_prompt(messages),
|
||||
"options": {},
|
||||
|
|
@ -53,7 +64,7 @@ class AItianhuSpace(AsyncGeneratorProvider):
|
|||
response.raise_for_status()
|
||||
async for line in response.iter_lines():
|
||||
if line == b"<script>":
|
||||
raise RuntimeError("Solve Challenge")
|
||||
raise RuntimeError("Solve challenge and pass cookies and a fixed domain")
|
||||
if b"platform's risk control" in line:
|
||||
raise RuntimeError("Platform's Risk Control")
|
||||
line = json.loads(line)
|
||||
|
|
|
|||
g4f/Provider/AiAsk.py (new file, 43 lines)

@@ -0,0 +1,43 @@
from aiohttp import ClientSession
from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider

class AiAsk(AsyncGeneratorProvider):
    url = "https://e.aiask.me"
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        timeout: int = 30,
        **kwargs
    ) -> AsyncGenerator:
        headers = {
            "accept": "application/json, text/plain, */*",
            "origin": cls.url,
            "referer": f"{cls.url}/chat",
        }
        async with ClientSession(headers=headers, timeout=timeout) as session:
            data = {
                "continuous": True,
                "id": "fRMSQtuHl91A4De9cCvKD",
                "list": messages,
                "models": "0",
                "prompt": "",
                "temperature": kwargs.get("temperature", 0.5),
                "title": "",
            }
            buffer = ""
            rate_limit = "您的免费额度不够使用这个模型啦,请点击右上角登录继续使用!"
            async with session.post(f"{cls.url}/v1/chat/gpt/", json=data) as response:
                response.raise_for_status()
                async for chunk in response.content.iter_any():
                    buffer += chunk.decode()
                    if not rate_limit.startswith(buffer):
                        yield buffer
                        buffer = ""
                    elif buffer == rate_limit:
                        raise RuntimeError("Rate limit reached")
@ -18,9 +18,10 @@ class Aibn(AsyncGeneratorProvider):
|
|||
cls,
|
||||
model: str,
|
||||
messages: list[dict[str, str]],
|
||||
timeout: int = 30,
|
||||
**kwargs
|
||||
) -> AsyncGenerator:
|
||||
async with StreamSession(impersonate="chrome107") as session:
|
||||
async with StreamSession(impersonate="chrome107", timeout=timeout) as session:
|
||||
timestamp = int(time.time())
|
||||
data = {
|
||||
"messages": messages,
|
||||
|
|
|
|||
|
|
@ -15,6 +15,7 @@ class Aichat(AsyncProvider):
|
|||
model: str,
|
||||
messages: list[dict[str, str]],
|
||||
proxy: str = None,
|
||||
timeout: int = 30,
|
||||
**kwargs
|
||||
) -> str:
|
||||
headers = {
|
||||
|
|
@ -33,7 +34,7 @@ class Aichat(AsyncProvider):
|
|||
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
|
||||
}
|
||||
async with ClientSession(
|
||||
headers=headers
|
||||
headers=headers, timeout=timeout
|
||||
) as session:
|
||||
json_data = {
|
||||
"message": format_prompt(messages),
|
||||
|
|
|
|||
|
|
@ -29,6 +29,7 @@ class Aivvm(AsyncGeneratorProvider):
|
|||
model: str,
|
||||
messages: list[dict[str, str]],
|
||||
stream: bool,
|
||||
timeout: int = 30,
|
||||
**kwargs
|
||||
) -> AsyncGenerator:
|
||||
if not model:
|
||||
|
|
@ -43,7 +44,12 @@ class Aivvm(AsyncGeneratorProvider):
|
|||
"prompt" : kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."),
|
||||
"temperature" : kwargs.get("temperature", 0.7)
|
||||
}
|
||||
async with StreamSession(impersonate="chrome107") as session:
|
||||
headers = {
|
||||
"Accept": "*/*",
|
||||
"Origin": cls.url,
|
||||
"Referer": f"{cls.url}/",
|
||||
}
|
||||
async with StreamSession(impersonate="chrome107", headers=headers, timeout=timeout) as session:
|
||||
async with session.post(f"{cls.url}/api/chat", json=json_data) as response:
|
||||
response.raise_for_status()
|
||||
async for chunk in response.iter_content():
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@ import random
|
|||
import uuid
|
||||
import json
|
||||
import os
|
||||
import uuid
|
||||
import urllib.parse
|
||||
from aiohttp import ClientSession, ClientTimeout
|
||||
from ..typing import AsyncGenerator
|
||||
|
|
@ -14,6 +15,15 @@ class Tones():
|
|||
balanced = "Balanced"
|
||||
precise = "Precise"
|
||||
|
||||
default_cookies = {
|
||||
'SRCHD' : 'AF=NOFORM',
|
||||
'PPLState' : '1',
|
||||
'KievRPSSecAuth': '',
|
||||
'SUID' : '',
|
||||
'SRCHUSR' : '',
|
||||
'SRCHHPGUSR' : '',
|
||||
}
|
||||
|
||||
class Bing(AsyncGeneratorProvider):
|
||||
url = "https://bing.com/chat"
|
||||
working = True
|
||||
|
|
@ -27,7 +37,6 @@ class Bing(AsyncGeneratorProvider):
|
|||
tone: str = Tones.creative,
|
||||
**kwargs
|
||||
) -> AsyncGenerator:
|
||||
|
||||
if len(messages) < 2:
|
||||
prompt = messages[0]["content"]
|
||||
context = None
|
||||
|
|
@ -36,14 +45,7 @@ class Bing(AsyncGeneratorProvider):
|
|||
context = create_context(messages[:-1])
|
||||
|
||||
if not cookies or "SRCHD" not in cookies:
|
||||
cookies = {
|
||||
'SRCHD' : 'AF=NOFORM',
|
||||
'PPLState' : '1',
|
||||
'KievRPSSecAuth': '',
|
||||
'SUID' : '',
|
||||
'SRCHUSR' : '',
|
||||
'SRCHHPGUSR' : '',
|
||||
}
|
||||
cookies = default_cookies
|
||||
return stream_generate(prompt, tone, context, cookies)
|
||||
|
||||
def create_context(messages: list[dict[str, str]]):
|
||||
|
|
@ -58,51 +60,18 @@ class Conversation():
|
|||
self.conversationSignature = conversationSignature
|
||||
|
||||
async def create_conversation(session: ClientSession) -> Conversation:
|
||||
url = 'https://www.bing.com/turing/conversation/create?bundleVersion=1.1055.6'
|
||||
headers = {
|
||||
'authority': 'www.bing.com',
|
||||
'accept': 'application/json',
|
||||
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
|
||||
'cache-control': 'no-cache',
|
||||
'pragma': 'no-cache',
|
||||
'referer': 'https://www.bing.com/search?q=Bing+AI&showconv=1',
|
||||
'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
|
||||
'sec-ch-ua-arch': '"arm"',
|
||||
'sec-ch-ua-bitness': '"64"',
|
||||
'sec-ch-ua-full-version': '"117.0.5938.132"',
|
||||
'sec-ch-ua-full-version-list': '"Google Chrome";v="117.0.5938.132", "Not;A=Brand";v="8.0.0.0", "Chromium";v="117.0.5938.132"',
|
||||
'sec-ch-ua-mobile': '?0',
|
||||
'sec-ch-ua-model': '""',
|
||||
'sec-ch-ua-platform': '"macOS"',
|
||||
'sec-ch-ua-platform-version': '"14.0.0"',
|
||||
'sec-fetch-dest': 'empty',
|
||||
'sec-fetch-mode': 'cors',
|
||||
'sec-fetch-site': 'same-origin',
|
||||
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
|
||||
'x-ms-client-request-id': str(uuid.uuid4()),
|
||||
'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.12.0 OS/macOS',
|
||||
}
|
||||
url = 'https://www.bing.com/turing/conversation/create'
|
||||
async with await session.get(url) as response:
|
||||
data = await response.json()
|
||||
conversationId = data.get('conversationId')
|
||||
clientId = data.get('clientId')
|
||||
conversationSignature = response.headers.get('X-Sydney-Encryptedconversationsignature')
|
||||
|
||||
async with await session.get(url, headers=headers) as response:
|
||||
conversationSignature = response.headers.get('X-Sydney-EncryptedConversationSignature', '')
|
||||
|
||||
response = await response.json()
|
||||
conversationId = response.get('conversationId')
|
||||
clientId = response.get('clientId')
|
||||
|
||||
if not conversationId or not clientId:
|
||||
if not conversationId or not clientId or not conversationSignature:
|
||||
raise Exception('Failed to create conversation.')
|
||||
|
||||
return Conversation(conversationId, clientId, conversationSignature)
|
||||
|
||||
async def retry_conversation(session: ClientSession) -> Conversation:
|
||||
for _ in range(5):
|
||||
try:
|
||||
return await create_conversation(session)
|
||||
except:
|
||||
session.cookie_jar.clear()
|
||||
return await create_conversation(session)
|
||||
|
||||
async def list_conversations(session: ClientSession) -> list:
|
||||
url = "https://www.bing.com/turing/conversation/chats"
|
||||
async with session.get(url) as response:
|
||||
|
|
@ -223,30 +192,34 @@ def format_message(msg: dict) -> str:
|
|||
return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter
|
||||
|
||||
def create_message(conversation: Conversation, prompt: str, tone: str, context: str=None) -> str:
|
||||
request_id = str(uuid.uuid4())
|
||||
struct = {
|
||||
'arguments': [
|
||||
{
|
||||
'optionsSets': Defaults.optionsSets,
|
||||
'source': 'cib',
|
||||
'optionsSets': Defaults.optionsSets,
|
||||
'allowedMessageTypes': Defaults.allowedMessageTypes,
|
||||
'sliceIds': Defaults.sliceIds,
|
||||
'traceId': os.urandom(16).hex(),
|
||||
'isStartOfSession': True,
|
||||
'requestId': request_id,
|
||||
'message': Defaults.location | {
|
||||
'author': 'user',
|
||||
'inputMethod': 'Keyboard',
|
||||
'text': prompt,
|
||||
'messageType': 'Chat'
|
||||
'messageType': 'Chat',
|
||||
'requestId': request_id,
|
||||
'messageId': request_id,
|
||||
},
|
||||
'tone': tone,
|
||||
'conversationSignature': conversation.conversationSignature,
|
||||
'spokenTextMode': 'None',
|
||||
'conversationId': conversation.conversationId,
|
||||
'participant': {
|
||||
'id': conversation.clientId
|
||||
},
|
||||
'conversationId': conversation.conversationId
|
||||
}
|
||||
],
|
||||
'invocationId': '0',
|
||||
'invocationId': '1',
|
||||
'target': 'chat',
|
||||
'type': 4
|
||||
}
|
||||
|
|
@ -272,16 +245,16 @@ async def stream_generate(
|
|||
cookies=cookies,
|
||||
headers=Defaults.headers,
|
||||
) as session:
|
||||
conversation = await retry_conversation(session)
|
||||
conversation = await create_conversation(session)
|
||||
try:
|
||||
async with session.ws_connect(
|
||||
f'wss://sydney.bing.com/sydney/ChatHub?sec_access_token={urllib.parse.quote_plus(conversation.conversationSignature)}',
|
||||
autoping=False,
|
||||
params={'sec_access_token': conversation.conversationSignature}
|
||||
) as wss:
|
||||
|
||||
await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
|
||||
msg = await wss.receive(timeout=900)
|
||||
|
||||
await wss.receive(timeout=900)
|
||||
await wss.send_str(create_message(conversation, prompt, tone, context))
|
||||
|
||||
response_txt = ''
|
||||
|
|
|
|||
|
|
@ -17,9 +17,10 @@ class ChatForAi(AsyncGeneratorProvider):
|
|||
cls,
|
||||
model: str,
|
||||
messages: list[dict[str, str]],
|
||||
timeout: int = 30,
|
||||
**kwargs
|
||||
) -> AsyncGenerator:
|
||||
async with StreamSession(impersonate="chrome107") as session:
|
||||
async with StreamSession(impersonate="chrome107", timeout=timeout) as session:
|
||||
conversation_id = f"id_{int(time.time())}"
|
||||
prompt = messages[-1]["content"]
|
||||
timestamp = int(time.time())
|
||||
|
|
|
|||
g4f/Provider/Chatgpt4Online.py (new file, 39 lines)

@@ -0,0 +1,39 @@
from __future__ import annotations

import json
from aiohttp import ClientSession

from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider


class Chatgpt4Online(AsyncGeneratorProvider):
    url = "https://chatgpt4online.org"
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        async with ClientSession() as session:
            data = {
                "botId": "default",
                "customId": None,
                "session": "N/A",
                "chatId": "",
                "contextId": 58,
                "messages": messages,
                "newMessage": messages[-1]["content"],
                "stream": True
            }
            async with session.post(cls.url + "/wp-json/mwai-ui/v1/chats/submit", json=data) as response:
                response.raise_for_status()
                async for line in response.content:
                    if line.startswith(b"data: "):
                        line = json.loads(line[6:])
                        if line["type"] == "live":
                            yield line["data"]
@ -1,28 +1,28 @@
|
|||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
import html
|
||||
import json
|
||||
from aiohttp import ClientSession
|
||||
|
||||
from ..typing import AsyncGenerator
|
||||
from .base_provider import AsyncGeneratorProvider
|
||||
from .base_provider import AsyncProvider, format_prompt
|
||||
|
||||
|
||||
class ChatgptAi(AsyncGeneratorProvider):
|
||||
class ChatgptAi(AsyncProvider):
|
||||
url: str = "https://chatgpt.ai/"
|
||||
working = True
|
||||
supports_gpt_35_turbo = True
|
||||
_system_data = None
|
||||
_nonce = None
|
||||
_post_id = None
|
||||
_bot_id = None
|
||||
|
||||
@classmethod
|
||||
async def create_async_generator(
|
||||
async def create_async(
|
||||
cls,
|
||||
model: str,
|
||||
messages: list[dict[str, str]],
|
||||
proxy: str = None,
|
||||
timeout: int = 30,
|
||||
**kwargs
|
||||
) -> AsyncGenerator:
|
||||
) -> str:
|
||||
headers = {
|
||||
"authority" : "chatgpt.ai",
|
||||
"accept" : "*/*",
|
||||
|
|
@ -40,36 +40,36 @@ class ChatgptAi(AsyncGeneratorProvider):
|
|||
"user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
|
||||
}
|
||||
async with ClientSession(
|
||||
headers=headers
|
||||
headers=headers, timeout=timeout
|
||||
) as session:
|
||||
if not cls._system_data:
|
||||
if not cls._nonce:
|
||||
async with session.get(cls.url, proxy=proxy) as response:
|
||||
response.raise_for_status()
|
||||
match = re.findall(r"data-system='([^']+)'", await response.text())
|
||||
if not match:
|
||||
raise RuntimeError("No system data")
|
||||
cls._system_data = json.loads(html.unescape(match[0]))
|
||||
text = await response.text()
|
||||
result = re.search(r'data-nonce="(.*?)"', text)
|
||||
if result:
|
||||
cls._nonce = result.group(1)
|
||||
result = re.search(r'data-post-id="(.*?)"', text)
|
||||
if result:
|
||||
cls._post_id = result.group(1)
|
||||
result = re.search(r'data-bot-id="(.*?)"', text)
|
||||
if result:
|
||||
cls._bot_id = result.group(1)
|
||||
if not cls._nonce or not cls._post_id or not cls._bot_id:
|
||||
raise RuntimeError("Nonce, post-id or bot-id not found")
|
||||
|
||||
data = {
|
||||
"botId": cls._system_data["botId"],
|
||||
"clientId": "",
|
||||
"contextId": cls._system_data["contextId"],
|
||||
"id": cls._system_data["id"],
|
||||
"messages": messages[:-1],
|
||||
"newMessage": messages[-1]["content"],
|
||||
"session": cls._system_data["sessionId"],
|
||||
"stream": True
|
||||
"_wpnonce": cls._nonce,
|
||||
"post_id": cls._post_id,
|
||||
"url": "https://chatgpt.ai",
|
||||
"action": "wpaicg_chat_shortcode_message",
|
||||
"message": format_prompt(messages),
|
||||
"bot_id": cls._bot_id
|
||||
}
|
||||
async with session.post(
|
||||
"https://chatgpt.ai/wp-json/mwai-ui/v1/chats/submit",
|
||||
"https://chatgpt.ai/wp-admin/admin-ajax.php",
|
||||
proxy=proxy,
|
||||
json=data
|
||||
data=data
|
||||
) as response:
|
||||
response.raise_for_status()
|
||||
start = "data: "
|
||||
async for line in response.content:
|
||||
line = line.decode('utf-8')
|
||||
if line.startswith(start):
|
||||
line = json.loads(line[len(start):-1])
|
||||
if line["type"] == "live":
|
||||
yield line["data"]
|
||||
return (await response.json())["data"]
|
||||
g4f/Provider/ChatgptDemo.py (new file, 62 lines)
|
|
@ -0,0 +1,62 @@
|
|||
from __future__ import annotations
|
||||
|
||||
import time, json, re
|
||||
from aiohttp import ClientSession
|
||||
from typing import AsyncGenerator
|
||||
|
||||
from .base_provider import AsyncGeneratorProvider
|
||||
from .helper import format_prompt
|
||||
|
||||
class ChatgptDemo(AsyncGeneratorProvider):
|
||||
url = "https://chat.chatgptdemo.net"
|
||||
supports_gpt_35_turbo = True
|
||||
working = True
|
||||
|
||||
@classmethod
|
||||
async def create_async_generator(
|
||||
cls,
|
||||
model: str,
|
||||
messages: list[dict[str, str]],
|
||||
proxy: str = None,
|
||||
timeout: int = 30,
|
||||
**kwargs
|
||||
) -> AsyncGenerator:
|
||||
headers = {
|
||||
"authority": "chat.chatgptdemo.net",
|
||||
"accept-language": "de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US",
|
||||
"origin": "https://chat.chatgptdemo.net",
|
||||
"referer": "https://chat.chatgptdemo.net/",
|
||||
"sec-ch-ua": '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
|
||||
"sec-ch-ua-mobile": "?0",
|
||||
"sec-ch-ua-platform": '"Linux"',
|
||||
"sec-fetch-dest": "empty",
|
||||
"sec-fetch-mode": "cors",
|
||||
"sec-fetch-site": "same-origin",
|
||||
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
|
||||
}
|
||||
async with ClientSession(headers=headers, timeout=timeout) as session:
|
||||
async with session.get(f"{cls.url}/", proxy=proxy) as response:
|
||||
response.raise_for_status()
|
||||
response = await response.text()
|
||||
result = re.search(r'<div id="USERID" style="display: none">(.*?)<\/div>', response)
|
||||
if not result:
|
||||
raise RuntimeError("No user id found")
|
||||
user_id = result.group(1)
|
||||
async with session.post(f"{cls.url}/new_chat", json={"user_id": user_id}, proxy=proxy) as response:
|
||||
response.raise_for_status()
|
||||
chat_id = (await response.json())["id_"]
|
||||
if not chat_id:
|
||||
raise RuntimeError("Could not create new chat")
|
||||
data = {
|
||||
"question": format_prompt(messages),
|
||||
"chat_id": chat_id,
|
||||
"timestamp": int(time.time()*1000),
|
||||
}
|
||||
async with session.post(f"{cls.url}/chat_api_stream", json=data, proxy=proxy) as response:
|
||||
response.raise_for_status()
|
||||
async for line in response.content:
|
||||
if line.startswith(b"data: "):
|
||||
line = json.loads(line[6:-1])
|
||||
chunk = line["choices"][0]["delta"].get("content")
|
||||
if chunk:
|
||||
yield chunk
|
||||
|
|
@ -14,9 +14,15 @@ class ChatgptDuo(AsyncProvider):
|
|||
cls,
|
||||
model: str,
|
||||
messages: list[dict[str, str]],
|
||||
proxy: str = None,
|
||||
timeout: int = 30,
|
||||
**kwargs
|
||||
) -> str:
|
||||
async with AsyncSession(impersonate="chrome107") as session:
|
||||
async with AsyncSession(
|
||||
impersonate="chrome107",
|
||||
proxies={"https": proxy},
|
||||
timeout=timeout
|
||||
) as session:
|
||||
prompt = format_prompt(messages),
|
||||
data = {
|
||||
"prompt": prompt,
|
||||
|
|
|
|||
g4f/Provider/ChatgptX.py (new file, 66 lines)
|
|
@ -0,0 +1,66 @@
|
|||
import re
|
||||
from aiohttp import ClientSession
|
||||
|
||||
from .base_provider import AsyncProvider
|
||||
from .helper import format_prompt
|
||||
|
||||
class ChatgptX(AsyncProvider):
|
||||
url = "https://chatgptx.de"
|
||||
supports_gpt_35_turbo = True
|
||||
working = True
|
||||
|
||||
@classmethod
|
||||
async def create_async(
|
||||
cls,
|
||||
model: str,
|
||||
messages: list[dict[str, str]],
|
||||
**kwargs
|
||||
) -> str:
|
||||
headers = {
|
||||
'accept-language': 'de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US',
|
||||
'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
|
||||
'sec-ch-ua-mobile': '?0',
|
||||
'sec-ch-ua-platform': 'Linux',
|
||||
'sec-fetch-dest': 'empty',
|
||||
'sec-fetch-mode': 'cors',
|
||||
'sec-fetch-site': 'same-origin',
|
||||
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
|
||||
}
|
||||
async with ClientSession(headers=headers) as session:
|
||||
async with session.get(f"{cls.url}/") as response:
|
||||
response = await response.text()
|
||||
result = re.search(r'<meta name="csrf-token" content="(.*?)"', response)
|
||||
if result:
|
||||
csrf_token = result.group(1)
|
||||
result = re.search(r"openconversions\('(.*?)'\)", response)
|
||||
if result:
|
||||
chat_id = result.group(1)
|
||||
result = re.search(r'<input type="hidden" id="user_id" value="(.*?)"', response)
|
||||
if result:
|
||||
user_id = result.group(1)
|
||||
|
||||
if not csrf_token or not chat_id or not user_id:
|
||||
raise RuntimeError("Missing csrf_token, chat_id or user_id")
|
||||
|
||||
data = {
|
||||
'_token': csrf_token,
|
||||
'user_id': user_id,
|
||||
'chats_id': chat_id,
|
||||
'prompt': format_prompt(messages),
|
||||
'current_model': "gpt3"
|
||||
}
|
||||
headers = {
|
||||
'authority': 'chatgptx.de',
|
||||
'accept': 'application/json, text/javascript, */*; q=0.01',
|
||||
'origin': cls.url,
|
||||
'referer': f'{cls.url}/',
|
||||
'x-csrf-token': csrf_token,
|
||||
'x-requested-with': 'XMLHttpRequest'
|
||||
}
|
||||
async with session.post(cls.url + '/sendchat', data=data, headers=headers) as response:
|
||||
response.raise_for_status()
|
||||
data = await response.json()
|
||||
if "message" in data:
|
||||
return data["message"]
|
||||
elif "messages" in data:
|
||||
raise RuntimeError(f'Response: {data["messages"]}')
|
||||
|
|
@ -2,6 +2,8 @@ from __future__ import annotations
|
|||
|
||||
import json
|
||||
import js2py
|
||||
import random
|
||||
import hashlib
|
||||
from aiohttp import ClientSession
|
||||
|
||||
from ..typing import AsyncGenerator
|
||||
|
|
@ -64,3 +66,12 @@ f = function () {
|
|||
async for stream in response.content.iter_any():
|
||||
if stream:
|
||||
yield stream.decode()
|
||||
|
||||
|
||||
def get_api_key(user_agent: str):
|
||||
e = str(round(1E11 * random.random()))
|
||||
|
||||
def hash(data: str):
|
||||
return hashlib.md5(data.encode()).hexdigest()[::-1]
|
||||
|
||||
return f"tryit-{e}-" + hash(user_agent + hash(user_agent + hash(user_agent + e + "x")))
|
||||
|
|
@ -21,9 +21,10 @@ class FreeGpt(AsyncGeneratorProvider):
|
|||
cls,
|
||||
model: str,
|
||||
messages: list[dict[str, str]],
|
||||
timeout: int = 30,
|
||||
**kwargs
|
||||
) -> AsyncGenerator:
|
||||
async with StreamSession(impersonate="chrome107") as session:
|
||||
async with StreamSession(impersonate="chrome107", timeout=timeout) as session:
|
||||
prompt = messages[-1]["content"]
|
||||
timestamp = int(time.time())
|
||||
data = {
|
||||
|
|
|
|||
|
|
@ -18,6 +18,7 @@ class GptGo(AsyncGeneratorProvider):
|
|||
model: str,
|
||||
messages: list[dict[str, str]],
|
||||
proxy: str = None,
|
||||
timeout: int = 30,
|
||||
**kwargs
|
||||
) -> AsyncGenerator:
|
||||
headers = {
|
||||
|
|
@ -31,7 +32,7 @@ class GptGo(AsyncGeneratorProvider):
|
|||
"Sec-Fetch-Site" : "same-origin",
|
||||
}
|
||||
async with ClientSession(
|
||||
headers=headers
|
||||
headers=headers, timeout=timeout
|
||||
) as session:
|
||||
async with session.get(
|
||||
"https://gptgo.ai/action_get_token.php",
|
||||
|
|
|
|||
|
|
@ -30,8 +30,8 @@ models = {
|
|||
}
|
||||
|
||||
class Liaobots(AsyncGeneratorProvider):
|
||||
url = "https://liaobots.com"
|
||||
working = False
|
||||
url = "https://liaobots.site"
|
||||
working = True
|
||||
supports_gpt_35_turbo = True
|
||||
supports_gpt_4 = True
|
||||
_auth_code = None
|
||||
|
|
@ -43,6 +43,7 @@ class Liaobots(AsyncGeneratorProvider):
|
|||
messages: list[dict[str, str]],
|
||||
auth: str = None,
|
||||
proxy: str = None,
|
||||
timeout: int = 30,
|
||||
**kwargs
|
||||
) -> AsyncGenerator:
|
||||
model = model if model in models else "gpt-3.5-turbo"
|
||||
|
|
@ -54,13 +55,25 @@ class Liaobots(AsyncGeneratorProvider):
|
|||
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
|
||||
}
|
||||
async with ClientSession(
|
||||
headers=headers
|
||||
headers=headers, timeout=timeout
|
||||
) as session:
|
||||
auth_code = auth if isinstance(auth, str) else cls._auth_code
|
||||
if not auth_code:
|
||||
async with session.post(cls.url + "/api/user", proxy=proxy, json={"authcode": ""}) as response:
|
||||
cls._auth_code = auth if isinstance(auth, str) else cls._auth_code
|
||||
if not cls._auth_code:
|
||||
async with session.post(
|
||||
"https://liaobots.work/recaptcha/api/login",
|
||||
proxy=proxy,
|
||||
data={"token": "abcdefghijklmnopqrst"},
|
||||
verify_ssl=False
|
||||
) as response:
|
||||
response.raise_for_status()
|
||||
auth_code = cls._auth_code = json.loads(await response.text())["authCode"]
|
||||
async with session.post(
|
||||
"https://liaobots.work/api/user",
|
||||
proxy=proxy,
|
||||
json={"authcode": ""},
|
||||
verify_ssl=False
|
||||
) as response:
|
||||
response.raise_for_status()
|
||||
cls._auth_code = (await response.json(content_type=None))["authCode"]
|
||||
data = {
|
||||
"conversationId": str(uuid.uuid4()),
|
||||
"model": models[model],
|
||||
|
|
@ -68,7 +81,13 @@ class Liaobots(AsyncGeneratorProvider):
|
|||
"key": "",
|
||||
"prompt": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
|
||||
}
|
||||
async with session.post(cls.url + "/api/chat", proxy=proxy, json=data, headers={"x-auth-code": auth_code}) as response:
|
||||
async with session.post(
|
||||
"https://liaobots.work/api/chat",
|
||||
proxy=proxy,
|
||||
json=data,
|
||||
headers={"x-auth-code": cls._auth_code},
|
||||
verify_ssl=False
|
||||
) as response:
|
||||
response.raise_for_status()
|
||||
async for stream in response.content.iter_any():
|
||||
if stream:
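With the move from liaobots.com to liaobots.site/liaobots.work, the provider first posts to the recaptcha login endpoint to obtain an authCode and then sends it back as the x-auth-code header on the chat request. A minimal usage sketch of the generator interface; the model choice and prompt are illustrative, and `auth` can be omitted because the handshake above supplies a code automatically:

```python
import asyncio

from g4f.Provider import Liaobots

async def main():
    async for chunk in Liaobots.create_async_generator(
        model="gpt-4",
        messages=[{"role": "user", "content": "Hello"}],
        timeout=60,  # keyword introduced in this commit
    ):
        print(chunk, end="", flush=True)

asyncio.run(main())
```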
|
||||
|
|
|
|||
|
|
@ -28,6 +28,7 @@ class Myshell(AsyncGeneratorProvider):
|
|||
cls,
|
||||
model: str,
|
||||
messages: list[dict[str, str]],
|
||||
timeout: int = 90,
|
||||
**kwargs
|
||||
) -> AsyncGenerator:
|
||||
if not model:
|
||||
|
|
@ -46,7 +47,7 @@ class Myshell(AsyncGeneratorProvider):
|
|||
async with session.ws_connect(
|
||||
"wss://api.myshell.ai/ws/?EIO=4&transport=websocket",
|
||||
autoping=False,
|
||||
timeout=90
|
||||
timeout=timeout
|
||||
) as wss:
|
||||
# Send and receive hello message
|
||||
await wss.receive_str()
|
||||
|
|
|
|||
|
|
@ -18,6 +18,7 @@ class Vitalentum(AsyncGeneratorProvider):
|
|||
model: str,
|
||||
messages: list[dict[str, str]],
|
||||
proxy: str = None,
|
||||
timeout: int = 30,
|
||||
**kwargs
|
||||
) -> AsyncGenerator:
|
||||
headers = {
|
||||
|
|
@ -40,7 +41,7 @@ class Vitalentum(AsyncGeneratorProvider):
|
|||
**kwargs
|
||||
}
|
||||
async with ClientSession(
|
||||
headers=headers
|
||||
headers=headers, timeout=timeout
|
||||
) as session:
|
||||
async with session.post(cls.url + "/api/converse-edge", json=data, proxy=proxy) as response:
|
||||
response.raise_for_status()
|
||||
|
|
|
|||
|
|
@ -19,6 +19,7 @@ class Ylokh(AsyncGeneratorProvider):
|
|||
messages: list[dict[str, str]],
|
||||
stream: bool = True,
|
||||
proxy: str = None,
|
||||
timeout: int = 30,
|
||||
**kwargs
|
||||
) -> AsyncGenerator:
|
||||
model = model if model else "gpt-3.5-turbo"
|
||||
|
|
@ -39,7 +40,8 @@ class Ylokh(AsyncGeneratorProvider):
|
|||
}
|
||||
async with StreamSession(
|
||||
headers=headers,
|
||||
proxies={"https": proxy}
|
||||
proxies={"https": proxy},
|
||||
timeout=timeout
|
||||
) as session:
|
||||
async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data) as response:
|
||||
response.raise_for_status()
|
||||
|
|
|
|||
|
|
@ -2,8 +2,7 @@ from __future__ import annotations
|
|||
|
||||
import json
|
||||
|
||||
from curl_cffi.requests import AsyncSession
|
||||
|
||||
from ..requests import StreamSession
|
||||
from ..typing import AsyncGenerator
|
||||
from .base_provider import AsyncGeneratorProvider, format_prompt
|
||||
|
||||
|
|
@ -12,7 +11,6 @@ class You(AsyncGeneratorProvider):
|
|||
url = "https://you.com"
|
||||
working = True
|
||||
supports_gpt_35_turbo = True
|
||||
supports_stream = False
|
||||
|
||||
|
||||
@classmethod
|
||||
|
|
@ -21,20 +19,21 @@ class You(AsyncGeneratorProvider):
|
|||
model: str,
|
||||
messages: list[dict[str, str]],
|
||||
proxy: str = None,
|
||||
timeout: int = 30,
|
||||
**kwargs,
|
||||
) -> AsyncGenerator:
|
||||
async with AsyncSession(proxies={"https": proxy}, impersonate="chrome107") as session:
|
||||
async with StreamSession(proxies={"https": proxy}, impersonate="chrome107", timeout=timeout) as session:
|
||||
headers = {
|
||||
"Accept": "text/event-stream",
|
||||
"Referer": "https://you.com/search?fromSearchBar=true&tbm=youchat",
|
||||
}
|
||||
response = await session.get(
|
||||
async with session.get(
|
||||
"https://you.com/api/streamingSearch",
|
||||
params={"q": format_prompt(messages), "domain": "youchat", "chat": ""},
|
||||
headers=headers
|
||||
)
|
||||
) as response:
|
||||
response.raise_for_status()
|
||||
start = 'data: {"youChatToken": '
|
||||
for line in response.text.splitlines():
|
||||
start = b'data: {"youChatToken": '
|
||||
async for line in response.iter_lines():
|
||||
if line.startswith(start):
|
||||
yield json.loads(line[len(start): -1])
|
||||
yield json.loads(line[len(start):-1])
|
||||
|
|
@ -16,10 +16,11 @@ class Yqcloud(AsyncGeneratorProvider):
|
|||
model: str,
|
||||
messages: list[dict[str, str]],
|
||||
proxy: str = None,
|
||||
timeout: int = 30,
|
||||
**kwargs,
|
||||
) -> AsyncGenerator:
|
||||
async with ClientSession(
|
||||
headers=_create_header()
|
||||
headers=_create_header(), timeout=timeout
|
||||
) as session:
|
||||
payload = _create_payload(messages)
|
||||
async with session.post("https://api.aichatos.cloud/api/generateStream", proxy=proxy, json=payload) as response:
|
||||
|
|
|
|||
|
|
@ -1,5 +1,6 @@
|
|||
from __future__ import annotations
|
||||
from .Acytoo import Acytoo
|
||||
from .AiAsk import AiAsk
|
||||
from .Aibn import Aibn
|
||||
from .Aichat import Aichat
|
||||
from .Ails import Ails
|
||||
|
|
@ -9,9 +10,12 @@ from .Aivvm import Aivvm
|
|||
from .Bing import Bing
|
||||
from .ChatBase import ChatBase
|
||||
from .ChatForAi import ChatForAi
|
||||
from .Chatgpt4Online import Chatgpt4Online
|
||||
from .ChatgptAi import ChatgptAi
|
||||
from .ChatgptDemo import ChatgptDemo
|
||||
from .ChatgptDuo import ChatgptDuo
|
||||
from .ChatgptLogin import ChatgptLogin
|
||||
from .ChatgptX import ChatgptX
|
||||
from .DeepAi import DeepAi
|
||||
from .FreeGpt import FreeGpt
|
||||
from .GptGo import GptGo
|
||||
|
|
@ -29,6 +33,7 @@ from .base_provider import BaseProvider, AsyncProvider, AsyncGeneratorProvider
|
|||
from .retry_provider import RetryProvider
|
||||
from .deprecated import *
|
||||
from .needs_auth import *
|
||||
from .unfinished import *
|
||||
|
||||
__all__ = [
|
||||
'BaseProvider',
|
||||
|
|
@ -36,6 +41,7 @@ __all__ = [
|
|||
'AsyncGeneratorProvider',
|
||||
'RetryProvider',
|
||||
'Acytoo',
|
||||
'AiAsk',
|
||||
'Aibn',
|
||||
'Aichat',
|
||||
'Ails',
|
||||
|
|
@ -47,9 +53,12 @@ __all__ = [
|
|||
'Bing',
|
||||
'ChatBase',
|
||||
'ChatForAi',
|
||||
'Chatgpt4Online',
|
||||
'ChatgptAi',
|
||||
'ChatgptDemo',
|
||||
'ChatgptDuo',
|
||||
'ChatgptLogin',
|
||||
'ChatgptX',
|
||||
'CodeLinkAva',
|
||||
'DeepAi',
|
||||
'DfeHub',
|
||||
|
|
|
|||
|
|
@ -6,7 +6,6 @@ from .Forefront import Forefront
|
|||
from .GetGpt import GetGpt
|
||||
from .Opchatgpts import Opchatgpts
|
||||
from .Lockchat import Lockchat
|
||||
from .PerplexityAi import PerplexityAi
|
||||
from .Wewordle import Wewordle
|
||||
from .Equing import Equing
|
||||
from .Wuguokai import Wuguokai
|
||||
|
|
|
|||
|
|
@ -22,6 +22,7 @@ class OpenaiChat(AsyncGeneratorProvider):
|
|||
proxy: str = None,
|
||||
access_token: str = None,
|
||||
cookies: dict = None,
|
||||
timeout: int = 30,
|
||||
**kwargs: dict
|
||||
) -> AsyncGenerator:
|
||||
proxies = {"https": proxy}
|
||||
|
|
@ -31,7 +32,7 @@ class OpenaiChat(AsyncGeneratorProvider):
|
|||
"Accept": "text/event-stream",
|
||||
"Authorization": f"Bearer {access_token}",
|
||||
}
|
||||
async with StreamSession(proxies=proxies, headers=headers, impersonate="chrome107") as session:
|
||||
async with StreamSession(proxies=proxies, headers=headers, impersonate="chrome107", timeout=timeout) as session:
|
||||
messages = [
|
||||
{
|
||||
"id": str(uuid.uuid4()),
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@ import random
|
|||
|
||||
from ..typing import CreateResult
|
||||
from .base_provider import BaseProvider, AsyncProvider
|
||||
from ..debug import logging
|
||||
|
||||
|
||||
class RetryProvider(AsyncProvider):
|
||||
|
|
@ -41,6 +42,8 @@ class RetryProvider(AsyncProvider):
|
|||
started = False
|
||||
for provider in providers:
|
||||
try:
|
||||
if logging:
|
||||
print(f"Using {provider.__name__} provider")
|
||||
for token in provider.create_completion(model, messages, stream, **kwargs):
|
||||
yield token
|
||||
started = True
|
||||
|
|
@ -48,6 +51,8 @@ class RetryProvider(AsyncProvider):
|
|||
return
|
||||
except Exception as e:
|
||||
self.exceptions[provider.__name__] = e
|
||||
if logging:
|
||||
print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
|
||||
if started:
|
||||
break
|
||||
|
||||
|
|
@ -59,7 +64,7 @@ class RetryProvider(AsyncProvider):
|
|||
messages: list[dict[str, str]],
|
||||
**kwargs
|
||||
) -> str:
|
||||
providers = [provider for provider in self.providers if issubclass(provider, AsyncProvider)]
|
||||
providers = [provider for provider in self.providers]
|
||||
if self.shuffle:
|
||||
random.shuffle(providers)
|
||||
|
||||
|
|
@ -69,6 +74,8 @@ class RetryProvider(AsyncProvider):
|
|||
return await provider.create_async(model, messages, **kwargs)
|
||||
except Exception as e:
|
||||
self.exceptions[provider.__name__] = e
|
||||
if logging:
|
||||
print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
|
||||
|
||||
self.raise_exceptions()
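With the list-comprehension change above, `create_async` now iterates over every wrapped provider rather than only `AsyncProvider` subclasses, and each failure is recorded and, when `g4f.debug.logging` is enabled, printed. A hedged sketch of wiring a custom retry chain; the provider selection is illustrative:

```python
import g4f
from g4f.Provider import RetryProvider, Aichat, ChatgptAi, Ylokh

# tries the wrapped providers (shuffled by default) until one succeeds,
# collecting per-provider exceptions along the way
provider = RetryProvider([Aichat, ChatgptAi, Ylokh])

response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    provider=provider,
)
print(response)
```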
|
||||
|
||||
|
|
|
|||
g4f/Provider/unfinished/Komo.py (new file, 44 lines)
|
|
@ -0,0 +1,44 @@
|
|||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
|
||||
from ...requests import StreamSession
|
||||
from ...typing import AsyncGenerator
|
||||
from ..base_provider import AsyncGeneratorProvider, format_prompt
|
||||
|
||||
class Komo(AsyncGeneratorProvider):
|
||||
url = "https://komo.ai/api/ask"
|
||||
supports_gpt_35_turbo = True
|
||||
|
||||
@classmethod
|
||||
async def create_async_generator(
|
||||
cls,
|
||||
model: str,
|
||||
messages: list[dict[str, str]],
|
||||
**kwargs
|
||||
) -> AsyncGenerator:
|
||||
async with StreamSession(impersonate="chrome107") as session:
|
||||
prompt = format_prompt(messages)
|
||||
data = {
|
||||
"query": prompt,
|
||||
"FLAG_URLEXTRACT": "false",
|
||||
"token": "",
|
||||
"FLAG_MODELA": "1",
|
||||
}
|
||||
headers = {
|
||||
'authority': 'komo.ai',
|
||||
'accept': 'text/event-stream',
|
||||
'cache-control': 'no-cache',
|
||||
'referer': 'https://komo.ai/',
|
||||
}
|
||||
|
||||
async with session.get(cls.url, params=data, headers=headers) as response:
|
||||
response.raise_for_status()
|
||||
next = False
|
||||
async for line in response.iter_lines():
|
||||
if line == b"event: line":
|
||||
next = True
|
||||
elif next and line.startswith(b"data: "):
|
||||
yield json.loads(line[6:])
|
||||
next = False
|
||||
|
||||
g4f/Provider/unfinished/MikuChat.py (new file, 97 lines)
|
|
@ -0,0 +1,97 @@
|
|||
from __future__ import annotations
|
||||
|
||||
import random, json
|
||||
from datetime import datetime
|
||||
from ...requests import StreamSession
|
||||
|
||||
from ...typing import AsyncGenerator
|
||||
from ..base_provider import AsyncGeneratorProvider
|
||||
|
||||
|
||||
class MikuChat(AsyncGeneratorProvider):
|
||||
url = "https://ai.okmiku.com"
|
||||
supports_gpt_35_turbo = True
|
||||
|
||||
@classmethod
|
||||
async def create_async_generator(
|
||||
cls,
|
||||
model: str,
|
||||
messages: list[dict[str, str]],
|
||||
**kwargs
|
||||
) -> AsyncGenerator:
|
||||
if not model:
|
||||
model = "gpt-3.5-turbo"
|
||||
headers = {
|
||||
"authority": "api.catgpt.cc",
|
||||
"accept": "application/json",
|
||||
"origin": cls.url,
|
||||
"referer": f"{cls.url}/chat/",
|
||||
'x-app-version': 'undefined',
|
||||
'x-date': get_datetime(),
|
||||
'x-fingerprint': get_fingerprint(),
|
||||
'x-platform': 'web'
|
||||
}
|
||||
async with StreamSession(headers=headers, impersonate="chrome107") as session:
|
||||
data = {
|
||||
"model": model,
|
||||
"top_p": 0.8,
|
||||
"temperature": 0.5,
|
||||
"presence_penalty": 1,
|
||||
"frequency_penalty": 0,
|
||||
"max_tokens": 2000,
|
||||
"stream": True,
|
||||
"messages": messages,
|
||||
}
|
||||
async with session.post("https://api.catgpt.cc/ai/v1/chat/completions", json=data) as response:
|
||||
print(await response.text())
|
||||
response.raise_for_status()
|
||||
async for line in response.iter_lines():
|
||||
if line.startswith(b"data: "):
|
||||
line = json.loads(line[6:])
|
||||
chunk = line["choices"][0]["delta"].get("content")
|
||||
if chunk:
|
||||
yield chunk
|
||||
|
||||
def k(e: str, t: int):
|
||||
a = len(e) & 3
|
||||
s = len(e) - a
|
||||
i = t
|
||||
c = 3432918353
|
||||
o = 461845907
|
||||
n = 0
|
||||
r = 0
|
||||
while n < s:
|
||||
r = (ord(e[n]) & 255) | ((ord(e[n + 1]) & 255) << 8) | ((ord(e[n + 2]) & 255) << 16) | ((ord(e[n + 3]) & 255) << 24)
|
||||
n += 4
|
||||
r = (r & 65535) * c + (((r >> 16) * c & 65535) << 16) & 4294967295
|
||||
r = (r << 15) | (r >> 17)
|
||||
r = (r & 65535) * o + (((r >> 16) * o & 65535) << 16) & 4294967295
|
||||
i ^= r
|
||||
i = (i << 13) | (i >> 19)
|
||||
l = (i & 65535) * 5 + (((i >> 16) * 5 & 65535) << 16) & 4294967295
|
||||
i = (l & 65535) + 27492 + (((l >> 16) + 58964 & 65535) << 16)
|
||||
|
||||
if a == 3:
|
||||
r ^= (ord(e[n + 2]) & 255) << 16
|
||||
elif a == 2:
|
||||
r ^= (ord(e[n + 1]) & 255) << 8
|
||||
elif a == 1:
|
||||
r ^= ord(e[n]) & 255
|
||||
r = (r & 65535) * c + (((r >> 16) * c & 65535) << 16) & 4294967295
|
||||
r = (r << 15) | (r >> 17)
|
||||
r = (r & 65535) * o + (((r >> 16) * o & 65535) << 16) & 4294967295
|
||||
i ^= r
|
||||
|
||||
i ^= len(e)
|
||||
i ^= i >> 16
|
||||
i = (i & 65535) * 2246822507 + (((i >> 16) * 2246822507 & 65535) << 16) & 4294967295
|
||||
i ^= i >> 13
|
||||
i = (i & 65535) * 3266489909 + (((i >> 16) * 3266489909 & 65535) << 16) & 4294967295
|
||||
i ^= i >> 16
|
||||
return i & 0xFFFFFFFF
|
||||
|
||||
def get_fingerprint() -> str:
|
||||
return str(k(str(int(random.random() * 100000)), 256))
|
||||
|
||||
def get_datetime() -> str:
|
||||
return datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
||||
|
|
@ -10,7 +10,6 @@ from ..base_provider import AsyncProvider, format_prompt, get_cookies
|
|||
|
||||
class PerplexityAi(AsyncProvider):
|
||||
url = "https://www.perplexity.ai"
|
||||
working = False
|
||||
supports_gpt_35_turbo = True
|
||||
_sources = []
|
||||
|
||||
g4f/Provider/unfinished/__init__.py (new file, 3 lines)
|
|
@ -0,0 +1,3 @@
|
|||
from .MikuChat import MikuChat
|
||||
from .PerplexityAi import PerplexityAi
|
||||
from .Komo import Komo
|
||||
|
|
@ -1,12 +1,15 @@
|
|||
from __future__ import annotations
|
||||
|
||||
from g4f import models
|
||||
from .Provider import BaseProvider, AsyncProvider
|
||||
from .typing import Any, CreateResult, Union
|
||||
from .Provider import BaseProvider
|
||||
from .typing import CreateResult, Union
|
||||
from .debug import logging
|
||||
from requests import get
|
||||
|
||||
logging = False
|
||||
version = '0.1.4.8'
|
||||
|
||||
|
||||
def check_pypi_version():
|
||||
try:
|
||||
response = get(f"https://pypi.org/pypi/g4f/json").json()
|
||||
|
|
|
|||
g4f/debug.py (new file, 1 line)
|
|
@ -0,0 +1 @@
|
|||
logging = False
|
||||
|
|
@ -3,27 +3,28 @@ from dataclasses import dataclass
|
|||
from .typing import Union
|
||||
from .Provider import BaseProvider, RetryProvider
|
||||
from .Provider import (
|
||||
AItianhuSpace,
|
||||
ChatgptLogin,
|
||||
PerplexityAi,
|
||||
ChatgptDemo,
|
||||
ChatgptDuo,
|
||||
Vitalentum,
|
||||
ChatgptAi,
|
||||
ChatForAi,
|
||||
ChatBase,
|
||||
AItianhu,
|
||||
Wewordle,
|
||||
Liaobots,
|
||||
Yqcloud,
|
||||
Myshell,
|
||||
FreeGpt,
|
||||
Vercel,
|
||||
DeepAi,
|
||||
Aichat,
|
||||
AiAsk,
|
||||
Aivvm,
|
||||
GptGo,
|
||||
Ylokh,
|
||||
Bard,
|
||||
Aibn,
|
||||
Bing,
|
||||
You,
|
||||
H2o,
|
||||
)
|
||||
|
||||
|
|
@ -33,19 +34,25 @@ class Model:
|
|||
base_provider: str
|
||||
best_provider: Union[type[BaseProvider], RetryProvider] = None
|
||||
|
||||
# Config for HuggingChat, OpenAssistant
|
||||
# Works for Liaobots, H2o, OpenaiChat, Yqcloud, You
|
||||
default = Model(
|
||||
name = "",
|
||||
base_provider = "",
|
||||
best_provider = RetryProvider([
|
||||
Bing, # Not fully GPT 3 or 4
|
||||
PerplexityAi, # Adds references to sources
|
||||
Wewordle, # Responds with markdown
|
||||
Yqcloud, # Answers short questions in chinese
|
||||
ChatBase, # Don't want to answer creatively
|
||||
ChatgptDuo, # Include search results
|
||||
DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, AItianhuSpace, Aichat, Myshell, Aibn, ChatForAi, FreeGpt, Ylokh,
|
||||
Aibn, Aichat, Aivvm, ChatForAi, ChatgptAi, ChatgptLogin, DeepAi, FreeGpt, GptGo, Myshell, Ylokh,
|
||||
])
|
||||
)
|
||||
|
||||
# GPT-3.5 too, but all providers supports long responses and a custom timeouts
|
||||
gpt_35_long = Model(
|
||||
name = 'gpt-3.5-turbo',
|
||||
base_provider = 'openai',
|
||||
best_provider = RetryProvider([
|
||||
AiAsk, Aibn, Aichat, Aivvm, ChatForAi, ChatgptAi, ChatgptDemo, ChatgptDuo,
|
||||
FreeGpt, GptGo, Liaobots, Myshell, Vitalentum, Ylokh, You, Yqcloud
|
||||
])
|
||||
)
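The new entry mirrors gpt_35_turbo but restricts the retry chain to providers that handle long responses and the custom timeout; tool/create_provider.py, added later in this commit, selects it the same way. A short usage sketch (the prompt and the 120-second timeout are illustrative):

```python
import g4f

response = g4f.ChatCompletion.create(
    model=g4f.models.gpt_35_long,
    messages=[{"role": "user", "content": "Explain asyncio event loops in detail."}],
    timeout=120,  # providers in the gpt_35_long chain accept the new timeout kwarg
)
print(response)
```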
|
||||
|
||||
|
|
@ -54,7 +61,7 @@ gpt_35_turbo = Model(
|
|||
name = 'gpt-3.5-turbo',
|
||||
base_provider = 'openai',
|
||||
best_provider = RetryProvider([
|
||||
DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn, ChatForAi, FreeGpt, Ylokh,
|
||||
Aibn, Aichat, Aivvm, ChatForAi, ChatgptAi, ChatgptLogin, DeepAi, FreeGpt, GptGo, Myshell, Ylokh,
|
||||
])
|
||||
)
|
||||
|
||||
|
|
|
|||
tool/create_provider.py (new file, 114 lines)
|
|
@ -0,0 +1,114 @@
|
|||
|
||||
import sys, re
|
||||
from pathlib import Path
|
||||
from os import path
|
||||
|
||||
sys.path.append(str(Path(__file__).parent.parent))
|
||||
|
||||
import g4f
|
||||
|
||||
def read_code(text):
|
||||
match = re.search(r"```(python|py|)\n(?P<code>[\S\s]+?)\n```", text)
|
||||
if match:
|
||||
return match.group("code")
|
||||
|
||||
def read_result(result):
|
||||
lines = []
|
||||
for line in result.split("\n"):
|
||||
if (line.startswith("```")):
|
||||
break
|
||||
if (line):
|
||||
lines.append(line)
|
||||
explanation = "\n".join(lines) if lines else ""
|
||||
return explanation, read_code(result)
|
||||
|
||||
def input_command():
|
||||
print("Enter/Paste the cURL command. Ctrl-D or Ctrl-Z ( windows ) to save it.")
|
||||
contents = []
|
||||
while True:
|
||||
try:
|
||||
line = input()
|
||||
except:
|
||||
break
|
||||
contents.append(line)
|
||||
return "\n".join(contents)
|
||||
|
||||
name = input("Name: ")
|
||||
provider_path = f"g4f/Provider/{name}.py"
|
||||
|
||||
example = """
|
||||
from __future__ import annotations
|
||||
|
||||
from aiohttp import ClientSession
|
||||
|
||||
from ..typing import AsyncGenerator
|
||||
from .base_provider import AsyncGeneratorProvider
|
||||
from .helper import format_prompt
|
||||
|
||||
|
||||
class ChatgptDuo(AsyncGeneratorProvider):
|
||||
url = "https://chat-gpt.com"
|
||||
supports_gpt_35_turbo = True
|
||||
working = True
|
||||
|
||||
@classmethod
|
||||
async def create_async_generator(
|
||||
cls,
|
||||
model: str,
|
||||
messages: list[dict[str, str]],
|
||||
**kwargs
|
||||
) -> AsyncGenerator:
|
||||
headers = {
|
||||
"authority": "chat-gpt.com",
|
||||
"accept": "application/json",
|
||||
"origin": cls.url,
|
||||
"referer": f"{cls.url}/chat",
|
||||
}
|
||||
async with ClientSession(headers=headers) as session:
|
||||
prompt = format_prompt(messages),
|
||||
data = {
|
||||
"prompt": prompt,
|
||||
"purpose": "ask",
|
||||
}
|
||||
async with session.post(cls.url + "/api/chat", json=data) as response:
|
||||
response.raise_for_status()
|
||||
async for stream in response.content:
|
||||
if stream:
|
||||
yield stream.decode()
|
||||
"""
|
||||
|
||||
if not path.isfile(provider_path):
|
||||
command = input_command()
|
||||
|
||||
prompt = f"""
|
||||
Create a provider from a cURL command. The command is:
|
||||
```bash
|
||||
{command}
|
||||
```
|
||||
A example for a provider:
|
||||
```py
|
||||
{example}
|
||||
```
|
||||
The name for the provider class:
|
||||
{name}
|
||||
Replace "hello" with `format_prompt(messages)`.
|
||||
And replace "gpt-3.5-turbo" with `model`.
|
||||
"""
|
||||
|
||||
print("Create code...")
|
||||
response = g4f.ChatCompletion.create(
|
||||
model=g4f.models.gpt_35_long,
|
||||
messages=[{"role": "user", "content": prompt}],
|
||||
auth=True,
|
||||
timeout=120,
|
||||
)
|
||||
print(response)
|
||||
explanation, code = read_result(response)
|
||||
if code:
|
||||
with open(provider_path, "w") as file:
|
||||
file.write(code)
|
||||
with open(f"g4f/Provider/__init__.py", "a") as file:
|
||||
file.write(f"\nfrom .{name} import {name}")
|
||||
else:
|
||||
with open(provider_path, "r") as file:
|
||||
code = file.read()
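A small sketch of the two helpers defined at the top of this tool: `read_result` keeps the prose before the first code fence as the explanation, and `read_code` pulls the first fenced Python block out of the model reply. The sample reply string is invented for illustration and assumes the helpers are in scope:

```python
fence = "`" * 3  # build the fences programmatically to keep this example readable

sample_reply = (
    "Here is the provider you asked for.\n"
    f"{fence}py\n"
    "class Example:\n"
    "    pass\n"
    f"{fence}"
)

explanation, code = read_result(sample_reply)
print(explanation)  # Here is the provider you asked for.
print(code)         # class Example:\n    pass
```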