Update HuggingChat to new API

Use Browser Impersonation in Aivvm Provider
Add ChatForAi and FreeGpt Providers
Update AItianhuSpace Provider
Improve StreamRequest Support
Update get_event_loop Helper
Heiner Lohaus 2023-10-01 06:38:11 +02:00
parent 423485fda6
commit 98895e5b09
9 changed files with 206 additions and 100 deletions
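
A quick end-to-end check of the new providers (a hedged sketch; it assumes g4f's top-level ChatCompletion.create API at this commit, and the prompt is a placeholder):

    import g4f
    from g4f.Provider import FreeGpt

    # Route a single completion through one of the providers added here.
    response = g4f.ChatCompletion.create(
        model="gpt-3.5-turbo",
        provider=FreeGpt,
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response)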

g4f/Provider/AItianhuSpace.py

@@ -2,7 +2,7 @@ from __future__ import annotations
import random, json
from g4f.requests import AsyncSession, StreamRequest
from g4f.requests import AsyncSession
from .base_provider import AsyncGeneratorProvider, format_prompt
domains = {
@@ -31,12 +31,9 @@ class AItianhuSpace(AsyncGeneratorProvider):
chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
rand = ''.join(random.choice(chars) for _ in range(6))
domain = domains[model]
url = f'https://{rand}{domain}/api/chat-process'
url = f'https://{rand}{domain}'
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
}
async with AsyncSession(headers=headers, impersonate="chrome107", verify=False) as session:
async with AsyncSession(impersonate="chrome110", verify=False) as session:
data = {
"prompt": format_prompt(messages),
"options": {},
@@ -45,9 +42,17 @@ class AItianhuSpace(AsyncGeneratorProvider):
"top_p": 1,
**kwargs
}
async with StreamRequest(session, "POST", url, json=data) as response:
headers = {
"Authority": url,
"Accept": "application/json, text/plain, */*",
"Origin": url,
"Referer": f"{url}/"
}
async with session.post(f"{url}/api/chat-process", json=data, headers=headers) as response:
response.raise_for_status()
async for line in response.content:
if b"platform's risk control" in line:
raise RuntimeError("Platform's Risk Control")
line = json.loads(line.rstrip())
if "detail" in line:
content = line["detail"]["choices"][0]["delta"].get("content")
@@ -56,7 +61,7 @@ class AItianhuSpace(AsyncGeneratorProvider):
elif "message" in line and "AI-4接口非常昂贵" in line["message"]:
raise RuntimeError("Rate limit for GPT 4 reached")
else:
raise RuntimeError("Response: {line}")
raise RuntimeError(f"Response: {line}")
@classmethod
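
For reference, each line streamed back by the chat-process endpoint is a JSON object; a minimal parsing sketch (the payload shape is inferred from the handling code above, and the sample content is made up):

    import json

    line = b'{"detail": {"choices": [{"delta": {"content": "Hello"}}]}}\n'
    event = json.loads(line.rstrip())
    # "detail" carries the incremental delta; "message" carries error text.
    content = event["detail"]["choices"][0]["delta"].get("content")
    assert content == "Hello"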

g4f/Provider/Aivvm.py

@@ -1,8 +1,8 @@
from __future__ import annotations
import requests
from .base_provider import BaseProvider
from ..typing import CreateResult
from ..requests import AsyncSession
from .base_provider import AsyncGeneratorProvider
from ..typing import AsyncGenerator
# to recreate this easily, send a post request to https://chat.aivvm.com/api/models
models = {
@@ -16,7 +16,7 @@ models = {
'gpt-4-32k-0613': {'id': 'gpt-4-32k-0613', 'name': 'GPT-4-32K-0613'},
}
class Aivvm(BaseProvider):
class Aivvm(AsyncGeneratorProvider):
url = 'https://chat.aivvm.com'
supports_stream = True
working = True
@@ -24,31 +24,18 @@ class Aivvm(BaseProvider):
supports_gpt_4 = True
@classmethod
def create_completion(cls,
async def create_async_generator(
cls,
model: str,
messages: list[dict[str, str]],
stream: bool,
**kwargs
) -> CreateResult:
) -> AsyncGenerator:
if not model:
model = "gpt-3.5-turbo"
elif model not in models:
raise ValueError(f"Model is not supported: {model}")
headers = {
"accept" : "*/*",
"accept-language" : "hu-HU,hu;q=0.9,en-US;q=0.8,en;q=0.7",
"content-type" : "application/json",
"sec-ch-ua" : "\"Kuki\";v=\"116\", \"Not)A;Brand\";v=\"24\", \"Pici Pocoro\";v=\"102\"",
"sec-ch-ua-mobile" : "?0",
"sec-ch-ua-platform": "\"Bandóz\"",
"sec-fetch-dest" : "empty",
"sec-fetch-mode" : "cors",
"sec-fetch-site" : "same-origin",
"Referer" : "https://chat.aivvm.com/",
"Referrer-Policy" : "same-origin",
}
json_data = {
"model" : models[model],
"messages" : messages,
@@ -56,13 +43,11 @@ class Aivvm(BaseProvider):
"prompt" : kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."),
"temperature" : kwargs.get("temperature", 0.7)
}
response = requests.post(
"https://chat.aivvm.com/api/chat", headers=headers, json=json_data, stream=True)
response.raise_for_status()
for chunk in response.iter_content(chunk_size=None):
yield chunk.decode('utf-8')
async with AsyncSession(impersonate="chrome107") as session:
async with session.post(f"{cls.url}/api/chat", json=json_data) as response:
response.raise_for_status()
async for chunk in response.content.iter_any():
yield chunk.decode('utf-8')
@classmethod
@property
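
With Aivvm converted to an AsyncGeneratorProvider, responses are consumed by async iteration; a minimal usage sketch based on the new signature (the prompt is a placeholder):

    import asyncio
    from g4f.Provider import Aivvm

    async def main():
        # Chunks arrive as decoded text while the response streams in.
        async for chunk in Aivvm.create_async_generator(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hi"}],
        ):
            print(chunk, end="")

    asyncio.run(main())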

g4f/Provider/ChatForAi.py Normal file

@@ -0,0 +1,62 @@
from __future__ import annotations
import time, hashlib
from ..typing import AsyncGenerator
from g4f.requests import AsyncSession
from .base_provider import AsyncGeneratorProvider
class ChatForAi(AsyncGeneratorProvider):
url = "https://chatforai.com"
supports_gpt_35_turbo = True
working = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: list[dict[str, str]],
**kwargs
) -> AsyncGenerator:
async with AsyncSession(impersonate="chrome107") as session:
conversation_id = f"id_{int(time.time())}"
prompt = messages[-1]["content"]
timestamp = int(time.time())
data = {
"conversationId": conversation_id,
"conversationType": "chat_continuous",
"botId": "chat_continuous",
"globalSettings":{
"baseUrl": "https://api.openai.com",
"model": model if model else "gpt-3.5-turbo",
"messageHistorySize": 5,
"temperature": 0.7,
"top_p": 1,
**kwargs
},
"botSettings": {},
"prompt": prompt,
"messages": messages,
"sign": generate_signature(timestamp, conversation_id, prompt),
"timestamp": timestamp
}
async with session.post(f"{cls.url}/api/handle/provider-openai", json=data) as response:
response.raise_for_status()
async for chunk in response.content.iter_any():
yield chunk.decode()
@classmethod
@property
def params(cls):
params = [
("model", "str"),
("messages", "list[dict[str, str]]"),
("stream", "bool"),
]
param = ", ".join([": ".join(p) for p in params])
return f"g4f.provider.{cls.__name__} supports: ({param})"
def generate_signature(timestamp, id, prompt):
data = f"{timestamp}:{id}:{prompt}:6B46K4pt"
return hashlib.sha256(data.encode()).hexdigest()
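
The sign field is a SHA-256 digest over the timestamp, conversation id, prompt, and the fixed key used in generate_signature; a worked example of the same computation:

    import time, hashlib

    timestamp = int(time.time())
    conversation_id = f"id_{timestamp}"
    prompt = "Hello"
    # Mirrors generate_signature(timestamp, conversation_id, prompt) above.
    sign = hashlib.sha256(f"{timestamp}:{conversation_id}:{prompt}:6B46K4pt".encode()).hexdigest()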

g4f/Provider/FreeGpt.py Normal file

@@ -0,0 +1,54 @@
from __future__ import annotations
import time, hashlib, random
from ..typing import AsyncGenerator
from g4f.requests import AsyncSession
from .base_provider import AsyncGeneratorProvider
domains = [
'https://k.aifree.site',
'https://p.aifree.site'
]
class FreeGpt(AsyncGeneratorProvider):
url = "https://freegpts1.aifree.site/"
supports_gpt_35_turbo = True
working = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: list[dict[str, str]],
**kwargs
) -> AsyncGenerator:
async with AsyncSession(impersonate="chrome107") as session:
prompt = messages[-1]["content"]
timestamp = int(time.time())
data = {
"messages": messages,
"time": timestamp,
"pass": None,
"sign": generate_signature(timestamp, prompt)
}
url = random.choice(domains)
async with session.post(f"{url}/api/generate", json=data) as response:
response.raise_for_status()
async for chunk in response.content.iter_any():
yield chunk.decode()
@classmethod
@property
def params(cls):
params = [
("model", "str"),
("messages", "list[dict[str, str]]"),
("stream", "bool"),
]
param = ", ".join([": ".join(p) for p in params])
return f"g4f.provider.{cls.__name__} supports: ({param})"
def generate_signature(timestamp: int, message: str, secret: str = ""):
data = f"{timestamp}:{message}:{secret}"
return hashlib.sha256(data.encode()).hexdigest()
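
FreeGpt signs requests the same way, but with an empty secret by default and no conversation id; a worked example:

    import time, hashlib

    timestamp = int(time.time())
    message = "Hello"
    # secret defaults to "" in generate_signature above.
    sign = hashlib.sha256(f"{timestamp}:{message}:".encode()).hexdigest()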

g4f/Provider/HuggingChat.py

@@ -1,6 +1,6 @@
from __future__ import annotations
import json
import json, uuid
from aiohttp import ClientSession
@@ -12,7 +12,7 @@ class HuggingChat(AsyncGeneratorProvider):
url = "https://huggingface.co/chat"
needs_auth = True
working = True
model = "OpenAssistant/oasst-sft-6-llama-30b-xor"
model = "meta-llama/Llama-2-70b-chat-hf"
@classmethod
async def create_async_generator(
@@ -37,55 +37,25 @@ class HuggingChat(AsyncGeneratorProvider):
cookies=cookies,
headers=headers
) as session:
async with session.post(f"{cls.url}/conversation", proxy=proxy, json={"model": model}) as response:
async with session.post(f"{cls.url}/conversation", json={"model": model}, proxy=proxy) as response:
conversation_id = (await response.json())["conversationId"]
send = {
"id": str(uuid.uuid4()),
"inputs": format_prompt(messages),
"parameters": {
"temperature": 0.2,
"truncate": 1000,
"max_new_tokens": 1024,
"stop": ["</s>"],
"top_p": 0.95,
"repetition_penalty": 1.2,
"top_k": 50,
"return_full_text": False,
**kwargs
},
"stream": stream,
"options": {
"id": "9e9b8bc4-6604-40c6-994e-8eb78fa32e37",
"response_id": "04ce2602-3bea-45e8-8efc-cef00680376a",
"is_retry": False,
"use_cache": False,
"web_search_id": ""
}
"is_retry": False,
"response_id": str(uuid.uuid4()),
"web_search": False
}
async with session.post(f"{cls.url}/conversation/{conversation_id}", proxy=proxy, json=send) as response:
if not stream:
data = await response.json()
if "error" in data:
raise RuntimeError(data["error"])
elif isinstance(data, list):
yield data[0]["generated_text"].strip()
else:
raise RuntimeError(f"Response: {data}")
else:
start = "data:"
first = True
async for line in response.content:
line = line.decode("utf-8")
if line.startswith(start):
line = json.loads(line[len(start):-1])
if "token" not in line:
raise RuntimeError(f"Response: {line}")
if not line["token"]["special"]:
if first:
yield line["token"]["text"].lstrip()
first = False
else:
yield line["token"]["text"]
async with session.post(f"{cls.url}/conversation/{conversation_id}", json=send, proxy=proxy) as response:
async for line in response.content:
line = json.loads(line[:-1])
if "type" not in line:
raise RuntimeError(f"Response: {line}")
elif line["type"] == "stream":
yield line["token"]
elif line["type"] == "finalAnswer":
break
async with session.delete(f"{cls.url}/conversation/{conversation_id}", proxy=proxy) as response:
response.raise_for_status()
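
The new HuggingChat endpoint streams newline-terminated JSON events keyed by "type"; a minimal parser sketch matching the loop above (event shapes taken from the handling code, not from API docs):

    import json

    def parse_event(raw: bytes):
        event = json.loads(raw[:-1])      # strip the trailing newline
        if event.get("type") == "stream":
            return event["token"]         # incremental text chunk
        if event.get("type") == "finalAnswer":
            return None                   # generation is finished
        raise RuntimeError(f"Response: {event}")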

g4f/Provider/PerplexityAi.py

@@ -4,8 +4,9 @@ import json
import time
import base64
from curl_cffi.requests import AsyncSession
from fake_useragent import UserAgent
from .base_provider import AsyncProvider, format_prompt
from .base_provider import AsyncProvider, format_prompt, get_cookies
class PerplexityAi(AsyncProvider):
@@ -23,18 +24,35 @@ class PerplexityAi(AsyncProvider):
**kwargs
) -> str:
url = cls.url + "/socket.io/?EIO=4&transport=polling"
async with AsyncSession(proxies={"https": proxy}, impersonate="chrome107") as session:
headers = {
"User-Agent": UserAgent().random,
"Referer": f"{cls.url}/"
}
async with AsyncSession(headers=headers, proxies={"https": proxy}, impersonate="chrome107") as session:
url_session = "https://www.perplexity.ai/api/auth/session"
response = await session.get(url_session)
response.raise_for_status()
url_session = "https://www.perplexity.ai/api/auth/session"
response = await session.get(url_session)
response.raise_for_status()
response = await session.get(url, params={"t": timestamp()})
response.raise_for_status()
sid = json.loads(response.text[1:])["sid"]
response = await session.get(url, params={"t": timestamp(), "sid": sid})
response.raise_for_status()
print(session.cookies)
data = '40{"jwt":"anonymous-ask-user"}'
response = await session.post(url, params={"t": timestamp(), "sid": sid}, data=data)
response.raise_for_status()
response = await session.get(url, params={"t": timestamp(), "sid": sid})
response.raise_for_status()
data = "424" + json.dumps([
"perplexity_ask",
format_prompt(messages),
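
The numeric prefixes in the polling payloads are Engine.IO/Socket.IO packet codes (my reading of the protocol, not stated in the source): "40" is MESSAGE + CONNECT, and "42..." is MESSAGE + EVENT with an optional ack id. A framing sketch:

    import json

    connect_frame = '40{"jwt":"anonymous-ask-user"}'   # open the Socket.IO namespace
    event_frame = "424" + json.dumps([                 # EVENT with ack id 4 (assumed)
        "perplexity_ask",
        "Hello",
    ])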

g4f/Provider/__init__.py

@@ -10,6 +10,7 @@ from .Aivvm import Aivvm
from .Bard import Bard
from .Bing import Bing
from .ChatBase import ChatBase
from .ChatForAi import ChatForAi
from .ChatgptAi import ChatgptAi
from .ChatgptDuo import ChatgptDuo
from .ChatgptLogin import ChatgptLogin
@@ -18,6 +19,7 @@ from .DeepAi import DeepAi
from .DfeHub import DfeHub
from .EasyChat import EasyChat
from .Forefront import Forefront
from .FreeGpt import FreeGpt
from .GetGpt import GetGpt
from .GptGo import GptGo
from .H2o import H2o
@@ -61,6 +63,7 @@ __all__ = [
'Bard',
'Bing',
'ChatBase',
'ChatForAi',
'ChatgptAi',
'ChatgptDuo',
'ChatgptLogin',
@@ -69,6 +72,7 @@ __all__ = [
'DfeHub',
'EasyChat',
'Forefront',
'FreeGpt',
'GetGpt',
'GptGo',
'H2o',

g4f/Provider/helper.py

@@ -1,18 +1,12 @@
from __future__ import annotations
import asyncio
import sys
from asyncio import AbstractEventLoop
import browser_cookie3
_cookies: dict[str, dict[str, str]] = {}
# Use own event_loop_policy with a selector event loop on windows.
if sys.platform == 'win32':
_event_loop_policy = asyncio.WindowsSelectorEventLoopPolicy()
else:
_event_loop_policy = asyncio.get_event_loop_policy()
# If event loop is already running, handle nested event loops
# If "nest_asyncio" is installed, patch the event loop.
@@ -20,9 +14,9 @@ def get_event_loop() -> AbstractEventLoop:
try:
asyncio.get_running_loop()
except RuntimeError:
return _event_loop_policy.get_event_loop()
return asyncio.get_event_loop()
try:
event_loop = _event_loop_policy.get_event_loop()
event_loop = asyncio.get_event_loop()
if not hasattr(event_loop.__class__, "_nest_patched"):
import nest_asyncio
nest_asyncio.apply(event_loop)
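
The nest_asyncio patch matters when the synchronous g4f API is called from inside an already-running loop (for example, in a Jupyter notebook), where run_until_complete would otherwise raise; a minimal sketch of the pattern used above:

    import asyncio
    import nest_asyncio

    loop = asyncio.get_event_loop()
    # nest_asyncio marks patched loop classes with _nest_patched,
    # so the helper above skips loops that are already patched.
    if not hasattr(loop.__class__, "_nest_patched"):
        nest_asyncio.apply(loop)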

g4f/requests.py

@@ -1,6 +1,6 @@
from __future__ import annotations
import json, sys
import json, sys, asyncio
from functools import partialmethod
from aiohttp import StreamReader
@@ -8,6 +8,9 @@ from aiohttp.base_protocol import BaseProtocol
from curl_cffi.requests import AsyncSession as BaseSession
from curl_cffi.requests import Response
from curl_cffi import AsyncCurl
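# Assumed: curl_cffi >= 0.5.9 exposes AsyncCurl.remove_handle, and its add_handle returns a future directly (see __aenter__ below)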
is_newer_0_5_9 = hasattr(AsyncCurl, "remove_handle")
class StreamResponse:
@@ -35,7 +38,7 @@ class StreamResponse:
class StreamRequest:
def __init__(self, session: AsyncSession, method: str, url: str, **kwargs):
self.session = session
self.loop = session.loop
self.loop = session.loop if session.loop else asyncio.get_running_loop()
self.content = StreamReader(
BaseProtocol(session.loop),
sys.maxsize,
@@ -51,10 +54,9 @@ class StreamRequest:
self.content.feed_data(data)
def on_done(self, task):
if not self.enter.done():
self.enter.set_result(None)
self.content.feed_eof()
self.curl.clean_after_perform()
self.curl.reset()
self.session.push_curl(self.curl)
async def __aenter__(self) -> StreamResponse:
self.curl = await self.session.pop_curl()
@@ -66,18 +68,30 @@ class StreamRequest:
content_callback=self.on_content,
**self.options
)
await self.session.acurl.add_handle(self.curl, False)
self.handle = self.session.acurl._curl2future[self.curl]
if is_newer_0_5_9:
self.handle = self.session.acurl.add_handle(self.curl)
else:
await self.session.acurl.add_handle(self.curl, False)
self.handle = self.session.acurl._curl2future[self.curl]
self.handle.add_done_callback(self.on_done)
await self.enter
if is_newer_0_5_9:
response = self.session._parse_response(self.curl, _, header_buffer)
response.request = request
else:
response = self.session._parse_response(self.curl, request, _, header_buffer)
return StreamResponse(
self.session._parse_response(self.curl, request, _, header_buffer),
response,
self.content,
request
)
async def __aexit__(self, exc_type, exc, tb):
pass
if not self.handle.done():
self.session.acurl.set_result(self.curl)
self.curl.clean_after_perform()
self.curl.reset()
self.session.push_curl(self.curl)
class AsyncSession(BaseSession):
def request(
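
Putting it together, the patched session is used like this (a hedged sketch; AsyncSession and the post shortcut come from this module, while the endpoint URL and payload are placeholders):

    import asyncio
    from g4f.requests import AsyncSession

    async def main():
        async with AsyncSession(impersonate="chrome110") as session:
            # session.post returns a StreamRequest, used as an async context manager.
            async with session.post("https://example.com/api", json={"q": "hi"}) as response:
                response.raise_for_status()
                async for chunk in response.content.iter_any():
                    print(chunk.decode(), end="")

    asyncio.run(main())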