Mirror of https://github.com/xtekky/gpt4free.git (synced 2025-12-06 02:30:41 -08:00)
Add async support for H2o
Add format_prompt helper
Fix create_completion in AsyncGeneratorProvider
Move get_cookies from constructor to function
Add new HuggingChat implementation
Remove needs_auth from Liaobots
Add static cache for access_token in OpenaiChat
Add OpenAssistant provider
Support stream and async in You
Support async and add userId in Yqcloud
Add log_time module
This commit is contained in: parent efd75a11b8, commit 7294abc890
15 changed files with 564 additions and 381 deletions
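At a glance, the commit moves several providers off blocking requests calls and onto aiohttp-based AsyncProvider/AsyncGeneratorProvider base classes. A minimal sketch of the reworked entry point, assuming the repository root is importable; the provider choice and prompt are illustrative, and any AsyncProvider subclass in this commit is called the same way:

    import asyncio
    import g4f

    async def main():
        # AsyncProvider subclasses expose create_async, which returns the full reply
        reply = await g4f.Provider.Yqcloud.create_async(
            model=None,
            messages=[{"role": "user", "content": "Hello"}],
        )
        print(reply)

    asyncio.run(main())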
g4f/Provider/Bard.py
@@ -1,12 +1,9 @@
 import json
 import random
 import re
 
 from aiohttp import ClientSession
-import asyncio
 
-from ..typing import Any, CreateResult
-from .base_provider import AsyncProvider, get_cookies
+from .base_provider import AsyncProvider, get_cookies, format_prompt
 
 
 class Bard(AsyncProvider):
     url = "https://bard.google.com"
@@ -19,15 +16,14 @@ class Bard(AsyncProvider):
         model: str,
         messages: list[dict[str, str]],
         proxy: str = None,
-        cookies: dict = get_cookies(".google.com"), **kwargs: Any,) -> str:
-        formatted = "\n".join(
-            ["%s: %s" % (message["role"], message["content"]) for message in messages]
-        )
-        prompt = f"{formatted}\nAssistant:"
+        cookies: dict = None,
+        **kwargs
+    ) -> str:
+        prompt = format_prompt(messages)
 
         if proxy and "://" not in proxy:
             proxy = f"http://{proxy}"
+        if not cookies:
+            cookies = get_cookies(".google.com")
 
         headers = {
             'authority': 'bard.google.com',
@@ -44,10 +40,11 @@ class Bard(AsyncProvider):
         ) as session:
             async with session.get(cls.url, proxy=proxy) as response:
                 text = await response.text()
 
             match = re.search(r'SNlM0e\":\"(.*?)\"', text)
-            if match:
-                snlm0e = match.group(1)
+            if not match:
+                raise RuntimeError("No snlm0e value.")
+            snlm0e = match.group(1)
 
             params = {
                 'bl': 'boq_assistant-bard-web-server_20230326.21_p0',
g4f/Provider/Bing.py
@@ -15,8 +15,11 @@ class Bing(AsyncGeneratorProvider):
     def create_async_generator(
         model: str,
         messages: list[dict[str, str]],
-        cookies: dict = get_cookies(".bing.com"), **kwargs) -> AsyncGenerator:
+        cookies: dict = None,
+        **kwargs
+    ) -> AsyncGenerator:
+        if not cookies:
+            cookies = get_cookies(".bing.com")
         if len(messages) < 2:
             prompt = messages[0]["content"]
             context = None
@@ -273,15 +276,4 @@ async def stream_generate(
                 final = True
                 break
     finally:
         await delete_conversation(session, conversation)
-
-def run(generator: AsyncGenerator[Union[Any, str], Any]):
-    loop = asyncio.get_event_loop()
-    gen = generator.__aiter__()
-
-    while True:
-        try:
-            yield loop.run_until_complete(gen.__anext__())
-
-        except StopAsyncIteration:
-            break
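The signature change above (and the same one in Bard) is the "Move get_cookies from constructor to function" item: Python evaluates default argument values once, at definition time, so `cookies: dict = get_cookies(".bing.com")` read the browser cookies at import and froze the result for every later call. A tiny self-contained illustration of the pitfall, with hypothetical stand-in names:

    import time

    def frozen_default(ts=time.time()):
        # ts was computed once, when the function was defined
        return ts

    def fresh_default(ts=None):
        if ts is None:
            ts = time.time()  # computed on every call, like the new cookies handling
        return ts

    print(frozen_default() == frozen_default())  # True: same definition-time value
    print(fresh_default() == fresh_default())    # False: recomputed per call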
g4f/Provider/H2o.py
@@ -1,78 +1,85 @@
-import json, uuid, requests
+import json
+import uuid
+from aiohttp import ClientSession
 
-from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from ..typing import AsyncGenerator
+from .base_provider import AsyncGeneratorProvider, format_prompt
 
 
-class H2o(BaseProvider):
+class H2o(AsyncGeneratorProvider):
     url = "https://gpt-gm.h2o.ai"
     working = True
     supports_stream = True
     model = "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1"
 
-    @staticmethod
-    def create_completion(
+    @classmethod
+    async def create_async_generator(
+        cls,
         model: str,
         messages: list[dict[str, str]],
-        stream: bool, **kwargs: Any) -> CreateResult:
-        conversation = ""
-        for message in messages:
-            conversation += "%s: %s\n" % (message["role"], message["content"])
-        conversation += "assistant: "
-
-        session = requests.Session()
-
-        headers = {"Referer": "https://gpt-gm.h2o.ai/r/jGfKSwU"}
-        data = {
-            "ethicsModalAccepted"               : "true",
-            "shareConversationsWithModelAuthors": "true",
-            "ethicsModalAcceptedAt"             : "",
-            "activeModel"                       : model,
-            "searchEnabled"                     : "true",
-        }
-        session.post("https://gpt-gm.h2o.ai/settings",
-                     headers=headers, data=data)
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncGenerator:
+        model = model if model else cls.model
 
         headers = {"Referer": "https://gpt-gm.h2o.ai/"}
-        data = {"model": model}
-
-        response = session.post("https://gpt-gm.h2o.ai/conversation",
-                                headers=headers, json=data).json()
-
-        if "conversationId" not in response:
-            return
+        async with ClientSession(
+            headers=headers
+        ) as session:
+            data = {
+                "ethicsModalAccepted": "true",
+                "shareConversationsWithModelAuthors": "true",
+                "ethicsModalAcceptedAt": "",
+                "activeModel": model,
+                "searchEnabled": "true",
+            }
+            async with session.post(
+                "https://gpt-gm.h2o.ai/settings",
+                proxy=proxy,
+                data=data
+            ) as response:
+                response.raise_for_status()
 
-        data = {
-            "inputs": conversation,
-            "parameters": {
-                "temperature"       : kwargs.get("temperature", 0.4),
-                "truncate"          : kwargs.get("truncate", 2048),
-                "max_new_tokens"    : kwargs.get("max_new_tokens", 1024),
-                "do_sample"         : kwargs.get("do_sample", True),
-                "repetition_penalty": kwargs.get("repetition_penalty", 1.2),
-                "return_full_text"  : kwargs.get("return_full_text", False),
-            },
-            "stream" : True,
-            "options": {
-                "id"           : kwargs.get("id", str(uuid.uuid4())),
-                "response_id"  : kwargs.get("response_id", str(uuid.uuid4())),
-                "is_retry"     : False,
-                "use_cache"    : False,
-                "web_search_id": "",
-            },
-        }
-
-        response = session.post(f"https://gpt-gm.h2o.ai/conversation/{response['conversationId']}",
-                                headers=headers, json=data)
-        response.raise_for_status()
-        response.encoding = "utf-8"
-        generated_text = response.text.replace("\n", "").split("data:")
-        generated_text = json.loads(generated_text[-1])
-
-        yield generated_text["generated_text"]
+            async with session.post(
+                "https://gpt-gm.h2o.ai/conversation",
+                proxy=proxy,
+                json={"model": model},
+            ) as response:
+                response.raise_for_status()
+                conversationId = (await response.json())["conversationId"]
+
+            data = {
+                "inputs": format_prompt(messages),
+                "parameters": {
+                    "temperature": 0.4,
+                    "truncate": 2048,
+                    "max_new_tokens": 1024,
+                    "do_sample": True,
+                    "repetition_penalty": 1.2,
+                    "return_full_text": False,
+                    **kwargs
+                },
+                "stream": True,
+                "options": {
+                    "id": str(uuid.uuid4()),
+                    "response_id": str(uuid.uuid4()),
+                    "is_retry": False,
+                    "use_cache": False,
+                    "web_search_id": "",
+                },
+            }
+            async with session.post(
+                f"https://gpt-gm.h2o.ai/conversation/{conversationId}",
+                proxy=proxy,
+                json=data
+            ) as response:
+                start = "data:"
+                async for line in response.content:
+                    line = line.decode("utf-8")
+                    if line and line.startswith(start):
+                        line = json.loads(line[len(start):-1])
+                        if not line["token"]["special"]:
+                            yield line["token"]["text"]
 
     @classmethod
     @property
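Since H2o is now an AsyncGeneratorProvider, tokens can be consumed as they arrive instead of waiting for the final SSE payload. A minimal sketch, assuming the package is importable and the default falcon-40b model:

    import asyncio
    import g4f

    async def stream_h2o():
        async for token in g4f.Provider.H2o.create_async_generator(
            model=None,  # falls back to cls.model
            messages=[{"role": "user", "content": "Hello"}],
        ):
            print(token, end="", flush=True)
        print()

    asyncio.run(stream_h2o())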
g4f/Provider/Hugchat.py (deleted)
@@ -1,65 +0,0 @@
-has_module = False
-try:
-    from hugchat.hugchat import ChatBot
-except ImportError:
-    has_module = False
-
-from .base_provider import BaseProvider, get_cookies
-from g4f.typing import CreateResult
-
-
-class Hugchat(BaseProvider):
-    url = "https://huggingface.co/chat/"
-    needs_auth = True
-    working = has_module
-    llms = ['OpenAssistant/oasst-sft-6-llama-30b-xor', 'meta-llama/Llama-2-70b-chat-hf']
-
-    @classmethod
-    def create_completion(
-        cls,
-        model: str,
-        messages: list[dict[str, str]],
-        stream: bool = False,
-        proxy: str = None,
-        cookies: str = get_cookies(".huggingface.co"), **kwargs) -> CreateResult:
-
-        bot = ChatBot(
-            cookies=cookies)
-
-        if proxy and "://" not in proxy:
-            proxy = f"http://{proxy}"
-            bot.session.proxies = {"http": proxy, "https": proxy}
-
-        if model:
-            try:
-                if not isinstance(model, int):
-                    model = cls.llms.index(model)
-                bot.switch_llm(model)
-            except:
-                raise RuntimeError(f"Model are not supported: {model}")
-
-        if len(messages) > 1:
-            formatted = "\n".join(
-                ["%s: %s" % (message["role"], message["content"]) for message in messages]
-            )
-            prompt = f"{formatted}\nAssistant:"
-        else:
-            prompt = messages.pop()["content"]
-
-        try:
-            yield bot.chat(prompt, **kwargs)
-        finally:
-            bot.delete_conversation(bot.current_conversation)
-            bot.current_conversation = ""
-            pass
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("proxy", "str"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/Provider/HuggingChat.py (new file, 107 lines)
@@ -0,0 +1,107 @@
+import json
+from aiohttp import ClientSession
+
+from ..typing import AsyncGenerator
+from .base_provider import AsyncGeneratorProvider, get_cookies, format_prompt
+
+
+class HuggingChat(AsyncGeneratorProvider):
+    url = "https://huggingface.co/chat/"
+    needs_auth = True
+    working = True
+    model = "OpenAssistant/oasst-sft-6-llama-30b-xor"
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        stream: bool = True,
+        proxy: str = None,
+        cookies: dict = None,
+        **kwargs
+    ) -> AsyncGenerator:
+        if not cookies:
+            cookies = get_cookies(".huggingface.co")
+        model = model if model else cls.model
+        if proxy and "://" not in proxy:
+            proxy = f"http://{proxy}"
+
+        headers = {
+            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
+        }
+        async with ClientSession(
+            cookies=cookies,
+            headers=headers
+        ) as session:
+            async with session.post("https://huggingface.co/chat/conversation", proxy=proxy, json={"model": model}) as response:
+                conversation_id = (await response.json())["conversationId"]
+
+            send = {
+                "inputs": format_prompt(messages),
+                "parameters": {
+                    "temperature": 0.2,
+                    "truncate": 1000,
+                    "max_new_tokens": 1024,
+                    "stop": ["</s>"],
+                    "top_p": 0.95,
+                    "repetition_penalty": 1.2,
+                    "top_k": 50,
+                    "return_full_text": False,
+                    **kwargs
+                },
+                "stream": stream,
+                "options": {
+                    "id": "9e9b8bc4-6604-40c6-994e-8eb78fa32e37",
+                    "response_id": "04ce2602-3bea-45e8-8efc-cef00680376a",
+                    "is_retry": False,
+                    "use_cache": False,
+                    "web_search_id": ""
+                }
+            }
+            start = "data:"
+            first = True
+            async with session.post(f"https://huggingface.co/chat/conversation/{conversation_id}", proxy=proxy, json=send) as response:
+                async for line in response.content:
+                    line = line.decode("utf-8")
+                    if not line:
+                        continue
+                    if not stream:
+                        try:
+                            data = json.loads(line)
+                        except json.decoder.JSONDecodeError:
+                            raise RuntimeError(f"No json: {line}")
+                        if "error" in data:
+                            raise RuntimeError(data["error"])
+                        elif isinstance(data, list):
+                            yield data[0]["generated_text"]
+                        else:
+                            raise RuntimeError(f"Response: {line}")
+                    elif line.startswith(start):
+                        line = json.loads(line[len(start):-1])
+                        if not line:
+                            continue
+                        if "token" not in line:
+                            raise RuntimeError(f"Response: {line}")
+                        if not line["token"]["special"]:
+                            if first:
+                                yield line["token"]["text"].lstrip()
+                                first = False
+                            else:
+                                yield line["token"]["text"]
+
+            async with session.delete(f"https://huggingface.co/chat/conversation/{conversation_id}", proxy=proxy) as response:
+                response.raise_for_status()
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("proxy", "str"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
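HuggingChat needs authenticated huggingface.co cookies; when none are passed, get_cookies(".huggingface.co") pulls them from the local browser. A hedged sketch of a non-streaming call, where the stream=False branch above yields the whole generated_text in one chunk (helper name is illustrative):

    import asyncio
    import g4f

    async def ask_huggingchat():
        chunks = []
        async for chunk in g4f.Provider.HuggingChat.create_async_generator(
            model=None,    # falls back to the OpenAssistant 30B default
            messages=[{"role": "user", "content": "Hello"}],
            stream=False,  # single JSON payload instead of SSE tokens
        ):
            chunks.append(chunk)
        return "".join(chunks)

    print(asyncio.run(ask_huggingchat()))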
g4f/Provider/Liaobots.py
@@ -1,59 +1,77 @@
-import uuid, requests
+import uuid
+import json
+from aiohttp import ClientSession
 
-from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from ..typing import AsyncGenerator
+from .base_provider import AsyncGeneratorProvider
 
+models = {
+    "gpt-4": {
+        "id": "gpt-4",
+        "name": "GPT-4",
+        "maxLength": 24000,
+        "tokenLimit": 8000,
+    },
+    "gpt-3.5-turbo": {
+        "id": "gpt-3.5-turbo",
+        "name": "GPT-3.5",
+        "maxLength": 12000,
+        "tokenLimit": 4000,
+    },
+    "gpt-3.5-turbo-16k": {
+        "id": "gpt-3.5-turbo-16k",
+        "name": "GPT-3.5-16k",
+        "maxLength": 48000,
+        "tokenLimit": 16000,
+    },
+}
 
-class Liaobots(BaseProvider):
-    url: str = "https://liaobots.com"
+class Liaobots(AsyncGeneratorProvider):
+    url = "https://liaobots.com"
     supports_stream = True
-    needs_auth = True
     supports_gpt_35_turbo = True
     supports_gpt_4 = True
+    _auth_code = None
 
-    @staticmethod
-    def create_completion(
+    @classmethod
+    async def create_async_generator(
+        cls,
         model: str,
         messages: list[dict[str, str]],
-        stream: bool, **kwargs: Any) -> CreateResult:
+        auth: str = None,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncGenerator:
+        if proxy and "://" not in proxy:
+            proxy = f"http://{proxy}"
         headers = {
-            "authority"   : "liaobots.com",
-            "content-type": "application/json",
-            "origin"      : "https://liaobots.com",
-            "referer"     : "https://liaobots.com/",
-            "user-agent"  : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
-            "x-auth-code" : str(kwargs.get("auth")),
+            "authority": "liaobots.com",
+            "content-type": "application/json",
+            "origin": "https://liaobots.com",
+            "referer": "https://liaobots.com/",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
         }
-
-        models = {
-            "gpt-4": {
-                "id": "gpt-4",
-                "name": "GPT-4",
-                "maxLength": 24000,
-                "tokenLimit": 8000,
-            },
-            "gpt-3.5-turbo": {
-                "id": "gpt-3.5-turbo",
-                "name": "GPT-3.5",
-                "maxLength": 12000,
-                "tokenLimit": 4000,
-            },
-        }
-        json_data = {
-            "conversationId": str(uuid.uuid4()),
-            "model"         : models[model],
-            "messages"      : messages,
-            "key"           : "",
-            "prompt"        : "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
-        }
-
-        response = requests.post("https://liaobots.com/api/chat",
-                                 headers=headers, json=json_data, stream=True)
-        response.raise_for_status()
-        for token in response.iter_content(chunk_size=2046):
-            yield token.decode("utf-8")
+        async with ClientSession(
+            headers=headers
+        ) as session:
+            model = model if model in models else "gpt-3.5-turbo"
+            auth_code = auth if isinstance(auth, str) else cls._auth_code
+            if not auth_code:
+                async with session.post("https://liaobots.com/api/user", proxy=proxy, json={"authcode": ""}) as response:
+                    response.raise_for_status()
+                    auth_code = cls._auth_code = json.loads((await response.text()))["authCode"]
+            data = {
+                "conversationId": str(uuid.uuid4()),
+                "model": models[model],
+                "messages": messages,
+                "key": "",
+                "prompt": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
+            }
+            async with session.post("https://liaobots.com/api/chat", proxy=proxy, json=data, headers={"x-auth-code": auth_code}) as response:
+                response.raise_for_status()
+                async for line in response.content:
+                    yield line.decode("utf-8")
 
     @classmethod
     @property
@@ -62,6 +80,7 @@ class Liaobots(BaseProvider):
             ("model", "str"),
             ("messages", "list[dict[str, str]]"),
             ("stream", "bool"),
+            ("proxy", "str"),
             ("auth", "str"),
         ]
         param = ", ".join([": ".join(p) for p in params])
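Liaobots no longer requires a caller-supplied auth code: when auth is absent, the provider requests one from /api/user and stores it on the class in _auth_code, so later calls in the same process reuse it. A sketch of the default path (helper name is illustrative):

    import asyncio
    import g4f

    async def ask_liaobots(prompt: str) -> str:
        chunks = []
        # No auth kwarg: the first call fetches and caches an auth code,
        # subsequent calls reuse Liaobots._auth_code automatically.
        async for chunk in g4f.Provider.Liaobots.create_async_generator(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": prompt}],
        ):
            chunks.append(chunk)
        return "".join(chunks)

    print(asyncio.run(ask_liaobots("Hello")))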
g4f/Provider/OpenAssistant.py (new file, 98 lines)
@@ -0,0 +1,98 @@
+import json
+from aiohttp import ClientSession
+
+from ..typing import Any, AsyncGenerator
+from .base_provider import AsyncGeneratorProvider, get_cookies, format_prompt
+
+
+class OpenAssistant(AsyncGeneratorProvider):
+    url = "https://open-assistant.io/chat"
+    needs_auth = True
+    working = True
+    model = "OA_SFT_Llama_30B_6"
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        proxy: str = None,
+        cookies: dict = None,
+        **kwargs: Any
+    ) -> AsyncGenerator:
+        if proxy and "://" not in proxy:
+            proxy = f"http://{proxy}"
+        if not cookies:
+            cookies = get_cookies("open-assistant.io")
+
+        headers = {
+            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
+        }
+        async with ClientSession(
+            cookies=cookies,
+            headers=headers
+        ) as session:
+            async with session.post("https://open-assistant.io/api/chat", proxy=proxy) as response:
+                chat_id = (await response.json())["id"]
+
+            data = {
+                "chat_id": chat_id,
+                "content": f"<s>[INST]\n{format_prompt(messages)}\n[/INST]",
+                "parent_id": None
+            }
+            async with session.post("https://open-assistant.io/api/chat/prompter_message", proxy=proxy, json=data) as response:
+                parent_id = (await response.json())["id"]
+
+            data = {
+                "chat_id": chat_id,
+                "parent_id": parent_id,
+                "model_config_name": model if model else cls.model,
+                "sampling_parameters": {
+                    "top_k": 50,
+                    "top_p": None,
+                    "typical_p": None,
+                    "temperature": 0.35,
+                    "repetition_penalty": 1.1111111111111112,
+                    "max_new_tokens": 1024,
+                    **kwargs
+                },
+                "plugins": []
+            }
+            async with session.post("https://open-assistant.io/api/chat/assistant_message", proxy=proxy, json=data) as response:
+                data = await response.json()
+                if "id" in data:
+                    message_id = data["id"]
+                elif "message" in data:
+                    raise RuntimeError(data["message"])
+                else:
+                    response.raise_for_status()
+
+            params = {
+                'chat_id': chat_id,
+                'message_id': message_id,
+            }
+            async with session.post("https://open-assistant.io/api/chat/events", proxy=proxy, params=params) as response:
+                start = "data: "
+                async for line in response.content:
+                    line = line.decode("utf-8")
+                    if line and line.startswith(start):
+                        line = json.loads(line[len(start):])
+                        if line["event_type"] == "token":
+                            yield line["text"]
+
+            params = {
+                'chat_id': chat_id,
+            }
+            async with session.delete("https://open-assistant.io/api/chat", proxy=proxy, params=params) as response:
+                response.raise_for_status()
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("proxy", "str"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/Provider/OpenaiChat.py
@@ -4,8 +4,11 @@ try:
 except ImportError:
     has_module = False
 
-from .base_provider import AsyncGeneratorProvider, get_cookies
+from .base_provider import AsyncGeneratorProvider, get_cookies, format_prompt
 from ..typing import AsyncGenerator
+from httpx import AsyncClient
+import json
 
 
 class OpenaiChat(AsyncGeneratorProvider):
     url = "https://chat.openai.com"
@@ -14,6 +17,7 @@ class OpenaiChat(AsyncGeneratorProvider):
     supports_gpt_35_turbo = True
     supports_gpt_4 = True
     supports_stream = True
+    _access_token = None
 
     @classmethod
     async def create_async_generator(
@@ -21,9 +25,9 @@ class OpenaiChat(AsyncGeneratorProvider):
         model: str,
         messages: list[dict[str, str]],
         proxy: str = None,
-        access_token: str = None,
+        access_token: str = _access_token,
         cookies: dict = None,
-        **kwargs
+        **kwargs: dict
     ) -> AsyncGenerator:
 
         config = {"access_token": access_token, "model": model}
@@ -37,21 +41,12 @@ class OpenaiChat(AsyncGeneratorProvider):
         )
 
         if not access_token:
             cookies = cookies if cookies else get_cookies("chat.openai.com")
-            response = await bot.session.get("https://chat.openai.com/api/auth/session", cookies=cookies)
-            access_token = response.json()["accessToken"]
-            bot.set_access_token(access_token)
-
-        if len(messages) > 1:
-            formatted = "\n".join(
-                ["%s: %s" % ((message["role"]).capitalize(), message["content"]) for message in messages]
-            )
-            prompt = f"{formatted}\nAssistant:"
-        else:
-            prompt = messages.pop()["content"]
+            cls._access_token = await get_access_token(bot.session, cookies)
+            bot.set_access_token(cls._access_token)
 
         returned = None
-        async for message in bot.ask(prompt):
+        async for message in bot.ask(format_prompt(messages)):
             message = message["message"]
             if returned:
                 if message.startswith(returned):
@@ -61,6 +56,9 @@ class OpenaiChat(AsyncGeneratorProvider):
             else:
                 yield message
             returned = message
 
+        await bot.delete_conversation(bot.conversation_id)
+
+
     @classmethod
     @property
@@ -73,3 +71,12 @@ class OpenaiChat(AsyncGeneratorProvider):
         ]
         param = ", ".join([": ".join(p) for p in params])
         return f"g4f.provider.{cls.__name__} supports: ({param})"
+
+
+async def get_access_token(session: AsyncClient, cookies: dict):
+    response = await session.get("https://chat.openai.com/api/auth/session", cookies=cookies)
+    response.raise_for_status()
+    try:
+        return response.json()["accessToken"]
+    except json.decoder.JSONDecodeError:
+        raise RuntimeError(f"Response: {response.text}")
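The new _access_token class attribute caches the token fetched by get_access_token. One caveat worth noting: the `access_token: str = _access_token` default is evaluated once, when the class body is defined, so at call time it is always None; the cached value is only reachable through `cls._access_token`, and callers who want to reuse a token across calls can pass it explicitly. A hedged sketch, assuming the optional chatbot dependency is installed and chat.openai.com cookies are available locally:

    import asyncio
    import g4f

    async def ask_openai_chat():
        # First call: no token supplied, so one is fetched from the session
        # cookies and stored on the class as OpenaiChat._access_token.
        first = await g4f.Provider.OpenaiChat.create_async(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hello"}],
        )
        # Later calls can reuse the cached token explicitly.
        second = await g4f.Provider.OpenaiChat.create_async(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "And again"}],
            access_token=g4f.Provider.OpenaiChat._access_token,
        )
        return first, second

    print(asyncio.run(ask_openai_chat()))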
g4f/Provider/You.py
@@ -1,55 +1,37 @@
-import urllib.parse, json
+from aiohttp import ClientSession
+import json
 
-from curl_cffi import requests
-from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from ..typing import AsyncGenerator
+from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
 
 
-class You(BaseProvider):
+class You(AsyncGeneratorProvider):
     url = "https://you.com"
     working = True
     supports_gpt_35_turbo = True
+    supports_stream = True
 
     @staticmethod
-    def create_completion(
+    async def create_async_generator(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool, **kwargs: Any) -> CreateResult:
-
-        url_param = _create_url_param(messages, kwargs.get("history", []))
-        headers = _create_header()
-
-        response = requests.get(f"https://you.com/api/streamingSearch?{url_param}",
-                                headers=headers, impersonate="chrome107")
-
-        response.raise_for_status()
-
-        start = 'data: {"youChatToken": '
-        for line in response.content.splitlines():
-            line = line.decode('utf-8')
-            if line.startswith(start):
-                yield json.loads(line[len(start): -1])
-
-
-def _create_url_param(messages: list[dict[str, str]], history: list[dict[str, str]]):
-    prompt = ""
-    for message in messages:
-        prompt += "%s: %s\n" % (message["role"], message["content"])
-    prompt += "assistant:"
-    chat = _convert_chat(history)
-    param = {"q": prompt, "domain": "youchat", "chat": chat}
-    return urllib.parse.urlencode(param)
-
-
-def _convert_chat(messages: list[dict[str, str]]):
-    message_iter = iter(messages)
-    return [
-        {"question": user["content"], "answer": assistant["content"]}
-        for user, assistant in zip(message_iter, message_iter)
-    ]
-
-
-def _create_header():
-    return {
-        "accept": "text/event-stream",
-        "referer": "https://you.com/search?fromSearchBar=true&tbm=youchat",
-    }
+        cookies: dict = None,
+        **kwargs,
+    ) -> AsyncGenerator:
+        if not cookies:
+            cookies = get_cookies("you.com")
+        headers = {
+            "Accept": "text/event-stream",
+            "Referer": "https://you.com/search?fromSearchBar=true&tbm=youchat",
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0"
+        }
+        async with ClientSession(headers=headers, cookies=cookies) as session:
+            async with session.get(
+                "https://you.com/api/streamingSearch",
+                params={"q": format_prompt(messages), "domain": "youchat", "chat": ""},
+            ) as response:
+                start = 'data: {"youChatToken": '
+                async for line in response.content:
+                    line = line.decode('utf-8')
+                    if line.startswith(start):
+                        yield json.loads(line[len(start): -2])
g4f/Provider/Yqcloud.py
@@ -1,29 +1,27 @@
-import requests
+from aiohttp import ClientSession
 
-from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from .base_provider import AsyncProvider, format_prompt
 
 
-class Yqcloud(BaseProvider):
+class Yqcloud(AsyncProvider):
     url = "https://chat9.yqcloud.top/"
     working = True
     supports_gpt_35_turbo = True
 
     @staticmethod
-    def create_completion(
+    async def create_async(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool, **kwargs: Any) -> CreateResult:
-        headers = _create_header()
-        payload = _create_payload(messages)
-
-        response = requests.post("https://api.aichatos.cloud/api/generateStream",
-                                 headers=headers, json=payload)
-        response.raise_for_status()
-        response.encoding = 'utf-8'
-        yield response.text
+        proxy: str = None,
+        **kwargs,
+    ) -> str:
+        async with ClientSession(
+            headers=_create_header()
+        ) as session:
+            payload = _create_payload(messages)
+            async with session.post("https://api.aichatos.cloud/api/generateStream", proxy=proxy, json=payload) as response:
+                response.raise_for_status()
+                return await response.text()
 
 
 def _create_header():
@@ -35,15 +33,11 @@ def _create_header():
 
 
 def _create_payload(messages: list[dict[str, str]]):
-    prompt = ""
-    for message in messages:
-        prompt += "%s: %s\n" % (message["role"], message["content"])
-    prompt += "assistant:"
-
     return {
-        "prompt"        : prompt,
-        "network"       : True,
-        "system"        : "",
+        "prompt": format_prompt(messages),
+        "network": True,
+        "system": "",
         "withoutContext": False,
-        "stream"        : False,
-    }
+        "stream": False,
+        "userId": "#/chat/1693025544336"
+    }
g4f/Provider/__init__.py
@@ -13,11 +13,12 @@ from .EasyChat import EasyChat
 from .Forefront import Forefront
 from .GetGpt import GetGpt
 from .H2o import H2o
-from .Hugchat import Hugchat
+from .HuggingChat import HuggingChat
 from .Liaobots import Liaobots
 from .Lockchat import Lockchat
 from .Opchatgpts import Opchatgpts
 from .OpenaiChat import OpenaiChat
+from .OpenAssistant import OpenAssistant
 from .Raycast import Raycast
 from .Theb import Theb
 from .Vercel import Vercel
@@ -48,12 +49,13 @@ __all__ = [
     'Forefront',
     'GetGpt',
     'H2o',
-    'Hugchat',
+    'HuggingChat',
     'Liaobots',
     'Lockchat',
     'Opchatgpts',
     'Raycast',
     'OpenaiChat',
+    'OpenAssistant',
     'Theb',
     'Vercel',
     'Wewordle',
g4f/Provider/base_provider.py
@@ -4,8 +4,7 @@ from ..typing import Any, CreateResult, AsyncGenerator, Union
 
 import browser_cookie3
 import asyncio
-from time import time
-import math
 
 
 class BaseProvider(ABC):
     url: str
@@ -48,6 +47,17 @@ def get_cookies(cookie_domain: str) -> dict:
     return _cookies[cookie_domain]
 
 
+def format_prompt(messages: list[dict[str, str]], add_special_tokens=False):
+    if add_special_tokens or len(messages) > 1:
+        formatted = "\n".join(
+            ["%s: %s" % ((message["role"]).capitalize(), message["content"]) for message in messages]
+        )
+        return f"{formatted}\nAssistant:"
+    else:
+        return messages.pop()["content"]
+
+
 class AsyncProvider(BaseProvider):
     @classmethod
     def create_completion(
@@ -72,20 +82,19 @@ class AsyncGeneratorProvider(AsyncProvider):
         cls,
         model: str,
         messages: list[dict[str, str]],
-        stream: bool = True, **kwargs: Any) -> CreateResult:
-
-        if stream:
-            yield from run_generator(cls.create_async_generator(model, messages, **kwargs))
-        else:
-            yield from AsyncProvider.create_completion(cls=cls, model=model, messages=messages, **kwargs)
+        stream: bool = True,
+        **kwargs
+    ) -> CreateResult:
+        yield from run_generator(cls.create_async_generator(model, messages, stream=stream, **kwargs))
 
     @classmethod
     async def create_async(
         cls,
         model: str,
-        messages: list[dict[str, str]], **kwargs: Any) -> str:
-        chunks = [chunk async for chunk in cls.create_async_generator(model, messages, **kwargs)]
+        messages: list[dict[str, str]],
+        **kwargs
+    ) -> str:
+        chunks = [chunk async for chunk in cls.create_async_generator(model, messages, stream=False, **kwargs)]
         if chunks:
             return "".join(chunks)
@@ -93,8 +102,9 @@ class AsyncGeneratorProvider(AsyncProvider):
     @abstractmethod
     def create_async_generator(
         model: str,
-        messages: list[dict[str, str]]) -> AsyncGenerator:
+        messages: list[dict[str, str]],
+        **kwargs
+    ) -> AsyncGenerator:
         raise NotImplementedError()
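format_prompt centralizes the role-prefixed flattening that several providers previously reimplemented. Its two paths, shown on toy data and assuming the module path g4f.Provider.base_provider as in the imports above; note that the single-message path pops from the list it is given:

    from g4f.Provider.base_provider import format_prompt

    # Single message: returned as-is (and removed from the list via pop)
    print(format_prompt([{"role": "user", "content": "Hi"}]))
    # -> Hi

    # Multi-turn: "Role: content" lines, capitalized, plus a trailing "Assistant:"
    print(format_prompt([
        {"role": "user", "content": "Hi"},
        {"role": "assistant", "content": "Hello!"},
        {"role": "user", "content": "How are you?"},
    ]))
    # -> User: Hi
    #    Assistant: Hello!
    #    User: How are you?
    #    Assistant: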
g4f/models.py
@@ -7,6 +7,13 @@ class Model:
     base_provider: str
     best_provider: type[BaseProvider]
 
+# Config for HuggingChat, OpenAssistant
+# Works for Liaobots, H2o, OpenaiChat, Yqcloud, You
+default = Model(
+    name="",
+    base_provider="huggingface",
+    best_provider=H2o,
+)
+
 # GPT-3.5 / GPT-4
 gpt_35_turbo = Model(
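The new default entry gives callers a model-agnostic fallback served by H2o; per the comments it doubles as a config for HuggingChat and OpenAssistant and also works with Liaobots, OpenaiChat, Yqcloud and You. Illustrative use through the high-level API, assuming ChatCompletion.create resolves Model objects via best_provider, as the updated test script does with g4f.models.gpt_35_turbo:

    import g4f

    # Hedged sketch: routes through models.default.best_provider (H2o)
    response = g4f.ChatCompletion.create(
        model=g4f.models.default,
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response)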
testing/log_time.py (new file, 25 lines)
@@ -0,0 +1,25 @@
+from time import time
+
+
+async def log_time_async(method: callable, **kwargs):
+    start = time()
+    result = await method(**kwargs)
+    secs = f"{round(time() - start, 2)} secs"
+    if result:
+        return " ".join([result, secs])
+    return secs
+
+
+def log_time_yield(method: callable, **kwargs):
+    start = time()
+    result = yield from method(**kwargs)
+    yield f" {round(time() - start, 2)} secs"
+
+
+def log_time(method: callable, **kwargs):
+    start = time()
+    result = method(**kwargs)
+    secs = f"{round(time() - start, 2)} secs"
+    if result:
+        return " ".join([result, secs])
+    return secs
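The three helpers wrap a coroutine, a generator, and a plain callable respectively, appending the elapsed seconds to the output. A small self-contained example of the generator variant; the stub provider is hypothetical, standing in for provider.create_completion:

    from testing.log_time import log_time_yield

    def fake_provider(**kwargs):
        # Hypothetical stand-in that yields response chunks
        yield "Hello"
        yield " world"

    for chunk in log_time_yield(fake_provider):
        print(chunk, end="")
    print()
    # -> Hello world 0.0 secs   (elapsed time will vary)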
testing/ (provider test script)
@@ -1,83 +1,25 @@
 import sys
 from pathlib import Path
 import asyncio
-from time import time
 
 sys.path.append(str(Path(__file__).parent.parent))
 
 import g4f
+from testing.log_time import log_time, log_time_async, log_time_yield
 
-providers = [g4f.Provider.OpenaiChat, g4f.Provider.Bard, g4f.Provider.Bing]
-
-# Async support
-async def log_time_async(method: callable, **kwargs):
-    start = time()
-    result = await method(**kwargs)
-    secs = f"{round(time() - start, 2)} secs"
-    if result:
-        return " ".join([result, secs])
-    return secs
-
-def log_time_yield(method: callable, **kwargs):
-    start = time()
-    result = yield from method(**kwargs)
-    yield f" {round(time() - start, 2)} secs"
-
-def log_time(method: callable, **kwargs):
-    start = time()
-    result = method(**kwargs)
-    secs = f"{round(time() - start, 2)} secs"
-    if result:
-        return " ".join([result, secs])
-    return secs
-
-async def run_async():
-    responses = []
-    for provider in providers:
-        responses.append(log_time_async(
-            provider.create_async,
-            model=None,
-            messages=[{"role": "user", "content": "Hello"}],
-            log_time=True
-        ))
-    responses = await asyncio.gather(*responses)
-    for idx, provider in enumerate(providers):
-        print(f"{provider.__name__}:", responses[idx])
-print("Async Total:", asyncio.run(log_time_async(run_async)))
-
-# Streaming support:
-def run_stream():
-    for provider in providers:
-        print(f"{provider.__name__}: ", end="")
-        for response in log_time_yield(
-            provider.create_completion,
-            model=None,
-            messages=[{"role": "user", "content": "Hello"}],
-        ):
-            print(response, end="")
-        print()
-print("Stream Total:", log_time(run_stream))
-
-# No streaming support:
-def create_completion():
-    for provider in providers:
-        print(f"{provider.__name__}:", end=" ")
-        for response in log_time_yield(
-            g4f.Provider.Bard.create_completion,
-            model=None,
-            messages=[{"role": "user", "content": "Hello"}],
-        ):
-            print(response, end="")
-        print()
-print("No Stream Total:", log_time(create_completion))
-
-for response in g4f.Provider.Hugchat.create_completion(
-    model=None,
-    messages=[{"role": "user", "content": "Hello, tell about you."}],
-):
-    print("Hugchat:", response)
-
-"""
+_providers = [
+    g4f.Provider.H2o,
+    g4f.Provider.You,
+    g4f.Provider.HuggingChat,
+    g4f.Provider.OpenAssistant,
+    g4f.Provider.Bing,
+    g4f.Provider.Bard
+]
+
+_instruct = "Hello, tell about you in one sentence."
+
+_example = """
 OpenaiChat: Hello! How can I assist you today? 2.0 secs
 Bard: Hello! How can I help you today? 3.44 secs
 Bing: Hello, this is Bing. How can I help? 😊 4.14 secs
@@ -92,4 +34,63 @@ OpenaiChat: Hello! How can I help you today? 3.28 secs
 Bard: Hello there! How can I help you today? 3.58 secs
 Bing: Hello! How can I help you today? 3.28 secs
 No Stream Total: 10.14 secs
 """
+
+print("Yqcloud:", end="")
+for response in log_time_yield(
+    g4f.ChatCompletion.create,
+    model=g4f.models.gpt_35_turbo,
+    messages=[{"role": "user", "content": _instruct}],
+    provider=g4f.Provider.Yqcloud,
+    #cookies=g4f.get_cookies(".huggingface.co"),
+    stream=True,
+    auth=True
+):
+    print(response, end="")
+print()
+print()
+
+
+async def run_async():
+    responses = [
+        log_time_async(
+            provider.create_async,
+            model=None,
+            messages=[{"role": "user", "content": _instruct}],
+        )
+        for provider in _providers
+    ]
+    responses = await asyncio.gather(*responses)
+    for idx, provider in enumerate(_providers):
+        print(f"{provider.__name__}:", responses[idx])
+print("Async Total:", asyncio.run(log_time_async(run_async)))
+print()
+
+
+def run_stream():
+    for provider in _providers:
+        print(f"{provider.__name__}: ", end="")
+        for response in log_time_yield(
+            provider.create_completion,
+            model=None,
+            messages=[{"role": "user", "content": _instruct}],
+        ):
+            print(response, end="")
+        print()
+print("Stream Total:", log_time(run_stream))
+print()
+
+
+def create_no_stream():
+    for provider in _providers:
+        print(f"{provider.__name__}:", end=" ")
+        for response in log_time_yield(
+            provider.create_completion,
+            model=None,
+            messages=[{"role": "user", "content": _instruct}],
+            stream=False
+        ):
+            print(response, end="")
+        print()
+print("No Stream Total:", log_time(create_no_stream))
+print()