mirror of https://github.com/xtekky/gpt4free.git
synced 2025-12-15 14:51:19 -08:00
add cool testing for gpt-3.5 and gpt-4
This commit is contained in:
parent 119817c963
commit 58c45522ea

5 changed files with 112 additions and 30 deletions
etc/testing/test_all.py (new file, +67)

@@ -0,0 +1,67 @@
+import asyncio
+import sys
+from pathlib import Path
+sys.path.append(str(Path(__file__).parent.parent.parent))
+
+import g4f
+
+
+async def test(model: g4f.Model):
+    try:
+        try:
+            for response in g4f.ChatCompletion.create(
+                model=model,
+                messages=[{"role": "user", "content": "write a poem about a tree"}],
+                temperature=0.1,
+                stream=True
+            ):
+                print(response, end="")
+
+            print()
+        except:
+            for response in await g4f.ChatCompletion.create_async(
+                model=model,
+                messages=[{"role": "user", "content": "write a poem about a tree"}],
+                temperature=0.1,
+                stream=True
+            ):
+                print(response, end="")
+
+            print()
+
+        return True
+    except Exception as e:
+        print(model.name, "not working:", e)
+        print(e.__traceback__.tb_next)
+        return False
+
+
+async def start_test():
+    models_to_test = [
+        # GPT-3.5 4K Context
+        g4f.models.gpt_35_turbo,
+        g4f.models.gpt_35_turbo_0613,
+
+        # GPT-3.5 16K Context
+        g4f.models.gpt_35_turbo_16k,
+        g4f.models.gpt_35_turbo_16k_0613,
+
+        # GPT-4 8K Context
+        g4f.models.gpt_4,
+        g4f.models.gpt_4_0613,
+
+        # GPT-4 32K Context
+        g4f.models.gpt_4_32k,
+        g4f.models.gpt_4_32k_0613,
+    ]
+
+    models_working = []
+
+    for model in models_to_test:
+        if await test(model):
+            models_working.append(model.name)
+
+    print("working models:", models_working)
+
+
+asyncio.run(start_test())
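The script tries the synchronous streaming call first and falls back to the async API if it raises. A minimal self-contained sketch of that pattern, with hypothetical stand-ins (sync_stream, async_stream) in place of the g4f calls:

import asyncio

def sync_stream():
    # stand-in for g4f.ChatCompletion.create(..., stream=True);
    # raising simulates a provider without sync streaming support
    raise NotImplementedError("no sync streaming")
    yield  # unreachable, but makes this function a generator

async def async_stream():
    # stand-in for the async completion path
    for token in ("a ", "poem ", "about ", "a ", "tree"):
        yield token

async def run():
    try:
        for token in sync_stream():
            print(token, end="")
    except Exception:
        async for token in async_stream():
            print(token, end="")
    print()

asyncio.run(run())

(test_all.py itself iterates the awaited result of create_async directly instead of using async for; the fallback structure is the same.)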
@@ -7,10 +7,10 @@ import g4f, asyncio

 print("create:", end=" ", flush=True)
 for response in g4f.ChatCompletion.create(
-    model=g4f.models.default,
-    provider=g4f.Provider.GptForLove,
-    messages=[{"role": "user", "content": "send a bunch of emojis. i want to test something"}],
-    temperature=0.0,
+    model=g4f.models.gpt_4_32k_0613,
+    provider=g4f.Provider.Aivvm,
+    messages=[{"role": "user", "content": "write a poem about a tree"}],
+    temperature=0.1,
     stream=True
 ):
     print(response, end="", flush=True)
@@ -3,6 +3,7 @@ import requests

 from .base_provider import BaseProvider
 from ..typing import CreateResult
+from json import dumps

 # to recreate this easily, send a post request to https://chat.aivvm.com/api/models
 models = {
@@ -35,20 +36,6 @@ class Aivvm(BaseProvider):
         elif model not in models:
             raise ValueError(f"Model is not supported: {model}")

-        headers = {
-            "accept"            : "*/*",
-            "accept-language"   : "hu-HU,hu;q=0.9,en-US;q=0.8,en;q=0.7",
-            "content-type"      : "application/json",
-            "sec-ch-ua"         : "\"Kuki\";v=\"116\", \"Not)A;Brand\";v=\"24\", \"Pici Pocoro\";v=\"102\"",
-            "sec-ch-ua-mobile"  : "?0",
-            "sec-ch-ua-platform": "\"Bandóz\"",
-            "sec-fetch-dest"    : "empty",
-            "sec-fetch-mode"    : "cors",
-            "sec-fetch-site"    : "same-origin",
-            "Referer"           : "https://chat.aivvm.com/",
-            "Referrer-Policy"   : "same-origin",
-        }
-
         json_data = {
             "model"    : models[model],
             "messages" : messages,
@@ -57,12 +44,29 @@ class Aivvm(BaseProvider):
             "temperature" : kwargs.get("temperature", 0.7)
         }

-        response = requests.post(
-            "https://chat.aivvm.com/api/chat", headers=headers, json=json_data, stream=True)
+        headers = {
+            "accept"            : "text/event-stream",
+            "accept-language"   : "en-US,en;q=0.9",
+            "content-type"      : "application/json",
+            "content-length"    : str(len(dumps(json_data))),
+            "sec-ch-ua"         : "\"Chrome\";v=\"117\", \"Not;A=Brand\";v=\"8\", \"Chromium\";v=\"117\"",
+            "sec-ch-ua-mobile"  : "?0",
+            "sec-ch-ua-platform": "\"Windows\"",
+            "sec-fetch-dest"    : "empty",
+            "sec-fetch-mode"    : "cors",
+            "sec-fetch-site"    : "same-origin",
+            "sec-gpc"           : "1",
+            "referrer"          : "https://chat.aivvm.com/"
+        }
+
+        response = requests.post("https://chat.aivvm.com/api/chat", headers=headers, json=json_data, stream=True)
         response.raise_for_status()

-        for chunk in response.iter_content(chunk_size=None):
-            yield chunk.decode('utf-8')
+        for chunk in response.iter_content():
+            try:
+                yield chunk.decode("utf-8")
+            except UnicodeDecodeError:
+                yield chunk.decode("unicode-escape")

     @classmethod
     @property
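The rewritten request headers derive content-length from the serialized body, which is why dumps is now imported. A small sketch of that computation with a hypothetical payload; note that len() counts characters, which matches the byte count only for ASCII-only JSON:

from json import dumps

json_data = {"model": "gpt-4", "messages": [{"role": "user", "content": "hi"}]}
body = dumps(json_data)

headers = {
    "content-type"  : "application/json",
    "content-length": str(len(body)),  # char count == byte count only for ASCII
}
print(headers["content-length"])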
@@ -74,4 +78,4 @@ class Aivvm(BaseProvider):
             ('temperature', 'float'),
         ]
         param = ', '.join([': '.join(p) for p in params])
         return f'g4f.provider.{cls.__name__} supports: ({param})'
@@ -65,7 +65,10 @@ f = function () {
         response.raise_for_status()
         async for stream in response.content.iter_any():
             if stream:
-                yield stream.decode()
+                try:
+                    yield stream.decode("utf-8")
+                except UnicodeDecodeError:
+                    yield stream.decode("unicode-escape")


 def get_api_key(user_agent: str):
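Both providers now guard the chunk decode the same way. A self-contained sketch of the fallback, using made-up byte chunks in place of real HTTP response data:

def decode_chunks(chunks):
    # chunks: an iterable of bytes, e.g. from response.iter_content()
    for chunk in chunks:
        try:
            yield chunk.decode("utf-8")
        except UnicodeDecodeError:
            # e.g. a chunk cut mid-way through a multi-byte character;
            # unicode-escape never raises here, at the cost of possible mojibake
            yield chunk.decode("unicode-escape")

# sample input: plain ASCII, then an incomplete UTF-8 sequence
for text in decode_chunks([b"hello", b"\xe2\x82"]):
    print(repr(text))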
@@ -23,6 +23,7 @@ from .Provider import (
     GptGod,
     AiAsk,
     GptGo,
+    Aivvm,
     Ylokh,
     Bard,
     Aibn,
@@ -72,7 +73,9 @@ gpt_35_turbo = Model(
 gpt_4 = Model(
     name = 'gpt-4',
     base_provider = 'openai',
-    best_provider = Bing
+    best_provider = RetryProvider([
+        Aivvm, Bing
+    ])
 )

 # Bard
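gpt_4's best_provider becomes a retry chain. A simplified stand-in illustrating the first-success-wins behaviour (not g4f's actual RetryProvider implementation):

class SimpleRetry:
    # illustrative stand-in, not g4f.Provider.RetryProvider
    def __init__(self, providers):
        self.providers = providers

    def create_completion(self, **kwargs):
        errors = {}
        for provider in self.providers:
            try:
                return provider.create_completion(**kwargs)  # first success wins
            except Exception as e:
                errors[provider.__class__.__name__] = e      # record and try next
        raise RuntimeError(f"all providers failed: {errors}")

class Flaky:
    def create_completion(self, **kwargs):
        raise ConnectionError("provider down")

class Working:
    def create_completion(self, **kwargs):
        return "ok"

print(SimpleRetry([Flaky(), Working()]).create_completion())  # -> ok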
@@ -165,26 +168,31 @@ gpt_35_turbo_16k = Model(

 gpt_35_turbo_16k_0613 = Model(
     name = 'gpt-3.5-turbo-16k-0613',
-    base_provider = 'openai')
+    base_provider = 'openai',
+    best_provider = Aivvm)

 gpt_35_turbo_0613 = Model(
     name = 'gpt-3.5-turbo-0613',
-    base_provider = 'openai'
+    base_provider = 'openai',
+    best_provider = Aivvm
 )

 gpt_4_0613 = Model(
     name = 'gpt-4-0613',
-    base_provider = 'openai'
+    base_provider = 'openai',
+    best_provider = Aivvm
 )

 gpt_4_32k = Model(
     name = 'gpt-4-32k',
-    base_provider = 'openai'
+    base_provider = 'openai',
+    best_provider = Aivvm
 )

 gpt_4_32k_0613 = Model(
     name = 'gpt-4-32k-0613',
-    base_provider = 'openai'
+    base_provider = 'openai',
+    best_provider = Aivvm
 )

 text_ada_001 = Model(