Comprehensive Update: New Providers, Model Enhancements, and Functionality Improvements

kqlio67 2024-07-25 09:21:55 +03:00
parent f3f3ed5e9c
commit 29c13e26cd
15 changed files with 486 additions and 161 deletions

g4f/Provider/Allyfy.py Normal file

@@ -0,0 +1,71 @@
from __future__ import annotations
from aiohttp import ClientSession
import json
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt
class Allyfy(AsyncGeneratorProvider):
url = "https://chatbot.allyfy.chat"
api_endpoint = "/api/v1/message/stream/super/chat"
working = True
supports_gpt_35_turbo = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"accept": "text/event-stream",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/json;charset=utf-8",
"dnt": "1",
"origin": "https://www.allyfy.chat",
"priority": "u=1, i",
"referer": "https://www.allyfy.chat/",
"referrer": "https://www.allyfy.chat",
'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="126"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
}
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
data = {
"messages": [{"content": prompt, "role": "user"}],
"content": prompt,
"baseInfo": {
"clientId": "q08kdrde1115003lyedfoir6af0yy531",
"pid": "38281",
"channelId": "100000",
"locale": "en-US",
"localZone": 180,
"packageName": "com.cch.allyfy.webh",
}
}
async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
response.raise_for_status()
full_response = []
async for line in response.content:
line = line.decode().strip()
if line.startswith("data:"):
data_content = line[5:]
if data_content == "[DONE]":
break
try:
json_data = json.loads(data_content)
if "content" in json_data:
full_response.append(json_data["content"])
except json.JSONDecodeError:
continue
yield "".join(full_response)

g4f/Provider/ChatGot.py Normal file

@@ -0,0 +1,75 @@
from __future__ import annotations
import time
from hashlib import sha256
from aiohttp import BaseConnector, ClientSession
from ..errors import RateLimitError
from ..requests import raise_for_status
from ..requests.aiohttp import get_connector
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
class ChatGot(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.chatgot.one/"
working = True
supports_message_history = True
default_model = 'gemini-pro'
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
connector: BaseConnector = None,
**kwargs,
) -> AsyncResult:
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:122.0) Gecko/20100101 Firefox/122.0",
"Accept": "*/*",
"Accept-Language": "en-US,en;q=0.5",
"Accept-Encoding": "gzip, deflate, br",
"Content-Type": "text/plain;charset=UTF-8",
"Referer": f"{cls.url}/",
"Origin": cls.url,
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"Connection": "keep-alive",
"TE": "trailers",
}
async with ClientSession(
connector=get_connector(connector, proxy), headers=headers
) as session:
timestamp = int(time.time() * 1e3)
data = {
"messages": [
{
"role": "model" if message["role"] == "assistant" else "user",
"parts": [{"text": message["content"]}],
}
for message in messages
],
"time": timestamp,
"pass": None,
"sign": generate_signature(timestamp, messages[-1]["content"]),
}
async with session.post(
f"{cls.url}/api/generate", json=data, proxy=proxy
) as response:
if response.status == 500:
if "Quota exceeded" in await response.text():
raise RateLimitError(
f"Response {response.status}: Rate limit reached"
)
await raise_for_status(response)
async for chunk in response.content.iter_any():
yield chunk.decode(errors="ignore")
def generate_signature(time: int, text: str, secret: str = ""):
message = f"{time}:{text}:{secret}"
return sha256(message.encode()).hexdigest()
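The request signature is a plain SHA-256 digest over "timestamp:text:secret", with an empty secret by default. A standalone sketch of the same scheme:

import time
from hashlib import sha256

timestamp = int(time.time() * 1e3)  # milliseconds, as in create_async_generator
text = "Hello"
signature = sha256(f"{timestamp}:{text}:".encode()).hexdigest()  # secret defaults to ""
print(signature)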

g4f/Provider/Chatgpt4Online.py

@@ -1,22 +1,18 @@
from __future__ import annotations
-import re
import json
from aiohttp import ClientSession
-from ..typing import Messages, AsyncResult
-from ..requests import get_args_from_browser
-from ..webdriver import WebDriver
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
-from .helper import get_random_string
+from .helper import format_prompt
class Chatgpt4Online(AsyncGeneratorProvider):
    url = "https://chatgpt4online.org"
-    supports_message_history = True
-    supports_gpt_35_turbo = True
+    api_endpoint = "/wp-json/mwai-ui/v1/chats/submit"
    working = True
-    _wpnonce = None
-    _context_id = None
+    supports_gpt_4 = True
@classmethod
async def create_async_generator(
@@ -24,49 +20,52 @@ class Chatgpt4Online(AsyncGeneratorProvider):
        model: str,
        messages: Messages,
        proxy: str = None,
-        webdriver: WebDriver = None,
        **kwargs
    ) -> AsyncResult:
-        args = get_args_from_browser(f"{cls.url}/chat/", webdriver, proxy=proxy)
-        async with ClientSession(**args) as session:
-            if not cls._wpnonce:
-                async with session.get(f"{cls.url}/chat/", proxy=proxy) as response:
-                    response.raise_for_status()
-                    response = await response.text()
-                    result = re.search(r'restNonce":"(.*?)"', response)
-                    if result:
-                        cls._wpnonce = result.group(1)
-                    else:
-                        raise RuntimeError("No nonce found")
-                    result = re.search(r'contextId":(.*?),', response)
-                    if result:
-                        cls._context_id = result.group(1)
-                    else:
-                        raise RuntimeError("No contextId found")
-            data = {
-                "botId":"default",
-                "customId":None,
-                "session":"N/A",
-                "chatId":get_random_string(11),
-                "contextId":cls._context_id,
-                "messages":messages[:-1],
-                "newMessage":messages[-1]["content"],
-                "newImageId":None,
-                "stream":True
-            }
+        headers = {
+            "accept": "text/event-stream",
+            "accept-language": "en-US,en;q=0.9",
+            "content-type": "application/json",
+            "dnt": "1",
+            "origin": cls.url,
+            "priority": "u=1, i",
+            "referer": f"{cls.url}/",
+            "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Linux"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
+            "x-wp-nonce": "d9505e9877",
+        }
-            async with session.post(
-                f"{cls.url}/wp-json/mwai-ui/v1/chats/submit",
-                json=data,
-                proxy=proxy,
-                headers={"x-wp-nonce": cls._wpnonce}
-            ) as response:
+        async with ClientSession(headers=headers) as session:
+            prompt = format_prompt(messages)
+            data = {
+                "botId": "default",
+                "newMessage": prompt,
+                "stream": True,
+            }
+            async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
                response.raise_for_status()
-            async for line in response.content:
-                if line.startswith(b"data: "):
-                    line = json.loads(line[6:])
-                    if "type" not in line:
-                        raise RuntimeError(f"Response: {line}")
-                    elif line["type"] == "live":
-                        yield line["data"]
-                    elif line["type"] == "end":
+                full_response = ""
+                async for chunk in response.content.iter_any():
+                    if chunk:
+                        try:
+                            # Extract the JSON object from the chunk
+                            for line in chunk.decode().splitlines():
+                                if line.startswith("data: "):
+                                    json_data = json.loads(line[6:])
+                                    if json_data["type"] == "live":
+                                        full_response += json_data["data"]
+                                    elif json_data["type"] == "end":
+                                        final_data = json.loads(json_data["data"])
+                                        full_response = final_data["reply"]
+                                        break
+                        except json.JSONDecodeError:
+                            continue
+                yield full_response
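The rewritten provider reads the submit endpoint as a server-sent-event stream: "live" events carry incremental text, and the final "end" event repeats the whole reply as embedded JSON. A sketch of that parsing logic in isolation, with a made-up sample stream:

import json

sample = (
    'data: {"type": "live", "data": "Hel"}\n'
    'data: {"type": "live", "data": "lo"}\n'
    'data: {"type": "end", "data": "{\\"reply\\": \\"Hello\\"}"}'
)
full_response = ""
for line in sample.splitlines():
    if line.startswith("data: "):
        event = json.loads(line[6:])
        if event["type"] == "live":
            full_response += event["data"]  # append streamed fragment
        elif event["type"] == "end":
            full_response = json.loads(event["data"])["reply"]  # final authoritative reply
print(full_response)  # -> Hello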

g4f/Provider/GeminiProChat.py

@@ -13,10 +13,10 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
class GeminiProChat(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://www.chatgot.one/"
+    url = "https://gemini-pro.chat/"
    working = True
    supports_message_history = True
-    default_model = ''
+    default_model = 'gemini-pro'
@classmethod
async def create_async_generator(

g4f/Provider/HuggingChat.py

@@ -13,8 +13,9 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
    supports_stream = True
    default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
    models = [
+        'meta-llama/Meta-Llama-3.1-70B-Instruct',
+        'meta-llama/Meta-Llama-3.1-405B-Instruct-FP8',
        'CohereForAI/c4ai-command-r-plus',
-        'meta-llama/Meta-Llama-3-70B-Instruct',
        'mistralai/Mixtral-8x7B-Instruct-v0.1',
        'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
        '01-ai/Yi-1.5-34B-Chat',

g4f/Provider/HuggingFace.py

@@ -14,16 +14,17 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
    working = True
    needs_auth = True
    supports_message_history = True
+    default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
    models = [
+        'meta-llama/Meta-Llama-3.1-70B-Instruct',
+        'meta-llama/Meta-Llama-3.1-405B-Instruct-FP8',
        'CohereForAI/c4ai-command-r-plus',
-        'meta-llama/Meta-Llama-3-70B-Instruct',
        'mistralai/Mixtral-8x7B-Instruct-v0.1',
        'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
        '01-ai/Yi-1.5-34B-Chat',
        'mistralai/Mistral-7B-Instruct-v0.2',
        'microsoft/Phi-3-mini-4k-instruct',
    ]
-    default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
@classmethod
async def create_async_generator(

g4f/Provider/Liaobots.py

@@ -10,14 +10,23 @@ from .helper import get_connector
from ..requests import raise_for_status
models = {
-    "gpt-3.5-turbo": {
-        "id": "gpt-3.5-turbo",
-        "name": "GPT-3.5-Turbo",
+    "gpt-4o-mini-free": {
+        "id": "gpt-4o-mini-free",
+        "name": "GPT-4o-Mini-Free",
        "model": "ChatGPT",
        "provider": "OpenAI",
-        "maxLength": 48000,
-        "tokenLimit": 14000,
-        "context": "16K",
+        "maxLength": 31200,
+        "tokenLimit": 7800,
+        "context": "8K",
    },
+    "gpt-4o-mini": {
+        "id": "gpt-4o-mini",
+        "name": "GPT-4o-Mini",
+        "model": "ChatGPT",
+        "provider": "OpenAI",
+        "maxLength": 260000,
+        "tokenLimit": 126000,
+        "context": "128K",
+    },
"gpt-4o-free": {
"context": "8K",
@@ -91,6 +100,15 @@ models = {
"tokenLimit": 200000,
"context": "200K",
},
"claude-3-5-sonnet-20240620": {
"id": "claude-3-5-sonnet-20240620",
"name": "Claude-3.5-Sonnet",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
},
"claude-3-haiku-20240307": {
"id": "claude-3-haiku-20240307",
"name": "Claude-3-Haiku",
@@ -155,10 +173,21 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
    supports_system_message = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = True
-    default_model = "gpt-3.5-turbo"
+    default_model = "gpt-4o"
    models = list(models.keys())
    model_aliases = {
-        "claude-v2": "claude-2.0"
+        "gpt-4o-mini": "gpt-4o-mini-free",
+        "gpt-4o": "gpt-4o-free",
+        # duplicate keys: only the last "claude-3-opus" mapping takes effect
+        "claude-3-opus": "claude-3-opus-20240229",
+        "claude-3-opus": "claude-3-opus-20240229-aws",
+        "claude-3-opus": "claude-3-opus-20240229-gcp",
+        "claude-3-sonnet": "claude-3-sonnet-20240229",
+        "claude-3-5-sonnet": "claude-3-5-sonnet-20240620",
+        "claude-3-haiku": "claude-3-haiku-20240307",
+        "gpt-4-turbo": "gpt-4-turbo-2024-04-09",
+        # duplicate keys: only the last "gemini-pro" mapping takes effect
+        "gemini-pro": "gemini-1.5-pro-latest",
+        "gemini-pro": "gemini-1.0-pro-latest",
+        "gemini-flash": "gemini-1.5-flash-latest",
    }
    _auth_code = ""
    _cookie_jar = None
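Example (not part of the commit): the alias table maps short model names onto concrete entries in the models dict; a sketch of the lookup, assuming the usual resolve-then-index pattern (resolve() below is illustrative, not a method of the provider):

def resolve(name: str) -> str:
    # fall back to the given name when no alias is registered
    return Liaobots.model_aliases.get(name, name)

model_id = resolve("claude-3-5-sonnet")  # -> "claude-3-5-sonnet-20240620"
print(models[model_id]["tokenLimit"], models[model_id]["context"])  # 200000 200K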

g4f/Provider/PerplexityLabs.py

@@ -15,21 +15,8 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
    working = True
    default_model = "mixtral-8x7b-instruct"
    models = [
-        "llama-3-sonar-large-32k-online", "llama-3-sonar-small-32k-online", "llama-3-sonar-large-32k-chat", "llama-3-sonar-small-32k-chat",
-        "dbrx-instruct", "claude-3-haiku-20240307", "llama-3-8b-instruct", "llama-3-70b-instruct", "codellama-70b-instruct", "mistral-7b-instruct",
-        "llava-v1.5-7b-wrapper", "llava-v1.6-34b", "mixtral-8x7b-instruct", "mixtral-8x22b-instruct", "mistral-medium", "gemma-2b-it", "gemma-7b-it",
-        "related"
+        "llama-3-sonar-large-32k-online", "llama-3-sonar-small-32k-online", "llama-3-sonar-large-32k-chat", "llama-3-sonar-small-32k-chat", "llama-3-8b-instruct", "llama-3-70b-instruct", "gemma-2-9b-it", "gemma-2-27b-it", "nemotron-4-340b-instruct", "mixtral-8x7b-instruct",
    ]
-    model_aliases = {
-        "mistralai/Mistral-7B-Instruct-v0.1": "mistral-7b-instruct",
-        "mistralai/Mistral-7B-Instruct-v0.2": "mistral-7b-instruct",
-        "mistralai/Mixtral-8x7B-Instruct-v0.1": "mixtral-8x7b-instruct",
-        "codellama/CodeLlama-70b-Instruct-hf": "codellama-70b-instruct",
-        "llava-v1.5-7b": "llava-v1.5-7b-wrapper",
-        "databricks/dbrx-instruct": "dbrx-instruct",
-        "meta-llama/Meta-Llama-3-70B-Instruct": "llama-3-70b-instruct",
-        "meta-llama/Meta-Llama-3-8B-Instruct": "llama-3-8b-instruct"
-    }
@classmethod
async def create_async_generator(

g4f/Provider/Pi.py

@@ -11,6 +11,7 @@ class Pi(AbstractProvider):
    working = True
    supports_stream = True
    _session = None
+    default_model = "pi"
@classmethod
def create_completion(

g4f/Provider/ReplicateHome.py

@@ -14,40 +14,46 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://replicate.com"
parent = "Replicate"
working = True
default_model = 'stability-ai/sdxl'
default_model = 'stability-ai/stable-diffusion-3'
models = [
# image
'stability-ai/sdxl',
'ai-forever/kandinsky-2.2',
# Models for image generation
'stability-ai/stable-diffusion-3',
'bytedance/sdxl-lightning-4step',
'playgroundai/playground-v2.5-1024px-aesthetic',
# text
'meta/llama-2-70b-chat',
'mistralai/mistral-7b-instruct-v0.2'
# Models for image generation
'meta/meta-llama-3-70b-instruct',
'mistralai/mixtral-8x7b-instruct-v0.1',
'google-deepmind/gemma-2b-it',
]
versions = {
# image
'stability-ai/sdxl': [
"39ed52f2a78e934b3ba6e2a89f5b1c712de7dfea535525255b1aa35c5565e08b",
"2b017d9b67edd2ee1401238df49d75da53c523f36e363881e057f5dc3ed3c5b2",
"7762fd07cf82c948538e41f63f77d685e02b063e37e496e96eefd46c929f9bdc"
# Model versions for generating images
'stability-ai/stable-diffusion-3': [
"527d2a6296facb8e47ba1eaf17f142c240c19a30894f437feee9b91cc29d8e4f"
],
'ai-forever/kandinsky-2.2': [
"ad9d7879fbffa2874e1d909d1d37d9bc682889cc65b31f7bb00d2362619f194a"
'bytedance/sdxl-lightning-4step': [
"5f24084160c9089501c1b3545d9be3c27883ae2239b6f412990e82d4a6210f8f"
],
'playgroundai/playground-v2.5-1024px-aesthetic': [
"a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24"
],
# Text
'meta/llama-2-70b-chat': [
"dp-542693885b1777c98ef8c5a98f2005e7"
# Model versions for text generation
'meta/meta-llama-3-70b-instruct': [
"dp-cf04fe09351e25db628e8b6181276547"
],
'mistralai/mistral-7b-instruct-v0.2': [
'mistralai/mixtral-8x7b-instruct-v0.1': [
"dp-89e00f489d498885048e94f9809fbc76"
],
'google-deepmind/gemma-2b-it': [
"dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626"
]
}
image_models = {"stability-ai/sdxl", "ai-forever/kandinsky-2.2"}
text_models = {"meta/llama-2-70b-chat", "mistralai/mistral-7b-instruct-v0.2"}
image_models = {"stability-ai/stable-diffusion-3", "bytedance/sdxl-lightning-4step", "playgroundai/playground-v2.5-1024px-aesthetic"}
text_models = {"meta/meta-llama-3-70b-instruct", "mistralai/mixtral-8x7b-instruct-v0.1", "google-deepmind/gemma-2b-it"}
@classmethod
async def create_async_generator(
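Every Replicate model above is pinned to explicit version hashes. A sketch of how a request might select the first pinned hash for the default model; the payload shape here is an assumption for illustration, not the provider's actual request format:

model = ReplicateHome.default_model
version = ReplicateHome.versions[model][0]  # first pinned version hash
payload = {"model": model, "version": version}  # hypothetical payload shape
print(payload)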

g4f/Provider/You.py

@@ -24,27 +24,27 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
image_models = ["dall-e"]
models = [
default_model,
"gpt-4o-mini",
"gpt-4o",
"gpt-4",
"gpt-4-turbo",
"claude-instant",
"claude-2",
"gpt-4",
"claude-3.5-sonnet",
"claude-3-opus",
"claude-3-sonnet",
"claude-3-haiku",
"gemini-pro",
"claude-2",
"llama-3.1-70b",
"llama-3",
"gemini-1-5-flash",
"gemini-1-5-pro",
"gemini-1-0-pro",
"databricks-dbrx-instruct",
"command-r",
"command-r-plus",
"llama3",
"zephyr",
"dolphin-2.5",
default_vision_model,
*image_models
]
model_aliases = {
"claude-v2": "claude-2",
}
_cookies = None
_cookies_used = 0
_telemetry_ids = []

g4f/Provider/__init__.py

@@ -11,10 +11,12 @@ from .selenium import *
from .needs_auth import *
from .AI365VIP import AI365VIP
+from .Allyfy import Allyfy
from .Aura import Aura
from .Bing import Bing
from .BingCreateImages import BingCreateImages
from .Blackbox import Blackbox
+from .ChatGot import ChatGot
from .Chatgpt4o import Chatgpt4o
from .Chatgpt4Online import Chatgpt4Online
from .ChatgptFree import ChatgptFree
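Both new providers are re-exported from the package root, so they can be imported directly:

from g4f.Provider import Allyfy, ChatGot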

g4f/Provider/needs_auth/Openai.py

@@ -16,6 +16,7 @@ class Openai(AsyncGeneratorProvider, ProviderModelMixin):
    needs_auth = True
    supports_message_history = True
    supports_system_message = True
+    default_model = ""
@classmethod
async def create_async_generator(

g4f/Provider/needs_auth/OpenaiChat.py

@@ -61,7 +61,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
    supports_system_message = True
    default_model = None
    default_vision_model = "gpt-4o"
-    models = ["gpt-3.5-turbo", "gpt-4", "gpt-4-gizmo", "gpt-4o", "auto"]
+    models = ["gpt-3.5-turbo", "gpt-4", "gpt-4-gizmo", "gpt-4o", "gpt-4o-mini", "auto"]
    model_aliases = {
        "text-davinci-002-render-sha": "gpt-3.5-turbo",
        "": "gpt-3.5-turbo",

g4f/models.py

@@ -5,9 +5,12 @@ from dataclasses import dataclass
from .Provider import IterListProvider, ProviderType
from .Provider import (
    AI365VIP,
+    Allyfy,
    Bing,
    Blackbox,
+    ChatGot,
    Chatgpt4o,
+    Chatgpt4Online,
    ChatgptFree,
    DDG,
    DeepInfra,
@@ -84,6 +87,7 @@ gpt_35_long = Model(
        DDG,
        AI365VIP,
        Pizzagpt,
+        Allyfy,
    ])
)
@@ -107,6 +111,7 @@ gpt_35_turbo = Model(
        DDG,
        AI365VIP,
        Pizzagpt,
+        Allyfy,
    ])
)
@@ -133,7 +138,7 @@ gpt_4 = Model(
    name = 'gpt-4',
    base_provider = 'openai',
    best_provider = IterListProvider([
-        Bing, Liaobots,
+        Bing, Liaobots, Chatgpt4Online
    ])
)
@@ -165,7 +170,15 @@ gpt_4o = Model(
    name = 'gpt-4o',
    base_provider = 'openai',
    best_provider = IterListProvider([
-        You, Liaobots, Chatgpt4o, AI365VIP
+        You, Liaobots, Chatgpt4o, AI365VIP, OpenaiChat
    ])
)
+gpt_4o_mini = Model(
+    name = 'gpt-4o-mini',
+    base_provider = 'openai',
+    best_provider = IterListProvider([
+        Liaobots, OpenaiChat, You,
+    ])
+)
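With the model registered, it is reachable through the normal high-level API; a minimal sketch, assuming the provider chain above picks the first working entry:

import g4f

response = g4f.ChatCompletion.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Hi"}],
)
print(response)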
@@ -185,12 +198,6 @@ meta = Model(
    best_provider = MetaAI
)
-llama_2_70b_chat = Model(
-    name = "meta/llama-2-70b-chat",
-    base_provider = "meta",
-    best_provider = IterListProvider([ReplicateHome])
-)
llama3_8b_instruct = Model(
name = "meta-llama/Meta-Llama-3-8B-Instruct",
base_provider = "meta",
@@ -200,7 +207,19 @@ llama3_8b_instruct = Model(
llama3_70b_instruct = Model(
    name = "meta-llama/Meta-Llama-3-70B-Instruct",
    base_provider = "meta",
-    best_provider = IterListProvider([DeepInfra, PerplexityLabs, Replicate, HuggingChat, DDG])
+    best_provider = IterListProvider([DeepInfra, PerplexityLabs, Replicate, DDG, ReplicateHome])
)
+llama_3_1_70b_Instruct = Model(
+    name = "meta-llama/Meta-Llama-3.1-70B-Instruct",
+    base_provider = "meta",
+    best_provider = IterListProvider([HuggingChat, HuggingFace])
+)
+llama_3_1_405b_Instruct_FP8 = Model(
+    name = "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
+    base_provider = "meta",
+    best_provider = IterListProvider([HuggingChat, HuggingFace])
+)
codellama_34b_instruct = Model(
@@ -220,13 +239,13 @@ codellama_70b_instruct = Model(
mixtral_8x7b = Model(
    name = "mistralai/Mixtral-8x7B-Instruct-v0.1",
    base_provider = "huggingface",
-    best_provider = IterListProvider([DeepInfra, HuggingFace, PerplexityLabs, HuggingChat, DDG])
+    best_provider = IterListProvider([DeepInfra, HuggingFace, PerplexityLabs, HuggingChat, DDG, ReplicateHome])
)
mistral_7b_v02 = Model(
    name = "mistralai/Mistral-7B-Instruct-v0.2",
    base_provider = "huggingface",
-    best_provider = IterListProvider([DeepInfra, HuggingFace, HuggingChat, ReplicateHome])
+    best_provider = IterListProvider([DeepInfra, HuggingFace, HuggingChat])
)
@@ -265,10 +284,22 @@ gemini = Model(
gemini_pro = Model(
    name = 'gemini-pro',
    base_provider = 'Google',
-    best_provider = IterListProvider([GeminiPro, You, GeminiProChat])
+    best_provider = IterListProvider([GeminiPro, You, ChatGot, GeminiProChat, Liaobots])
)
+gemini_flash = Model(
+    name = 'gemini-flash',
+    base_provider = 'Google',
+    best_provider = IterListProvider([Liaobots])
+)
# gemma
+gemma_2b_it = Model(
+    name = 'gemma-2b-it',
+    base_provider = 'Google',
+    best_provider = IterListProvider([ReplicateHome])
+)
gemma_2_9b_it = Model(
    name = 'gemma-2-9b-it',
    base_provider = 'Google',
@@ -283,28 +314,46 @@ gemma_2_27b_it = Model(
### Anthropic ###
-claude_v2 = Model(
-    name = 'claude-v2',
-    base_provider = 'anthropic',
-    best_provider = IterListProvider([Vercel])
+claude_2 = Model(
+    name = 'claude-2',
+    base_provider = 'Anthropic',
+    best_provider = IterListProvider([You])
)
+claude_2_0 = Model(
+    name = 'claude-2.0',
+    base_provider = 'Anthropic',
+    best_provider = IterListProvider([Liaobots])
+)
+claude_2_1 = Model(
+    name = 'claude-2.1',
+    base_provider = 'Anthropic',
+    best_provider = IterListProvider([Liaobots])
+)
claude_3_opus = Model(
    name = 'claude-3-opus',
-    base_provider = 'anthropic',
-    best_provider = You
+    base_provider = 'Anthropic',
+    best_provider = IterListProvider([You, Liaobots])
)
claude_3_sonnet = Model(
    name = 'claude-3-sonnet',
-    base_provider = 'anthropic',
-    best_provider = You
+    base_provider = 'Anthropic',
+    best_provider = IterListProvider([You, Liaobots])
)
+claude_3_5_sonnet = Model(
+    name = 'claude-3-5-sonnet',
+    base_provider = 'Anthropic',
+    best_provider = IterListProvider([Liaobots])
+)
claude_3_haiku = Model(
    name = 'claude-3-haiku',
-    base_provider = 'anthropic',
-    best_provider = IterListProvider([DDG, AI365VIP])
+    base_provider = 'Anthropic',
+    best_provider = IterListProvider([DDG, AI365VIP, Liaobots])
)
@@ -348,6 +397,58 @@ command_r_plus = Model(
)
+### iFlytek ###
+SparkDesk_v1_1 = Model(
+    name = 'SparkDesk-v1.1',
+    base_provider = 'iFlytek',
+    best_provider = IterListProvider([FreeChatgpt])
+)
+### DeepSeek ###
+deepseek_coder = Model(
+    name = 'deepseek-coder',
+    base_provider = 'DeepSeek',
+    best_provider = IterListProvider([FreeChatgpt])
+)
+deepseek_chat = Model(
+    name = 'deepseek-chat',
+    base_provider = 'DeepSeek',
+    best_provider = IterListProvider([FreeChatgpt])
+)
+### Qwen ###
+Qwen2_7B_Instruct = Model(
+    name = 'Qwen2-7B-Instruct',
+    base_provider = 'Qwen',
+    best_provider = IterListProvider([FreeChatgpt])
+)
+### Zhipu AI ###
+glm4_9B_chat = Model(
+    name = 'glm4-9B-chat',
+    base_provider = 'Zhipu AI',
+    best_provider = IterListProvider([FreeChatgpt])
+)
+chatglm3_6B = Model(
+    name = 'chatglm3-6B',
+    base_provider = 'Zhipu AI',
+    best_provider = IterListProvider([FreeChatgpt])
+)
+### 01-ai ###
+Yi_1_5_9B_Chat = Model(
+    name = 'Yi-1.5-9B-Chat',
+    base_provider = '01-ai',
+    best_provider = IterListProvider([FreeChatgpt])
+)
### Other ###
pi = Model(
    name = 'pi',
@@ -364,14 +465,27 @@ pi = Model(
sdxl = Model(
    name = 'stability-ai/sdxl',
    base_provider = 'Stability AI',
-    best_provider = IterListProvider([ReplicateHome, DeepInfraImage])
+    best_provider = IterListProvider([DeepInfraImage])
)
-### AI Forever ###
-kandinsky_2_2 = Model(
-    name = 'ai-forever/kandinsky-2.2',
-    base_provider = 'AI Forever',
+stable_diffusion_3 = Model(
+    name = 'stability-ai/stable-diffusion-3',
+    base_provider = 'Stability AI',
    best_provider = IterListProvider([ReplicateHome])
)
+sdxl_lightning_4step = Model(
+    name = 'bytedance/sdxl-lightning-4step',
+    base_provider = 'Stability AI',
+    best_provider = IterListProvider([ReplicateHome])
+)
+playground_v2_5_1024px_aesthetic = Model(
+    name = 'playgroundai/playground-v2.5-1024px-aesthetic',
+    base_provider = 'Stability AI',
+    best_provider = IterListProvider([ReplicateHome])
+)
@@ -400,6 +514,7 @@ class ModelUtils:
        # gpt-4
        'gpt-4o' : gpt_4o,
+        'gpt-4o-mini' : gpt_4o_mini,
        'gpt-4' : gpt_4,
        'gpt-4-0613' : gpt_4_0613,
        'gpt-4-32k' : gpt_4_32k,
@@ -410,11 +525,13 @@ class ModelUtils:
        ### Meta ###
        "meta-ai": meta,
-        'llama-2-70b-chat': llama_2_70b_chat,
        'llama3-8b': llama3_8b_instruct, # alias
        'llama3-70b': llama3_70b_instruct, # alias
        'llama3-8b-instruct' : llama3_8b_instruct,
        'llama3-70b-instruct': llama3_70b_instruct,
+        'llama-3.1-70b-Instruct': llama_3_1_70b_Instruct,
+        'llama-3.1-405B-Instruct-FP8': llama_3_1_405b_Instruct_FP8,
        'codellama-34b-instruct': codellama_34b_instruct,
        'codellama-70b-instruct': codellama_70b_instruct,
@@ -441,16 +558,21 @@ class ModelUtils:
        # gemini
        'gemini': gemini,
        'gemini-pro': gemini_pro,
+        'gemini-flash': gemini_flash,
        # gemma
+        'gemma-2b-it': gemma_2b_it,
        'gemma-2-9b-it': gemma_2_9b_it,
        'gemma-2-27b-it': gemma_2_27b_it,
        ### Anthropic ###
-        'claude-v2': claude_v2,
+        'claude-2': claude_2,
+        'claude-2.0': claude_2_0,
+        'claude-2.1': claude_2_1,
        'claude-3-opus': claude_3_opus,
        'claude-3-sonnet': claude_3_sonnet,
+        'claude-3-5-sonnet': claude_3_5_sonnet,
        'claude-3-haiku': claude_3_haiku,
@@ -478,6 +600,28 @@ class ModelUtils:
        'gigachat': gigachat,
+        ### iFlytek ###
+        'SparkDesk-v1.1': SparkDesk_v1_1,
+        ### DeepSeek ###
+        'deepseek-coder': deepseek_coder,
+        'deepseek-chat': deepseek_chat,
+        ### Qwen ###
+        'Qwen2-7B-Instruct': Qwen2_7B_Instruct,
+        ### Zhipu AI ###
+        'glm4-9B-chat': glm4_9B_chat,
+        'chatglm3-6B': chatglm3_6B,
+        ### 01-ai ###
+        'Yi-1.5-9B-Chat': Yi_1_5_9B_Chat,
        # Other
        'pi': pi,
@@ -489,9 +633,17 @@ class ModelUtils:
        ### Stability AI ###
        'sdxl': sdxl,
+        'stable-diffusion-3': stable_diffusion_3,
+        ### ByteDance ###
+        'sdxl-lightning-4step': sdxl_lightning_4step,
+        ### Playground ###
+        'playground-v2.5-1024px-aesthetic': playground_v2_5_1024px_aesthetic,
-        ### AI Forever ###
-        'kandinsky-2.2': kandinsky_2_2,
    }
_all_models = list(ModelUtils.convert.keys())
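Example (not part of the commit): resolving a registered name through the convert table, assuming the Model dataclass fields used throughout this file:

from g4f.models import ModelUtils, _all_models

model = ModelUtils.convert["gpt-4o-mini"]
print(model.name, model.base_provider)  # gpt-4o-mini openai
print(len(_all_models))                 # count of registered model names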