Mirror of https://github.com/xtekky/gpt4free.git (synced 2025-12-06 02:30:41 -08:00)

Update model list / providers

parent 3703e72073
commit 1994481872

8 changed files with 75 additions and 143 deletions
@@ -9,7 +9,7 @@ from .helper import format_prompt

 class Cohere(AbstractProvider):
     url = "https://cohereforai-c4ai-command-r-plus.hf.space"
-    working = True
+    working = False
     supports_gpt_35_turbo = False
     supports_gpt_4 = False
     supports_stream = True
@@ -16,8 +16,11 @@ class DuckDuckGo(AsyncGeneratorProvider, ProviderModelMixin):
     supports_message_history = True

     default_model = "gpt-3.5-turbo-0125"
-    models = ["gpt-3.5-turbo-0125", "claude-instant-1.2"]
-    model_aliases = {"gpt-3.5-turbo": "gpt-3.5-turbo-0125"}
+    models = ["gpt-3.5-turbo-0125", "claude-3-haiku-20240307"]
+    model_aliases = {
+        "gpt-3.5-turbo": "gpt-3.5-turbo-0125",
+        "claude-3-haiku": "claude-3-haiku-20240307"
+    }

     status_url = "https://duckduckgo.com/duckchat/v1/status"
     chat_url = "https://duckduckgo.com/duckchat/v1/chat"
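The new model_aliases table maps public model names onto the dated snapshots DuckDuckGo actually serves. A minimal standalone sketch of that resolution step, assuming the lookup mirrors what g4f's ProviderModelMixin does (resolve_model here is a hypothetical helper, not g4f API):

# Hypothetical helper illustrating alias resolution; not part of g4f itself.
default_model = "gpt-3.5-turbo-0125"
models = ["gpt-3.5-turbo-0125", "claude-3-haiku-20240307"]
model_aliases = {
    "gpt-3.5-turbo": "gpt-3.5-turbo-0125",
    "claude-3-haiku": "claude-3-haiku-20240307",
}

def resolve_model(model: str) -> str:
    if not model:
        return default_model          # empty request falls back to the default
    if model in model_aliases:
        return model_aliases[model]   # public alias -> dated snapshot
    if model not in models:
        raise ValueError(f"Model is not supported: {model}")
    return model

assert resolve_model("claude-3-haiku") == "claude-3-haiku-20240307"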
@@ -9,7 +9,7 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin

 class Llama(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://www.llama2.ai"
-    working = True
+    working = False
     supports_message_history = True
     default_model = "meta/meta-llama-3-70b-instruct"
     models = [
@@ -21,11 +21,14 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
         "related"
     ]
     model_aliases = {
         "mistralai/Mistral-7B-Instruct-v0.1": "mistral-7b-instruct",
+        "mistralai/Mistral-7B-Instruct-v0.2": "mistral-7b-instruct",
         "mistralai/Mixtral-8x7B-Instruct-v0.1": "mixtral-8x7b-instruct",
         "codellama/CodeLlama-70b-Instruct-hf": "codellama-70b-instruct",
         "llava-v1.5-7b": "llava-v1.5-7b-wrapper",
-        'databricks/dbrx-instruct': "dbrx-instruct"
+        "databricks/dbrx-instruct": "dbrx-instruct",
+        "meta-llama/Meta-Llama-3-70B-Instruct": "llama-3-70b-instruct",
+        "meta-llama/Meta-Llama-3-8B-Instruct": "llama-3-8b-instruct"
     }

     @classmethod
@@ -1,7 +1,7 @@
 from __future__ import annotations

 from ..providers.types import BaseProvider, ProviderType
-from ..providers.retry_provider import RetryProvider, IterProvider
+from ..providers.retry_provider import RetryProvider, IterListProvider
 from ..providers.base_provider import AsyncProvider, AsyncGeneratorProvider
 from ..providers.create_images import CreateImagesProvider
@@ -373,6 +373,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         except NoValidHarFileError as e:
             if cls._api_key is None and cls.needs_auth:
                 raise e
             cls._create_request_args()
+
         if cls.default_model is None:
             cls.default_model = cls.get_model(await cls.get_default_model(session, cls._headers))
@@ -420,7 +421,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                     **requirements["proofofwork"],
                     user_agent=cls._headers["user-agent"],
+                    proofTokens=proofTokens
                 )
             )
         if debug.logging:
             print(
                 'Arkose:', False if not need_arkose else arkose_token[:12]+"...",
g4f/models.py (154 changes)
@@ -2,27 +2,27 @@ from __future__ import annotations

 from dataclasses import dataclass

-from .Provider import RetryProvider, ProviderType
+from .Provider import IterListProvider, ProviderType
 from .Provider import (
     Aichatos,
     Bing,
     Blackbox,
     Chatgpt4Online,
     ChatgptAi,
     ChatgptNext,
-    Cohere,
+    Cnote,
     DeepInfra,
     DuckDuckGo,
     Ecosia,
     Feedough,
     FreeGpt,
     Gemini,
-    GeminiProChat,
+    GeminiPro,
     GigaChat,
     HuggingChat,
     HuggingFace,
     Koala,
     Liaobots,
-    Llama,
+    MetaAI,
     OpenaiChat,
     PerplexityLabs,
     Replicate,
@@ -32,7 +32,6 @@ from .Provider import (
     Reka
 )

-
 @dataclass(unsafe_hash=True)
 class Model:
     """
@@ -55,12 +54,12 @@ class Model:
 default = Model(
     name = "",
     base_provider = "",
-    best_provider = RetryProvider([
+    best_provider = IterListProvider([
         Bing,
         ChatgptAi,
         You,
         Chatgpt4Online,
-        OpenaiChat
+        OpenaiChat,
+        Ecosia,
     ])
 )
@@ -68,11 +67,14 @@ default = Model(
 gpt_35_long = Model(
     name = 'gpt-3.5-turbo',
     base_provider = 'openai',
-    best_provider = RetryProvider([
+    best_provider = IterListProvider([
         FreeGpt,
         You,
         ChatgptNext,
         OpenaiChat,
         Koala,
+        Ecosia,
+        DuckDuckGo,
     ])
 )
@@ -80,7 +82,7 @@ gpt_35_long = Model(
 gpt_35_turbo = Model(
     name = 'gpt-3.5-turbo',
     base_provider = 'openai',
-    best_provider = RetryProvider([
+    best_provider = IterListProvider([
         FreeGpt,
         You,
         ChatgptNext,
@@ -95,7 +97,7 @@ gpt_35_turbo = Model(
 gpt_4 = Model(
     name = 'gpt-4',
     base_provider = 'openai',
-    best_provider = RetryProvider([
+    best_provider = IterListProvider([
         Bing, Liaobots,
     ])
 )
@@ -103,8 +105,8 @@ gpt_4 = Model(
 gpt_4o = Model(
     name = 'gpt-4o',
     base_provider = 'openai',
-    best_provider = RetryProvider([
-        You
+    best_provider = IterListProvider([
+        You, Liaobots
     ])
 )

@@ -120,46 +122,22 @@ gigachat = Model(
     best_provider = GigaChat
 )

-gigachat_plus = Model(
-    name = 'GigaChat-Plus',
-    base_provider = 'gigachat',
-    best_provider = GigaChat
-)
-
-gigachat_pro = Model(
-    name = 'GigaChat-Pro',
-    base_provider = 'gigachat',
-    best_provider = GigaChat
-)
-
-llama2_7b = Model(
-    name = "meta-llama/Llama-2-7b-chat-hf",
-    base_provider = 'meta',
-    best_provider = RetryProvider([Llama, DeepInfra])
-)
-
-llama2_13b = Model(
-    name = "meta-llama/Llama-2-13b-chat-hf",
-    base_provider = 'meta',
-    best_provider = RetryProvider([Llama, DeepInfra])
-)
-
-llama2_70b = Model(
-    name = "meta-llama/Llama-2-70b-chat-hf",
+meta = Model(
+    name = "meta",
     base_provider = "meta",
-    best_provider = RetryProvider([Llama, DeepInfra])
+    best_provider = MetaAI
 )

 llama3_8b_instruct = Model(
     name = "meta-llama/Meta-Llama-3-8B-Instruct",
     base_provider = "meta",
-    best_provider = RetryProvider([Llama, DeepInfra, Replicate])
+    best_provider = IterListProvider([DeepInfra, PerplexityLabs, Replicate])
 )

 llama3_70b_instruct = Model(
     name = "meta-llama/Meta-Llama-3-70B-Instruct",
     base_provider = "meta",
-    best_provider = RetryProvider([Llama, DeepInfra])
+    best_provider = IterListProvider([DeepInfra, PerplexityLabs, Replicate])
 )

 codellama_34b_instruct = Model(
@@ -171,61 +149,30 @@ codellama_34b_instruct = Model(
 codellama_70b_instruct = Model(
     name = "codellama/CodeLlama-70b-Instruct-hf",
     base_provider = "meta",
-    best_provider = RetryProvider([DeepInfra, PerplexityLabs])
+    best_provider = IterListProvider([DeepInfra, PerplexityLabs])
 )

 # Mistral
 mixtral_8x7b = Model(
     name = "mistralai/Mixtral-8x7B-Instruct-v0.1",
     base_provider = "huggingface",
-    best_provider = RetryProvider([DeepInfra, HuggingFace, PerplexityLabs])
+    best_provider = IterListProvider([DeepInfra, HuggingFace, PerplexityLabs])
 )

 mistral_7b = Model(
     name = "mistralai/Mistral-7B-Instruct-v0.1",
     base_provider = "huggingface",
-    best_provider = RetryProvider([HuggingChat, HuggingFace, PerplexityLabs])
+    best_provider = IterListProvider([HuggingChat, HuggingFace, PerplexityLabs])
 )

-mistral_7b_v02 = Model(
-    name = "mistralai/Mistral-7B-Instruct-v0.2",
-    base_provider = "huggingface",
-    best_provider = DeepInfra
-)
-
-mixtral_8x22b = Model(
-    name = "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
-    base_provider = "huggingface",
-    best_provider = DeepInfra
-)
-
-# Misc models
-dolphin_mixtral_8x7b = Model(
-    name = "cognitivecomputations/dolphin-2.6-mixtral-8x7b",
-    base_provider = "huggingface",
-    best_provider = DeepInfra
-)
-
-lzlv_70b = Model(
-    name = "lizpreciatior/lzlv_70b_fp16_hf",
-    base_provider = "huggingface",
-    best_provider = DeepInfra
-)
-
-airoboros_70b = Model(
-    name = "deepinfra/airoboros-70b",
-    base_provider = "huggingface",
-    best_provider = DeepInfra
-)
-
 openchat_35 = Model(
     name = "openchat/openchat_3.5",
     base_provider = "huggingface",
-    best_provider = DeepInfra
+    best_provider = IterListProvider([DeepInfra, HuggingFace, PerplexityLabs])
 )

 # Bard
-gemini = bard = palm = Model(
+gemini = Model(
     name = 'gemini',
     base_provider = 'google',
     best_provider = Gemini
@@ -234,7 +181,7 @@ gemini = bard = palm = Model(
 claude_v2 = Model(
     name = 'claude-v2',
     base_provider = 'anthropic',
-    best_provider = RetryProvider([Vercel])
+    best_provider = IterListProvider([Vercel])
 )

 claude_3_opus = Model(
@@ -249,6 +196,12 @@ claude_3_sonnet = Model(
     best_provider = You
 )

+claude_3_haiku = Model(
+    name = 'claude-3-haiku',
+    base_provider = 'anthropic',
+    best_provider = DuckDuckGo
+)
+
 gpt_35_turbo_16k = Model(
     name = 'gpt-3.5-turbo-16k',
     base_provider = 'openai',
@@ -288,7 +241,7 @@ gpt_4_32k_0613 = Model(
 gemini_pro = Model(
     name = 'gemini-pro',
     base_provider = 'google',
-    best_provider = RetryProvider([GeminiProChat, You])
+    best_provider = IterListProvider([GeminiPro, You])
 )

 pi = Model(
@@ -300,13 +253,13 @@ pi = Model(
 dbrx_instruct = Model(
     name = 'databricks/dbrx-instruct',
     base_provider = 'mistral',
-    best_provider = RetryProvider([DeepInfra, PerplexityLabs])
+    best_provider = IterListProvider([DeepInfra, PerplexityLabs])
 )

 command_r_plus = Model(
     name = 'CohereForAI/c4ai-command-r-plus',
     base_provider = 'mistral',
-    best_provider = RetryProvider([HuggingChat, Cohere])
+    best_provider = IterListProvider([HuggingChat])
 )

 blackbox = Model(
@@ -334,9 +287,8 @@ class ModelUtils:
         'gpt-3.5-turbo-0613' : gpt_35_turbo_0613,
         'gpt-3.5-turbo-16k' : gpt_35_turbo_16k,
         'gpt-3.5-turbo-16k-0613' : gpt_35_turbo_16k_0613,
-
         'gpt-3.5-long': gpt_35_long,

         # gpt-4
         'gpt-4o' : gpt_4o,
         'gpt-4' : gpt_4,
@@ -345,52 +297,38 @@ class ModelUtils:
         'gpt-4-32k-0613' : gpt_4_32k_0613,
         'gpt-4-turbo' : gpt_4_turbo,

         # Llama
-        'llama2-7b' : llama2_7b,
-        'llama2-13b': llama2_13b,
-        'llama2-70b': llama2_70b,
-
-        'llama3-8b' : llama3_8b_instruct, # alias
+        "meta-ai": meta,
+        'llama3-8b': llama3_8b_instruct, # alias
         'llama3-70b': llama3_70b_instruct, # alias
         'llama3-8b-instruct' : llama3_8b_instruct,
         'llama3-70b-instruct': llama3_70b_instruct,

         'codellama-34b-instruct': codellama_34b_instruct,
         'codellama-70b-instruct': codellama_70b_instruct,

-        # GigaChat
-        'gigachat' : gigachat,
-        'gigachat_plus': gigachat_plus,
-        'gigachat_pro' : gigachat_pro,
-
         # Mistral Opensource
         'mixtral-8x7b': mixtral_8x7b,
         'mistral-7b': mistral_7b,
-        'mistral-7b-v02': mistral_7b_v02,
-        'mixtral-8x22b': mixtral_8x22b,
-        'dolphin-mixtral-8x7b': dolphin_mixtral_8x7b,
-

         # google gemini
         'gemini': gemini,
         'gemini-pro': gemini_pro,

         # anthropic
         'claude-v2': claude_v2,
         'claude-3-opus': claude_3_opus,
         'claude-3-sonnet': claude_3_sonnet,
+        'claude-3-haiku': claude_3_haiku,

         # reka core
         'reka-core': reka_core,
         'reka': reka_core,
         'Reka Core': reka_core,

         # other
         'blackbox': blackbox,
         'command-r+': command_r_plus,
         'dbrx-instruct': dbrx_instruct,
-        'lzlv-70b': lzlv_70b,
-        'airoboros-70b': airoboros_70b,
-        'openchat_3.5': openchat_35,
+        'gigachat': gigachat,
         'pi': pi
     }
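With the registry above in place, the new entries are reachable through the usual g4f entry point; a hedged usage sketch (whether a given backend actually responds depends on the installed g4f version and on provider availability):

import g4f

# 'claude-3-haiku' and 'meta-ai' are new ModelUtils keys from this commit;
# the model's best_provider (an IterListProvider) fails over between backends.
response = g4f.ChatCompletion.create(
    model="claude-3-haiku",
    messages=[{"role": "user", "content": "Hello"}],
)
print(response)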
@@ -4,11 +4,11 @@ import asyncio
 import random

 from ..typing import Type, List, CreateResult, Messages, Iterator, AsyncResult
-from .types import BaseProvider, BaseRetryProvider
+from .types import BaseProvider, BaseRetryProvider, ProviderType
 from .. import debug
 from ..errors import RetryProviderError, RetryNoProviderError

-class NewBaseRetryProvider(BaseRetryProvider):
+class IterListProvider(BaseRetryProvider):
     def __init__(
         self,
         providers: List[Type[BaseProvider]],
@@ -45,21 +45,17 @@ class NewBaseRetryProvider(BaseRetryProvider):
         Raises:
             Exception: Any exception encountered during the completion process.
         """
-        providers = [p for p in self.providers if stream and p.supports_stream] if stream else self.providers
-        if self.shuffle:
-            random.shuffle(providers)
-
         exceptions = {}
         started: bool = False

-        for provider in providers:
+        for provider in self.get_providers(stream):
             self.last_provider = provider
             try:
                 if debug.logging:
                     print(f"Using {provider.__name__} provider")
                 for token in provider.create_completion(model, messages, stream, **kwargs):
                     yield token
-                started = True
+                    started = True
                 if started:
                     return
             except Exception as e:
@@ -87,13 +83,9 @@ class NewBaseRetryProvider(BaseRetryProvider):
         Raises:
             Exception: Any exception encountered during the asynchronous completion process.
         """
-        providers = self.providers
-        if self.shuffle:
-            random.shuffle(providers)
-
         exceptions = {}

-        for provider in providers:
+        for provider in self.get_providers(False):
             self.last_provider = provider
             try:
                 if debug.logging:
@@ -109,8 +101,8 @@ class NewBaseRetryProvider(BaseRetryProvider):

         raise_exceptions(exceptions)

-    def get_providers(self, stream: bool):
-        providers = [p for p in self.providers if stream and p.supports_stream] if stream else self.providers
+    def get_providers(self, stream: bool) -> list[ProviderType]:
+        providers = [p for p in self.providers if p.supports_stream] if stream else self.providers
         if self.shuffle:
             random.shuffle(providers)
         return providers
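get_providers now drops the redundant "stream and" from the comprehension (the list is only built when stream is already true) and gains a return annotation. A self-contained sketch of the contract, with stub classes standing in for real g4f providers:

import random
from dataclasses import dataclass
from typing import List

@dataclass
class StubProvider:      # stand-in for a g4f provider class
    name: str
    supports_stream: bool

class Pool:
    def __init__(self, providers: List[StubProvider], shuffle: bool = True):
        self.providers = providers
        self.shuffle = shuffle

    def get_providers(self, stream: bool) -> List[StubProvider]:
        # Same logic as the hunk above: filter on streaming support when
        # streaming, then randomize the failover order if shuffle is enabled.
        providers = [p for p in self.providers if p.supports_stream] if stream else self.providers
        if self.shuffle:
            random.shuffle(providers)
        return providers

pool = Pool([StubProvider("a", True), StubProvider("b", False)])
assert all(p.supports_stream for p in pool.get_providers(stream=True))
assert len(pool.get_providers(stream=False)) == 2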
@@ -138,7 +130,7 @@ class NewBaseRetryProvider(BaseRetryProvider):
             else:
                 for token in provider.create_completion(model, messages, stream, **kwargs):
                     yield token
-                started = True
+                    started = True
                 if started:
                     return
         except Exception as e:
@@ -150,7 +142,7 @@ class NewBaseRetryProvider(BaseRetryProvider):

         raise_exceptions(exceptions)

-class RetryProvider(NewBaseRetryProvider):
+class RetryProvider(IterListProvider):
     def __init__(
         self,
         providers: List[Type[BaseProvider]],
@@ -188,11 +180,10 @@ class RetryProvider(NewBaseRetryProvider):
         Raises:
             Exception: Any exception encountered during the completion process.
         """
-        providers = self.get_providers(stream)
-        if self.single_provider_retry and len(providers) == 1:
+        if self.single_provider_retry:
             exceptions = {}
             started: bool = False
-            provider = providers[0]
+            provider = self.providers[0]
             self.last_provider = provider
             for attempt in range(self.max_retries):
                 try:
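Note the guard change: retrying no longer requires exactly one configured provider; whenever single_provider_retry is set, the first entry in self.providers is retried up to max_retries. A hedged construction sketch (the keyword names follow the attributes used in these hunks; the exact __init__ signature is truncated in this diff, so treat them as assumptions):

# Assumed keyword arguments; retry one provider up to three times.
best_provider = RetryProvider(
    [DuckDuckGo],               # self.providers[0] is the provider retried
    single_provider_retry=True, # retry in place instead of failing over a list
    max_retries=3,
)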
@@ -200,7 +191,7 @@ class RetryProvider(NewBaseRetryProvider):
                         print(f"Using {provider.__name__} provider (attempt {attempt + 1})")
                     for token in provider.create_completion(model, messages, stream, **kwargs):
                         yield token
-                    started = True
+                        started = True
                     if started:
                         return
                 except Exception as e:
@@ -229,14 +220,10 @@
         Raises:
             Exception: Any exception encountered during the asynchronous completion process.
         """
-        providers = self.providers
-        if self.shuffle:
-            random.shuffle(providers)
-
         exceptions = {}

-        if self.single_provider_retry and len(providers) == 1:
-            provider = providers[0]
+        if self.single_provider_retry:
+            provider = self.providers[0]
             self.last_provider = provider
             for attempt in range(self.max_retries):
                 try: