Mirror of https://github.com/xtekky/gpt4free.git — synced 2025-12-06 02:30:41 -08:00
Changes in this sync:
- Changed default model in commit.py from "gpt-4o" to "claude-3.7-sonnet"
- Fixed ARTA provider by adding proper auth token handling and form data submission
- Updated Blackbox provider to use OpenRouter models instead of premium models
- Improved DDG provider with simplified authentication and better error handling
- Updated DeepInfraChat provider with new models and aliases
- Removed non-working providers: Goabror, Jmuz, OIVSCode, AllenAI, ChatGptEs, FreeRouter, Glider
- Moved non-working providers to the not_working directory
- Added BlackboxPro provider in needs_auth directory with premium model support
- Updated Liaobots provider with new models and improved authentication
- Renamed Microsoft_Phi_4 to Microsoft_Phi_4_Multimodal for clarity
- Updated LambdaChat provider with direct API implementation instead of HuggingChat
- Updated models.py with new model definitions and provider mappings
- Removed BlackForestLabs_Flux1Schnell from HuggingSpace providers
- Updated model aliases across multiple providers for better compatibility
- Fixed Dynaspark provider endpoint URL to prevent spam detection
197 lines · 9.7 KiB · Python
from __future__ import annotations
|
|
|
|
from ..typing import AsyncResult, Messages, MediaListType
|
|
from ..errors import ModelNotFoundError
|
|
from ..providers.retry_provider import IterListProvider
|
|
from ..image import is_data_an_audio
|
|
from ..providers.response import JsonConversation, ProviderInfo
|
|
from ..Provider.needs_auth import OpenaiChat, CopilotAccount
|
|
from ..Provider.hf_space import HuggingSpace
|
|
from .. import Provider
|
|
from .. import models
|
|
from ..Provider import Cloudflare, Gemini, Grok, DeepSeekAPI, PerplexityLabs, LambdaChat, PollinationsAI, FreeRouter
|
|
from ..Provider import Microsoft_Phi_4_Multimodal, DeepInfraChat, Blackbox, EdgeTTS, gTTS, MarkItDown, HarProvider
|
|
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
|
|
|
|
class AnyProvider(AsyncGeneratorProvider, ProviderModelMixin):
    """Aggregating meta-provider.

    Merges the model catalogues of all working providers into one namespace
    and, on generation, routes the request either to a single matching
    provider or to an ``IterListProvider`` fallback chain.
    """
    default_model = "default"
    working = True

    @classmethod
    def get_models(cls, ignored: list[str] = None) -> list[str]:
        """Build (once) and return the merged, de-duplicated model list.

        Args:
            ignored: Provider names (or their ``parent`` names) to exclude.

        Returns:
            The cached list of model identifiers. As a side effect, populates
            ``cls.audio_models`` / ``image_models`` / ``vision_models`` /
            ``video_models`` and ``cls.models_count``.
        """
        # FIX: the default used to be a shared mutable ``[]``; use ``None``
        # plus normalization to avoid the mutable-default-argument pitfall.
        ignored = [] if ignored is None else ignored
        if not cls.models:
            cls.audio_models = {}
            cls.image_models = []
            cls.vision_models = []
            cls.video_models = []

            # Base catalogue from models.__models__, keeping only models that
            # still have at least one working, non-ignored provider.
            model_with_providers = {
                model: [
                    provider for provider in providers
                    if provider.working and getattr(provider, "parent", provider.__name__) not in ignored
                ] for model, (_, providers) in models.__models__.items()
            }
            model_with_providers = {
                model: providers for model, providers in model_with_providers.items()
                if providers
            }
            cls.models_count = {
                model: len(providers) for model, providers in model_with_providers.items() if len(providers) > 1
            }
            all_models = [cls.default_model] + list(model_with_providers.keys())

            # Merge the live catalogues of a few dynamic providers.
            for provider in [OpenaiChat, PollinationsAI, HuggingSpace, Cloudflare, PerplexityLabs, Gemini, Grok]:
                if not provider.working or getattr(provider, "parent", provider.__name__) in ignored:
                    continue
                if provider is PollinationsAI:  # FIX: identity test, not ==
                    # Pollinations models are namespaced ("PollinationsAI:<model>")
                    # to avoid clashes with identically named models elsewhere.
                    provider_models = provider.get_models()  # hoisted: was fetched four times
                    prefix = provider.__name__
                    all_models.extend([f"{prefix}:{model}" for model in provider_models if model not in all_models])
                    cls.audio_models.update({f"{prefix}:{model}": [] for model in provider_models if model in provider.audio_models})
                    cls.image_models.extend([f"{prefix}:{model}" for model in provider_models if model in provider.image_models])
                    cls.vision_models.extend([f"{prefix}:{model}" for model in provider_models if model in provider.vision_models])
                else:
                    all_models.extend(provider.get_models())
                    cls.image_models.extend(provider.image_models)
                    cls.vision_models.extend(provider.vision_models)
                    cls.video_models.extend(provider.video_models)

            if CopilotAccount.working and CopilotAccount.parent not in ignored:
                all_models.extend(list(CopilotAccount.model_aliases.keys()))
            if PollinationsAI.working and PollinationsAI.__name__ not in ignored:
                all_models.extend(list(PollinationsAI.model_aliases.keys()))

            # Ordered (old, new) rewrite rules for clean_name. Applied
            # sequentially, so behavior matches the previous chained
            # ``str.replace`` calls exactly.
            replacements = (
                ("-instruct", ""), ("-chat", ""),
                ("-08-2024", ""), ("-03-2025", ""), ("-20250219", ""),
                ("-20241022", ""), ("-20240904", ""), ("-2025-04-16", ""),
                ("-2025-04-14", ""), ("-0125", ""), ("-2407", ""),
                ("-2501", ""), ("-0324", ""), ("-2409", ""), ("-2410", ""),
                ("-2411", ""), ("-1119", ""), ("-0919", ""), ("-02-24", ""),
                ("-03-25", ""), ("-03-26", ""), ("-01-21", ""), ("-002", ""),
                ("_", "."), ("c4ai-", ""),
                ("-preview", ""), ("-experimental", ""), ("-v1", ""),
                ("-fp8", ""), ("-bf16", ""), ("-hf", ""),
                ("llama3", "llama-3"),
            )

            def clean_name(name: str) -> str:
                # Normalize vendor-qualified ids ("org/model:tag") to a short,
                # lowercase alias with date/format suffixes stripped.
                name = name.split("/")[-1].split(":")[0].lower()
                for old, new in replacements:
                    name = name.replace(old, new)
                return name

            # Providers whose raw model ids need alias cleaning before exposure.
            for provider in [HarProvider, LambdaChat, DeepInfraChat]:
                if not provider.working or getattr(provider, "parent", provider.__name__) in ignored:
                    continue
                model_map = {clean_name(model): model for model in provider.get_models()}
                if not provider.model_aliases:
                    provider.model_aliases = {}
                provider.model_aliases.update(model_map)
                all_models.extend(list(model_map.keys()))
                cls.image_models.extend([clean_name(model) for model in provider.image_models])
                cls.vision_models.extend([clean_name(model) for model in provider.vision_models])
                cls.video_models.extend([clean_name(model) for model in provider.video_models])

            for provider in [Microsoft_Phi_4_Multimodal, PollinationsAI]:
                if provider.working and getattr(provider, "parent", provider.__name__) not in ignored:
                    cls.audio_models.update(provider.audio_models)

            # FIX: one O(n) counting pass replaces the previous quadratic
            # ``all_models.count(model)`` evaluated per element.
            occurrences: dict[str, int] = {}
            for name in all_models:
                occurrences[name] = occurrences.get(name, 0) + 1
            cls.models_count.update({
                name: count for name, count in occurrences.items()
                if count > cls.models_count.get(name, 0)
            })
            # dict.fromkeys de-duplicates while preserving first-seen order.
            cls.models = list(dict.fromkeys([model if model else cls.default_model for model in all_models]))
        return cls.models

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        stream: bool = True,
        media: MediaListType = None,
        ignored: list[str] = None,
        conversation: JsonConversation = None,
        **kwargs
    ) -> AsyncResult:
        """Resolve ``model`` to one or more providers and stream the response.

        Args:
            model: Model id, ``"Provider:model"`` spec, a bare provider name,
                or empty / ``default`` for automatic selection.
            messages: Chat history to send.
            stream: Forwarded to the underlying provider.
            media: Optional (data, filename) attachments; used to detect
                audio/vision requests when no model is given.
            ignored: Provider names to exclude from routing.
            conversation: Per-provider conversation state container.

        Raises:
            ModelNotFoundError: If no working provider serves ``model``.
        """
        # FIX: ``None`` default instead of shared mutable ``[]``.
        ignored = [] if ignored is None else ignored
        cls.get_models(ignored=ignored)
        providers = []
        if model and ":" in model:
            # "Provider:model" syntax: every segment before the last ":" names
            # an explicit provider; the final segment is the model id.
            providers = model.split(":")
            model = providers.pop()
            providers = [getattr(Provider, provider) for provider in providers]
        elif not model or model == cls.default_model:
            model = ""
            has_image = False
            has_audio = False
            # FIX: dropped the vacuous ``not has_audio`` guard (always True here).
            if media is not None:
                for media_data, filename in media:
                    if is_data_an_audio(media_data, filename):
                        has_audio = True
                        break
                    has_image = True
            if "audio" in kwargs or "audio" in kwargs.get("modalities", []):
                # Caller explicitly requested audio output.
                providers = [PollinationsAI, EdgeTTS, gTTS]
            elif has_audio:
                providers = [PollinationsAI, Microsoft_Phi_4_Multimodal, MarkItDown]
            elif has_image:
                providers = models.default_vision.best_provider.providers
            else:
                providers = models.default.best_provider.providers
        elif model in Provider.__map__:
            # The "model" is actually a provider name: let it use its default model.
            provider = Provider.__map__[model]
            if provider.working and getattr(provider, "parent", provider.__name__) not in ignored:
                model = None
                providers.append(provider)
        else:
            for provider in [
                OpenaiChat, Cloudflare, HarProvider, PerplexityLabs, Gemini, Grok, DeepSeekAPI, FreeRouter, Blackbox,
                HuggingSpace, LambdaChat, CopilotAccount, PollinationsAI, DeepInfraChat
            ]:
                if provider.working:
                    if not model or model in provider.get_models() or model in provider.model_aliases:
                        providers.append(provider)
            if model in models.__models__:
                providers.extend(models.__models__[model][1])
        # Final filter: working and not explicitly ignored.
        providers = [provider for provider in providers if provider.working and getattr(provider, "parent", provider.__name__) not in ignored]
        if not providers:
            raise ModelNotFoundError(f"Model {model} not found in any provider.")
        if len(providers) == 1:
            provider = providers[0]
            if conversation is not None:
                # Restore this provider's private conversation state, if any.
                child_conversation = getattr(conversation, provider.__name__, None)
                if child_conversation is not None:
                    kwargs["conversation"] = JsonConversation(**child_conversation)
            yield ProviderInfo(**provider.get_dict(), model=model)
            async for chunk in provider.get_async_create_function()(
                model,
                messages,
                stream=stream,
                media=media,
                **kwargs
            ):
                if isinstance(chunk, JsonConversation):
                    # Namespace conversation state under the provider name so
                    # several providers can share one JsonConversation object.
                    if conversation is None:
                        conversation = JsonConversation()
                    setattr(conversation, provider.__name__, chunk.get_dict())
                    yield conversation
                else:
                    yield chunk
            return
        # Several candidates: try them in order until one succeeds.
        async for chunk in IterListProvider(providers).get_async_create_function()(
            model,
            messages,
            stream=stream,
            media=media,
            **kwargs
        ):
            yield chunk
# Register AnyProvider with the Provider package so it is discoverable
# alongside the statically defined providers.
Provider.AnyProvider = AnyProvider
Provider.__map__["AnyProvider"] = AnyProvider
Provider.__providers__.append(AnyProvider)