Mirror of https://github.com/xtekky/gpt4free.git (synced 2025-12-06 10:40:43 -08:00)
active_by_default
This commit is contained in: parent 2d3b215dbb, commit 6f8c5ea62b
18 changed files with 18 additions and 3175 deletions
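In short: providers now declare up front whether they should be enabled without user configuration, via an `active_by_default` class attribute, and the web API stops inferring activation from `needs_auth` (see the Api hunk near the end). A minimal sketch of the three shapes the flag takes in this diff; the class names below are illustrative, not classes from the repo:

# Illustrative sketch only; these classes are not part of gpt4free.

class AlwaysActive:
    # Unconditional opt-in, as Cloudflare, Copilot, Together etc. do below.
    active_by_default = True

class NeverActiveByDefault:
    # Explicit opt-out, as PollinationsImage does below.
    active_by_default = False

class DependencyGated:
    # Activation tied to an optional dependency, as DeepSeekAPI does
    # below with its has_dsk import guard.
    has_dsk = False
    active_by_default = has_dsk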
@@ -39,6 +39,7 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
     url = "https://playground.ai.cloudflare.com"
     working = has_curl_cffi
     use_nodriver = True
+    active_by_default = True
     api_endpoint = "https://playground.ai.cloudflare.com/api/inference"
     models_url = "https://playground.ai.cloudflare.com/api/models"
     supports_stream = True
@@ -43,6 +43,7 @@ class Copilot(AsyncAuthedProvider, ProviderModelMixin):
 
     working = True
     supports_stream = True
+    active_by_default = True
 
     default_model = "Copilot"
     models = [default_model, "Think Deeper"]
@@ -10,6 +10,7 @@ class PenguinAI(OpenaiTemplate):
     url = "https://penguinai.tech"
     api_base = "https://api.penguinai.tech/v1"
     working = True
+    active_by_default = True
 
     default_model = "gpt-3.5-turbo"
     default_vision_model = "gpt-4o"
@@ -64,6 +64,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
     label = "Pollinations AI"
     url = "https://pollinations.ai"
     login_url = "https://auth.pollinations.ai"
+    active_by_default = True
 
     working = True
     supports_system_message = True
@@ -10,6 +10,7 @@ from .PollinationsAI import PollinationsAI
 class PollinationsImage(PollinationsAI):
     label = "PollinationsImage"
     parent = PollinationsAI.__name__
+    active_by_default = False
     default_model = "flux"
     default_vision_model = None
     default_image_model = default_model
@@ -18,6 +18,7 @@ class Together(OpenaiTemplate):
     activation_endpoint = "https://www.codegeneration.ai/activate-v2"
     models_endpoint = "https://api.together.xyz/v1/models"
 
+    active_by_default = True
     working = True
     needs_auth = False
     supports_stream = True
@@ -24,6 +24,7 @@ class HarProvider(AsyncAuthedProvider, ProviderModelMixin):
     url = "https://legacy.lmarena.ai"
     api_endpoint = "/queue/join?"
     working = True
+    active_by_default = True
     default_model = LegacyLMArena.default_model
 
     @classmethod
@@ -13,7 +13,7 @@ class DeepInfra(OpenaiTemplate):
     api_base = "https://api.deepinfra.com/v1/openai"
     working = True
     needs_auth = True
-
+    active_by_default = True
     default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct"
     default_image_model = "stabilityai/sd3.5"
 
@@ -3,7 +3,7 @@ from __future__ import annotations
 from .OpenaiAPI import OpenaiAPI
 
 class DeepSeek(OpenaiAPI):
-    label = "DeepSeek"
+    label = "DeepSeek API"
     url = "https://platform.deepseek.com"
     login_url = "https://platform.deepseek.com/api_keys"
     working = True
@@ -18,6 +18,7 @@ except ImportError:
     has_dsk = False
 
 class DeepSeekAPI(AsyncAuthedProvider, ProviderModelMixin):
     label = "DeepSeek"
     url = "https://chat.deepseek.com"
     working = has_dsk
+    active_by_default = has_dsk
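The `has_dsk` flag in the context above comes from an import guard; wiring both `working` and `active_by_default` to it keeps the provider out of the default set whenever the optional dependency is missing. A rough sketch of the pattern, assuming the guarded import is a `dsk` package (the actual import statement is outside this hunk):

# Sketch of the import-guard pattern; the real import is not shown in
# this hunk, so "dsk" here is an assumption.
try:
    import dsk  # noqa: F401  (optional dependency)
    has_dsk = True
except ImportError:
    has_dsk = False

class ExampleProvider:
    # Both flags follow the dependency: the provider is neither reported
    # as working nor activated by default unless the import succeeded.
    working = has_dsk
    active_by_default = has_dsk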
@@ -21,6 +21,7 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://ai.google.dev"
     login_url = "https://aistudio.google.com/u/0/apikey"
     api_base = "https://generativelanguage.googleapis.com/v1beta"
+    active_by_default = True
 
     working = True
     supports_message_history = True
@@ -79,7 +79,6 @@ class LMArenaBeta(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
     url = "https://beta.lmarena.ai"
     api_endpoint = "https://beta.lmarena.ai/api/stream/create-evaluation"
     working = True
-    active_by_default = has_nodriver
 
     default_model = list(text_models.keys())[0]
     models = list(text_models) + list(image_models)
@@ -37,7 +37,7 @@ class HuggingChat(AsyncAuthedProvider, ProviderModelMixin):
     origin = f"https://{domain}"
     url = f"{origin}/chat"
 
-    working = True
+    working = False
     use_nodriver = True
     supports_stream = True
     needs_auth = True
@@ -22,7 +22,6 @@ class HuggingFaceMedia(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://huggingface.co"
     working = True
     needs_auth = True
-
     model_aliases = image_model_aliases
 
     tasks = ["text-to-image", "text-to-video"]
@@ -17,7 +17,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://huggingface.co"
     login_url = "https://huggingface.co/settings/tokens"
     working = True
-    supports_message_history = True
+    active_by_default = True
 
     @classmethod
     def get_models(cls, **kwargs) -> list[str]:
@@ -100,7 +100,7 @@ class Api:
         "vision": getattr(provider, "default_vision_model", None) is not None,
         "nodriver": getattr(provider, "use_nodriver", False),
         "hf_space": getattr(provider, "hf_space", False),
-        "active_by_default": not provider.needs_auth if provider.active_by_default is None else provider.active_by_default,
+        "active_by_default": False if provider.active_by_default is None else provider.active_by_default,
         "auth": provider.needs_auth,
         "login_url": getattr(provider, "login_url", None),
     } for provider in Provider.__providers__ if provider.working and safe_get_models(provider)]
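This Api hunk is the behavioral core of the commit: previously a provider with no explicit flag counted as active whenever it did not need auth; now an unset flag means inactive, which is why the hunks above add `active_by_default = True` to providers that should stay enabled. Side by side as plain functions (a sketch; the repo computes this inline in the dict comprehension shown above):

def active_by_default_old(provider) -> bool:
    # Old rule: default-active unless the provider required auth.
    return not provider.needs_auth if provider.active_by_default is None else provider.active_by_default

def active_by_default_new(provider) -> bool:
    # New rule: providers must opt in; an unset flag now means inactive.
    return False if provider.active_by_default is None else provider.active_by_default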
File diff suppressed because one or more lines are too long
@@ -12,7 +12,7 @@ from ..Provider.hf_space import HuggingSpace
 from ..Provider import Cloudflare, Gemini, GeminiPro, Grok, DeepSeekAPI, PerplexityLabs, LambdaChat, PollinationsAI, PuterJS
 from ..Provider import Microsoft_Phi_4_Multimodal, DeepInfraChat, Blackbox, OIVSCodeSer0501, OIVSCodeSer2, TeachAnything, OperaAria, Startnest
 from ..Provider import Together, WeWordle, Yqcloud, Chatai, ImageLabs, LegacyLMArena, LMArenaBeta, Free2GPT
-from ..Provider import EdgeTTS, gTTS, MarkItDown, OpenAIFM, Video
+from ..Provider import EdgeTTS, gTTS, MarkItDown, OpenAIFM, PenguinAI
 from ..Provider import HarProvider, HuggingFace, HuggingFaceMedia
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .. import Provider
@@ -24,7 +24,7 @@ PROVIERS_LIST_1 = [
     CopilotAccount, OpenaiChat, Cloudflare, PerplexityLabs, Gemini, Grok, DeepSeekAPI, Blackbox, OpenAIFM,
     OIVSCodeSer2, OIVSCodeSer0501, TeachAnything, WeWordle, Yqcloud, Chatai, Free2GPT, ImageLabs,
     # Has lazy loading model lists
-    PollinationsAI, HarProvider, LegacyLMArena, LMArenaBeta, LambdaChat, DeepInfraChat,
+    PollinationsAI, HarProvider, LegacyLMArena, LMArenaBeta, LambdaChat, DeepInfraChat, PenguinAI,
     HuggingSpace, HuggingFace, HuggingFaceMedia, GeminiPro, Together, PuterJS, OperaAria, Startnest
 ]
 
@@ -342,6 +342,7 @@ class AnyModelProviderMixin(ProviderModelMixin):
 
 class AnyProvider(AsyncGeneratorProvider, AnyModelProviderMixin):
     working = True
+    active_by_default = True
 
     @classmethod
     async def create_async_generator(