mirror of https://github.com/xtekky/gpt4free.git (synced 2025-12-06 02:30:41 -08:00)
Update models list
This commit is contained in:
parent fe79b11070, commit a62a1b6e71
21 changed files with 1188 additions and 1188 deletions
@@ -1,8 +1,9 @@
 from __future__ import annotations

-import random
+import requests
 from .template import OpenaiTemplate
 from ..errors import ModelNotFoundError
+from ..config import DEFAULT_MODEL
 from .. import debug

@@ -14,74 +15,34 @@ class DeepInfraChat(OpenaiTemplate):
     api_endpoint = "https://api.deepinfra.com/v1/openai/chat/completions"
     working = True

-    default_model = 'deepseek-ai/DeepSeek-V3-0324'
-    default_vision_model = 'microsoft/Phi-4-multimodal-instruct'
+    default_model = DEFAULT_MODEL
+    default_vision_model = DEFAULT_MODEL
     vision_models = [
         default_vision_model,
-        'meta-llama/Llama-3.2-90B-Vision-Instruct'
+        'meta-llama/Llama-3.2-90B-Vision-Instruct',
+        'openai/gpt-oss-120b',
+        'openai/gpt-oss-20b',
     ]
-    models = [
-        # cognitivecomputations
-        'cognitivecomputations/dolphin-2.6-mixtral-8x7b',
-        'cognitivecomputations/dolphin-2.9.1-llama-3-70b',
-
-        # deepinfra
-        'deepinfra/airoboros-70b',
-
-        # deepseek-ai
-        default_model,
-        'deepseek-ai/DeepSeek-V3-0324-Turbo',
-
-        'deepseek-ai/DeepSeek-R1-0528-Turbo',
-        'deepseek-ai/DeepSeek-R1-0528',
-
-        'deepseek-ai/DeepSeek-Prover-V2-671B',
-
-        'deepseek-ai/DeepSeek-V3',
-
-        'deepseek-ai/DeepSeek-R1',
-        'deepseek-ai/DeepSeek-R1-Turbo',
-        'deepseek-ai/DeepSeek-R1-Distill-Llama-70B',
-        'deepseek-ai/DeepSeek-R1-Distill-Qwen-32B',
-
-        # google (gemma)
-        'google/gemma-1.1-7b-it',
-        'google/gemma-2-9b-it',
-        'google/gemma-2-27b-it',
-        'google/gemma-3-4b-it',
-        'google/gemma-3-12b-it',
-        'google/gemma-3-27b-it',
-
-        # google (codegemma)
-        'google/codegemma-7b-it',
-
-        # lizpreciatior
-        'lizpreciatior/lzlv_70b_fp16_hf',
-
-        # meta-llama
-        'meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8',
-        'meta-llama/Llama-4-Scout-17B-16E-Instruct',
-        'meta-llama/Meta-Llama-3.1-8B-Instruct',
-        'meta-llama/Llama-3.3-70B-Instruct-Turbo',
-        'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo',
-
-        # microsoft
-        'microsoft/phi-4-reasoning-plus',
-        'microsoft/phi-4',
-
-        'microsoft/WizardLM-2-8x22B',
-        'microsoft/WizardLM-2-7B',
-
-        # mistralai
-        'mistralai/Mistral-Small-3.1-24B-Instruct-2503',
-
-        # Qwen
-        'Qwen/Qwen3-235B-A22B',
-        'Qwen/Qwen3-30B-A3B',
-        'Qwen/Qwen3-32B',
-        'Qwen/Qwen3-14B',
-        'Qwen/QwQ-32B',
-    ] + vision_models
+
+    @classmethod
+    def get_models(cls, **kwargs):
+        if not cls.models:
+            url = 'https://api.deepinfra.com/models/featured'
+            response = requests.get(url)
+            models = response.json()
+
+            cls.models = []
+            cls.image_models = []
+
+            for model in models:
+                if model["type"] == "text-generation":
+                    cls.models.append(model['model_name'])
+                elif model["reported_type"] == "text-to-image":
+                    cls.image_models.append(model['model_name'])
+
+            cls.models.extend(cls.image_models)
+
+        return cls.models

     model_aliases = {
         # cognitivecomputations

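The rewritten provider drops the hardcoded catalogue and discovers models lazily: the first get_models call fetches DeepInfra's featured-models endpoint, splits entries by type, and caches the result on the class, with image models appended after text models. A minimal standalone sketch of the same filtering, using the endpoint and field names from the hunk above (the payload shape beyond those fields is an assumption):

    import requests

    # One-off fetch mirroring DeepInfraChat.get_models
    featured = requests.get('https://api.deepinfra.com/models/featured').json()
    text_models = [m['model_name'] for m in featured if m.get('type') == 'text-generation']
    image_models = [m['model_name'] for m in featured if m.get('reported_type') == 'text-to-image']
    print(len(text_models), 'text models,', len(image_models), 'image models')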
@@ -4,6 +4,7 @@ from __future__ import annotations
 from ..typing import AsyncResult, Messages
 from ..providers.response import JsonConversation, Reasoning, TitleGeneration
 from ..requests import StreamSession, raise_for_status
+from ..config import DEFAULT_MODEL
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import get_last_user_message

@@ -16,6 +17,9 @@ class GptOss(AsyncGeneratorProvider, ProviderModelMixin):

     default_model = "gpt-oss-120b"
     models = [default_model, "gpt-oss-20b"]
+    model_aliases = {
+        DEFAULT_MODEL: default_model,
+    }

     @classmethod
     async def create_async_generator(

@@ -27,8 +31,7 @@ class GptOss(AsyncGeneratorProvider, ProviderModelMixin):
         proxy: str = None,
         **kwargs
     ) -> AsyncResult:
-        if not model:
-            model = cls.default_model
+        model = cls.get_model(model)
         user_message = get_last_user_message(messages)
         cookies = {}
         if conversation is None:

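Swapping the manual default fallback for cls.get_model(model) matters because of the model_aliases block added above: assuming ProviderModelMixin.get_model consults model_aliases (the same lookup pattern spelled out explicitly in the Azure and LMArenaBeta hunks below), a caller passing the global DEFAULT_MODEL lands on this provider's local name:

    # Hedged sketch of the lookup the new alias entry enables
    model = "openai/gpt-oss-120b"                        # DEFAULT_MODEL
    aliases = {"openai/gpt-oss-120b": "gpt-oss-120b"}    # GptOss.model_aliases
    model = aliases.get(model, model)                    # -> "gpt-oss-120b"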
@@ -17,7 +17,7 @@ from ..errors import ModelNotFoundError, ResponseError
 from .. import debug

 class LegacyLMArena(AsyncGeneratorProvider, ProviderModelMixin):
-    label = "Legacy LM Arena"
+    label = "LMArena (Legacy)"
     url = "https://legacy.lmarena.ai"
     api_endpoint = "/queue/join?"

@@ -113,6 +113,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
         "flux-schnell": "flux",
         "flux-pro": "flux",
         "flux": "flux",
+        "flux-kontext": "kontext",
     }
     swap_models = {value: key for key, value in model_aliases.items()}

@@ -22,6 +22,9 @@ class Azure(OpenaiTemplate):
     audio_models = ["gpt-4o-mini-audio-preview"]
     vision_models = ["gpt-4.1", "o4-mini", "model-router", "flux.1-kontext-pro"]
     image_models = ["flux-1.1-pro", "flux.1-kontext-pro"]
+    model_aliases = {
+        "flux-kontext": "flux-1-kontext-pro"
+    }
     model_extra_body = {
         "gpt-4o-mini-audio-preview": {
             "audio": {

@@ -66,6 +69,8 @@ class Azure(OpenaiTemplate):
     ) -> AsyncResult:
         if not model:
            model = os.environ.get("AZURE_DEFAULT_MODEL", cls.default_model)
+        if model in cls.model_aliases:
+            model = cls.model_aliases[model]
         if not api_endpoint:
             if not cls.routes:
                 cls.get_models()

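The two added lines are the usual alias hop, so the short cross-provider name resolves to Azure's deployment name before routing:

    model = "flux-kontext"
    model_aliases = {"flux-kontext": "flux-1-kontext-pro"}
    if model in model_aliases:
        model = model_aliases[model]   # -> "flux-1-kontext-pro"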
@@ -1,9 +1,9 @@
 from __future__ import annotations

-import requests
 from ...typing import AsyncResult, Messages
 from ...requests import StreamSession, raise_for_status
 from ...providers.response import ImageResponse
+from ...config import DEFAULT_MODEL
 from ..template import OpenaiTemplate
 from ..DeepInfraChat import DeepInfraChat
 from ..helper import format_media_prompt

@@ -14,28 +14,15 @@ class DeepInfra(OpenaiTemplate):
     api_base = "https://api.deepinfra.com/v1/openai"
     working = True
     active_by_default = True
-    default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct"
-    default_image_model = "stabilityai/sd3.5"
+    default_model = DEFAULT_MODEL
+    vision_models = DeepInfraChat.vision_models
     model_aliases = DeepInfraChat.model_aliases

     @classmethod
     def get_models(cls, **kwargs):
         if not cls.models:
-            url = 'https://api.deepinfra.com/models/featured'
-            response = requests.get(url)
-            models = response.json()
-
-            cls.models = []
-            cls.image_models = []
-
-            for model in models:
-                if model["type"] == "text-generation":
-                    cls.models.append(model['model_name'])
-                elif model["reported_type"] == "text-to-image":
-                    cls.image_models.append(model['model_name'])
-
-            cls.models.extend(cls.image_models)
-
+            cls.models = DeepInfraChat.get_models()
+            cls.image_models = DeepInfraChat.image_models
         return cls.models

     @classmethod

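DeepInfra no longer queries the featured-models endpoint itself; it reuses DeepInfraChat's cached result, so the two providers cannot drift apart. By construction of the code above:

    # DeepInfraChat.get_models() returns DeepInfraChat.models,
    # which DeepInfra.get_models() assigns to its own cls.models
    DeepInfra.get_models()
    assert DeepInfra.models == DeepInfraChat.models
    assert DeepInfra.image_models == DeepInfraChat.image_models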
@@ -497,7 +497,7 @@ class GeminiCLI(AsyncGeneratorProvider, ProviderModelMixin):
     login_url = "https://github.com/GewoonJaap/gemini-cli-openai"

     default_model = "gemini-2.5-pro"
-    fallback_models = [
+    models = [
         "gemini-2.5-pro",
         "gemini-2.5-flash",
     ]

@@ -1,6 +1,7 @@
 from __future__ import annotations

 from ..template import OpenaiTemplate
+from ...config import DEFAULT_MODEL

 class Groq(OpenaiTemplate):
     url = "https://console.groq.com/playground"

@@ -9,7 +10,7 @@ class Groq(OpenaiTemplate):
     working = True
     needs_auth = True
     active_by_default = True
-    default_model = "mixtral-8x7b-32768"
+    default_model = DEFAULT_MODEL
     fallback_models = [
         "distil-whisper-large-v3-en",
         "gemma2-9b-it",

@@ -30,4 +31,8 @@ class Groq(OpenaiTemplate):
         "whisper-large-v3",
         "whisper-large-v3-turbo",
     ]
-    model_aliases = {"mixtral-8x7b": "mixtral-8x7b-32768", "llama2-70b": "llama2-70b-4096"}
+    model_aliases = {
+        "mixtral-8x7b": "mixtral-8x7b-32768",
+        "llama2-70b": "llama2-70b-4096",
+        "moonshotai/Kimi-K2-Instruct": "moonshotai/kimi-k2-Instruct"
+    }

@@ -126,6 +126,9 @@ class LMArenaBeta(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):

     default_model = list(text_models.keys())[0]
     models = list(text_models) + list(image_models)
+    model_aliases = {
+        "flux-kontext": "flux-1-kontext-pro",
+    }
     image_models = list(image_models)
     vision_models = vision_models

@@ -162,6 +165,8 @@ class LMArenaBeta(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
         is_image_model = model in image_models
         if not model:
             model = cls.default_model
+        if model in cls.model_aliases:
+            model = cls.model_aliases[model]
         if model in image_models:
             model = image_models[model]
         elif model in text_models:

@@ -1,6 +1,7 @@
 from __future__ import annotations

 from ..template import OpenaiTemplate
+from ...config import DEFAULT_MODEL

 class OpenRouter(OpenaiTemplate):
     label = "OpenRouter"

@@ -9,4 +10,5 @@ class OpenRouter(OpenaiTemplate):
     api_base = "https://openrouter.ai/api/v1"
     working = True
     needs_auth = True
     active_by_default = True
+    default_model = DEFAULT_MODEL

@@ -1,11 +1,8 @@
 from __future__ import annotations

-import requests
-from typing import Union
-
-from ...typing import AsyncResult, Messages, MediaListType
 from ..template import OpenaiTemplate
-from ...requests import StreamSession, raise_for_status
+from ...config import DEFAULT_MODEL
 from ...errors import ModelNotFoundError
 from ... import debug

@@ -23,17 +20,18 @@ class Together(OpenaiTemplate):
     supports_system_message = True
     supports_message_history = True

-    default_model = 'meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8'
+    default_model = DEFAULT_MODEL
     default_vision_model = default_model
     default_image_model = 'black-forest-labs/FLUX.1.1-pro'
     vision_models = [
+        default_vision_model,
         'Qwen/Qwen2-VL-72B-Instruct',
         'Qwen/Qwen2.5-VL-72B-Instruct',
         'arcee-ai/virtuoso-medium-v2',
         'arcee_ai/arcee-spotlight',
         'meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo',
         'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo',
-        default_vision_model,
+        'meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8',
         'meta-llama/Llama-4-Scout-17B-16E-Instruct',
         'meta-llama/Llama-Vision-Free',
     ]

@@ -1,4 +1,6 @@
-default_model = "openai/gpt-oss-120b"
+from ....config import DEFAULT_MODEL
+
+default_model = DEFAULT_MODEL
 default_image_model = "black-forest-labs/FLUX.1-dev"
 image_models = [
     default_image_model,

@@ -10,6 +10,7 @@ from ...image import use_aspect_ratio
 from ...image.copy_images import save_response_media
 from ...providers.response import FinishReason, ToolCalls, Usage, ImageResponse, ProviderInfo, AudioResponse, Reasoning
 from ...tools.media import render_messages
+from ...tools.run_tools import AuthManager
 from ...errors import MissingAuthError
 from ... import debug

@@ -33,16 +34,18 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin):
                api_base = cls.api_base
            if api_key is None and cls.api_key is not None:
                api_key = cls.api_key
+           if not api_key:
+               api_key = AuthManager.load_api_key(cls)
            if api_key is not None:
                headers["authorization"] = f"Bearer {api_key}"
            response = requests.get(f"{api_base}/models", headers=headers, verify=cls.ssl)
            raise_for_status(response)
            data = response.json()
            data = data.get("data") if isinstance(data, dict) else data
-           cls.image_models = [model.get("id") for model in data if model.get("image")]
+           cls.image_models = [model.get("id", model.get("name")) for model in data if model.get("image")]
            cls.vision_models = cls.vision_models.copy()
-           cls.vision_models += [model.get("id") for model in data if model.get("vision")]
-           cls.models = [model.get("id") for model in data]
+           cls.vision_models += [model.get("id", model.get("name")) for model in data if model.get("vision")]
+           cls.models = [model.get("id", model.get("name")) for model in data]
            if cls.sort_models:
                cls.models.sort()
        except Exception as e:

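The repeated model.get("id", model.get("name")) edit makes the template tolerate /models payloads that label entries with "name" instead of "id"; plain dict.get semantics do the work:

    entry = {"name": "gpt-oss-120b"}            # payload entry without an "id" key
    entry.get("id", entry.get("name"))          # -> "gpt-oss-120b"
    entry = {"id": "m1", "name": "ignored"}
    entry.get("id", entry.get("name"))          # -> "m1"; "id" still wins when present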
@@ -275,8 +275,9 @@ class Api:
             return ErrorResponse.from_message(e.detail, e.status_code, e.headers)
         if user is None:
             ip = request.headers.get("X-Forwarded-For", "")[:4].strip(":.")
-            user = request.headers.get("Cf-Ipcountry", "")
-            user = f"{user}:{ip}" if user else ip
+            country = request.headers.get("Cf-Ipcountry", "")
+            user = request.headers.get("x-user", ip)
+            user = f"{country}:{user}" if country else user
         request = update_headers(request, user)
         response = await call_next(request)
         return response

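The rewritten block builds the request identity as country:user, preferring an explicit x-user header and falling back to a short X-Forwarded-For fragment. A walk-through with sample values, where a plain dict stands in for the case-insensitive request.headers:

    headers = {"X-Forwarded-For": "203.0.113.7", "Cf-Ipcountry": "DE"}
    ip = headers.get("X-Forwarded-For", "")[:4].strip(":.")   # "203"
    country = headers.get("Cf-Ipcountry", "")                  # "DE"
    user = headers.get("x-user", ip)                           # no x-user header -> "203"
    user = f"{country}:{user}" if country else user            # "DE:203"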
@@ -365,7 +365,7 @@ class Completions:
             messages = [{"role": "user", "content": messages}]
         resolve_media(kwargs, image, image_name)
         if hasattr(model, "name"):
-            model = model.name
+            model = model.get_long_name()
         if provider is None:
             provider = self.provider
         if provider is None:

@@ -686,7 +686,7 @@ class AsyncCompletions:
             messages = [{"role": "user", "content": messages}]
         resolve_media(kwargs, image, image_name)
         if hasattr(model, "name"):
-            model = model.name
+            model = model.get_long_name()
         if provider is None:
             provider = self.provider
         if provider is None:

@@ -781,7 +781,7 @@ class AsyncResponses():
             input[idx] = {"role": message["role"], "content": message["content"]}
         resolve_media(kwargs)
         if hasattr(model, "name"):
-            model = model.name
+            model = model.get_long_name()
         if provider is None:
             provider = self.provider
         if provider is None:

@@ -83,7 +83,7 @@ def get_model_and_provider(model : Union[Model, str],
    provider_name = provider.__name__ if hasattr(provider, "__name__") else type(provider).__name__

    if isinstance(model, Model):
-        model = model.name
+        model = model.get_long_name()

    if not ignore_working and not provider.working:
        raise ProviderNotWorkingError(f"{provider_name} is not working")

@ -21,4 +21,5 @@ GITHUB_REPOSITORY = f"xtekky/{ORGANIZATION}"
|
||||||
STATIC_DOMAIN = f"{PACKAGE_NAME}.dev"
|
STATIC_DOMAIN = f"{PACKAGE_NAME}.dev"
|
||||||
STATIC_URL = f"https://{STATIC_DOMAIN}/"
|
STATIC_URL = f"https://{STATIC_DOMAIN}/"
|
||||||
DIST_DIR = f"./{STATIC_DOMAIN}/dist"
|
DIST_DIR = f"./{STATIC_DOMAIN}/dist"
|
||||||
DOWNLOAD_URL = f"https://raw.githubusercontent.com/{ORGANIZATION}/{STATIC_DOMAIN}/refs/heads/main/"
|
DOWNLOAD_URL = f"https://raw.githubusercontent.com/{ORGANIZATION}/{STATIC_DOMAIN}/refs/heads/main/"
|
||||||
|
DEFAULT_MODEL = "openai/gpt-oss-120b"
|
||||||
|
|
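This constant is what the rest of the commit imports: providers either adopt it verbatim (default_model = DEFAULT_MODEL in DeepInfraChat, DeepInfra, Groq, OpenRouter, Together) or alias it to their local spelling (GptOss). Assuming the package root is g4f, it is reachable as:

    from g4f.config import DEFAULT_MODEL
    print(DEFAULT_MODEL)   # openai/gpt-oss-120b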
@@ -12,11 +12,12 @@ from .Provider import (
     Copilot,
     DeepInfraChat,
     Free2GPT,
+    GptOss,
     HuggingSpace,
     Grok,
     DeepseekAI_JanusPro7b,
-    DeepSeekAPI,
     ImageLabs,
+    Kimi,
     LambdaChat,
     OIVSCodeSer2,
     OIVSCodeSer0501,

@@ -32,18 +33,23 @@ from .Provider import (
     Yqcloud,

     ### Needs Auth ###
+    Azure,
     BingCreateImages,
     CopilotAccount,
     Gemini,
+    GeminiCLI,
     GeminiPro,
     HuggingChat,
     HuggingFace,
     HuggingFaceMedia,
     HuggingFaceAPI,
+    LMArenaBeta,
+    Groq,
     MetaAI,
     MicrosoftDesigner,
     OpenaiAccount,
     OpenaiChat,
+    OpenRouter,
 )

 class ModelRegistry:

@@ -115,6 +121,11 @@ class Model:
     name: str
     base_provider: str
     best_provider: ProviderType = None
+    long_name: Optional[str] = None
+
+    def get_long_name(self) -> str:
+        """Get the long name of the model, if available."""
+        return self.long_name if self.long_name else self.name

     def __post_init__(self):
         """Auto-register model after initialization"""

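get_long_name gives registry and client code the fully qualified provider-side identifier while name stays the short user-facing key. With the model definitions added later in this diff:

    gpt_oss_120b.get_long_name()   # 'openai/gpt-oss-120b' (long_name is set)
    kimi.get_long_name()           # 'moonshotai/Kimi-K2-Instruct'
    sonar.get_long_name()          # 'sonar' (no long_name, falls back to name)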
@@ -278,6 +289,13 @@ gpt_4_5 = Model(
     best_provider = OpenaiChat
 )

+gpt_oss_120b = Model(
+    name = 'gpt-oss-120b',
+    long_name = 'openai/gpt-oss-120b',
+    base_provider = 'OpenAI',
+    best_provider = IterListProvider([GptOss, Together, DeepInfraChat, HuggingFace, OpenRouter, Groq])
+)
+
 # dall-e
 dall_e_3 = ImageModel(
     name = 'dall-e-3',

@@ -510,13 +528,13 @@ gemini_2_0_flash_thinking_with_apps = Model(
 gemini_2_5_flash = Model(
     name = 'gemini-2.5-flash',
     base_provider = 'Google',
-    best_provider = IterListProvider([Gemini, GeminiPro])
+    best_provider = IterListProvider([Gemini, GeminiPro, GeminiCLI])
 )

 gemini_2_5_pro = Model(
     name = 'gemini-2.5-pro',
     base_provider = 'Google',
-    best_provider = IterListProvider([Gemini])
+    best_provider = IterListProvider([Gemini, GeminiPro, GeminiCLI])
 )

 # codegemma

@@ -846,6 +864,13 @@ grok_3_r1 = Model(
     best_provider = Grok
 )

+kimi = Model(
+    name = 'kimi-k2',
+    base_provider = 'kimi.com',
+    best_provider = IterListProvider([Kimi, HuggingFace, DeepInfraChat, Groq]),
+    long_name = "moonshotai/Kimi-K2-Instruct"
+)
+
 ### Perplexity AI ###
 sonar = Model(
     name = 'sonar',

@@ -990,9 +1015,9 @@ flux_canny = ImageModel(
 )

 flux_kontext_max = ImageModel(
-    name = 'flux-kontext-max',
+    name = 'flux-kontext',
     base_provider = 'Black Forest Labs',
-    best_provider = Together
+    best_provider = IterListProvider([PollinationsAI, Azure, LMArenaBeta, Together])
 )

 flux_dev_lora = ImageModel(

@@ -1001,18 +1026,6 @@ flux_dev_lora = ImageModel(
     best_provider = Together
 )

-flux_kontext_pro = ImageModel(
-    name = 'flux-kontext-pro',
-    base_provider = 'Black Forest Labs',
-    best_provider = Together
-)
-
-flux_kontext_dev = ImageModel(
-    name = 'flux-kontext-dev',
-    base_provider = 'Black Forest Labs',
-    best_provider = Together
-)
-
 class ModelUtils:
     """
     Utility class for mapping string identifiers to Model instances.

File diff suppressed because one or more lines are too long
@@ -118,10 +118,10 @@ class AnyModelProviderMixin(ProviderModelMixin):
            "default": {provider.__name__: "" for provider in models.default.best_provider.providers},
        }
        cls.model_map.update({
-            model: {
-                provider.__name__: model for provider in providers
+            model.name: {
+                provider.__name__: model.get_long_name() for provider in providers
                if provider.working
-            } for model, (_, providers) in models.__models__.items()
+            } for _, (model, providers) in models.__models__.items()
        })

        # Process special providers

@@ -149,9 +149,10 @@ class AnyModelProviderMixin(ProviderModelMixin):
                    cls.model_map[model].update({provider.__name__: model})
                else:
                    for model in provider.get_models():
-                        if model not in cls.model_map:
-                            cls.model_map[model] = {}
-                        cls.model_map[model].update({provider.__name__: model})
+                        cleaned = clean_name(model)
+                        if cleaned not in cls.model_map:
+                            cls.model_map[cleaned] = {}
+                        cls.model_map[cleaned].update({provider.__name__: model})
            except Exception as e:
                debug.error(f"Error getting models for provider {provider.__name__}:", e)
                continue

@@ -164,28 +165,6 @@ class AnyModelProviderMixin(ProviderModelMixin):
        if hasattr(provider, 'video_models'):
            cls.video_models.extend(provider.video_models)

-# Clean model names function
-def clean_name(name: str) -> str:
-    name = name.split("/")[-1].split(":")[0].lower()
-    # Date patterns
-    name = re.sub(r'-\d{4}-\d{2}-\d{2}', '', name)
-    # name = re.sub(r'-\d{3,8}', '', name)
-    name = re.sub(r'-\d{2}-\d{2}', '', name)
-    name = re.sub(r'-[0-9a-f]{8}$', '', name)
-    # Version patterns
-    name = re.sub(r'-(instruct|chat|preview|experimental|v\d+|fp8|bf16|hf|free|tput)$', '', name)
-    # Other replacements
-    name = name.replace("_", ".")
-    name = name.replace("c4ai-", "")
-    name = name.replace("meta-llama-", "llama-")
-    name = name.replace("llama3", "llama-3")
-    name = name.replace("flux.1-", "flux-")
-    name = name.replace("qwen1-", "qwen-1")
-    name = name.replace("qwen2-", "qwen-2")
-    name = name.replace("qwen3-", "qwen-3")
-    name = name.replace("stable-diffusion-3.5-large", "sd-3.5-large")
-    return name
-
for provider in PROVIERS_LIST_3:
    if not provider.working:
        continue

@@ -421,6 +400,28 @@ class AnyProvider(AsyncGeneratorProvider, AnyModelProviderMixin):
        ):
            yield chunk

+# Clean model names function
+def clean_name(name: str) -> str:
+    name = name.split("/")[-1].split(":")[0].lower()
+    # Date patterns
+    name = re.sub(r'-\d{4}-\d{2}-\d{2}', '', name)
+    # name = re.sub(r'-\d{3,8}', '', name)
+    name = re.sub(r'-\d{2}-\d{2}', '', name)
+    name = re.sub(r'-[0-9a-f]{8}$', '', name)
+    # Version patterns
+    name = re.sub(r'-(instruct|chat|preview|experimental|v\d+|fp8|bf16|hf|free|tput)$', '', name)
+    # Other replacements
+    name = name.replace("_", ".")
+    name = name.replace("c4ai-", "")
+    name = name.replace("meta-llama-", "llama-")
+    name = name.replace("llama3", "llama-3")
+    name = name.replace("flux.1-", "flux-")
+    name = name.replace("qwen1-", "qwen-1")
+    name = name.replace("qwen2-", "qwen-2")
+    name = name.replace("qwen3-", "qwen-3")
+    name = name.replace("stable-diffusion-3.5-large", "sd-3.5-large")
+    return name
+
setattr(Provider, "AnyProvider", AnyProvider)
Provider.__map__["AnyProvider"] = AnyProvider
Provider.__providers__.append(AnyProvider)

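clean_name itself is unchanged; it only moves from above the PROVIERS_LIST_3 loop to after the AnyProvider class, matching its new call site in the get_models hunk above. Tracing one input through the rules:

    clean_name('meta-llama/Meta-Llama-3.1-8B-Instruct')
    # split('/')[-1].lower()          -> 'meta-llama-3.1-8b-instruct'
    # suffix regex strips '-instruct' -> 'meta-llama-3.1-8b'
    # 'meta-llama-' -> 'llama-'       -> 'llama-3.1-8b'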
@@ -366,7 +366,7 @@ class ImageResponse(MediaResponse):
        if self.get("width") and self.get("height"):
            return "\n".join([
                f'<a href="{html.escape(url)}" data-width="{self.get("width")}" data-height="{self.get("height")}" data-source="{html.escape(self.get("source_url", ""))}">'
-                + f'<img src="{url.replace("/media/", "/thumbnail/")}" alt="{html.escape(self.alt)}"></a>'
+                + f'<img src="{url.replace("/media/", "/thumbnail/")}" alt="{html.escape(" ".join(self.alt.split()))}"></a>'
                for url in self.get_list()
            ])
        return format_images_markdown(self.urls, self.alt, self.get("preview"))
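Collapsing the alt text keeps multi-line prompts from injecting raw newlines and repeated spaces into the HTML attribute:

    alt = 'a red\nfox   jumping'
    ' '.join(alt.split())   # -> 'a red fox jumping'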
Loading…
Add table
Add a link
Reference in a new issue