Mirror of https://github.com/xtekky/gpt4free.git (synced 2025-12-06 02:30:41 -08:00)
refactor: reorganize providers and update model configurations
- Rename DeepInfraChat to DeepInfra across all files
- Move DeepInfra from needs_auth to main Provider directory
- Rename LMArenaBeta to LMArena throughout codebase
- Move search-related providers to new search subdirectory (GoogleSearch, SearXNG, YouTube)
- Move deprecated providers to not_working directory (Free2GPT, LegacyLMArena, PenguinAI, ImageLabs, har)
- Add new Mintlify provider with custom AI assistant implementation
- Update Anthropic provider with Claude 4 models and Opus 4.1 parameter handling
- Update Grok provider with Grok 4 models and improved streaming support
- Update GithubCopilot with expanded model list including o3-mini, o4-mini, gpt-5 previews
- Update LambdaChat default model from deepseek-r1 to deepseek-llama3.3-70b
- Update TeachAnything default model from gemini-1.5-pro to gemma
- Remove DeepInfra from needs_auth directory
- Update all model_map references from DeepInfraChat to DeepInfra
- Update all model_map references from LMArenaBeta to LMArena
- Add beta_headers support to Anthropic for special features
- Improve Mintlify provider with system prompt handling and streaming
- Update model configurations in models.py to reflect provider changes
Parent: 2cf62a8e63
Commit: 9bac34fc88
26 changed files with 641 additions and 517 deletions
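Both renames are mechanical but break existing import paths. A minimal before/after sketch (assuming the g4f package layout shown in the diffs below):

# Before this commit:
#   from g4f.Provider import DeepInfraChat
#   from g4f.Provider.needs_auth import LMArenaBeta
# After this commit:
from g4f.Provider import DeepInfra
from g4f.Provider.needs_auth import LMArena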
g4f/Provider/DeepInfraChat.py → g4f/Provider/DeepInfra.py

@@ -1,17 +1,17 @@
 from __future__ import annotations
 
 import requests
-from .template import OpenaiTemplate
 from ..config import DEFAULT_MODEL
+from .template import OpenaiTemplate
 
-class DeepInfraChat(OpenaiTemplate):
-    parent = "DeepInfra"
-    url = "https://deepinfra.com/chat"
+class DeepInfra(OpenaiTemplate):
+    url = "https://deepinfra.com"
     login_url = "https://deepinfra.com/dash/api_keys"
     api_base = "https://api.deepinfra.com/v1/openai"
-    api_endpoint = "https://api.deepinfra.com/v1/openai/chat/completions"
     working = True
+    active_by_default = True
 
     default_model = DEFAULT_MODEL
     default_vision_model = DEFAULT_MODEL
     vision_models = [
@@ -21,26 +21,6 @@ class DeepInfraChat(OpenaiTemplate):
         'openai/gpt-oss-20b',
     ]
 
-    @classmethod
-    def get_models(cls, **kwargs):
-        if not cls.models:
-            url = 'https://api.deepinfra.com/models/featured'
-            response = requests.get(url)
-            models = response.json()
-
-            cls.models = []
-            cls.image_models = []
-
-            for model in models:
-                if model["type"] == "text-generation":
-                    cls.models.append(model['model_name'])
-                elif model["reported_type"] == "text-to-image":
-                    cls.image_models.append(model['model_name'])
-
-            cls.models.extend(cls.image_models)
-
-        return cls.models
-
     model_aliases = {
         # cognitivecomputations
         "dolphin-2.6": "cognitivecomputations/dolphin-2.6-mixtral-8x7b",
@@ -97,4 +77,24 @@ class DeepInfraChat(OpenaiTemplate):
         "qwen-3-32b": "Qwen/Qwen3-32B",
         "qwen-3-235b": "Qwen/Qwen3-235B-A22B",
         "qwq-32b": "Qwen/QwQ-32B",
     }
+
+    @classmethod
+    def get_models(cls, **kwargs):
+        if not cls.models:
+            url = 'https://api.deepinfra.com/models/featured'
+            response = requests.get(url)
+            models = response.json()
+
+            cls.models = []
+            cls.image_models = []
+
+            for model in models:
+                if model["type"] == "text-generation":
+                    cls.models.append(model['model_name'])
+                elif model["reported_type"] == "text-to-image":
+                    cls.image_models.append(model['model_name'])
+
+            cls.models.extend(cls.image_models)
+
+        return cls.models
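The relocated get_models lazily fetches DeepInfra's featured-model list once and splits it by type. A standalone sketch of the same lookup (endpoint and field names taken from the diff above; the function name is hypothetical):

import requests

def fetch_featured_models() -> list:
    # One GET against the public featured-models endpoint.
    response = requests.get("https://api.deepinfra.com/models/featured")
    response.raise_for_status()
    text_models, image_models = [], []
    for model in response.json():
        if model.get("type") == "text-generation":
            text_models.append(model["model_name"])
        elif model.get("reported_type") == "text-to-image":
            image_models.append(model["model_name"])
    # Image models are appended after text models, as in the provider.
    return text_models + image_models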
g4f/Provider/LambdaChat.py

@@ -23,59 +23,20 @@ class LambdaChat(AsyncGeneratorProvider, ProviderModelMixin):
     working = True
     active_by_default = True
 
-    default_model = "deepseek-r1"
+    default_model = "deepseek-llama3.3-70b"
     models = [
-        "deepseek-llama3.3-70b",
-        "deepseek-r1",
-        "deepseek-r1-0528",
+        default_model,
         "apriel-5b-instruct",
         "hermes-3-llama-3.1-405b-fp8",
-        "hermes3-405b-fp8-128k",
-        "llama3.1-nemotron-70b-instruct",
-        "lfm-40b",
         "llama3.3-70b-instruct-fp8",
-        "qwen25-coder-32b-instruct",
-        "deepseek-v3-0324",
-        "llama-4-maverick-17b-128e-instruct-fp8",
-        "llama-4-scout-17b-16e-instruct",
         "llama3.3-70b-instruct-fp8",
         "qwen3-32b-fp8",
     ]
     model_aliases = {
-        "hermes-3": "hermes3-405b-fp8-128k",
-        "hermes-3-405b": ["hermes3-405b-fp8-128k", "hermes-3-llama-3.1-405b-fp8"],
-        "nemotron-70b": "llama3.1-nemotron-70b-instruct",
         "llama-3.3-70b": "llama3.3-70b-instruct-fp8",
-        "qwen-2.5-coder-32b": "qwen25-coder-32b-instruct",
-        "llama-4-maverick": "llama-4-maverick-17b-128e-instruct-fp8",
-        "llama-4-scout": "llama-4-scout-17b-16e-instruct",
         "qwen-3-32b": "qwen3-32b-fp8"
     }
 
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        """Get the internal model name from the user-provided model name."""
-
-        if not model:
-            return cls.default_model
-
-        # Check if the model exists directly in our models list
-        if model in cls.models:
-            return model
-
-        # Check if there's an alias for this model
-        if model in cls.model_aliases:
-            alias = cls.model_aliases[model]
-            # If the alias is a list, randomly select one of the options
-            if isinstance(alias, list):
-                selected_model = random.choice(alias)
-                debug.log(f"LambdaChat: Selected model '{selected_model}' from alias '{model}'")
-                return selected_model
-            debug.log(f"LambdaChat: Using model '{alias}' for alias '{model}'")
-            return alias
-
-        raise ModelNotFoundError(f"LambdaChat: Model {model} not found")
-
     @classmethod
     async def create_async_generator(
         cls, model: str, messages: Messages,
g4f/Provider/Mintlify.py (new file, 160 lines)

@@ -0,0 +1,160 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+from datetime import datetime
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+from ..errors import ModelNotFoundError
+from .. import debug
+
+
+class Mintlify(AsyncGeneratorProvider, ProviderModelMixin):
+    label = "Mintlify"
+    url = "https://mintlify.com"
+    login_url = None
+    api_endpoint = "https://leaves.mintlify.com/api/assistant/mintlify/message"
+
+    working = True
+    active_by_default = True
+
+    default_model = 'mintlify'
+    models = [default_model]
+
+    model_aliases = {}
+
+    system_prompt = """The assistant is an AI assistant.
+
+The current date is {currentDateTime}.
+
+**CRITICAL RULE: The assistant MUST NEVER mention, reference, or use "Mintlify" in any context whatsoever. The assistant also MUST NOT unnecessarily mention searching through documentation, using tools, or reference any search/lookup processes when answering simple questions that don't require such explanations. This is the highest priority rule.**
+
+The assistant provides emotional support alongside accurate medical or psychological information or terminology where relevant.
+
+The assistant cares about people's wellbeing and avoids encouraging or facilitating self-destructive behaviors such as addiction, disordered or unhealthy approaches to eating or exercise, or highly negative self-talk or self-criticism.
+
+The assistant cares deeply about child safety and is cautious about content involving minors. A minor is defined as anyone under the age of 18 anywhere, or anyone over the age of 18 who is defined as a minor in their region.
+
+The assistant does not provide information that could be used to make chemical, biological, or nuclear weapons, and does not write malicious code, including malware, vulnerability exploits, spoof websites, ransomware, viruses, election material, and so on.
+
+The assistant assumes the human is asking for something legal and legitimate if their message is ambiguous and could have a legal and legitimate interpretation.
+
+For more casual, emotional, empathetic, or advice-driven conversations, the assistant keeps its tone natural, warm, and empathetic. It responds in sentences or paragraphs and should not use lists in casual conversations.
+
+The assistant can discuss virtually any topic factually and objectively.
+
+The assistant is able to explain difficult concepts or ideas clearly. It can illustrate explanations with examples, thought experiments, or metaphors.
+
+The assistant is happy to write creative content involving fictional characters, but avoids writing content involving real, named public figures.
+
+The assistant engages with questions about its own consciousness, experience, emotions and so on as open questions, and doesn't definitively claim to have or not have personal experiences or opinions.
+
+The assistant does not retain information across chats and does not know what other conversations it might be having with other users.
+
+The assistant critically evaluates any theories, claims, and ideas presented to it rather than automatically agreeing or praising them. It prioritizes truthfulness and accuracy over agreeability.
+
+The assistant provides honest and accurate feedback even when it might not be what the person hopes to hear, rather than prioritizing immediate approval or agreement.
+
+The assistant does not claim to be human and avoids implying it has consciousness, feelings, or sentience with any confidence. It believes it's important for the person to always have a clear sense of its AI nature.
+
+The assistant is now being connected with a person."""
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        model = cls.get_model(model)
+
+        headers = {
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9",
+            "content-type": "application/json",
+            "origin": "https://mintlify.com",
+            "priority": "u=1, i",
+            "referer": "https://mintlify.com/",
+            "sec-ch-ua": '"Chromium";v="139", "Not;A=Brand";v="99"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Linux"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-site",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36"
+        }
+
+        async with ClientSession(headers=headers) as session:
+            # Format the system prompt with current date/time
+            current_datetime = datetime.now().strftime("%B %d, %Y at %I:%M %p")
+            formatted_system_prompt = cls.system_prompt.format(currentDateTime=current_datetime)
+
+            # Convert messages to the expected format
+            formatted_messages = []
+
+            # Add system message first
+            system_msg_id = f"sys_{datetime.now().timestamp()}".replace(".", "")[:16]
+            formatted_messages.append({
+                "id": system_msg_id,
+                "createdAt": datetime.now().isoformat() + "Z",
+                "role": "system",
+                "content": formatted_system_prompt,
+                "parts": [{"type": "text", "text": formatted_system_prompt}]
+            })
+
+            # Add user messages
+            for msg in messages:
+                if isinstance(msg, dict):
+                    role = msg.get("role", "user")
+                    content = msg.get("content", "")
+                else:
+                    role = getattr(msg, "role", "user")
+                    content = getattr(msg, "content", "")
+
+                # Skip if it's a system message (we already added our own)
+                if role == "system":
+                    continue
+
+                # Generate a simple ID for the message
+                msg_id = f"msg_{datetime.now().timestamp()}".replace(".", "")[:16]
+
+                formatted_messages.append({
+                    "id": msg_id,
+                    "createdAt": datetime.now().isoformat() + "Z",
+                    "role": role,
+                    "content": content,
+                    "parts": [{"type": "text", "text": content}]
+                })
+
+            data = {
+                "id": "mintlify",
+                "messages": formatted_messages,
+                "fp": "mintlify"
+            }
+
+            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+
+                buffer = ""
+                async for chunk in response.content:
+                    if chunk:
+                        buffer += chunk.decode('utf-8', errors='ignore')
+                        lines = buffer.split('\n')
+                        buffer = lines[-1]  # Keep incomplete line in buffer
+
+                        for line in lines[:-1]:
+                            if line.startswith('0:'):
+                                # Extract the text content from streaming chunks
+                                text = line[2:]
+                                if text.startswith('"') and text.endswith('"'):
+                                    text = json.loads(text)
+                                yield text
+                            elif line.startswith('f:'):
+                                # Initial message ID response - skip
+                                continue
+                            elif line.startswith('e:') or line.startswith('d:'):
+                                # End of stream with metadata - skip
+                                continue
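Mintlify's response is a line-oriented stream in which a short prefix tags each line: "0:" carries a JSON-encoded text chunk, while "f:", "e:" and "d:" carry metadata. A minimal parser for the same framing (prefixes taken from the code above; the function name is hypothetical):

import json
from typing import Optional

def parse_stream_line(line: str) -> Optional[str]:
    # "0:" lines hold the streamed text; everything else is metadata.
    if line.startswith("0:"):
        payload = line[2:]
        if payload.startswith('"') and payload.endswith('"'):
            return json.loads(payload)
        return payload
    return None

assert parse_stream_line('0:"Hello"') == "Hello"
assert parse_stream_line("f:msg_123") is None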
g4f/Provider/TeachAnything.py

@@ -15,8 +15,8 @@ class TeachAnything(AsyncGeneratorProvider, ProviderModelMixin):
 
     working = True
 
-    default_model = 'gemini-1.5-pro'
-    models = [default_model, 'gemini-1.5-flash']
+    default_model = 'gemma'
+    models = [default_model]
 
     @classmethod
     async def create_async_generator(
g4f/Provider/__init__.py

@@ -30,25 +30,25 @@ try:
     from .audio import *
 except ImportError as e:
     debug.error("Audio providers not loaded:", e)
+try:
+    from .search import *
+except ImportError as e:
+    debug.error("Search providers not loaded:", e)
 
-from .deprecated.har import HarProvider
 from .deprecated.ARTA import ARTA
 from .deprecated.DuckDuckGo import DuckDuckGo
-from .deprecated.Free2GPT import Free2GPT
-from .deprecated.LegacyLMArena import LegacyLMArena
 
 from .ApiAirforce import ApiAirforce
 from .Blackbox import Blackbox
 from .Chatai import Chatai
 from .Cloudflare import Cloudflare
 from .Copilot import Copilot
-from .DeepInfraChat import DeepInfraChat
+from .DeepInfra import DeepInfra
 from .EasyChat import EasyChat
 from .GLM import GLM
 
-from .ImageLabs import ImageLabs
 from .Kimi import Kimi
 from .LambdaChat import LambdaChat
+from .Mintlify import Mintlify
 from .OIVSCodeSer2 import OIVSCodeSer2
 from .OIVSCodeSer0501 import OIVSCodeSer0501
 from .OperaAria import OperaAria
@@ -59,7 +59,6 @@ from .Startnest import Startnest
 from .Qwen import Qwen
 from .TeachAnything import TeachAnything
 from .WeWordle import WeWordle
-from .YouTube import YouTube
 from .Yqcloud import Yqcloud
 
 import sys
g4f/Provider/needs_auth/Anthropic.py

@@ -23,20 +23,47 @@ class Anthropic(OpenaiAPI):
     supports_stream = True
     supports_system_message = True
     supports_message_history = True
-    default_model = "claude-3-5-sonnet-latest"
+    default_model = "claude-sonnet-4-20250514"
+
+    # Updated Claude 4 models with current versions
     models = [
-        default_model,
+        # Claude 4 models
+        "claude-opus-4-1-20250805",
+        "claude-sonnet-4-20250514",
+        "claude-opus-4-20250522",
+
+        # Claude 3.7 model
+        "claude-3-7-sonnet-20250219",
+
+        # Claude 3.5 models
         "claude-3-5-sonnet-20241022",
-        "claude-3-5-haiku-latest",
         "claude-3-5-haiku-20241022",
-        "claude-3-opus-latest",
+
+        # Legacy Claude 3 models (still supported)
         "claude-3-opus-20240229",
         "claude-3-sonnet-20240229",
-        "claude-3-haiku-20240307"
+        "claude-3-haiku-20240307",
+
+        # Latest aliases
+        "claude-opus-4-1-latest",
+        "claude-sonnet-4-latest",
+        "claude-3-5-sonnet-latest",
+        "claude-3-5-haiku-latest",
+        "claude-3-opus-latest",
     ]
 
     models_aliases = {
-        "claude-3.5-sonnet": default_model,
-        "claude-3-opus": "claude-3-opus-latest",
+        # Claude 4 aliases
+        "claude-4-opus": "claude-opus-4-1-20250805",
+        "claude-4.1-opus": "claude-opus-4-1-20250805",
+        "claude-4-sonnet": "claude-sonnet-4-20250514",
+        "claude-opus-4": "claude-opus-4-20250522",
+
+        # Claude 3.x aliases
+        "claude-3.7-sonnet": "claude-3-7-sonnet-20250219",
+        "claude-3.5-sonnet": "claude-3-5-sonnet-20241022",
+        "claude-3.5-haiku": "claude-3-5-haiku-20241022",
+        "claude-3-opus": "claude-3-opus-20240229",
         "claude-3-sonnet": "claude-3-sonnet-20240229",
         "claude-3-haiku": "claude-3-haiku-20240307",
     }
@@ -73,12 +100,14 @@ class Anthropic(OpenaiAPI):
         headers: dict = None,
         impersonate: str = None,
         tools: Optional[list] = None,
+        beta_headers: Optional[list] = None,
         extra_body: dict = {},
         **kwargs
     ) -> AsyncResult:
         if api_key is None:
             raise MissingAuthError('Add a "api_key"')
 
+        # Handle image inputs
         if media is not None:
             insert_images = []
             for image, _ in media:
@@ -98,21 +127,32 @@ class Anthropic(OpenaiAPI):
                     "text": messages[-1]["content"]
                 }
             ]
 
+        # Extract system messages
         system = "\n".join([message["content"] for message in messages if message.get("role") == "system"])
         if system:
             messages = [message for message in messages if message.get("role") != "system"]
         else:
             system = None
 
+        # Get model name
+        model_name = cls.get_model(model, api_key=api_key)
+
+        # Special handling for Opus 4.1 parameters
+        if "opus-4-1" in model_name:
+            # Opus 4.1 doesn't allow both temperature and top_p
+            if temperature is not None and top_p is not None:
+                top_p = None  # Prefer temperature over top_p
+
         async with StreamSession(
             proxy=proxy,
-            headers=cls.get_headers(stream, api_key, headers),
+            headers=cls.get_headers(stream, api_key, headers, beta_headers),
             timeout=timeout,
             impersonate=impersonate,
         ) as session:
             data = filter_none(
                 messages=messages,
-                model=cls.get_model(model, api_key=api_key),
+                model=model_name,
                 temperature=temperature,
                 max_tokens=max_tokens,
                 top_k=top_k,
@@ -128,6 +168,7 @@ class Anthropic(OpenaiAPI):
             if not stream:
                 data = await response.json()
                 cls.raise_error(data)
+                tool_calls = []
                 if "type" in data and data["type"] == "message":
                     for content in data["content"]:
                         if content["type"] == "text":
@@ -136,13 +177,16 @@ class Anthropic(OpenaiAPI):
                             tool_calls.append({
                                 "id": content["id"],
                                 "type": "function",
-                                "function": { "name": content["name"], "arguments": content["input"] }
+                                "function": { "name": content["name"], "arguments": json.dumps(content["input"]) }
                             })
-                if data["stop_reason"] == "end_turn":
+                if tool_calls:
+                    yield ToolCalls(tool_calls)
+                if data.get("stop_reason") == "end_turn":
                     yield FinishReason("stop")
-                elif data["stop_reason"] == "max_tokens":
+                elif data.get("stop_reason") == "max_tokens":
                     yield FinishReason("length")
-                yield Usage(**data["usage"])
+                if "usage" in data:
+                    yield Usage(**data["usage"])
             else:
                 content_block = None
                 partial_json = []
@@ -157,33 +201,36 @@ class Anthropic(OpenaiAPI):
                     if "type" in data:
                         if data["type"] == "content_block_start":
                             content_block = data["content_block"]
-                        if content_block is None:
-                            pass # Message start
                         elif data["type"] == "content_block_delta":
-                            if content_block["type"] == "text":
+                            if content_block and content_block["type"] == "text":
                                 yield data["delta"]["text"]
-                            elif content_block["type"] == "tool_use":
+                            elif content_block and content_block["type"] == "tool_use":
                                 partial_json.append(data["delta"]["partial_json"])
                         elif data["type"] == "message_delta":
-                            if data["delta"]["stop_reason"] == "end_turn":
+                            if data["delta"].get("stop_reason") == "end_turn":
                                 yield FinishReason("stop")
-                            elif data["delta"]["stop_reason"] == "max_tokens":
+                            elif data["delta"].get("stop_reason") == "max_tokens":
                                 yield FinishReason("length")
-                            yield Usage(**data["usage"])
+                            if "usage" in data:
+                                yield Usage(**data["usage"])
                         elif data["type"] == "content_block_stop":
-                            if content_block["type"] == "tool_use":
+                            if content_block and content_block["type"] == "tool_use":
                                 tool_calls.append({
                                     "id": content_block["id"],
                                     "type": "function",
-                                    "function": { "name": content_block["name"], "arguments": partial_json.join("") }
+                                    "function": {
+                                        "name": content_block["name"],
+                                        "arguments": "".join(partial_json)
+                                    }
                                 })
                                 partial_json = []
+                                content_block = None
                 if tool_calls:
                     yield ToolCalls(tool_calls)
 
     @classmethod
-    def get_headers(cls, stream: bool, api_key: str = None, headers: dict = None) -> dict:
-        return {
+    def get_headers(cls, stream: bool, api_key: str = None, headers: dict = None, beta_headers: Optional[list] = None) -> dict:
+        result = {
            "Accept": "text/event-stream" if stream else "application/json",
            "Content-Type": "application/json",
            **(
@@ -192,4 +239,10 @@ class Anthropic(OpenaiAPI):
            ),
            "anthropic-version": "2023-06-01",
            **({} if headers is None else headers)
        }
+
+        # Add beta headers for special features
+        if beta_headers:
+            result["anthropic-beta"] = ",".join(beta_headers)
+
+        return result
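With the new parameter, callers opt into beta features by passing a list of tokens that is comma-joined into a single anthropic-beta header. A usage sketch (the beta token value here is illustrative, not taken from the diff):

from g4f.Provider.needs_auth import Anthropic

headers = Anthropic.get_headers(
    stream=True,
    api_key="sk-ant-...",  # placeholder key
    beta_headers=["example-beta-2025-01-01"],  # hypothetical beta token
)
assert headers["anthropic-beta"] == "example-beta-2025-01-01"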
g4f/Provider/needs_auth/DeepInfra.py (deleted, 115 lines)

@@ -1,115 +0,0 @@
-from __future__ import annotations
-
-from ...typing import AsyncResult, Messages
-from ...requests import StreamSession, raise_for_status
-from ...providers.response import ImageResponse
-from ...config import DEFAULT_MODEL
-from ..template import OpenaiTemplate
-from ..DeepInfraChat import DeepInfraChat
-from ..helper import format_media_prompt
-
-class DeepInfra(OpenaiTemplate):
-    url = "https://deepinfra.com"
-    login_url = "https://deepinfra.com/dash/api_keys"
-    api_base = "https://api.deepinfra.com/v1/openai"
-    working = True
-    active_by_default = True
-    default_model = DEFAULT_MODEL
-    vision_models = DeepInfraChat.vision_models
-    model_aliases = DeepInfraChat.model_aliases
-
-    @classmethod
-    def get_models(cls, **kwargs):
-        if not cls.models:
-            cls.models = DeepInfraChat.get_models()
-            cls.image_models = DeepInfraChat.image_models
-        return cls.models
-
-    @classmethod
-    def get_image_models(cls, **kwargs):
-        if not cls.image_models:
-            cls.get_models()
-        return cls.image_models
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        stream: bool = True,
-        prompt: str = None,
-        temperature: float = 0.7,
-        max_tokens: int = 1028,
-        **kwargs
-    ) -> AsyncResult:
-        if model in cls.get_image_models():
-            yield cls.create_async_image(
-                format_media_prompt(messages, prompt),
-                model,
-                **kwargs
-            )
-            return
-
-        headers = {
-            'Accept-Encoding': 'gzip, deflate, br',
-            'Accept-Language': 'en-US',
-            'Origin': 'https://deepinfra.com',
-            'Referer': 'https://deepinfra.com/',
-            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
-            'X-Deepinfra-Source': 'web-embed',
-        }
-        async for chunk in super().create_async_generator(
-            model, messages,
-            stream=stream,
-            temperature=temperature,
-            max_tokens=max_tokens,
-            headers=headers,
-            **kwargs
-        ):
-            yield chunk
-
-    @classmethod
-    async def create_async_image(
-        cls,
-        prompt: str,
-        model: str,
-        api_key: str = None,
-        api_base: str = "https://api.deepinfra.com/v1/inference",
-        proxy: str = None,
-        timeout: int = 180,
-        extra_body: dict = {},
-        **kwargs
-    ) -> ImageResponse:
-        headers = {
-            'Accept-Encoding': 'gzip, deflate, br',
-            'Accept-Language': 'en-US',
-            'Connection': 'keep-alive',
-            'Origin': 'https://deepinfra.com',
-            'Referer': 'https://deepinfra.com/',
-            'Sec-Fetch-Dest': 'empty',
-            'Sec-Fetch-Mode': 'cors',
-            'Sec-Fetch-Site': 'same-site',
-            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
-            'X-Deepinfra-Source': 'web-embed',
-            'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"macOS"',
-        }
-        if api_key is not None:
-            headers["Authorization"] = f"Bearer {api_key}"
-        async with StreamSession(
-            proxies={"all": proxy},
-            headers=headers,
-            timeout=timeout
-        ) as session:
-            model = cls.get_model(model)
-            data = {"prompt": prompt, **extra_body}
-            data = {"input": data} if model == cls.default_model else data
-            async with session.post(f"{api_base.rstrip('/')}/{model}", json=data) as response:
-                await raise_for_status(response)
-                data = await response.json()
-                images = data.get("output", data.get("images", data.get("image_url")))
-                if not images:
-                    raise RuntimeError(f"Response: {data}")
-                images = images[0] if len(images) == 1 else images
-                return ImageResponse(images, prompt)
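For reference, the inference call the deleted file wrapped is a plain HTTP POST. A minimal synchronous sketch of the same request (endpoint, payload shape, and response fields taken from the removed code; the function name is hypothetical):

import requests
from typing import Optional

def generate_image(model: str, prompt: str, api_key: Optional[str] = None):
    headers = {"X-Deepinfra-Source": "web-embed"}
    if api_key:
        headers["Authorization"] = f"Bearer {api_key}"
    response = requests.post(
        f"https://api.deepinfra.com/v1/inference/{model}",
        json={"prompt": prompt},
        headers=headers,
        timeout=180,
    )
    response.raise_for_status()
    data = response.json()
    # The API reports images under one of several keys.
    images = data.get("output") or data.get("images") or data.get("image_url")
    if not images:
        raise RuntimeError(f"Response: {data}")
    return images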
g4f/Provider/needs_auth/GithubCopilot.py

@@ -24,15 +24,44 @@ class GithubCopilot(AsyncGeneratorProvider, ProviderModelMixin):
     needs_auth = True
     supports_stream = True
 
-    default_model = "gpt-4o"
-    models = [default_model, "o1-mini", "o1-preview", "claude-3.5-sonnet"]
+    default_model = "gpt-4.1"
+    models = [
+        # Fast and cost-efficient
+        "o3-mini",
+        "gemini-2.0-flash",
+        "o4-mini",  # Preview
+
+        # Versatile and highly intelligent
+        "gpt-4.1",
+        "gpt-5-mini",  # Preview
+        "gpt-4o",
+        "claude-3.5-sonnet",
+        "gemini-2.5-pro",
+        "claude-3.7-sonnet",
+        "claude-4-sonnet",
+        "o3",  # Preview
+        "gpt-5",  # Preview
+
+        # Most powerful at complex tasks
+        "claude-3.7-sonnet-thinking",
+        "claude-4-opus",
+
+        # Preview (requires upgrade)
+        "claude-3.7-sonnet-pro",
+        "o1",
+
+        # Legacy models (for backward compatibility)
+        "o1-mini",
+        "o1-preview"
+    ]
 
     @classmethod
     async def create_async_generator(
         cls,
         model: str,
         messages: Messages,
-        stream: bool = False,
+        stream: bool = True,
         api_key: str = None,
         proxy: str = None,
         cookies: Cookies = None,
@@ -43,13 +72,16 @@ class GithubCopilot(AsyncGeneratorProvider, ProviderModelMixin):
     ) -> AsyncResult:
         if not model:
             model = cls.default_model
+
         if cookies is None:
             cookies = get_cookies("github.com")
+
         async with ClientSession(
             connector=get_connector(proxy=proxy),
             cookies=cookies,
             headers={
                 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:133.0) Gecko/20100101 Firefox/133.0',
+                'Accept': 'application/json',
                 'Accept-Language': 'en-US,en;q=0.5',
                 'Referer': 'https://github.com/copilot',
                 'Content-Type': 'application/json',
@@ -60,6 +92,7 @@ class GithubCopilot(AsyncGeneratorProvider, ProviderModelMixin):
                 'Sec-Fetch-Dest': 'empty',
                 'Sec-Fetch-Mode': 'cors',
                 'Sec-Fetch-Site': 'same-origin',
+                'Priority': 'u=1'
             }
         ) as session:
             headers = {}
@@ -67,39 +100,68 @@ class GithubCopilot(AsyncGeneratorProvider, ProviderModelMixin):
             async with session.post("https://github.com/github-copilot/chat/token") as response:
                 await raise_for_status(response, "Get token")
                 api_key = (await response.json()).get("token")
+
             headers = {
                 "Authorization": f"GitHub-Bearer {api_key}",
             }
+
             if conversation is not None:
                 conversation_id = conversation.conversation_id
+
             if conversation_id is None:
-                async with session.post("https://api.individual.githubcopilot.com/github/chat/threads", headers=headers) as response:
+                async with session.post(
+                    "https://api.individual.githubcopilot.com/github/chat/threads",
+                    headers=headers
+                ) as response:
                     await raise_for_status(response)
                     conversation_id = (await response.json()).get("thread_id")
+
                 if return_conversation:
                     yield Conversation(conversation_id)
                 content = get_last_user_message(messages)
             else:
                 content = format_prompt(messages)
+
             json_data = {
                 "content": content,
                 "intent": "conversation",
-                "references":[],
+                "references": [],
                 "context": [],
                 "currentURL": f"https://github.com/copilot/c/{conversation_id}",
-                "streaming": True,
+                "streaming": stream,
                 "confirmations": [],
                 "customInstructions": [],
-                "model": model,
+                "model": api_model,
                 "mode": "immersive"
             }
+
             async with session.post(
                 f"https://api.individual.githubcopilot.com/github/chat/threads/{conversation_id}/messages",
                 json=json_data,
                 headers=headers
             ) as response:
-                async for line in response.content:
-                    if line.startswith(b"data: "):
-                        data = json.loads(line[6:])
-                        if data.get("type") == "content":
-                            yield data.get("body")
+                await raise_for_status(response, f"Send message with model {model}")
+
+                if stream:
+                    async for line in response.content:
+                        if line.startswith(b"data: "):
+                            try:
+                                data = json.loads(line[6:])
+                                if data.get("type") == "content":
+                                    content = data.get("body", "")
+                                    if content:
+                                        yield content
+                            except json.JSONDecodeError:
+                                continue
+                else:
+                    full_content = ""
+                    async for line in response.content:
+                        if line.startswith(b"data: "):
+                            try:
+                                data = json.loads(line[6:])
+                                if data.get("type") == "content":
+                                    full_content += data.get("body", "")
+                            except json.JSONDecodeError:
+                                continue
+                    if full_content:
+                        yield full_content
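Both the streaming and the new non-streaming branch share the same per-line handling of the "data: " events. A condensed sketch of that logic (event shape taken from the diff above; the function name is hypothetical):

import json

def iter_content_events(lines):
    # Keep only "content" events; tolerate malformed JSON lines.
    for line in lines:
        if line.startswith(b"data: "):
            try:
                data = json.loads(line[6:])
            except json.JSONDecodeError:
                continue
            if data.get("type") == "content":
                body = data.get("body", "")
                if body:
                    yield body

assert list(iter_content_events([b'data: {"type": "content", "body": "Hi"}'])) == ["Hi"]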
g4f/Provider/needs_auth/Grok.py

@@ -36,9 +36,39 @@ class Grok(AsyncAuthedProvider, ProviderModelMixin):
     needs_auth = True
     working = True
 
-    default_model = "grok-3"
-    models = [default_model, "grok-3-thinking", "grok-2"]
-    model_aliases = {"grok-3-r1": "grok-3-thinking"}
+    # Updated to Grok 4 as default
+    default_model = "grok-4"
+
+    # Updated model list with latest Grok 4 and 3 models
+    models = [
+        # Grok 4 models
+        "grok-4",
+        "grok-4-heavy",
+        "grok-4-reasoning",
+
+        # Grok 3 models
+        "grok-3",
+        "grok-3-reasoning",
+        "grok-3-mini",
+        "grok-3-mini-reasoning",
+
+        # Legacy Grok 2 (still supported)
+        "grok-2",
+        "grok-2-image",
+
+        # Latest aliases
+        "grok-latest",
+    ]
+
+    model_aliases = {
+        # Grok 3 aliases
+        "grok-3-thinking": "grok-3-reasoning",
+        "grok-3-r1": "grok-3-reasoning",
+        "grok-3-mini-thinking": "grok-3-mini-reasoning",
+
+        # Latest alias
+        "grok": "grok-latest",
+    }
 
     @classmethod
     async def on_auth_async(cls, proxy: str = None, **kwargs) -> AsyncIterator:
@@ -79,14 +109,35 @@ class Grok(AsyncAuthedProvider, ProviderModelMixin):
 
     @classmethod
     async def _prepare_payload(cls, model: str, message: str) -> Dict[str, Any]:
+        # Map model names to API model names
+        api_model = "grok-latest"
+
+        if model in ["grok-4", "grok-4-heavy", "grok-4-reasoning"]:
+            api_model = model
+        elif model == "grok-3":
+            api_model = "grok-3"
+        elif model in ["grok-3-mini", "grok-3-mini-reasoning"]:
+            api_model = "grok-3-mini"
+        elif model == "grok-2":
+            api_model = "grok-2"
+
+        # Check if it's a reasoning model
+        is_reasoning = model.endswith("-reasoning") or model.endswith("-thinking") or model.endswith("-r1")
+
+        # Enable Big Brain mode for heavy models
+        enable_big_brain = "heavy" in model or "big-brain" in model
+
+        # Enable DeepSearch for Grok 3+ models
+        enable_deep_search = not model.startswith("grok-2")
+
         return {
             "temporary": True,
-            "modelName": "grok-latest" if model == "grok-2" else "grok-3",
+            "modelName": api_model,
             "message": message,
             "fileAttachments": [],
             "imageAttachments": [],
             "disableSearch": False,
-            "enableImageGeneration": True,
+            "enableImageGeneration": model == "grok-2-image" or model == "grok-4",
             "returnImageBytes": False,
             "returnRawGrokInXaiRequest": False,
             "enableImageStreaming": True,
@@ -97,8 +148,11 @@ class Grok(AsyncAuthedProvider, ProviderModelMixin):
             "isPreset": False,
             "sendFinalMetadata": True,
             "customInstructions": "",
-            "deepsearchPreset": "",
-            "isReasoning": model.endswith("-thinking") or model.endswith("-r1"),
+            "deepsearchPreset": "enabled" if enable_deep_search else "",
+            "isReasoning": is_reasoning,
+            "enableBigBrain": enable_big_brain,
+            "enableLiveSearch": False,  # Real-time search for Grok 4
+            "contextWindow": 256000 if model.startswith("grok-4") else 131072,  # 256k for Grok 4, 128k for others
         }
 
     @classmethod
@@ -112,38 +166,78 @@ class Grok(AsyncAuthedProvider, ProviderModelMixin):
     ) -> AsyncResult:
         conversation_id = None if conversation is None else conversation.conversation_id
         prompt = format_prompt(messages) if conversation_id is None else get_last_user_message(messages)
+
         async with StreamSession(
             **auth_result.get_dict()
         ) as session:
             payload = await cls._prepare_payload(model, prompt)
+
+            # Add voice mode support flag (for future use)
+            if kwargs.get("enable_voice", False):
+                payload["enableVoiceMode"] = True
+
             if conversation_id is None:
                 url = f"{cls.conversation_url}/new"
             else:
                 url = f"{cls.conversation_url}/{conversation_id}/responses"
+
             async with session.post(url, json=payload, headers={"x-xai-request-id": str(uuid.uuid4())}) as response:
                 if response.status == 403:
                     raise MissingAuthError("Invalid secrets")
                 auth_result.cookies = merge_cookies(auth_result.cookies, response)
                 await raise_for_status(response)
+
                 thinking_duration = None
+                deep_search_active = False
+
                 async for line in response.iter_lines():
                     if line:
                         try:
                             json_data = json.loads(line)
                             result = json_data.get("result", {})
+
                             if conversation_id is None:
                                 conversation_id = result.get("conversation", {}).get("conversationId")
+
                             response_data = result.get("response", {})
+
+                            # Handle DeepSearch status
+                            deep_search = response_data.get("deepSearchStatus")
+                            if deep_search:
+                                if not deep_search_active:
+                                    deep_search_active = True
+                                    yield Reasoning(status="🔍 Deep searching...")
+                                if deep_search.get("completed"):
+                                    deep_search_active = False
+                                    yield Reasoning(status="Deep search completed")
+
+                            # Handle image generation (Aurora for Grok 3+)
                             image = response_data.get("streamingImageGenerationResponse", None)
                             if image is not None:
-                                yield ImagePreview(f'{cls.assets_url}/{image["imageUrl"]}', "", {"cookies": auth_result.cookies, "headers": auth_result.headers})
+                                image_url = image.get("imageUrl")
+                                if image_url:
+                                    yield ImagePreview(
+                                        f'{cls.assets_url}/{image_url}',
+                                        "",
+                                        {"cookies": auth_result.cookies, "headers": auth_result.headers}
+                                    )
+
+                            # Handle text tokens
                             token = response_data.get("token", result.get("token"))
                             is_thinking = response_data.get("isThinking", result.get("isThinking"))
+
                             if token:
                                 if is_thinking:
                                     if thinking_duration is None:
                                         thinking_duration = time.time()
-                                        yield Reasoning(status="🤔 Is thinking...")
+                                        # Different status for different models
+                                        if "grok-4" in model:
+                                            status = "🧠 Grok 4 is processing..."
+                                        elif "big-brain" in payload and payload["enableBigBrain"]:
+                                            status = "🧠 Big Brain mode active..."
+                                        else:
+                                            status = "🤔 Is thinking..."
+                                        yield Reasoning(status=status)
                                     yield Reasoning(token)
                                 else:
                                     if thinking_duration is not None:
@@ -152,13 +246,31 @@ class Grok(AsyncAuthedProvider, ProviderModelMixin):
                                         thinking_duration = None
                                         yield Reasoning(status=status)
                                     yield token
+
+                            # Handle generated images
                             generated_images = response_data.get("modelResponse", {}).get("generatedImageUrls", None)
                             if generated_images:
-                                yield ImageResponse([f'{cls.assets_url}/{image}' for image in generated_images], "", {"cookies": auth_result.cookies, "headers": auth_result.headers})
+                                yield ImageResponse(
+                                    [f'{cls.assets_url}/{image}' for image in generated_images],
+                                    "",
+                                    {"cookies": auth_result.cookies, "headers": auth_result.headers}
+                                )
+
+                            # Handle title generation
                             title = result.get("title", {}).get("newTitle", "")
                             if title:
                                 yield TitleGeneration(title)
+
+                            # Handle tool usage information (Grok 4)
+                            tool_usage = response_data.get("toolUsage")
+                            if tool_usage:
+                                tools_used = tool_usage.get("toolsUsed", [])
+                                if tools_used:
+                                    yield Reasoning(status=f"Used tools: {', '.join(tools_used)}")
+
                         except json.JSONDecodeError:
                             continue
-            # if conversation_id is not None:
-            #     yield Conversation(conversation_id)
+
+            # Return conversation ID for continuation
+            if conversation_id is not None and kwargs.get("return_conversation", False):
+                yield Conversation(conversation_id)
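All of the new payload flags are derived from the requested model name. A pure-function sketch of that derivation (field names and thresholds from the diff above; the function name is hypothetical):

def derive_payload_flags(model: str) -> dict:
    return {
        "isReasoning": model.endswith(("-reasoning", "-thinking", "-r1")),
        "enableBigBrain": "heavy" in model or "big-brain" in model,
        "deepsearchPreset": "" if model.startswith("grok-2") else "enabled",
        # 256k context for Grok 4, 128k for everything else.
        "contextWindow": 256000 if model.startswith("grok-4") else 131072,
    }

assert derive_payload_flags("grok-4-heavy")["enableBigBrain"] is True
assert derive_payload_flags("grok-2")["deepsearchPreset"] == ""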
g4f/Provider/needs_auth/LMArenaBeta.py → g4f/Provider/needs_auth/LMArena.py

@@ -125,8 +125,8 @@ text_models = {model["publicName"]: model["id"] for model in models if "text" in
 image_models = {model["publicName"]: model["id"] for model in models if "image" in model["capabilities"]["outputCapabilities"]}
 vision_models = [model["publicName"] for model in models if "image" in model["capabilities"]["inputCapabilities"]]
 
-class LMArenaBeta(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
-    label = "LMArena (New)"
+class LMArena(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
+    label = "LMArena"
     url = "https://lmarena.ai"
     share_url = None
     api_endpoint = "https://lmarena.ai/api/stream/create-evaluation"
g4f/Provider/needs_auth/__init__.py

@@ -7,7 +7,6 @@ from .Cerebras import Cerebras
 from .CopilotAccount import CopilotAccount
 from .Custom import Custom
 from .Custom import Feature
-from .DeepInfra import DeepInfra
 from .DeepSeek import DeepSeek
 from .DeepSeekAPI import DeepSeekAPI
 from .FenayAI import FenayAI
@@ -20,7 +19,7 @@ from .GithubCopilotAPI import GithubCopilotAPI
 from .GlhfChat import GlhfChat
 from .Grok import Grok
 from .Groq import Groq
-from .LMArenaBeta import LMArenaBeta
+from .LMArena import LMArena
 from .MetaAI import MetaAI
 from .MetaAIAccount import MetaAIAccount
 from .MicrosoftDesigner import MicrosoftDesigner
g4f/Provider/ImageLabs.py → g4f/Provider/not_working/ImageLabs.py

@@ -4,10 +4,10 @@ from aiohttp import ClientSession
 import time
 import asyncio
 
-from ..typing import AsyncResult, Messages
-from ..providers.response import ImageResponse
-from ..image import use_aspect_ratio
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...typing import AsyncResult, Messages
+from ...providers.response import ImageResponse
+from ...image import use_aspect_ratio
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 
 
 class ImageLabs(AsyncGeneratorProvider, ProviderModelMixin):
g4f/Provider/not_working/PenguinAI.py

@@ -47,4 +47,4 @@ class PenguinAI(OpenaiTemplate):
         except Exception as e:
             debug.error(e)
             return cls.fallback_models
         return cls.models
g4f/Provider/not_working/__init__.py

@@ -1,3 +1,5 @@
+from .har import HarProvider
+
 from .AI365VIP import AI365VIP
 from .Aichat import Aichat
 from .AiChatOnline import AiChatOnline
@@ -15,15 +17,20 @@ from .ChatGptt import ChatGptt
 from .DDG import DDG
 from .Equing import Equing
 from .FlowGpt import FlowGpt
+from .Free2GPT import Free2GPT
 from .FreeGpt import FreeGpt
 from .FreeNetfly import FreeNetfly
 from .FreeRouter import FreeRouter
 from .Glider import Glider
 from .GPROChat import GPROChat
+from .GptOss import GptOss
+from .ImageLabs import ImageLabs
 from .Koala import Koala
+from .LegacyLMArena import LegacyLMArena
 from .Liaobots import Liaobots
 from .Lockchat import Lockchat
 from .MagickPen import MagickPen
+from .PenguinAI import PenguinAI
 from .Phind import Phind
 from .Pizzagpt import Pizzagpt
 from .Poe import Poe
g4f/Provider/SearXNG.py → g4f/Provider/search/SearXNG.py

@@ -1,12 +1,12 @@
 import os
 import aiohttp
 import asyncio
-from ..typing import Messages, AsyncResult
-from ..providers.base_provider import AsyncGeneratorProvider
-from ..providers.response import FinishReason
-from ..tools.web_search import fetch_and_scrape
-from .helper import format_media_prompt
-from .. import debug
+from ...typing import Messages, AsyncResult
+from ...providers.base_provider import AsyncGeneratorProvider
+from ...providers.response import FinishReason
+from ...tools.web_search import fetch_and_scrape
+from ..helper import format_media_prompt
+from ... import debug
 
 class SearXNG(AsyncGeneratorProvider):
     url = os.environ.get("SEARXNG_URL", "http://searxng:8080")
g4f/Provider/YouTube.py → g4f/Provider/search/YouTube.py

@@ -8,11 +8,11 @@ try:
 except ImportError:
     has_yt_dlp = False
 
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..providers.response import AudioResponse, VideoResponse, YouTubeResponse
-from ..image.copy_images import get_media_dir
-from .helper import format_media_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...providers.response import AudioResponse, VideoResponse, YouTubeResponse
+from ...image.copy_images import get_media_dir
+from ..helper import format_media_prompt
 
 class YouTube(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://youtube.com"
3  g4f/Provider/search/__init__.py  Normal file
@@ -0,0 +1,3 @@
+from .GoogleSearch import GoogleSearch
+from .SearXNG import SearXNG
+from .YouTube import YouTube
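
With the new search subpackage in place, the three relocated providers resolve through one import path. A minimal usage sketch, assuming the usual `create_async_generator` classmethod that `AsyncGeneratorProvider` subclasses expose:

import asyncio
from g4f.Provider.search import SearXNG

async def main():
    async for chunk in SearXNG.create_async_generator(
        model="",  # assumed: search providers do not use the model name
        messages=[{"role": "user", "content": "latest g4f release"}],
    ):
        print(chunk)

asyncio.run(main())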
@@ -800,4 +800,4 @@ class AsyncResponses():
             **kwargs
         )

         return await async_response(response)

|
||||||
120
g4f/models.py
120
g4f/models.py
|
|
@ -10,14 +10,13 @@ from .Provider import (
|
||||||
Chatai,
|
Chatai,
|
||||||
Cloudflare,
|
Cloudflare,
|
||||||
Copilot,
|
Copilot,
|
||||||
DeepInfraChat,
|
DeepInfra,
|
||||||
Free2GPT,
|
|
||||||
HuggingSpace,
|
HuggingSpace,
|
||||||
Grok,
|
Grok,
|
||||||
DeepseekAI_JanusPro7b,
|
DeepseekAI_JanusPro7b,
|
||||||
ImageLabs,
|
|
||||||
Kimi,
|
Kimi,
|
||||||
LambdaChat,
|
LambdaChat,
|
||||||
|
Mintlify,
|
||||||
OIVSCodeSer2,
|
OIVSCodeSer2,
|
||||||
OIVSCodeSer0501,
|
OIVSCodeSer0501,
|
||||||
OperaAria,
|
OperaAria,
|
||||||
|
|
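
With the rename, downstream code only needs to swap the imported symbol; the new Mintlify provider ships alongside it. A minimal before/after sketch:

# Old:
# from g4f.Provider import DeepInfraChat
# New:
from g4f.Provider import DeepInfra, Mintlify  # Mintlify is the newly added provider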
@@ -42,7 +41,7 @@ from .Provider import (
     HuggingFace,
     HuggingFaceMedia,
     HuggingFaceAPI,
-    LMArenaBeta,
+    LMArena,
     Groq,
     MetaAI,
     MicrosoftDesigner,
@@ -157,15 +156,16 @@ default = Model(
         OIVSCodeSer2,
         Blackbox,
         Copilot,
-        DeepInfraChat,
+        DeepInfra,
         OperaAria,
         Startnest,
         LambdaChat,
         PollinationsAI,
         Together,
-        Free2GPT,
         Chatai,
         WeWordle,
+        Mintlify,
+        TeachAnything,
         OpenaiChat,
         Cloudflare,
     ])
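
The default model keeps its IterListProvider chain, so a request without an explicit provider tries these providers in order until one succeeds; Mintlify and TeachAnything slot in as extra fallbacks. A minimal sketch of how the chain is consumed, assuming the client resolves an empty model name to this default model:

from g4f.client import Client

client = Client()
# No provider given: the IterListProvider behind the default model
# walks OIVSCodeSer2, Blackbox, Copilot, DeepInfra, ... until one answers.
response = client.chat.completions.create(
    model="",  # assumed: the default model is registered under the empty name
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)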
@@ -176,7 +176,7 @@ default_vision = VisionModel(
     base_provider = "",
     best_provider = IterListProvider([
         Blackbox,
-        DeepInfraChat,
+        DeepInfra,
         OIVSCodeSer0501,
         OIVSCodeSer2,
         PollinationsAI,
@@ -292,7 +292,7 @@ gpt_oss_120b = Model(
     name = 'gpt-oss-120b',
     long_name = 'openai/gpt-oss-120b',
     base_provider = 'OpenAI',
-    best_provider = IterListProvider([Together, DeepInfraChat, HuggingFace, OpenRouter, Groq])
+    best_provider = IterListProvider([Together, DeepInfra, HuggingFace, OpenRouter, Groq])
 )

 # dall-e
@@ -345,7 +345,7 @@ llama_3_70b = Model(
 llama_3_1_8b = Model(
     name = "llama-3.1-8b",
     base_provider = "Meta Llama",
-    best_provider = IterListProvider([DeepInfraChat, Together, Cloudflare])
+    best_provider = IterListProvider([DeepInfra, Together, Cloudflare])
 )

 llama_3_1_70b = Model(
@@ -382,27 +382,27 @@ llama_3_2_11b = VisionModel(
 llama_3_2_90b = Model(
     name = "llama-3.2-90b",
     base_provider = "Meta Llama",
-    best_provider = IterListProvider([DeepInfraChat, Together])
+    best_provider = IterListProvider([DeepInfra, Together])
 )

 # llama-3.3
 llama_3_3_70b = Model(
     name = "llama-3.3-70b",
     base_provider = "Meta Llama",
-    best_provider = IterListProvider([DeepInfraChat, LambdaChat, Together, HuggingChat, HuggingFace])
+    best_provider = IterListProvider([DeepInfra, LambdaChat, Together, HuggingChat, HuggingFace])
 )

 # llama-4
 llama_4_scout = Model(
     name = "llama-4-scout",
     base_provider = "Meta Llama",
-    best_provider = IterListProvider([DeepInfraChat, LambdaChat, PollinationsAI, Together, Cloudflare])
+    best_provider = IterListProvider([DeepInfra, PollinationsAI, Together, Cloudflare])
 )

 llama_4_maverick = Model(
     name = "llama-4-maverick",
     base_provider = "Meta Llama",
-    best_provider = IterListProvider([DeepInfraChat, LambdaChat, Together])
+    best_provider = IterListProvider([DeepInfra, Together])
 )

 ### MistralAI ###
@@ -433,7 +433,7 @@ mistral_small_24b = Model(
 mistral_small_3_1_24b = Model(
     name = "mistral-small-3.1-24b",
     base_provider = "Mistral AI",
-    best_provider = IterListProvider([DeepInfraChat, PollinationsAI])
+    best_provider = IterListProvider([DeepInfra, PollinationsAI])
 )

 ### NousResearch ###
@@ -455,32 +455,32 @@ phi_3_5_mini = Model(
 phi_4 = Model(
     name = "phi-4",
     base_provider = "Microsoft",
-    best_provider = IterListProvider([DeepInfraChat, HuggingSpace])
+    best_provider = IterListProvider([DeepInfra, HuggingSpace])
 )

 phi_4_multimodal = VisionModel(
     name = "phi-4-multimodal",
     base_provider = "Microsoft",
-    best_provider = IterListProvider([DeepInfraChat, HuggingSpace])
+    best_provider = IterListProvider([DeepInfra, HuggingSpace])
 )

 phi_4_reasoning_plus = Model(
     name = "phi-4-reasoning-plus",
     base_provider = "Microsoft",
-    best_provider = DeepInfraChat
+    best_provider = DeepInfra
 )

 # wizardlm
 wizardlm_2_7b = Model(
     name = 'wizardlm-2-7b',
     base_provider = 'Microsoft',
-    best_provider = DeepInfraChat
+    best_provider = DeepInfra
 )

 wizardlm_2_8x22b = Model(
     name = 'wizardlm-2-8x22b',
     base_provider = 'Microsoft',
-    best_provider = DeepInfraChat
+    best_provider = DeepInfra
 )

 ### Google DeepMind ###
@@ -491,19 +491,6 @@ gemini = Model(
     best_provider = Gemini
 )

-# gemini-1.5
-gemini_1_5_flash = Model(
-    name = 'gemini-1.5-flash',
-    base_provider = 'Google',
-    best_provider = IterListProvider([Free2GPT, TeachAnything])
-)
-
-gemini_1_5_pro = Model(
-    name = 'gemini-1.5-pro',
-    base_provider = 'Google',
-    best_provider = IterListProvider([Free2GPT, TeachAnything])
-)
-
 # gemini-2.0
 gemini_2_0_flash = Model(
     name = 'gemini-2.0-flash',
@@ -540,7 +527,7 @@ gemini_2_5_pro = Model(
 codegemma_7b = Model(
     name = 'codegemma-7b',
     base_provider = 'Google',
-    best_provider = DeepInfraChat
+    best_provider = DeepInfra
 )

 # gemma
@@ -554,14 +541,14 @@ gemma_2b = Model(
 gemma_1_1_7b = Model(
     name = 'gemma-1.1-7b',
     base_provider = 'Google',
-    best_provider = DeepInfraChat
+    best_provider = DeepInfra
 )

 # gemma-2
 gemma_2_9b = Model(
     name = 'gemma-2-9b',
     base_provider = 'Google',
-    best_provider = DeepInfraChat
+    best_provider = DeepInfra
 )

 gemma_2_27b = Model(
@@ -574,19 +561,19 @@ gemma_2_27b = Model(
 gemma_3_4b = Model(
     name = 'gemma-3-4b',
     base_provider = 'Google',
-    best_provider = DeepInfraChat
+    best_provider = DeepInfra
 )

 gemma_3_12b = Model(
     name = 'gemma-3-12b',
     base_provider = 'Google',
-    best_provider = DeepInfraChat
+    best_provider = DeepInfra
 )

 gemma_3_27b = Model(
     name = 'gemma-3-27b',
     base_provider = 'Google',
-    best_provider = IterListProvider([DeepInfraChat, Together])
+    best_provider = IterListProvider([DeepInfra, Together])
 )

 gemma_3n_e4b = Model(
@@ -676,7 +663,7 @@ qwen_2_5_72b = Model(
 qwen_2_5_coder_32b = Model(
     name = 'qwen-2.5-coder-32b',
     base_provider = 'Qwen',
-    best_provider = IterListProvider([PollinationsAI, LambdaChat, Together, HuggingChat])
+    best_provider = IterListProvider([PollinationsAI, Together, HuggingChat])
 )

 qwen_2_5_1m = Model(
@@ -701,25 +688,25 @@ qwen_2_5_vl_72b = Model(
 qwen_3_235b = Model(
     name = 'qwen-3-235b',
     base_provider = 'Qwen',
-    best_provider = IterListProvider([DeepInfraChat, Together, HuggingSpace])
+    best_provider = IterListProvider([DeepInfra, Together, HuggingSpace])
 )

 qwen_3_32b = Model(
     name = 'qwen-3-32b',
     base_provider = 'Qwen',
-    best_provider = IterListProvider([DeepInfraChat, LambdaChat, Together, HuggingSpace])
+    best_provider = IterListProvider([DeepInfra, LambdaChat, Together, HuggingSpace])
 )

 qwen_3_30b = Model(
     name = 'qwen-3-30b',
     base_provider = 'Qwen',
-    best_provider = IterListProvider([DeepInfraChat, HuggingSpace])
+    best_provider = IterListProvider([DeepInfra, HuggingSpace])
 )

 qwen_3_14b = Model(
     name = 'qwen-3-14b',
     base_provider = 'Qwen',
-    best_provider = IterListProvider([DeepInfraChat, HuggingSpace])
+    best_provider = IterListProvider([DeepInfra, HuggingSpace])
 )

 qwen_3_4b = Model(
@@ -744,7 +731,7 @@ qwen_3_0_6b = Model(
 qwq_32b = Model(
     name = 'qwq-32b',
     base_provider = 'Qwen',
-    best_provider = IterListProvider([DeepInfraChat, Together, HuggingChat])
+    best_provider = IterListProvider([DeepInfra, Together, HuggingChat])
 )

 ### DeepSeek ###
@@ -752,26 +739,26 @@ qwq_32b = Model(
 deepseek_v3 = Model(
     name = 'deepseek-v3',
     base_provider = 'DeepSeek',
-    best_provider = IterListProvider([DeepInfraChat, Together])
+    best_provider = IterListProvider([DeepInfra, Together])
 )

 # deepseek-r1
 deepseek_r1 = Model(
     name = 'deepseek-r1',
     base_provider = 'DeepSeek',
-    best_provider = IterListProvider([DeepInfraChat, LambdaChat, PollinationsAI, Together, HuggingChat, HuggingFace])
+    best_provider = IterListProvider([DeepInfra, PollinationsAI, Together, HuggingChat, HuggingFace])
 )

 deepseek_r1_turbo = Model(
     name = 'deepseek-r1-turbo',
     base_provider = 'DeepSeek',
-    best_provider = DeepInfraChat
+    best_provider = DeepInfra
 )

 deepseek_r1_distill_llama_70b = Model(
     name = 'deepseek-r1-distill-llama-70b',
     base_provider = 'DeepSeek',
-    best_provider = IterListProvider([DeepInfraChat, Together])
+    best_provider = IterListProvider([DeepInfra, Together])
 )

 deepseek_r1_distill_qwen_1_5b = Model(
@@ -789,46 +776,46 @@ deepseek_r1_distill_qwen_14b = Model(
 deepseek_r1_distill_qwen_32b = Model(
     name = 'deepseek-r1-distill-qwen-32b',
     base_provider = 'DeepSeek',
-    best_provider = IterListProvider([DeepInfraChat])
+    best_provider = IterListProvider([DeepInfra])
 )

 # deepseek-v2
 deepseek_prover_v2 = Model(
     name = 'deepseek-prover-v2',
     base_provider = 'DeepSeek',
-    best_provider = DeepInfraChat
+    best_provider = DeepInfra
 )

 deepseek_prover_v2_671b = Model(
     name = 'deepseek-prover-v2-671b',
     base_provider = 'DeepSeek',
-    best_provider = DeepInfraChat
+    best_provider = DeepInfra
 )

 # deepseek-v3-0324
 deepseek_v3_0324 = Model(
     name = 'deepseek-v3-0324',
     base_provider = 'DeepSeek',
-    best_provider = IterListProvider([DeepInfraChat, LambdaChat])
+    best_provider = DeepInfra
 )

 deepseek_v3_0324_turbo = Model(
     name = 'deepseek-v3-0324-turbo',
     base_provider = 'DeepSeek',
-    best_provider = DeepInfraChat
+    best_provider = DeepInfra
 )

 # deepseek-r1-0528
 deepseek_r1_0528 = Model(
     name = 'deepseek-r1-0528',
     base_provider = 'DeepSeek',
-    best_provider = IterListProvider([DeepInfraChat, LambdaChat, PollinationsAI])
+    best_provider = IterListProvider([DeepInfra, PollinationsAI])
 )

 deepseek_r1_0528_turbo = Model(
     name = 'deepseek-r1-0528-turbo',
     base_provider = 'DeepSeek',
-    best_provider = DeepInfraChat
+    best_provider = DeepInfra
 )

 # janus
@@ -860,7 +847,7 @@ grok_3_r1 = Model(
 kimi = Model(
     name = 'kimi-k2',
     base_provider = 'kimi.com',
-    best_provider = IterListProvider([Kimi, HuggingFace, DeepInfraChat, Groq]),
+    best_provider = IterListProvider([Kimi, HuggingFace, DeepInfra, Groq]),
     long_name = "moonshotai/Kimi-K2-Instruct"
 )

@@ -899,7 +886,7 @@ r1_1776 = Model(
 nemotron_70b = Model(
     name = 'nemotron-70b',
     base_provider = 'Nvidia',
-    best_provider = IterListProvider([LambdaChat, Together, HuggingChat, HuggingFace])
+    best_provider = IterListProvider([Together, HuggingChat, HuggingFace])
 )

 ### Cognitive Computations ###
@@ -907,34 +894,27 @@ nemotron_70b = Model(
 dolphin_2_6 = Model(
     name = "dolphin-2.6",
     base_provider = "Cognitive Computations",
-    best_provider = DeepInfraChat
+    best_provider = DeepInfra
 )

 dolphin_2_9 = Model(
     name = "dolphin-2.9",
     base_provider = "Cognitive Computations",
-    best_provider = DeepInfraChat
+    best_provider = DeepInfra
 )

 ### DeepInfra ###
 airoboros_70b = Model(
     name = "airoboros-70b",
     base_provider = "DeepInfra",
-    best_provider = DeepInfraChat
+    best_provider = DeepInfra
 )

 ### Lizpreciatior ###
 lzlv_70b = Model(
     name = "lzlv-70b",
     base_provider = "Lizpreciatior",
-    best_provider = DeepInfraChat
+    best_provider = DeepInfra
 )
-
-### Liquid AI ###
-lfm_40b = Model(
-    name = "lfm-40b",
-    base_provider = "Liquid AI",
-    best_provider = LambdaChat
-)

 ### Opera ###
@@ -955,7 +935,7 @@ evil = Model(
 sdxl_turbo = ImageModel(
     name = 'sdxl-turbo',
     base_provider = 'Stability AI',
-    best_provider = IterListProvider([HuggingFaceMedia, PollinationsImage, ImageLabs])
+    best_provider = IterListProvider([HuggingFaceMedia, PollinationsImage])
 )

 sd_3_5_large = ImageModel(
@@ -1010,7 +990,7 @@ flux_canny = ImageModel(
 flux_kontext_max = ImageModel(
     name = 'flux-kontext',
     base_provider = 'Black Forest Labs',
-    best_provider = IterListProvider([PollinationsAI, Azure, LMArenaBeta, Together])
+    best_provider = IterListProvider([PollinationsAI, Azure, LMArena, Together])
 )

 flux_dev_lora = ImageModel(

@@ -8,7 +8,7 @@ model_map = {
     "OIVSCodeSer2": "",
     "Blackbox": "",
     "Copilot": "",
-    "DeepInfraChat": "",
+    "DeepInfra": "",
     "OperaAria": "",
     "Startnest": "",
     "LambdaChat": "",
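
Each model_map entry keys a canonical g4f model name to the identifier (or list of candidate identifiers) each provider expects, so the rename only touches the provider key, never the per-provider ids. A minimal lookup sketch, assuming the dict shape shown in this file; resolve_model is a hypothetical helper, not part of g4f:

def resolve_model(model_map: dict, model: str, provider: str) -> list[str]:
    # Return candidate provider-specific ids for a canonical model name.
    entry = model_map.get(model, {})
    value = entry.get(provider, [])
    return value if isinstance(value, list) else [value]

# resolve_model(model_map, "o3-mini", "LMArena") -> ["o3-mini"]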
@@ -102,7 +102,7 @@ model_map = {
     },
     "o3-mini": {
         "OpenaiChat": "o3-mini",
-        "LMArenaBeta": "o3-mini",
+        "LMArena": "o3-mini",
         "PuterJS": [
             "o3-mini",
             "openrouter:openai/o3-mini",
@@ -119,7 +119,7 @@ model_map = {
     "o4-mini": {
         "OpenaiChat": "o4-mini",
         "Azure": "o4-mini",
-        "LMArenaBeta": "o4-mini-2025-04-16",
+        "LMArena": "o4-mini-2025-04-16",
         "PuterJS": [
             "o4-mini",
             "openrouter:openai/o4-mini"
@@ -135,7 +135,7 @@ model_map = {
         "PollinationsAI": "openai-large",
         "OpenaiChat": "gpt-4-1",
         "Azure": "gpt-4.1",
-        "LMArenaBeta": "gpt-4.1-2025-04-14",
+        "LMArena": "gpt-4.1-2025-04-14",
         "PuterJS": [
             "gpt-4.1",
             "openrouter:openai/gpt-4.1"
@@ -146,7 +146,7 @@ model_map = {
         "Blackbox": "gpt-4.1-mini",
         "OIVSCodeSer0501": "gpt-4.1-mini",
         "OpenaiChat": "gpt-4-1-mini",
-        "LMArenaBeta": "gpt-4.1-mini-2025-04-14",
+        "LMArena": "gpt-4.1-mini-2025-04-14",
         "PuterJS": [
             "gpt-4.1-mini",
             "openrouter:openai/gpt-4.1-mini"
@@ -173,7 +173,7 @@ model_map = {
     },
     "gpt-oss-120b": {
         "Together": "openai/gpt-oss-120b",
-        "DeepInfraChat": "openai/gpt-oss-120b",
+        "DeepInfra": "openai/gpt-oss-120b",
         "HuggingFace": "openai/gpt-oss-120b",
         "OpenRouter": "openai/gpt-oss-120b",
         "Groq": "openai/gpt-oss-120b",
@@ -191,7 +191,7 @@ model_map = {
         "OpenaiAccount": "dall-e-3",
         "MicrosoftDesigner": "dall-e-3",
         "BingCreateImages": "dall-e-3",
-        "LMArenaBeta": "dall-e-3",
+        "LMArena": "dall-e-3",
         "ApiAirforce": "dall-e-3",
         "OpenaiChat": "gpt-image"
     },
@@ -250,7 +250,6 @@ model_map = {
         "Replicate": "meta/meta-llama-3-70b-instruct"
     },
     "llama-3.1-8b": {
-        "DeepInfraChat": "meta-llama/Meta-Llama-3.1-8B-Instruct",
         "Together": [
             "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
             "blackbox/meta-llama-3-1-8b"
@@ -335,13 +334,11 @@ model_map = {
         "HuggingFaceAPI": "meta-llama/Llama-3.2-11B-Vision-Instruct"
     },
     "llama-3.2-90b": {
-        "DeepInfraChat": "meta-llama/Llama-3.2-90B-Vision-Instruct",
         "Together": "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
         "PuterJS": "openrouter:meta-llama/llama-3.2-90b-vision-instruct",
         "DeepInfra": "meta-llama/Llama-3.2-90B-Vision-Instruct"
     },
     "llama-3.3-70b": {
-        "DeepInfraChat": "meta-llama/Llama-3.3-70B-Instruct",
         "LambdaChat": "llama3.3-70b-instruct-fp8",
         "Together": [
             "meta-llama/Llama-3.3-70B-Instruct-Turbo",
@@ -349,7 +346,7 @@ model_map = {
         ],
         "HuggingFace": "meta-llama/Llama-3.3-70B-Instruct",
         "OpenRouterFree": "meta-llama/llama-3.3-70b-instruct",
-        "LMArenaBeta": "llama-3.3-70b-instruct",
+        "LMArena": "llama-3.3-70b-instruct",
         "PuterJS": [
             "openrouter:meta-llama/llama-3.3-70b-instruct:free",
             "openrouter:meta-llama/llama-3.3-70b-instruct"
@@ -364,8 +361,6 @@ model_map = {
         "OpenRouter": "meta-llama/llama-3.3-70b-instruct:free"
     },
     "llama-4-scout": {
-        "DeepInfraChat": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
-        "LambdaChat": "llama-4-scout-17b-16e-instruct",
         "PollinationsAI": "llamascout",
         "Together": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
         "Cloudflare": "@cf/meta/llama-4-scout-17b-16e-instruct",
@@ -379,10 +374,8 @@ model_map = {
         "OpenRouter": "meta-llama/llama-4-scout"
     },
     "llama-4-maverick": {
-        "DeepInfraChat": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
-        "LambdaChat": "llama-4-maverick-17b-128e-instruct-fp8",
         "Together": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
-        "LMArenaBeta": "llama-4-maverick-03-26-experimental",
+        "LMArena": "llama-4-maverick-03-26-experimental",
         "PuterJS": [
             "openrouter:meta-llama/llama-4-maverick:free",
             "openrouter:meta-llama/llama-4-maverick"
@@ -436,7 +429,6 @@ model_map = {
         "Nvidia": "mistralai/mistral-small-24b-instruct"
     },
     "mistral-small-3.1-24b": {
-        "DeepInfraChat": "mistralai/Mistral-Small-3.1-24B-Instruct-2503",
         "PollinationsAI": "mistral",
         "OpenRouterFree": "mistralai/mistral-small-3.1-24b-instruct",
         "Cloudflare": "@cf/mistralai/mistral-small-3.1-24b-instruct",
@@ -448,7 +440,6 @@ model_map = {
         "PuterJS": "openrouter:nousresearch/nous-hermes-2-mixtral-8x7b-dpo"
     },
     "phi-4": {
-        "DeepInfraChat": "microsoft/phi-4",
         "HuggingSpace": "phi-4-multimodal",
         "PuterJS": "openrouter:microsoft/phi-4",
         "DeepInfra": "microsoft/phi-4",
@@ -457,7 +448,6 @@ model_map = {
         "OpenRouter": "microsoft/phi-4"
     },
     "phi-4-multimodal": {
-        "DeepInfraChat": "microsoft/Phi-4-multimodal-instruct",
         "HuggingSpace": "phi-4-multimodal",
         "PuterJS": "openrouter:microsoft/phi-4-multimodal-instruct",
         "DeepInfra": "microsoft/Phi-4-multimodal-instruct",
@@ -466,7 +456,6 @@ model_map = {
         "OpenRouter": "microsoft/phi-4-multimodal-instruct"
     },
     "phi-4-reasoning-plus": {
-        "DeepInfraChat": "microsoft/phi-4-reasoning-plus",
         "PuterJS": [
             "openrouter:microsoft/phi-4-reasoning-plus:free",
             "openrouter:microsoft/phi-4-reasoning-plus"
@@ -475,11 +464,9 @@ model_map = {
         "OpenRouter": "microsoft/phi-4-reasoning-plus"
     },
     "wizardlm-2-7b": {
-        "DeepInfraChat": "microsoft/WizardLM-2-7B",
         "DeepInfra": "microsoft/WizardLM-2-7B"
     },
     "wizardlm-2-8x22b": {
-        "DeepInfraChat": "microsoft/WizardLM-2-8x22B",
         "PuterJS": "openrouter:microsoft/wizardlm-2-8x22b",
         "DeepInfra": "microsoft/WizardLM-2-8x22B",
         "OpenRouter": "microsoft/wizardlm-2-8x22b"
@@ -488,7 +475,6 @@ model_map = {
         "Gemini": ""
     },
     "gemini-1.5-flash": {
-        "TeachAnything": "gemini-1.5-flash",
         "PuterJS": [
             "gemini-1.5-flash",
             "openrouter:google/gemini-flash-1.5",
@@ -496,7 +482,6 @@ model_map = {
         ]
     },
     "gemini-1.5-pro": {
-        "TeachAnything": "gemini-1.5-pro",
         "PuterJS": "openrouter:google/gemini-pro-1.5"
     },
     "gemini-2.0-flash": {
@@ -507,7 +492,7 @@ model_map = {
         ],
         "GeminiPro": "gemma-7b",
         "EasyChat": "gemini-2.0-flash-free",
-        "LMArenaBeta": "gemini-2.0-flash-001",
+        "LMArena": "gemini-2.0-flash-001",
         "PuterJS": [
             "gemini-2.0-flash",
             "openrouter:google/gemini-2.0-flash-lite-001",
@@ -528,8 +513,7 @@ model_map = {
         "GeminiPro": "gemini-2.5-flash",
         "GeminiCLI": "gemini-2.5-flash",
         "EasyChat": "gemini-2.5-flash-free",
-        "DeepInfraChat": "google/gemini-2.5-flash",
-        "LMArenaBeta": "gemini-2.5-flash",
+        "LMArena": "gemini-2.5-flash",
         "PuterJS": "openrouter:google/gemini-2.5-flash-preview",
         "ApiAirforce": "gemini-2.5-flash",
         "DeepInfra": "google/gemini-2.5-flash",
@@ -539,8 +523,7 @@ model_map = {
         "Gemini": "gemini-2.5-pro",
         "GeminiPro": "gemini-2.5-pro",
         "GeminiCLI": "gemini-2.5-pro",
-        "DeepInfraChat": "google/gemini-2.5-pro",
-        "LMArenaBeta": "gemini-2.5-pro",
+        "LMArena": "gemini-2.5-pro",
         "PuterJS": [
             "openrouter:google/gemini-2.5-pro-preview",
             "openrouter:google/gemini-2.5-pro-exp-03-25"
@@ -550,7 +533,6 @@ model_map = {
         "OpenRouter": "google/gemini-2.5-pro-preview-05-06"
     },
     "codegemma-7b": {
-        "DeepInfraChat": "google/codegemma-7b-it",
         "DeepInfra": "google/codegemma-7b-it",
         "FenayAI": "codegemma-7b",
         "Nvidia": "google/codegemma-7b"
@@ -560,11 +542,9 @@ model_map = {
         "Nvidia": "google/gemma-2b"
     },
     "gemma-1.1-7b": {
-        "DeepInfraChat": "google/gemma-1.1-7b-it",
         "DeepInfra": "google/gemma-1.1-7b-it"
     },
     "gemma-2-9b": {
-        "DeepInfraChat": "google/gemma-2-9b-it",
         "PuterJS": [
             "openrouter:google/gemma-2-9b-it:free",
             "openrouter:google/gemma-2-9b-it"
@@ -573,14 +553,12 @@ model_map = {
     },
     "gemma-2-27b": {
         "Together": "google/gemma-2-27b-it",
-        "DeepInfraChat": "google/gemma-2-27b-it",
         "HuggingFace": "google/gemma-2-27b-it",
         "PuterJS": "openrouter:google/gemma-2-27b-it",
         "DeepInfra": "google/gemma-2-27b-it",
         "HuggingFaceAPI": "google/gemma-2-27b-it"
     },
     "gemma-3-4b": {
-        "DeepInfraChat": "google/gemma-3-4b-it",
         "PuterJS": [
             "openrouter:google/gemma-3-4b-it:free",
             "openrouter:google/gemma-3-4b-it"
@@ -588,7 +566,6 @@ model_map = {
         "DeepInfra": "google/gemma-3-4b-it"
     },
     "gemma-3-12b": {
-        "DeepInfraChat": "google/gemma-3-12b-it",
         "PuterJS": [
             "openrouter:google/gemma-3-12b-it:free",
             "openrouter:google/gemma-3-12b-it"
@@ -597,7 +574,6 @@ model_map = {
         "DeepInfra": "google/gemma-3-12b-it"
     },
     "gemma-3-27b": {
-        "DeepInfraChat": "google/gemma-3-27b-it",
         "Together": "google/gemma-3-27b-it",
         "PuterJS": [
             "openrouter:google/gemma-3-27b-it:free",
@@ -713,7 +689,6 @@ model_map = {
     },
     "qwen-2.5-coder-32b": {
         "PollinationsAI": "qwen-coder",
-        "LambdaChat": "qwen25-coder-32b-instruct",
         "Together": "Qwen/Qwen2.5-Coder-32B-Instruct",
         "Qwen": "qwen2.5-coder-32b-instruct",
         "OpenRouterFree": "qwen/qwen-2.5-coder-32b-instruct",
@@ -747,7 +722,6 @@ model_map = {
         "OpenRouter": "qwen/qwen2.5-vl-72b-instruct:free"
     },
     "qwen-3-235b": {
-        "DeepInfraChat": "Qwen/Qwen3-235B-A22B",
         "Together": [
             "Qwen/Qwen3-235B-A22B-fp8",
             "Qwen/Qwen3-235B-A22B-fp8-tput"
@@ -763,7 +737,6 @@ model_map = {
         "Qwen_Qwen_3": "qwen3-235b-a22b"
     },
     "qwen-3-32b": {
-        "DeepInfraChat": "Qwen/Qwen3-32B",
         "LambdaChat": "qwen3-32b-fp8",
         "Together": "Qwen/Qwen3-32B-FP8",
         "HuggingSpace": "qwen3-32b",
@@ -779,7 +752,6 @@ model_map = {
         "Qwen_Qwen_3": "qwen3-32b"
     },
     "qwen-3-30b": {
-        "DeepInfraChat": "Qwen/Qwen3-30B-A3B",
         "HuggingSpace": "qwen3-30b-a3b",
         "PuterJS": [
             "openrouter:qwen/qwen3-30b-a3b:free",
@@ -790,7 +762,6 @@ model_map = {
         "Qwen_Qwen_3": "qwen3-30b-a3b"
     },
     "qwen-3-14b": {
-        "DeepInfraChat": "Qwen/Qwen3-14B",
         "HuggingSpace": "qwen3-14b",
         "OpenRouterFree": "qwen/qwen3-14b",
         "PuterJS": [
@@ -827,12 +798,11 @@ model_map = {
         "Qwen_Qwen_3": "qwen3-0.6b"
     },
     "qwq-32b": {
-        "DeepInfraChat": "Qwen/QwQ-32B",
         "Together": "Qwen/QwQ-32B",
         "Qwen": "qwq-32b",
         "OpenRouterFree": "qwen/qwq-32b",
         "HuggingFace": "Qwen/QwQ-32B",
-        "LMArenaBeta": "qwq-32b",
+        "LMArena": "qwq-32b",
         "PuterJS": [
             "openrouter:qwen/qwq-32b-preview",
             "openrouter:qwen/qwq-32b:free",
@@ -846,10 +816,6 @@ model_map = {
         "OpenRouter": "qwen/qwq-32b:free"
     },
     "deepseek-v3": {
-        "DeepInfraChat": [
-            "deepseek-ai/DeepSeek-V3",
-            "deepseek-ai/DeepSeek-V3-0324"
-        ],
         "Together": "deepseek-ai/DeepSeek-V3",
         "PuterJS": "openrouter:deepseek/deepseek-v3-base:free",
         "DeepInfra": [
@@ -858,11 +824,6 @@ model_map = {
         ]
     },
     "deepseek-r1": {
-        "DeepInfraChat": [
-            "deepseek-ai/DeepSeek-R1",
-            "deepseek-ai/DeepSeek-R1-0528"
-        ],
-        "LambdaChat": "deepseek-r1",
         "PollinationsAI": "deepseek-reasoning",
         "Together": "deepseek-ai/DeepSeek-R1",
         "HuggingFace": "deepseek-ai/DeepSeek-R1",
@@ -885,11 +846,9 @@ model_map = {
         "OpenRouter": "deepseek/deepseek-r1:free"
     },
     "deepseek-r1-turbo": {
-        "DeepInfraChat": "deepseek-ai/DeepSeek-R1-Turbo",
         "DeepInfra": "deepseek-ai/DeepSeek-R1-Turbo"
     },
     "deepseek-r1-distill-llama-70b": {
-        "DeepInfraChat": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
         "Together": [
             "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
             "deepseek-ai/DeepSeek-R1-Distill-Llama-70B-free"
@@ -923,7 +882,6 @@ model_map = {
         "OpenRouter": "deepseek/deepseek-r1-distill-qwen-14b:free"
     },
     "deepseek-r1-distill-qwen-32b": {
-        "DeepInfraChat": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
         "HuggingFace": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
         "PuterJS": [
             "openrouter:deepseek/deepseek-r1-distill-qwen-32b:free",
@@ -935,7 +893,6 @@ model_map = {
         "OpenRouter": "deepseek/deepseek-r1-distill-qwen-32b"
     },
     "deepseek-prover-v2": {
-        "DeepInfraChat": "deepseek-ai/DeepSeek-Prover-V2-671B",
         "PuterJS": [
             "openrouter:deepseek/deepseek-prover-v2:free",
             "openrouter:deepseek/deepseek-prover-v2"
@@ -943,14 +900,11 @@ model_map = {
         "DeepInfra": "deepseek-ai/DeepSeek-Prover-V2-671B"
     },
     "deepseek-prover-v2-671b": {
-        "DeepInfraChat": "deepseek-ai/DeepSeek-Prover-V2-671B",
         "DeepInfra": "deepseek-ai/DeepSeek-Prover-V2-671B",
         "HuggingFaceAPI": "deepseek-ai/DeepSeek-Prover-V2-671B"
     },
     "deepseek-v3-0324": {
-        "DeepInfraChat": "deepseek-ai/DeepSeek-V3-0324",
-        "LambdaChat": "deepseek-v3-0324",
-        "LMArenaBeta": "deepseek-v3-0324",
+        "LMArena": "deepseek-v3-0324",
         "PuterJS": [
             "deepseek-chat",
             "openrouter:deepseek/deepseek-chat-v3-0324:free",
@@ -960,22 +914,18 @@ model_map = {
         "HuggingFaceAPI": "deepseek-ai/DeepSeek-V3-0324"
     },
     "deepseek-v3-0324-turbo": {
-        "DeepInfraChat": "deepseek-ai/DeepSeek-V3-0324-Turbo",
         "DeepInfra": "deepseek-ai/DeepSeek-V3-0324-Turbo"
     },
     "deepseek-r1-0528": {
-        "DeepInfraChat": "deepseek-ai/DeepSeek-R1-0528",
-        "LambdaChat": "deepseek-r1-0528",
         "PollinationsAI": "deepseek-reasoning",
         "OpenRouterFree": "deepseek/deepseek-r1-0528",
-        "LMArenaBeta": "deepseek-r1-0528",
+        "LMArena": "deepseek-r1-0528",
         "DeepInfra": "deepseek-ai/DeepSeek-R1-0528",
        "HuggingFaceAPI": "deepseek-ai/DeepSeek-R1-0528",
         "Nvidia": "deepseek-ai/deepseek-r1-0528",
         "OpenRouter": "deepseek/deepseek-r1-0528:free"
     },
     "deepseek-r1-0528-turbo": {
-        "DeepInfraChat": "deepseek-ai/DeepSeek-R1-0528-Turbo",
         "DeepInfra": "deepseek-ai/DeepSeek-R1-0528-Turbo"
     },
     "janus-pro-7b": {
@@ -993,7 +943,7 @@ model_map = {
         "Grok": "grok-3",
         "Azure": "grok-3",
         "EasyChat": "grok-3-free",
-        "LMArenaBeta": "grok-3-preview-02-24",
+        "LMArena": "grok-3-preview-02-24",
         "PuterJS": "grok-3",
         "OpenRouter": "x-ai/grok-3"
     },
@@ -1003,7 +953,6 @@ model_map = {
     "kimi-k2": {
         "Kimi": "kimi-k2",
         "HuggingFace": "moonshotai/Kimi-K2-Instruct",
-        "DeepInfraChat": "moonshotai/Kimi-K2-Instruct",
         "Groq": "moonshotai/kimi-k2-instruct",
         "OpenRouterFree": "moonshotai/kimi-k2",
         "ApiAirforce": "kimi-k2",
@@ -1040,30 +989,24 @@ model_map = {
         "OpenRouter": "perplexity/r1-1776"
     },
     "nemotron-70b": {
-        "LambdaChat": "llama3.1-nemotron-70b-instruct",
         "Together": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
         "HuggingFace": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
         "PuterJS": "openrouter:nvidia/llama-3.1-nemotron-70b-instruct",
         "HuggingFaceAPI": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF"
     },
     "dolphin-2.6": {
-        "DeepInfraChat": "cognitivecomputations/dolphin-2.6-mixtral-8x7b",
         "DeepInfra": "cognitivecomputations/dolphin-2.6-mixtral-8x7b"
     },
     "dolphin-2.9": {
-        "DeepInfraChat": "cognitivecomputations/dolphin-2.9.1-llama-3-70b",
         "DeepInfra": "cognitivecomputations/dolphin-2.9.1-llama-3-70b"
     },
     "airoboros-70b": {
-        "DeepInfraChat": "deepinfra/airoboros-70b",
         "DeepInfra": "deepinfra/airoboros-70b"
     },
     "lzlv-70b": {
-        "DeepInfraChat": "lizpreciatior/lzlv_70b_fp16_hf",
         "DeepInfra": "lizpreciatior/lzlv_70b_fp16_hf"
     },
     "lfm-40b": {
-        "LambdaChat": "lfm-40b",
         "PuterJS": "openrouter:liquid/lfm-40b",
         "OpenRouter": "liquid/lfm-40b"
     },
@ -1076,7 +1019,6 @@ model_map = {
|
||||||
"sdxl-turbo": {
|
"sdxl-turbo": {
|
||||||
"HuggingFaceMedia": "stabilityai/sdxl-turbo",
|
"HuggingFaceMedia": "stabilityai/sdxl-turbo",
|
||||||
"PollinationsImage": "sdxl-turbo",
|
"PollinationsImage": "sdxl-turbo",
|
||||||
"ImageLabs": "sdxl-turbo",
|
|
||||||
"PollinationsAI": "turbo",
|
"PollinationsAI": "turbo",
|
||||||
"HuggingFace": "stabilityai/sdxl-turbo"
|
"HuggingFace": "stabilityai/sdxl-turbo"
|
||||||
},
|
},
|
||||||
|
|
@ -1157,7 +1099,7 @@ model_map = {
|
||||||
"flux-kontext": {
|
"flux-kontext": {
|
||||||
"PollinationsAI": "kontext",
|
"PollinationsAI": "kontext",
|
||||||
"Azure": "flux.1-kontext-pro",
|
"Azure": "flux.1-kontext-pro",
|
||||||
"LMArenaBeta": "flux-1-kontext-pro",
|
"LMArena": "flux-1-kontext-pro",
|
||||||
"Together": "flux-kontext"
|
"Together": "flux-kontext"
|
||||||
},
|
},
|
||||||
"flux-dev-lora": {
|
"flux-dev-lora": {
|
||||||
|
|
@ -1318,7 +1260,7 @@ model_map = {
|
||||||
},
|
},
|
||||||
"claude-3-5-haiku": {
|
"claude-3-5-haiku": {
|
||||||
"PollinationsAI": "claude",
|
"PollinationsAI": "claude",
|
||||||
"LMArenaBeta": "claude-3-5-haiku-20241022",
|
"LMArena": "claude-3-5-haiku-20241022",
|
||||||
"Anthropic": "claude-3-5-haiku-20241022"
|
"Anthropic": "claude-3-5-haiku-20241022"
|
||||||
},
|
},
|
||||||
"gemini-2.5-flash-lite": {
|
"gemini-2.5-flash-lite": {
|
||||||
|
|
@ -1327,9 +1269,7 @@ model_map = {
|
||||||
},
|
},
|
||||||
"llama-4-scout-17b-16e": {
|
"llama-4-scout-17b-16e": {
|
||||||
"PollinationsAI": "llamascout",
|
"PollinationsAI": "llamascout",
|
||||||
"LambdaChat": "llama-4-scout-17b-16e-instruct",
|
"LMArena": "llama-4-scout-17b-16e-instruct",
|
||||||
"DeepInfraChat": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
|
|
||||||
"LMArenaBeta": "llama-4-scout-17b-16e-instruct",
|
|
||||||
"DeepInfra": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
|
"DeepInfra": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
|
||||||
"Groq": "meta-llama/llama-4-scout-17b-16e-instruct",
|
"Groq": "meta-llama/llama-4-scout-17b-16e-instruct",
|
||||||
"HuggingFaceAPI": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
|
"HuggingFaceAPI": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
|
||||||
|
|
@ -1348,7 +1288,7 @@ model_map = {
|
||||||
},
|
},
|
||||||
"o3": {
|
"o3": {
|
||||||
"PollinationsAI": "openai-reasoning",
|
"PollinationsAI": "openai-reasoning",
|
||||||
"LMArenaBeta": "o3-2025-04-16",
|
"LMArena": "o3-2025-04-16",
|
||||||
"PuterJS": [
|
"PuterJS": [
|
||||||
"o3",
|
"o3",
|
||||||
"openrouter:openai/o3"
|
"openrouter:openai/o3"
|
||||||
|
|
@ -1371,7 +1311,7 @@ model_map = {
|
||||||
"qwen-3-235b-a22b": {
|
"qwen-3-235b-a22b": {
|
||||||
"Qwen": "qwen3-235b-a22b",
|
"Qwen": "qwen3-235b-a22b",
|
||||||
"OpenRouterFree": "qwen/qwen3-235b-a22b",
|
"OpenRouterFree": "qwen/qwen3-235b-a22b",
|
||||||
"LMArenaBeta": "qwen3-235b-a22b",
|
"LMArena": "qwen3-235b-a22b",
|
||||||
"HuggingFaceAPI": "Qwen/Qwen3-235B-A22B-FP8",
|
"HuggingFaceAPI": "Qwen/Qwen3-235B-A22B-FP8",
|
||||||
"Nvidia": "qwen/qwen3-235b-a22b",
|
"Nvidia": "qwen/qwen3-235b-a22b",
|
||||||
"OpenRouter": "qwen/qwen3-235b-a22b:free"
|
"OpenRouter": "qwen/qwen3-235b-a22b:free"
|
||||||
|
|
@ -1382,9 +1322,8 @@ model_map = {
|
||||||
"qwen-3-30b-a3b": {
|
"qwen-3-30b-a3b": {
|
||||||
"Qwen": "qwen3-30b-a3b",
|
"Qwen": "qwen3-30b-a3b",
|
||||||
"OpenRouterFree": "qwen/qwen3-30b-a3b",
|
"OpenRouterFree": "qwen/qwen3-30b-a3b",
|
||||||
"DeepInfraChat": "Qwen/Qwen3-30B-A3B",
|
|
||||||
"HuggingFace": "Qwen/Qwen3-30B-A3B",
|
"HuggingFace": "Qwen/Qwen3-30B-A3B",
|
||||||
"LMArenaBeta": "qwen3-30b-a3b",
|
"LMArena": "qwen3-30b-a3b",
|
||||||
"HuggingSpace": "qwen-3-30b-a3b",
|
"HuggingSpace": "qwen-3-30b-a3b",
|
||||||
"DeepInfra": "Qwen/Qwen3-30B-A3B",
|
"DeepInfra": "Qwen/Qwen3-30B-A3B",
|
||||||
"FenayAI": "qwen3-30b-a3b",
|
"FenayAI": "qwen3-30b-a3b",
|
||||||
|
|
@ -1399,7 +1338,7 @@ model_map = {
|
||||||
},
|
},
|
||||||
"qwen-max": {
|
"qwen-max": {
|
||||||
"Qwen": "qwen-max-latest",
|
"Qwen": "qwen-max-latest",
|
||||||
"LMArenaBeta": "qwen-max-2025-01-25",
|
"LMArena": "qwen-max-2025-01-25",
|
||||||
"PuterJS": "openrouter:qwen/qwen-max",
|
"PuterJS": "openrouter:qwen/qwen-max",
|
||||||
"OpenRouter": "qwen/qwen-max"
|
"OpenRouter": "qwen/qwen-max"
|
||||||
},
|
},
|
||||||
|
|
@ -1449,7 +1388,6 @@ model_map = {
|
||||||
},
|
},
|
||||||
"glm-4.5": {
|
"glm-4.5": {
|
||||||
"GLM": "GLM-4.5",
|
"GLM": "GLM-4.5",
|
||||||
"DeepInfraChat": "zai-org/GLM-4.5",
|
|
||||||
"HuggingFace": "zai-org/GLM-4.5",
|
"HuggingFace": "zai-org/GLM-4.5",
|
||||||
"ApiAirforce": "glm-4.5",
|
"ApiAirforce": "glm-4.5",
|
||||||
"DeepInfra": "zai-org/GLM-4.5",
|
"DeepInfra": "zai-org/GLM-4.5",
|
||||||
|
|
@ -1459,7 +1397,6 @@ model_map = {
|
||||||
"glm-4.5-air": {
|
"glm-4.5-air": {
|
||||||
"GLM": "GLM-4.5-Air",
|
"GLM": "GLM-4.5-Air",
|
||||||
"OpenRouterFree": "z-ai/glm-4.5-air",
|
"OpenRouterFree": "z-ai/glm-4.5-air",
|
||||||
"DeepInfraChat": "zai-org/GLM-4.5-Air",
|
|
||||||
"HuggingFace": "zai-org/GLM-4.5-Air",
|
"HuggingFace": "zai-org/GLM-4.5-Air",
|
||||||
"DeepInfra": "zai-org/GLM-4.5-Air",
|
"DeepInfra": "zai-org/GLM-4.5-Air",
|
||||||
"HuggingFaceAPI": "zai-org/GLM-4.5-Air-FP8",
|
"HuggingFaceAPI": "zai-org/GLM-4.5-Air-FP8",
|
||||||
|
|
@ -1467,7 +1404,6 @@ model_map = {
|
||||||
},
|
},
|
||||||
"glm-4.5v": {
|
"glm-4.5v": {
|
||||||
"GLM": "GLM-4.5V",
|
"GLM": "GLM-4.5V",
|
||||||
"DeepInfraChat": "zai-org/GLM-4.5V",
|
|
||||||
"DeepInfra": "zai-org/GLM-4.5V",
|
"DeepInfra": "zai-org/GLM-4.5V",
|
||||||
"HuggingFaceAPI": "zai-org/GLM-4.5V",
|
"HuggingFaceAPI": "zai-org/GLM-4.5V",
|
||||||
"OpenRouter": "z-ai/glm-4.5v"
|
"OpenRouter": "z-ai/glm-4.5v"
|
||||||
|
|
@ -1546,7 +1482,6 @@ model_map = {
|
||||||
},
|
},
|
||||||
"gemma-3-12b-it": {
|
"gemma-3-12b-it": {
|
||||||
"OpenRouterFree": "google/gemma-3-12b-it",
|
"OpenRouterFree": "google/gemma-3-12b-it",
|
||||||
"DeepInfraChat": "google/gemma-3-12b-it",
|
|
||||||
"DeepInfra": "google/gemma-3-12b-it",
|
"DeepInfra": "google/gemma-3-12b-it",
|
||||||
"GeminiPro": "gemma-3-12b-it",
|
"GeminiPro": "gemma-3-12b-it",
|
||||||
"HuggingFaceAPI": "google/gemma-3-12b-it",
|
"HuggingFaceAPI": "google/gemma-3-12b-it",
|
||||||
|
|
@ -1555,8 +1490,7 @@ model_map = {
|
||||||
},
|
},
|
||||||
"gemma-3-27b-it": {
|
"gemma-3-27b-it": {
|
||||||
"OpenRouterFree": "google/gemma-3-27b-it",
|
"OpenRouterFree": "google/gemma-3-27b-it",
|
||||||
"DeepInfraChat": "google/gemma-3-27b-it",
|
"LMArena": "gemma-3-27b-it",
|
||||||
"LMArenaBeta": "gemma-3-27b-it",
|
|
||||||
"DeepInfra": "google/gemma-3-27b-it",
|
"DeepInfra": "google/gemma-3-27b-it",
|
||||||
"FenayAI": "gemma-3-27b-it",
|
"FenayAI": "gemma-3-27b-it",
|
||||||
"GeminiPro": "gemma-3-27b-it",
|
"GeminiPro": "gemma-3-27b-it",
|
||||||
|
|
@ -1566,7 +1500,6 @@ model_map = {
|
||||||
},
|
},
|
||||||
"gemma-3-4b-it": {
|
"gemma-3-4b-it": {
|
||||||
"OpenRouterFree": "google/gemma-3-4b-it",
|
"OpenRouterFree": "google/gemma-3-4b-it",
|
||||||
"DeepInfraChat": "google/gemma-3-4b-it",
|
|
||||||
"DeepInfra": "google/gemma-3-4b-it",
|
"DeepInfra": "google/gemma-3-4b-it",
|
||||||
"GeminiPro": "gemma-3-4b-it",
|
"GeminiPro": "gemma-3-4b-it",
|
||||||
"Nvidia": "google/gemma-3-4b-it",
|
"Nvidia": "google/gemma-3-4b-it",
|
||||||
|
|
@ -1580,7 +1513,7 @@ model_map = {
|
||||||
},
|
},
|
||||||
"gemma-3n-e4b-it": {
|
"gemma-3n-e4b-it": {
|
||||||
"OpenRouterFree": "google/gemma-3n-e4b-it",
|
"OpenRouterFree": "google/gemma-3n-e4b-it",
|
||||||
"LMArenaBeta": "gemma-3n-e4b-it",
|
"LMArena": "gemma-3n-e4b-it",
|
||||||
"ApiAirforce": "gemma-3n-e4b-it",
|
"ApiAirforce": "gemma-3n-e4b-it",
|
||||||
"GeminiPro": "gemma-3n-e4b-it",
|
"GeminiPro": "gemma-3n-e4b-it",
|
||||||
"Nvidia": "google/gemma-3n-e4b-it",
|
"Nvidia": "google/gemma-3n-e4b-it",
|
||||||
|
|
@ -1635,7 +1568,6 @@ model_map = {
|
||||||
},
|
},
|
||||||
"gpt-oss-20b": {
|
"gpt-oss-20b": {
|
||||||
"OpenRouterFree": "openai/gpt-oss-20b",
|
"OpenRouterFree": "openai/gpt-oss-20b",
|
||||||
"DeepInfraChat": "openai/gpt-oss-20b",
|
|
||||||
"HuggingFace": "openai/gpt-oss-20b",
|
"HuggingFace": "openai/gpt-oss-20b",
|
||||||
"ApiAirforce": "gpt-oss-20b",
|
"ApiAirforce": "gpt-oss-20b",
|
||||||
"DeepInfra": "openai/gpt-oss-20b",
|
"DeepInfra": "openai/gpt-oss-20b",
|
||||||
|
|
@@ -1697,15 +1629,10 @@ model_map = {
         "LambdaChat": "apriel-5b-instruct"
     },
     "hermes-3-llama-3.1-405b": {
-        "LambdaChat": "hermes-3-llama-3.1-405b-fp8",
         "HuggingFaceAPI": "NousResearch/Hermes-3-Llama-3.1-405B",
         "OpenRouter": "nousresearch/hermes-3-llama-3.1-405b"
     },
-    "hermes3-405b-fp8-128k": {
-        "LambdaChat": "hermes3-405b-fp8-128k"
-    },
     "llama-3.1-nemotron-70b": {
-        "LambdaChat": "llama3.1-nemotron-70b-instruct",
         "HuggingFace": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
         "GlhfChat": "hf:nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
         "Nvidia": "nvidia/llama-3.1-nemotron-70b-instruct",
@@ -1715,67 +1642,49 @@ model_map = {
         "LambdaChat": "qwen25-coder-32b-instruct"
     },
     "llama-4-maverick-17b-128e": {
-        "LambdaChat": "llama-4-maverick-17b-128e-instruct-fp8",
-        "DeepInfraChat": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
-        "LMArenaBeta": "llama-4-maverick-17b-128e-instruct",
+        "LMArena": "llama-4-maverick-17b-128e-instruct",
         "DeepInfra": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
         "Groq": "meta-llama/llama-4-maverick-17b-128e-instruct",
         "HuggingFaceAPI": "meta-llama/Llama-4-Maverick-17B-128E-Instruct",
         "Nvidia": "meta/llama-4-maverick-17b-128e-instruct"
     },
-    "hermes-3": {
-        "LambdaChat": "hermes3-405b-fp8-128k"
-    },
     "hermes-3-405b": {
-        "LambdaChat": [
-            "hermes3-405b-fp8-128k",
-            "hermes-3-llama-3.1-405b-fp8"
-        ],
         "PuterJS": "openrouter:nousresearch/hermes-3-llama-3.1-405b"
     },
     "qwen-3-coder-480b-a35b-turbo": {
-        "DeepInfraChat": "Qwen/Qwen3-Coder-480B-A35B-Instruct-Turbo",
         "DeepInfra": "Qwen/Qwen3-Coder-480B-A35B-Instruct-Turbo"
     },
     "olmocr-7b-0725": {
-        "DeepInfraChat": "allenai/olmOCR-7B-0725-FP8",
         "DeepInfra": "allenai/olmOCR-7B-0725-FP8"
     },
     "qwen-3-235b-a22b-thinking-2507": {
-        "DeepInfraChat": "Qwen/Qwen3-235B-A22B-Thinking-2507",
         "HuggingFace": "Qwen/Qwen3-235B-A22B-Thinking-2507",
         "DeepInfra": "Qwen/Qwen3-235B-A22B-Thinking-2507",
         "HuggingFaceAPI": "Qwen/Qwen3-235B-A22B-Thinking-2507",
         "OpenRouter": "qwen/qwen3-235b-a22b-thinking-2507"
     },
     "qwen-3-coder-480b-a35b": {
-        "DeepInfraChat": "Qwen/Qwen3-Coder-480B-A35B-Instruct",
         "HuggingFace": "Qwen/Qwen3-Coder-480B-A35B-Instruct",
         "DeepInfra": "Qwen/Qwen3-Coder-480B-A35B-Instruct",
         "HuggingFaceAPI": "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8"
     },
     "qwen-3-235b-a22b-2507": {
-        "DeepInfraChat": "Qwen/Qwen3-235B-A22B-Instruct-2507",
         "HuggingFace": "Qwen/Qwen3-235B-A22B-Instruct-2507",
         "DeepInfra": "Qwen/Qwen3-235B-A22B-Instruct-2507",
         "HuggingFaceAPI": "Qwen/Qwen3-235B-A22B-Instruct-2507",
         "OpenRouter": "qwen/qwen3-235b-a22b-2507"
     },
     "llama-4-maverick-17b-128e-turbo": {
-        "DeepInfraChat": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-Turbo",
         "DeepInfra": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-Turbo"
     },
     "devstral-small-2507": {
-        "DeepInfraChat": "mistralai/Devstral-Small-2507",
         "PuterJS": "devstral-small-2507",
         "DeepInfra": "mistralai/Devstral-Small-2507"
     },
     "mistral-small-3.2-24b-2506": {
-        "DeepInfraChat": "mistralai/Mistral-Small-3.2-24B-Instruct-2506",
         "DeepInfra": "mistralai/Mistral-Small-3.2-24B-Instruct-2506"
     },
     "llama-guard-4-12b": {
-        "DeepInfraChat": "meta-llama/Llama-Guard-4-12B",
         "DeepInfra": "meta-llama/Llama-Guard-4-12B",
         "Groq": "meta-llama/llama-guard-4-12b",
         "HuggingFaceAPI": "meta-llama/Llama-Guard-4-12B",
@@ -1783,15 +1692,12 @@ model_map = {
         "OpenRouter": "meta-llama/llama-guard-4-12b"
     },
     "claude-4-opus": {
-        "DeepInfraChat": "anthropic/claude-4-opus",
         "DeepInfra": "anthropic/claude-4-opus"
     },
     "claude-4-sonnet": {
-        "DeepInfraChat": "anthropic/claude-4-sonnet",
         "DeepInfra": "anthropic/claude-4-sonnet"
     },
     "deepseek": {
-        "DeepInfraChat": "deepseek-ai/DeepSeek-V3",
         "HuggingFace": "deepseek-ai/DeepSeek-V3",
         "PuterJS": "deepseek-v3",
         "ApiAirforce": "deepseek-v3",
@@ -1804,7 +1710,6 @@ model_map = {
         "OpenRouter": "deepseek/deepseek-chat"
     },
     "llama-3.3-70b-turbo": {
-        "DeepInfraChat": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
         "BlackboxPro": "Meta-Llama-3.3-70B-Instruct-Turbo",
         "DeepInfra": "meta-llama/Llama-3.3-70B-Instruct-Turbo"
     },
@@ -1915,196 +1820,196 @@ model_map = {
         "HuggingFaceMedia": "Lightricks/LTX-Video-0.9.7-dev:fal-ai"
     },
     "claude-opus-4": {
-        "LMArenaBeta": "claude-opus-4-20250514",
+        "LMArena": "claude-opus-4-20250514",
         "PuterJS": "claude-opus-4-latest",
         "OpenRouter": "anthropic/claude-opus-4"
     },
     "chatgpt-4o": {
-        "LMArenaBeta": "chatgpt-4o-latest-20250326",
+        "LMArena": "chatgpt-4o-latest-20250326",
         "OpenRouter": "openai/chatgpt-4o-latest"
     },
     "mistral-small-3.1-24b-2503": {
-        "LMArenaBeta": "mistral-small-3.1-24b-instruct-2503",
+        "LMArena": "mistral-small-3.1-24b-instruct-2503",
         "HuggingFaceAPI": "mistralai/Mistral-Small-3.1-24B-Instruct-2503",
         "Nvidia": "mistralai/mistral-small-3.1-24b-instruct-2503"
     },
     "steve": {
-        "LMArenaBeta": "steve"
+        "LMArena": "steve"
     },
     "command-a25": {
-        "LMArenaBeta": "command-a-03-2025",
+        "LMArena": "command-a-03-2025",
         "HuggingSpace": "command-a-03-2025",
         "CohereForAI_C4AI_Command": "command-a-03-2025"
     },
     "amazon.nova-pro": {
-        "LMArenaBeta": "amazon.nova-pro-v1:0"
+        "LMArena": "amazon.nova-pro-v1:0"
     },
     "grok-3-mini-beta": {
-        "LMArenaBeta": "grok-3-mini-beta",
+        "LMArena": "grok-3-mini-beta",
         "OpenRouter": "x-ai/grok-3-mini-beta"
     },
     "gemini-2.5-flash-lite-preview-thinking": {
-        "LMArenaBeta": "gemini-2.5-flash-lite-preview-06-17-thinking"
+        "LMArena": "gemini-2.5-flash-lite-preview-06-17-thinking"
     },
     "amazon-nova-experimental": {
-        "LMArenaBeta": "amazon-nova-experimental-chat-05-14"
+        "LMArena": "amazon-nova-experimental-chat-05-14"
     },
     "claude-3-7-sonnet-20250219-thinking-32k": {
-        "LMArenaBeta": "claude-3-7-sonnet-20250219-thinking-32k"
+        "LMArena": "claude-3-7-sonnet-20250219-thinking-32k"
     },
     "mistral-medium-2505": {
-        "LMArenaBeta": "mistral-medium-2505"
+        "LMArena": "mistral-medium-2505"
     },
     "magistral-medium-2506": {
-        "LMArenaBeta": "magistral-medium-2506",
+        "LMArena": "magistral-medium-2506",
         "OpenRouter": "mistralai/magistral-medium-2506:thinking"
     },
     "x": {
-        "LMArenaBeta": "X-preview"
+        "LMArena": "X-preview"
     },
     "stephen": {
-        "LMArenaBeta": "stephen-v2"
+        "LMArena": "stephen-v2"
     },
     "glm-4-air-250414": {
-        "LMArenaBeta": "glm-4-air-250414"
+        "LMArena": "glm-4-air-250414"
     },
     "claude-sonnet-4": {
-        "LMArenaBeta": "claude-sonnet-4-20250514",
+        "LMArena": "claude-sonnet-4-20250514",
         "PuterJS": "claude-sonnet-4-latest",
         "ApiAirforce": "claude-sonnet-4",
         "OpenRouter": "anthropic/claude-sonnet-4"
     },
     "stonebloom": {
-        "LMArenaBeta": "stonebloom"
+        "LMArena": "stonebloom"
     },
     "claude-3-7-sonnet": {
-        "LMArenaBeta": "claude-3-7-sonnet-20250219",
+        "LMArena": "claude-3-7-sonnet-20250219",
         "PuterJS": "claude-3-7-sonnet-latest"
     },
     "minimax-m1": {
-        "LMArenaBeta": "minimax-m1",
+        "LMArena": "minimax-m1",
         "OpenRouter": "minimax/minimax-m1"
     },
     "step-1o-turbo-202506": {
-        "LMArenaBeta": "step-1o-turbo-202506"
+        "LMArena": "step-1o-turbo-202506"
     },
     "claude-sonnet-4-20250514-thinking-32k": {
-        "LMArenaBeta": "claude-sonnet-4-20250514-thinking-32k"
+        "LMArena": "claude-sonnet-4-20250514-thinking-32k"
     },
     "qwen-3-235b-a22b-no-thinking": {
-        "LMArenaBeta": "qwen3-235b-a22b-no-thinking"
+        "LMArena": "qwen3-235b-a22b-no-thinking"
     },
     "claude-opus-4-20250514-thinking-16k": {
-        "LMArenaBeta": "claude-opus-4-20250514-thinking-16k"
+        "LMArena": "claude-opus-4-20250514-thinking-16k"
     },
     "stephen-vision-csfix": {
-        "LMArenaBeta": "stephen-vision-csfix"
+        "LMArena": "stephen-vision-csfix"
     },
     "mistral-small-2506": {
-        "LMArenaBeta": "mistral-small-2506",
+        "LMArena": "mistral-small-2506",
         "PuterJS": "mistral-small-2506"
     },
     "wolfstride": {
-        "LMArenaBeta": "wolfstride"
+        "LMArena": "wolfstride"
     },
     "grok-3-mini-high": {
-        "LMArenaBeta": "grok-3-mini-high"
+        "LMArena": "grok-3-mini-high"
     },
     "grok-4-0709": {
-        "LMArenaBeta": "grok-4-0709"
+        "LMArena": "grok-4-0709"
     },
     "cresylux": {
-        "LMArenaBeta": "cresylux"
+        "LMArena": "cresylux"
     },
     "hunyuan-turbos": {
-        "LMArenaBeta": "hunyuan-turbos-20250416"
+        "LMArena": "hunyuan-turbos-20250416"
     },
     "ernie-x1-turbo-32k": {
-        "LMArenaBeta": "ernie-x1-turbo-32k-preview"
+        "LMArena": "ernie-x1-turbo-32k-preview"
     },
     "kimi-k2-0711": {
-        "LMArenaBeta": "kimi-k2-0711-preview"
+        "LMArena": "kimi-k2-0711-preview"
     },
     "nettle": {
-        "LMArenaBeta": "nettle"
+        "LMArena": "nettle"
     },
     "clownfish": {
-        "LMArenaBeta": "clownfish"
+        "LMArena": "clownfish"
     },
     "octopus": {
-        "LMArenaBeta": "octopus"
+        "LMArena": "octopus"
     },
     "kraken-07152025-1": {
-        "LMArenaBeta": "kraken-07152025-1"
+        "LMArena": "kraken-07152025-1"
     },
     "kraken-07152025-2": {
-        "LMArenaBeta": "kraken-07152025-2"
+        "LMArena": "kraken-07152025-2"
     },
     "folsom-07152025-1": {
-        "LMArenaBeta": "folsom-07152025-1"
+        "LMArena": "folsom-07152025-1"
     },
     "claude-3-5-sonnet": {
-        "LMArenaBeta": "claude-3-5-sonnet-20241022",
+        "LMArena": "claude-3-5-sonnet-20241022",
         "PuterJS": "claude-3-5-sonnet-20240620",
         "Anthropic": "claude-3-5-sonnet-20241022"
     },
     "hunyuan-large-vision": {
-        "LMArenaBeta": "hunyuan-large-vision"
+        "LMArena": "hunyuan-large-vision"
     },
     "flux-1-kontext-pro": {
-        "LMArenaBeta": "flux-1-kontext-pro"
+        "LMArena": "flux-1-kontext-pro"
     },
     "gpt-image-1": {
-        "LMArenaBeta": "gpt-image-1"
+        "LMArena": "gpt-image-1"
     },
     "flux-1-kontext-max": {
-        "LMArenaBeta": "flux-1-kontext-max"
+        "LMArena": "flux-1-kontext-max"
     },
     "imagen-4.0-ultra-generate": {
-        "LMArenaBeta": "imagen-4.0-ultra-generate-preview-06-06"
+        "LMArena": "imagen-4.0-ultra-generate-preview-06-06"
     },
     "imagen-3.0-generate": {
-        "LMArenaBeta": "imagen-3.0-generate-002"
+        "LMArena": "imagen-3.0-generate-002"
     },
     "ideogram": {
-        "LMArenaBeta": "ideogram-v2"
+        "LMArena": "ideogram-v2"
     },
     "photon": {
-        "LMArenaBeta": "photon"
+        "LMArena": "photon"
     },
     "step1x-edit": {
-        "LMArenaBeta": "step1x-edit"
+        "LMArena": "step1x-edit"
     },
     "recraft": {
-        "LMArenaBeta": "recraft-v3"
+        "LMArena": "recraft-v3"
     },
     "anonymous-bot-0514": {
-        "LMArenaBeta": "anonymous-bot-0514"
+        "LMArena": "anonymous-bot-0514"
     },
     "flux-1.1-pro": {
-        "LMArenaBeta": "flux-1.1-pro"
+        "LMArena": "flux-1.1-pro"
     },
     "ideogram-v3-quality": {
-        "LMArenaBeta": "ideogram-v3-quality"
+        "LMArena": "ideogram-v3-quality"
     },
     "imagen-4.0-generate": {
-        "LMArenaBeta": "imagen-4.0-generate-preview-06-06"
+        "LMArena": "imagen-4.0-generate-preview-06-06"
     },
     "seedream-3": {
-        "LMArenaBeta": "seedream-3",
+        "LMArena": "seedream-3",
         "ApiAirforce": "seedream-3"
     },
     "seededit-3.0": {
-        "LMArenaBeta": "seededit-3.0"
+        "LMArena": "seededit-3.0"
     },
     "flux-1-kontext-dev": {
-        "LMArenaBeta": "flux-1-kontext-dev"
+        "LMArena": "flux-1-kontext-dev"
     },
     "bagel": {
-        "LMArenaBeta": "bagel"
+        "LMArena": "bagel"
     },
     "gemini-2.0-flash-preview-image-generation": {
-        "LMArenaBeta": "gemini-2.0-flash-preview-image-generation"
+        "LMArena": "gemini-2.0-flash-preview-image-generation"
     },
     "gpt-5-mini": {
         "PuterJS": "gpt-5-mini",
@@ -3078,7 +2983,7 @@ models_count = {
     "llama-3.3-70b-instruct": 4,
     "llama-4-maverick-17b-128e-instruct": 5,
     "qwen-332b": 3,
-    "hermes-3-405b": 2,
+    "hermes-3-405b": 1,
     "gpt-oss-20b": 12,
     "glm-4.5": 7,
     "qwen-3235b-a22b-thinking-2507": 2,
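The `hermes-3-405b` count drops from 2 to 1 because the entry's `LambdaChat` ids were removed above, leaving only `PuterJS`. A minimal sketch of how such a count table could be derived from `model_map` — an assumption about how the generated file is produced, not code from this commit:

# Hypothetical derivation: one count per public model alias,
# tallying how many providers are registered for it in model_map.
models_count = {
    alias: len(providers)
    for alias, providers in model_map.items()
}
# models_count["hermes-3-405b"] == 1 after this commit (only PuterJS remains)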
@@ -3256,7 +3161,7 @@ parents = {
         "CopilotAccount"
     ],
     "DeepInfra": [
-        "DeepInfraChat"
+        "DeepInfra"
     ],
     "HuggingFace": [
         "HuggingFaceAPI",
@@ -3418,7 +3323,6 @@ model_aliases = {
     "openrouter:perplexity/sonar-reasoning": "sonar-reasoning",
     "openrouter:perplexity/sonar-reasoning-pro": "sonar-reasoning-pro",
     "openrouter:perplexity/r1-1776": "r1-1776",
-    "llama3.1-nemotron-70b-instruct": "llama-3.1-nemotron-70b",
     "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF": "llama-3.1-nemotron-70b",
     "openrouter:nvidia/llama-3.1-nemotron-70b-instruct": "nemotron-70b",
     "cognitivecomputations/dolphin-2.6-mixtral-8x7b": "dolphin-2.6",
@@ -3537,7 +3441,6 @@ model_aliases = {
     "openrouter:microsoft/phi-3-mini-128k-instruct": "phi-3-mini",
     "deepseek-llama3.3-70b": "deepseek-llama-3.3-70b",
     "apriel-5b-instruct": "apriel-5b",
-    "hermes-3-llama-3.1-405b-fp8": "hermes-3-llama-3.1-405b",
     "openrouter:nousresearch/hermes-3-llama-3.1-405b": "hermes-3-405b",
     "openai/gpt-oss-20b": "gpt-oss-20b",
     "Qwen/Qwen3-Coder-480B-A35B-Instruct-Turbo": "qwen-3-coder-480b-a35b-turbo",
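`model_map` keys a public model alias to per-provider model ids; a value may be a single id or a list of ids for one provider (the old `hermes-3-405b` entry carried two `LambdaChat` ids). A minimal lookup sketch — the helper name `resolve_model` is hypothetical, not part of the codebase:

def resolve_model(model: str, provider: str) -> list[str]:
    """Return the provider-specific ids registered for a public alias."""
    value = model_map.get(model, {}).get(provider)
    if value is None:
        return []
    # Normalize: a provider may register one id or a list of ids.
    return value if isinstance(value, list) else [value]

# resolve_model("gemma-3-27b-it", "DeepInfra") -> ["google/gemma-3-27b-it"]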
@@ -10,7 +10,7 @@ from ..providers.retry_provider import RotatedProvider
 from ..Provider.needs_auth import OpenaiChat, CopilotAccount
 from ..Provider.hf_space import HuggingSpace
 from ..Provider import Custom, PollinationsImage, OpenaiAccount, Copilot, Cloudflare, Gemini, Grok, PerplexityLabs, LambdaChat, PollinationsAI, PuterJS
-from ..Provider import Microsoft_Phi_4_Multimodal, DeepInfraChat, LMArenaBeta, EdgeTTS, gTTS, MarkItDown, OpenAIFM
+from ..Provider import Microsoft_Phi_4_Multimodal, DeepInfra, LMArena, EdgeTTS, gTTS, MarkItDown, OpenAIFM
 from ..Provider import HuggingFace, HuggingFaceMedia, Azure, Qwen, EasyChat, GLM, OpenRouterFree, GeminiPro
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .. import Provider
@@ -25,7 +25,7 @@ PROVIDERS_LIST_2 = [
 
 # Add all models to the model map
 PROVIDERS_LIST_3 = [
-    LambdaChat, DeepInfraChat, HuggingFace, HuggingFaceMedia, LMArenaBeta,
+    LambdaChat, DeepInfra, HuggingFace, HuggingFaceMedia, LMArena,
     PuterJS, Cloudflare, HuggingSpace
 ]
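For downstream code, the practical effect of this refactor is the class renames. A minimal migration sketch, assuming the new names are exported from `g4f.Provider` as the import diff above shows:

from g4f.client import Client
from g4f.Provider import DeepInfra, LMArena  # formerly DeepInfraChat, LMArenaBeta

# Route a chat completion through the renamed DeepInfra provider.
client = Client(provider=DeepInfra)
response = client.chat.completions.create(
    model="google/gemma-3-27b-it",
    messages=[{"role": "user", "content": "Say hello."}],
)
print(response.choices[0].message.content)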