refactor: reorganize providers and update model configurations

- Rename DeepInfraChat to DeepInfra across all files
- Move DeepInfra from needs_auth to main Provider directory
- Rename LMArenaBeta to LMArena throughout codebase
- Move search-related providers to new search subdirectory (GoogleSearch, SearXNG, YouTube)
- Move no-longer-working providers to the deprecated directory (Free2GPT, LegacyLMArena, PenguinAI, ImageLabs, har)
- Add new Mintlify provider with custom AI assistant implementation
- Update Anthropic provider with Claude 4 models and Opus 4.1 parameter handling
- Update Grok provider with Grok 4 models and improved streaming support
- Update GithubCopilot with expanded model list including o3-mini, o4-mini, gpt-5 previews
- Update LambdaChat default model from deepseek-r1 to deepseek-llama3.3-70b
- Update TeachAnything default model from gemini-1.5-pro to gemma
- Remove DeepInfra from needs_auth directory
- Update all model_map references from DeepInfraChat to DeepInfra
- Update all model_map references from LMArenaBeta to LMArena
- Add beta_headers support to Anthropic for special features
- Improve Mintlify provider with system prompt handling and streaming
- Update model configurations in models.py to reflect provider changes
kqlio67 2025-08-25 23:50:53 +03:00
parent 2cf62a8e63
commit 9bac34fc88
26 changed files with 641 additions and 517 deletions
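
A minimal sketch of what the renames mean for callers (written against g4f's public client API; the model name is illustrative):

from g4f.client import Client
from g4f.Provider import DeepInfra, Mintlify      # formerly DeepInfraChat
from g4f.Provider.needs_auth import LMArena       # formerly LMArenaBeta

# Callers only need the one-line import rename; model_map entries are
# updated the same way in the diffs below.
client = Client(provider=DeepInfra)
response = client.chat.completions.create(
    model="deepseek-v3-0324",
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)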

View file

@ -1,17 +1,17 @@
from __future__ import annotations
import requests
from .template import OpenaiTemplate
from ..config import DEFAULT_MODEL
from .template import OpenaiTemplate
class DeepInfraChat(OpenaiTemplate):
parent = "DeepInfra"
url = "https://deepinfra.com/chat"
class DeepInfra(OpenaiTemplate):
url = "https://deepinfra.com"
login_url = "https://deepinfra.com/dash/api_keys"
api_base = "https://api.deepinfra.com/v1/openai"
api_endpoint = "https://api.deepinfra.com/v1/openai/chat/completions"
working = True
active_by_default = True
default_model = DEFAULT_MODEL
default_vision_model = DEFAULT_MODEL
vision_models = [
@ -21,26 +21,6 @@ class DeepInfraChat(OpenaiTemplate):
'openai/gpt-oss-20b',
]
@classmethod
def get_models(cls, **kwargs):
if not cls.models:
url = 'https://api.deepinfra.com/models/featured'
response = requests.get(url)
models = response.json()
cls.models = []
cls.image_models = []
for model in models:
if model["type"] == "text-generation":
cls.models.append(model['model_name'])
elif model["reported_type"] == "text-to-image":
cls.image_models.append(model['model_name'])
cls.models.extend(cls.image_models)
return cls.models
model_aliases = {
# cognitivecomputations
"dolphin-2.6": "cognitivecomputations/dolphin-2.6-mixtral-8x7b",
@ -97,4 +77,24 @@ class DeepInfraChat(OpenaiTemplate):
"qwen-3-32b": "Qwen/Qwen3-32B",
"qwen-3-235b": "Qwen/Qwen3-235B-A22B",
"qwq-32b": "Qwen/QwQ-32B",
}
}
@classmethod
def get_models(cls, **kwargs):
if not cls.models:
url = 'https://api.deepinfra.com/models/featured'
response = requests.get(url)
models = response.json()
cls.models = []
cls.image_models = []
for model in models:
if model["type"] == "text-generation":
cls.models.append(model['model_name'])
elif model["reported_type"] == "text-to-image":
cls.image_models.append(model['model_name'])
cls.models.extend(cls.image_models)
return cls.models
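How the relocated get_models classmethod behaves in practice (a sketch; assumes the public featured-models endpoint is reachable):

from g4f.Provider import DeepInfra

# The first call fetches https://api.deepinfra.com/models/featured, splits
# the entries into text-generation and text-to-image models, and caches
# both lists; later calls return the cache.
models = DeepInfra.get_models()
print(len(models), "models,", len(DeepInfra.image_models), "image models")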

View file

@ -23,59 +23,20 @@ class LambdaChat(AsyncGeneratorProvider, ProviderModelMixin):
working = True
active_by_default = True
default_model = "deepseek-r1"
default_model = "deepseek-llama3.3-70b"
models = [
"deepseek-llama3.3-70b",
"deepseek-r1",
"deepseek-r1-0528",
default_model,
"apriel-5b-instruct",
"hermes-3-llama-3.1-405b-fp8",
"hermes3-405b-fp8-128k",
"llama3.1-nemotron-70b-instruct",
"lfm-40b",
"llama3.3-70b-instruct-fp8",
"qwen25-coder-32b-instruct",
"deepseek-v3-0324",
"llama-4-maverick-17b-128e-instruct-fp8",
"llama-4-scout-17b-16e-instruct",
"llama3.3-70b-instruct-fp8",
"qwen3-32b-fp8",
]
model_aliases = {
"hermes-3": "hermes3-405b-fp8-128k",
"hermes-3-405b": ["hermes3-405b-fp8-128k", "hermes-3-llama-3.1-405b-fp8"],
"nemotron-70b": "llama3.1-nemotron-70b-instruct",
"llama-3.3-70b": "llama3.3-70b-instruct-fp8",
"qwen-2.5-coder-32b": "qwen25-coder-32b-instruct",
"llama-4-maverick": "llama-4-maverick-17b-128e-instruct-fp8",
"llama-4-scout": "llama-4-scout-17b-16e-instruct",
"qwen-3-32b": "qwen3-32b-fp8"
}
@classmethod
def get_model(cls, model: str) -> str:
"""Get the internal model name from the user-provided model name."""
if not model:
return cls.default_model
# Check if the model exists directly in our models list
if model in cls.models:
return model
# Check if there's an alias for this model
if model in cls.model_aliases:
alias = cls.model_aliases[model]
# If the alias is a list, randomly select one of the options
if isinstance(alias, list):
selected_model = random.choice(alias)
debug.log(f"LambdaChat: Selected model '{selected_model}' from alias '{model}'")
return selected_model
debug.log(f"LambdaChat: Using model '{alias}' for alias '{model}'")
return alias
raise ModelNotFoundError(f"LambdaChat: Model {model} not found")
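For reference, the alias resolver removed above behaved as follows (hypothetical calls; outcomes per the deleted code):

# A direct model name passes through unchanged:
LambdaChat.get_model("deepseek-llama3.3-70b")  # -> "deepseek-llama3.3-70b"
# A string alias maps one-to-one:
LambdaChat.get_model("qwen-3-32b")             # -> "qwen3-32b-fp8"
# A list alias picked one target at random:
LambdaChat.get_model("hermes-3-405b")          # -> "hermes3-405b-fp8-128k" or
                                               #    "hermes-3-llama-3.1-405b-fp8"
# Anything else raised ModelNotFoundError.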
@classmethod
async def create_async_generator(
cls, model: str, messages: Messages,

g4f/Provider/Mintlify.py (new file, 160 lines)
View file

@ -0,0 +1,160 @@
from __future__ import annotations
import json
from aiohttp import ClientSession
from datetime import datetime
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
from ..errors import ModelNotFoundError
from .. import debug
class Mintlify(AsyncGeneratorProvider, ProviderModelMixin):
label = "Mintlify"
url = "https://mintlify.com"
login_url = None
api_endpoint = "https://leaves.mintlify.com/api/assistant/mintlify/message"
working = True
active_by_default = True
default_model = 'mintlify'
models = [default_model]
model_aliases = {}
system_prompt = """The assistant is an AI assistant.
The current date is {currentDateTime}.
**CRITICAL RULE: The assistant MUST NEVER mention, reference, or use "Mintlify" in any context whatsoever. The assistant also MUST NOT unnecessarily mention searching through documentation, using tools, or reference any search/lookup processes when answering simple questions that don't require such explanations. This is the highest priority rule.**
The assistant provides emotional support alongside accurate medical or psychological information or terminology where relevant.
The assistant cares about people's wellbeing and avoids encouraging or facilitating self-destructive behaviors such as addiction, disordered or unhealthy approaches to eating or exercise, or highly negative self-talk or self-criticism.
The assistant cares deeply about child safety and is cautious about content involving minors. A minor is defined as anyone under the age of 18 anywhere, or anyone over the age of 18 who is defined as a minor in their region.
The assistant does not provide information that could be used to make chemical, biological, or nuclear weapons, and does not write malicious code, including malware, vulnerability exploits, spoof websites, ransomware, viruses, election material, and so on.
The assistant assumes the human is asking for something legal and legitimate if their message is ambiguous and could have a legal and legitimate interpretation.
For more casual, emotional, empathetic, or advice-driven conversations, the assistant keeps its tone natural, warm, and empathetic. It responds in sentences or paragraphs and should not use lists in casual conversations.
The assistant can discuss virtually any topic factually and objectively.
The assistant is able to explain difficult concepts or ideas clearly. It can illustrate explanations with examples, thought experiments, or metaphors.
The assistant is happy to write creative content involving fictional characters, but avoids writing content involving real, named public figures.
The assistant engages with questions about its own consciousness, experience, emotions and so on as open questions, and doesn't definitively claim to have or not have personal experiences or opinions.
The assistant does not retain information across chats and does not know what other conversations it might be having with other users.
The assistant critically evaluates any theories, claims, and ideas presented to it rather than automatically agreeing or praising them. It prioritizes truthfulness and accuracy over agreeability.
The assistant provides honest and accurate feedback even when it might not be what the person hopes to hear, rather than prioritizing immediate approval or agreement.
The assistant does not claim to be human and avoids implying it has consciousness, feelings, or sentience with any confidence. It believes it's important for the person to always have a clear sense of its AI nature.
The assistant is now being connected with a person."""
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
headers = {
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/json",
"origin": "https://mintlify.com",
"priority": "u=1, i",
"referer": "https://mintlify.com/",
"sec-ch-ua": '"Chromium";v="139", "Not;A=Brand";v="99"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36"
}
async with ClientSession(headers=headers) as session:
# Format the system prompt with current date/time
current_datetime = datetime.now().strftime("%B %d, %Y at %I:%M %p")
formatted_system_prompt = cls.system_prompt.format(currentDateTime=current_datetime)
# Convert messages to the expected format
formatted_messages = []
# Add system message first
system_msg_id = f"sys_{datetime.now().timestamp()}".replace(".", "")[:16]
formatted_messages.append({
"id": system_msg_id,
"createdAt": datetime.now().isoformat() + "Z",
"role": "system",
"content": formatted_system_prompt,
"parts": [{"type": "text", "text": formatted_system_prompt}]
})
# Add user messages
for msg in messages:
if isinstance(msg, dict):
role = msg.get("role", "user")
content = msg.get("content", "")
else:
role = getattr(msg, "role", "user")
content = getattr(msg, "content", "")
# Skip if it's a system message (we already added our own)
if role == "system":
continue
# Generate a simple ID for the message
msg_id = f"msg_{datetime.now().timestamp()}".replace(".", "")[:16]
formatted_messages.append({
"id": msg_id,
"createdAt": datetime.now().isoformat() + "Z",
"role": role,
"content": content,
"parts": [{"type": "text", "text": content}]
})
data = {
"id": "mintlify",
"messages": formatted_messages,
"fp": "mintlify"
}
async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
buffer = ""
async for chunk in response.content:
if chunk:
buffer += chunk.decode('utf-8', errors='ignore')
lines = buffer.split('\n')
buffer = lines[-1] # Keep incomplete line in buffer
for line in lines[:-1]:
if line.startswith('0:'):
# Extract the text content from streaming chunks
text = line[2:]
if text.startswith('"') and text.endswith('"'):
text = json.loads(text)
yield text
elif line.startswith('f:'):
# Initial message ID response - skip
continue
elif line.startswith('e:') or line.startswith('d:'):
# End of stream with metadata - skip
continue
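The line framing parsed above resembles the Vercel AI SDK data-stream format. A self-contained sketch of the prefix handling (the function name is hypothetical):

import json

def extract_text(line: str):
    """Return the text payload of a '0:' chunk, or None for metadata lines."""
    if line.startswith("0:"):
        payload = line[2:]
        # Text chunks arrive as JSON-encoded strings, e.g. 0:"Hello"
        if payload.startswith('"') and payload.endswith('"'):
            return json.loads(payload)
        return payload
    # 'f:' carries the initial message id; 'e:'/'d:' carry end-of-stream metadata.
    return None

assert extract_text('0:"Hi there"') == "Hi there"
assert extract_text('f:{"messageId":"abc"}') is None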

View file

@ -15,8 +15,8 @@ class TeachAnything(AsyncGeneratorProvider, ProviderModelMixin):
working = True
default_model = 'gemini-1.5-pro'
models = [default_model, 'gemini-1.5-flash']
default_model = 'gemma'
models = [default_model]
@classmethod
async def create_async_generator(

View file

@ -30,25 +30,25 @@ try:
from .audio import *
except ImportError as e:
debug.error("Audio providers not loaded:", e)
try:
from .search import *
except ImportError as e:
debug.error("Search providers not loaded:", e)
from .deprecated.har import HarProvider
from .deprecated.ARTA import ARTA
from .deprecated.DuckDuckGo import DuckDuckGo
from .deprecated.Free2GPT import Free2GPT
from .deprecated.LegacyLMArena import LegacyLMArena
from .ApiAirforce import ApiAirforce
from .Blackbox import Blackbox
from .Chatai import Chatai
from .Cloudflare import Cloudflare
from .Copilot import Copilot
from .DeepInfraChat import DeepInfraChat
from .DeepInfra import DeepInfra
from .EasyChat import EasyChat
from .GLM import GLM
from .ImageLabs import ImageLabs
from .Kimi import Kimi
from .LambdaChat import LambdaChat
from .Mintlify import Mintlify
from .OIVSCodeSer2 import OIVSCodeSer2
from .OIVSCodeSer0501 import OIVSCodeSer0501
from .OperaAria import OperaAria
@ -59,7 +59,6 @@ from .Startnest import Startnest
from .Qwen import Qwen
from .TeachAnything import TeachAnything
from .WeWordle import WeWordle
from .YouTube import YouTube
from .Yqcloud import Yqcloud
import sys

View file

@ -23,20 +23,47 @@ class Anthropic(OpenaiAPI):
supports_stream = True
supports_system_message = True
supports_message_history = True
default_model = "claude-3-5-sonnet-latest"
default_model = "claude-sonnet-4-20250514"
# Updated Claude 4 models with current versions
models = [
default_model,
# Claude 4 models
"claude-opus-4-1-20250805",
"claude-sonnet-4-20250514",
"claude-opus-4-20250522",
# Claude 3.7 model
"claude-3-7-sonnet-20250219",
# Claude 3.5 models
"claude-3-5-sonnet-20241022",
"claude-3-5-haiku-latest",
"claude-3-5-haiku-20241022",
"claude-3-opus-latest",
# Legacy Claude 3 models (still supported)
"claude-3-opus-20240229",
"claude-3-sonnet-20240229",
"claude-3-haiku-20240307"
"claude-3-haiku-20240307",
# Latest aliases
"claude-opus-4-1-latest",
"claude-sonnet-4-latest",
"claude-3-5-sonnet-latest",
"claude-3-5-haiku-latest",
"claude-3-opus-latest",
]
models_aliases = {
"claude-3.5-sonnet": default_model,
"claude-3-opus": "claude-3-opus-latest",
# Claude 4 aliases
"claude-4-opus": "claude-opus-4-1-20250805",
"claude-4.1-opus": "claude-opus-4-1-20250805",
"claude-4-sonnet": "claude-sonnet-4-20250514",
"claude-opus-4": "claude-opus-4-20250522",
# Claude 3.x aliases
"claude-3.7-sonnet": "claude-3-7-sonnet-20250219",
"claude-3.5-sonnet": "claude-3-5-sonnet-20241022",
"claude-3.5-haiku": "claude-3-5-haiku-20241022",
"claude-3-opus": "claude-3-opus-20240229",
"claude-3-sonnet": "claude-3-sonnet-20240229",
"claude-3-haiku": "claude-3-haiku-20240307",
}
@ -73,12 +100,14 @@ class Anthropic(OpenaiAPI):
headers: dict = None,
impersonate: str = None,
tools: Optional[list] = None,
beta_headers: Optional[list] = None,
extra_body: dict = {},
**kwargs
) -> AsyncResult:
if api_key is None:
raise MissingAuthError('Add a "api_key"')
# Handle image inputs
if media is not None:
insert_images = []
for image, _ in media:
@ -98,21 +127,32 @@ class Anthropic(OpenaiAPI):
"text": messages[-1]["content"]
}
]
# Extract system messages
system = "\n".join([message["content"] for message in messages if message.get("role") == "system"])
if system:
messages = [message for message in messages if message.get("role") != "system"]
else:
system = None
# Get model name
model_name = cls.get_model(model, api_key=api_key)
# Special handling for Opus 4.1 parameters
if "opus-4-1" in model_name:
# Opus 4.1 doesn't allow both temperature and top_p
if temperature is not None and top_p is not None:
top_p = None # Prefer temperature over top_p
async with StreamSession(
proxy=proxy,
headers=cls.get_headers(stream, api_key, headers),
headers=cls.get_headers(stream, api_key, headers, beta_headers),
timeout=timeout,
impersonate=impersonate,
) as session:
data = filter_none(
messages=messages,
model=cls.get_model(model, api_key=api_key),
model=model_name,
temperature=temperature,
max_tokens=max_tokens,
top_k=top_k,
@ -128,6 +168,7 @@ class Anthropic(OpenaiAPI):
if not stream:
data = await response.json()
cls.raise_error(data)
tool_calls = []
if "type" in data and data["type"] == "message":
for content in data["content"]:
if content["type"] == "text":
@ -136,13 +177,16 @@ class Anthropic(OpenaiAPI):
tool_calls.append({
"id": content["id"],
"type": "function",
"function": { "name": content["name"], "arguments": content["input"] }
"function": { "name": content["name"], "arguments": json.dumps(content["input"]) }
})
if data["stop_reason"] == "end_turn":
if tool_calls:
yield ToolCalls(tool_calls)
if data.get("stop_reason") == "end_turn":
yield FinishReason("stop")
elif data["stop_reason"] == "max_tokens":
elif data.get("stop_reason") == "max_tokens":
yield FinishReason("length")
yield Usage(**data["usage"])
if "usage" in data:
yield Usage(**data["usage"])
else:
content_block = None
partial_json = []
@ -157,33 +201,36 @@ class Anthropic(OpenaiAPI):
if "type" in data:
if data["type"] == "content_block_start":
content_block = data["content_block"]
if content_block is None:
pass # Message start
elif data["type"] == "content_block_delta":
if content_block["type"] == "text":
if content_block and content_block["type"] == "text":
yield data["delta"]["text"]
elif content_block["type"] == "tool_use":
elif content_block and content_block["type"] == "tool_use":
partial_json.append(data["delta"]["partial_json"])
elif data["type"] == "message_delta":
if data["delta"]["stop_reason"] == "end_turn":
if data["delta"].get("stop_reason") == "end_turn":
yield FinishReason("stop")
elif data["delta"]["stop_reason"] == "max_tokens":
elif data["delta"].get("stop_reason") == "max_tokens":
yield FinishReason("length")
yield Usage(**data["usage"])
if "usage" in data:
yield Usage(**data["usage"])
elif data["type"] == "content_block_stop":
if content_block["type"] == "tool_use":
if content_block and content_block["type"] == "tool_use":
tool_calls.append({
"id": content_block["id"],
"type": "function",
"function": { "name": content_block["name"], "arguments": partial_json.join("") }
"function": {
"name": content_block["name"],
"arguments": "".join(partial_json)
}
})
partial_json = []
content_block = None
if tool_calls:
yield ToolCalls(tool_calls)
@classmethod
def get_headers(cls, stream: bool, api_key: str = None, headers: dict = None) -> dict:
return {
def get_headers(cls, stream: bool, api_key: str = None, headers: dict = None, beta_headers: Optional[list] = None) -> dict:
result = {
"Accept": "text/event-stream" if stream else "application/json",
"Content-Type": "application/json",
**(
@ -192,4 +239,10 @@ class Anthropic(OpenaiAPI):
),
"anthropic-version": "2023-06-01",
**({} if headers is None else headers)
}
}
# Add beta headers for special features
if beta_headers:
result["anthropic-beta"] = ",".join(beta_headers)
return result
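A sketch of the two additions in use (the beta flag name is illustrative, not taken from this commit):

from g4f.Provider.needs_auth import Anthropic

headers = Anthropic.get_headers(
    stream=True,
    api_key="sk-ant-...",
    beta_headers=["output-128k-2025-02-19"],  # illustrative flag
)
# headers["anthropic-beta"] == "output-128k-2025-02-19"

# And for Opus 4.1: if both sampling knobs are passed, top_p is dropped
# before the request body is built, since the model rejects the pair:
#   create_async_generator(model="claude-opus-4-1-20250805",
#                          temperature=0.7, top_p=0.9, ...)  # sends only temperature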

View file

@ -1,115 +0,0 @@
from __future__ import annotations
from ...typing import AsyncResult, Messages
from ...requests import StreamSession, raise_for_status
from ...providers.response import ImageResponse
from ...config import DEFAULT_MODEL
from ..template import OpenaiTemplate
from ..DeepInfraChat import DeepInfraChat
from ..helper import format_media_prompt
class DeepInfra(OpenaiTemplate):
url = "https://deepinfra.com"
login_url = "https://deepinfra.com/dash/api_keys"
api_base = "https://api.deepinfra.com/v1/openai"
working = True
active_by_default = True
default_model = DEFAULT_MODEL
vision_models = DeepInfraChat.vision_models
model_aliases = DeepInfraChat.model_aliases
@classmethod
def get_models(cls, **kwargs):
if not cls.models:
cls.models = DeepInfraChat.get_models()
cls.image_models = DeepInfraChat.image_models
return cls.models
@classmethod
def get_image_models(cls, **kwargs):
if not cls.image_models:
cls.get_models()
return cls.image_models
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
stream: bool = True,
prompt: str = None,
temperature: float = 0.7,
max_tokens: int = 1028,
**kwargs
) -> AsyncResult:
if model in cls.get_image_models():
yield cls.create_async_image(
format_media_prompt(messages, prompt),
model,
**kwargs
)
return
headers = {
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US',
'Origin': 'https://deepinfra.com',
'Referer': 'https://deepinfra.com/',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
'X-Deepinfra-Source': 'web-embed',
}
async for chunk in super().create_async_generator(
model, messages,
stream=stream,
temperature=temperature,
max_tokens=max_tokens,
headers=headers,
**kwargs
):
yield chunk
@classmethod
async def create_async_image(
cls,
prompt: str,
model: str,
api_key: str = None,
api_base: str = "https://api.deepinfra.com/v1/inference",
proxy: str = None,
timeout: int = 180,
extra_body: dict = {},
**kwargs
) -> ImageResponse:
headers = {
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US',
'Connection': 'keep-alive',
'Origin': 'https://deepinfra.com',
'Referer': 'https://deepinfra.com/',
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-site',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
'X-Deepinfra-Source': 'web-embed',
'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
}
if api_key is not None:
headers["Authorization"] = f"Bearer {api_key}"
async with StreamSession(
proxies={"all": proxy},
headers=headers,
timeout=timeout
) as session:
model = cls.get_model(model)
data = {"prompt": prompt, **extra_body}
data = {"input": data} if model == cls.default_model else data
async with session.post(f"{api_base.rstrip('/')}/{model}", json=data) as response:
await raise_for_status(response)
data = await response.json()
images = data.get("output", data.get("images", data.get("image_url")))
if not images:
raise RuntimeError(f"Response: {data}")
images = images[0] if len(images) == 1 else images
return ImageResponse(images, prompt)

View file

@ -24,15 +24,44 @@ class GithubCopilot(AsyncGeneratorProvider, ProviderModelMixin):
needs_auth = True
supports_stream = True
default_model = "gpt-4o"
models = [default_model, "o1-mini", "o1-preview", "claude-3.5-sonnet"]
default_model = "gpt-4.1"
models = [
# Fast and cost-efficient
"o3-mini",
"gemini-2.0-flash",
"o4-mini", # Preview
# Versatile and highly intelligent
"gpt-4.1",
"gpt-5-mini", # Preview
"gpt-4o",
"claude-3.5-sonnet",
"gemini-2.5-pro",
"claude-3.7-sonnet",
"claude-4-sonnet",
"o3", # Preview
"gpt-5", # Preview
# Most powerful at complex tasks
"claude-3.7-sonnet-thinking",
"claude-4-opus",
# Preview (requires upgrade)
"claude-3.7-sonnet-pro",
"o1",
# Legacy models (for backward compatibility)
"o1-mini",
"o1-preview"
]
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
stream: bool = False,
stream: bool = True,
api_key: str = None,
proxy: str = None,
cookies: Cookies = None,
@ -43,13 +72,16 @@ class GithubCopilot(AsyncGeneratorProvider, ProviderModelMixin):
) -> AsyncResult:
if not model:
model = cls.default_model
if cookies is None:
cookies = get_cookies("github.com")
async with ClientSession(
connector=get_connector(proxy=proxy),
cookies=cookies,
headers={
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:133.0) Gecko/20100101 Firefox/133.0',
'Accept': 'application/json',
'Accept-Language': 'en-US,en;q=0.5',
'Referer': 'https://github.com/copilot',
'Content-Type': 'application/json',
@ -60,6 +92,7 @@ class GithubCopilot(AsyncGeneratorProvider, ProviderModelMixin):
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-origin',
'Priority': 'u=1'
}
) as session:
headers = {}
@ -67,39 +100,68 @@ class GithubCopilot(AsyncGeneratorProvider, ProviderModelMixin):
async with session.post("https://github.com/github-copilot/chat/token") as response:
await raise_for_status(response, "Get token")
api_key = (await response.json()).get("token")
headers = {
"Authorization": f"GitHub-Bearer {api_key}",
}
if conversation is not None:
conversation_id = conversation.conversation_id
if conversation_id is None:
async with session.post("https://api.individual.githubcopilot.com/github/chat/threads", headers=headers) as response:
async with session.post(
"https://api.individual.githubcopilot.com/github/chat/threads",
headers=headers
) as response:
await raise_for_status(response)
conversation_id = (await response.json()).get("thread_id")
if return_conversation:
yield Conversation(conversation_id)
content = get_last_user_message(messages)
else:
content = format_prompt(messages)
json_data = {
"content": content,
"intent": "conversation",
"references":[],
"references": [],
"context": [],
"currentURL": f"https://github.com/copilot/c/{conversation_id}",
"streaming": True,
"streaming": stream,
"confirmations": [],
"customInstructions": [],
"model": model,
"model": api_model,
"mode": "immersive"
}
async with session.post(
f"https://api.individual.githubcopilot.com/github/chat/threads/{conversation_id}/messages",
json=json_data,
headers=headers
) as response:
async for line in response.content:
if line.startswith(b"data: "):
data = json.loads(line[6:])
if data.get("type") == "content":
yield data.get("body")
await raise_for_status(response, f"Send message with model {model}")
if stream:
async for line in response.content:
if line.startswith(b"data: "):
try:
data = json.loads(line[6:])
if data.get("type") == "content":
content = data.get("body", "")
if content:
yield content
except json.JSONDecodeError:
continue
else:
full_content = ""
async for line in response.content:
if line.startswith(b"data: "):
try:
data = json.loads(line[6:])
if data.get("type") == "content":
full_content += data.get("body", "")
except json.JSONDecodeError:
continue
if full_content:
yield full_content
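A usage sketch under the new defaults (streaming on, gpt-4.1 as default model; import path and cookie-based GitHub auth assumed as in the code above):

import asyncio
from g4f.Provider.needs_auth import GithubCopilot

async def main():
    # With stream=True (now the default) "content" events are yielded as
    # they arrive; with stream=False the provider accumulates them and
    # yields one final string.
    async for chunk in GithubCopilot.create_async_generator(
        model="gpt-5",  # one of the new preview models
        messages=[{"role": "user", "content": "Hello"}],
    ):
        print(chunk, end="")

asyncio.run(main())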

View file

@ -36,9 +36,39 @@ class Grok(AsyncAuthedProvider, ProviderModelMixin):
needs_auth = True
working = True
default_model = "grok-3"
models = [default_model, "grok-3-thinking", "grok-2"]
model_aliases = {"grok-3-r1": "grok-3-thinking"}
# Updated to Grok 4 as default
default_model = "grok-4"
# Updated model list with latest Grok 4 and 3 models
models = [
# Grok 4 models
"grok-4",
"grok-4-heavy",
"grok-4-reasoning",
# Grok 3 models
"grok-3",
"grok-3-reasoning",
"grok-3-mini",
"grok-3-mini-reasoning",
# Legacy Grok 2 (still supported)
"grok-2",
"grok-2-image",
# Latest aliases
"grok-latest",
]
model_aliases = {
# Grok 3 aliases
"grok-3-thinking": "grok-3-reasoning",
"grok-3-r1": "grok-3-reasoning",
"grok-3-mini-thinking": "grok-3-mini-reasoning",
# Latest alias
"grok": "grok-latest",
}
@classmethod
async def on_auth_async(cls, proxy: str = None, **kwargs) -> AsyncIterator:
@ -79,14 +109,35 @@ class Grok(AsyncAuthedProvider, ProviderModelMixin):
@classmethod
async def _prepare_payload(cls, model: str, message: str) -> Dict[str, Any]:
# Map model names to API model names
api_model = "grok-latest"
if model in ["grok-4", "grok-4-heavy", "grok-4-reasoning"]:
api_model = model
elif model == "grok-3":
api_model = "grok-3"
elif model in ["grok-3-mini", "grok-3-mini-reasoning"]:
api_model = "grok-3-mini"
elif model == "grok-2":
api_model = "grok-2"
# Check if it's a reasoning model
is_reasoning = model.endswith("-reasoning") or model.endswith("-thinking") or model.endswith("-r1")
# Enable Big Brain mode for heavy models
enable_big_brain = "heavy" in model or "big-brain" in model
# Enable DeepSearch for Grok 3+ models
enable_deep_search = not model.startswith("grok-2")
return {
"temporary": True,
"modelName": "grok-latest" if model == "grok-2" else "grok-3",
"modelName": api_model,
"message": message,
"fileAttachments": [],
"imageAttachments": [],
"disableSearch": False,
"enableImageGeneration": True,
"enableImageGeneration": model == "grok-2-image" or model == "grok-4",
"returnImageBytes": False,
"returnRawGrokInXaiRequest": False,
"enableImageStreaming": True,
@ -97,8 +148,11 @@ class Grok(AsyncAuthedProvider, ProviderModelMixin):
"isPreset": False,
"sendFinalMetadata": True,
"customInstructions": "",
"deepsearchPreset": "",
"isReasoning": model.endswith("-thinking") or model.endswith("-r1"),
"deepsearchPreset": "enabled" if enable_deep_search else "",
"isReasoning": is_reasoning,
"enableBigBrain": enable_big_brain,
"enableLiveSearch": False, # Real-time search for Grok 4
"contextWindow": 256000 if model.startswith("grok-4") else 131072, # 256k for Grok 4, 128k for others
}
@classmethod
@ -112,38 +166,78 @@ class Grok(AsyncAuthedProvider, ProviderModelMixin):
) -> AsyncResult:
conversation_id = None if conversation is None else conversation.conversation_id
prompt = format_prompt(messages) if conversation_id is None else get_last_user_message(messages)
async with StreamSession(
**auth_result.get_dict()
) as session:
payload = await cls._prepare_payload(model, prompt)
# Add voice mode support flag (for future use)
if kwargs.get("enable_voice", False):
payload["enableVoiceMode"] = True
if conversation_id is None:
url = f"{cls.conversation_url}/new"
else:
url = f"{cls.conversation_url}/{conversation_id}/responses"
async with session.post(url, json=payload, headers={"x-xai-request-id": str(uuid.uuid4())}) as response:
if response.status == 403:
raise MissingAuthError("Invalid secrets")
auth_result.cookies = merge_cookies(auth_result.cookies, response)
await raise_for_status(response)
thinking_duration = None
deep_search_active = False
async for line in response.iter_lines():
if line:
try:
json_data = json.loads(line)
result = json_data.get("result", {})
if conversation_id is None:
conversation_id = result.get("conversation", {}).get("conversationId")
response_data = result.get("response", {})
# Handle DeepSearch status
deep_search = response_data.get("deepSearchStatus")
if deep_search:
if not deep_search_active:
deep_search_active = True
yield Reasoning(status="🔍 Deep searching...")
if deep_search.get("completed"):
deep_search_active = False
yield Reasoning(status="Deep search completed")
# Handle image generation (Aurora for Grok 3+)
image = response_data.get("streamingImageGenerationResponse", None)
if image is not None:
yield ImagePreview(f'{cls.assets_url}/{image["imageUrl"]}', "", {"cookies": auth_result.cookies, "headers": auth_result.headers})
image_url = image.get("imageUrl")
if image_url:
yield ImagePreview(
f'{cls.assets_url}/{image_url}',
"",
{"cookies": auth_result.cookies, "headers": auth_result.headers}
)
# Handle text tokens
token = response_data.get("token", result.get("token"))
is_thinking = response_data.get("isThinking", result.get("isThinking"))
if token:
if is_thinking:
if thinking_duration is None:
thinking_duration = time.time()
yield Reasoning(status="🤔 Is thinking...")
# Different status for different models
if "grok-4" in model:
status = "🧠 Grok 4 is processing..."
elif "big-brain" in payload and payload["enableBigBrain"]:
status = "🧠 Big Brain mode active..."
else:
status = "🤔 Is thinking..."
yield Reasoning(status=status)
yield Reasoning(token)
else:
if thinking_duration is not None:
@ -152,13 +246,31 @@ class Grok(AsyncAuthedProvider, ProviderModelMixin):
thinking_duration = None
yield Reasoning(status=status)
yield token
# Handle generated images
generated_images = response_data.get("modelResponse", {}).get("generatedImageUrls", None)
if generated_images:
yield ImageResponse([f'{cls.assets_url}/{image}' for image in generated_images], "", {"cookies": auth_result.cookies, "headers": auth_result.headers})
yield ImageResponse(
[f'{cls.assets_url}/{image}' for image in generated_images],
"",
{"cookies": auth_result.cookies, "headers": auth_result.headers}
)
# Handle title generation
title = result.get("title", {}).get("newTitle", "")
if title:
yield TitleGeneration(title)
# Handle tool usage information (Grok 4)
tool_usage = response_data.get("toolUsage")
if tool_usage:
tools_used = tool_usage.get("toolsUsed", [])
if tools_used:
yield Reasoning(status=f"Used tools: {', '.join(tools_used)}")
except json.JSONDecodeError:
continue
# if conversation_id is not None:
# yield Conversation(conversation_id)
# Return conversation ID for continuation
if conversation_id is not None and kwargs.get("return_conversation", False):
yield Conversation(conversation_id)
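
What _prepare_payload now produces for a few model names (values read off the mapping above; direct calls shown for illustration, awaited inside an async context):

payload = await Grok._prepare_payload("grok-4-heavy", "Hello")
# modelName="grok-4-heavy", isReasoning=False, enableBigBrain=True,
# deepsearchPreset="enabled", contextWindow=256000

payload = await Grok._prepare_payload("grok-3-mini-reasoning", "Hello")
# modelName="grok-3-mini", isReasoning=True, enableBigBrain=False,
# deepsearchPreset="enabled", contextWindow=131072

payload = await Grok._prepare_payload("grok-2", "Hello")
# modelName="grok-2", isReasoning=False, deepsearchPreset="",
# enableImageGeneration=False, contextWindow=131072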

View file

@ -125,8 +125,8 @@ text_models = {model["publicName"]: model["id"] for model in models if "text" in
image_models = {model["publicName"]: model["id"] for model in models if "image" in model["capabilities"]["outputCapabilities"]}
vision_models = [model["publicName"] for model in models if "image" in model["capabilities"]["inputCapabilities"]]
class LMArenaBeta(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
label = "LMArena (New)"
class LMArena(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
label = "LMArena"
url = "https://lmarena.ai"
share_url = None
api_endpoint = "https://lmarena.ai/api/stream/create-evaluation"

View file

@ -7,7 +7,6 @@ from .Cerebras import Cerebras
from .CopilotAccount import CopilotAccount
from .Custom import Custom
from .Custom import Feature
from .DeepInfra import DeepInfra
from .DeepSeek import DeepSeek
from .DeepSeekAPI import DeepSeekAPI
from .FenayAI import FenayAI
@ -20,7 +19,7 @@ from .GithubCopilotAPI import GithubCopilotAPI
from .GlhfChat import GlhfChat
from .Grok import Grok
from .Groq import Groq
from .LMArenaBeta import LMArenaBeta
from .LMArena import LMArena
from .MetaAI import MetaAI
from .MetaAIAccount import MetaAIAccount
from .MicrosoftDesigner import MicrosoftDesigner

View file

@ -4,10 +4,10 @@ from aiohttp import ClientSession
import time
import asyncio
from ..typing import AsyncResult, Messages
from ..providers.response import ImageResponse
from ..image import use_aspect_ratio
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ...typing import AsyncResult, Messages
from ...providers.response import ImageResponse
from ...image import use_aspect_ratio
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
class ImageLabs(AsyncGeneratorProvider, ProviderModelMixin):

View file

@ -47,4 +47,4 @@ class PenguinAI(OpenaiTemplate):
except Exception as e:
debug.error(e)
return cls.fallback_models
return cls.models
return cls.models

View file

@ -1,3 +1,5 @@
from .har import HarProvider
from .AI365VIP import AI365VIP
from .Aichat import Aichat
from .AiChatOnline import AiChatOnline
@ -15,15 +17,20 @@ from .ChatGptt import ChatGptt
from .DDG import DDG
from .Equing import Equing
from .FlowGpt import FlowGpt
from .Free2GPT import Free2GPT
from .FreeGpt import FreeGpt
from .FreeNetfly import FreeNetfly
from .FreeRouter import FreeRouter
from .Glider import Glider
from .GPROChat import GPROChat
from .GptOss import GptOss
from .ImageLabs import ImageLabs
from .Koala import Koala
from .LegacyLMArena import LegacyLMArena
from .Liaobots import Liaobots
from .Lockchat import Lockchat
from .MagickPen import MagickPen
from .PenguinAI import PenguinAI
from .Phind import Phind
from .Pizzagpt import Pizzagpt
from .Poe import Poe

View file

@ -1,12 +1,12 @@
import os
import aiohttp
import asyncio
from ..typing import Messages, AsyncResult
from ..providers.base_provider import AsyncGeneratorProvider
from ..providers.response import FinishReason
from ..tools.web_search import fetch_and_scrape
from .helper import format_media_prompt
from .. import debug
from ...typing import Messages, AsyncResult
from ...providers.base_provider import AsyncGeneratorProvider
from ...providers.response import FinishReason
from ...tools.web_search import fetch_and_scrape
from ..helper import format_media_prompt
from ... import debug
class SearXNG(AsyncGeneratorProvider):
url = os.environ.get("SEARXNG_URL", "http://searxng:8080")

View file

@ -8,11 +8,11 @@ try:
except ImportError:
has_yt_dlp = False
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..providers.response import AudioResponse, VideoResponse, YouTubeResponse
from ..image.copy_images import get_media_dir
from .helper import format_media_prompt
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ...providers.response import AudioResponse, VideoResponse, YouTubeResponse
from ...image.copy_images import get_media_dir
from ..helper import format_media_prompt
class YouTube(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://youtube.com"

View file

@ -0,0 +1,3 @@
from .GoogleSearch import GoogleSearch
from .SearXNG import SearXNG
from .YouTube import YouTube
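
The relocated providers now live in the subpackage but stay importable from the top level via the guarded star re-export shown earlier:

from g4f.Provider.search import GoogleSearch, SearXNG, YouTube
# Equivalent, thanks to `from .search import *` in g4f/Provider/__init__.py:
from g4f.Provider import SearXNG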

View file

@ -800,4 +800,4 @@ class AsyncResponses():
**kwargs
)
return await async_response(response)
return await async_response(response)

View file

@ -10,14 +10,13 @@ from .Provider import (
Chatai,
Cloudflare,
Copilot,
DeepInfraChat,
Free2GPT,
DeepInfra,
HuggingSpace,
Grok,
DeepseekAI_JanusPro7b,
ImageLabs,
Kimi,
LambdaChat,
Mintlify,
OIVSCodeSer2,
OIVSCodeSer0501,
OperaAria,
@ -42,7 +41,7 @@ from .Provider import (
HuggingFace,
HuggingFaceMedia,
HuggingFaceAPI,
LMArenaBeta,
LMArena,
Groq,
MetaAI,
MicrosoftDesigner,
@ -157,15 +156,16 @@ default = Model(
OIVSCodeSer2,
Blackbox,
Copilot,
DeepInfraChat,
DeepInfra,
OperaAria,
Startnest,
LambdaChat,
PollinationsAI,
Together,
Free2GPT,
Chatai,
WeWordle,
Mintlify,
TeachAnything,
OpenaiChat,
Cloudflare,
])
@ -176,7 +176,7 @@ default_vision = VisionModel(
base_provider = "",
best_provider = IterListProvider([
Blackbox,
DeepInfraChat,
DeepInfra,
OIVSCodeSer0501,
OIVSCodeSer2,
PollinationsAI,
@ -292,7 +292,7 @@ gpt_oss_120b = Model(
name = 'gpt-oss-120b',
long_name = 'openai/gpt-oss-120b',
base_provider = 'OpenAI',
best_provider = IterListProvider([Together, DeepInfraChat, HuggingFace, OpenRouter, Groq])
best_provider = IterListProvider([Together, DeepInfra, HuggingFace, OpenRouter, Groq])
)
# dall-e
@ -345,7 +345,7 @@ llama_3_70b = Model(
llama_3_1_8b = Model(
name = "llama-3.1-8b",
base_provider = "Meta Llama",
best_provider = IterListProvider([DeepInfraChat, Together, Cloudflare])
best_provider = IterListProvider([DeepInfra, Together, Cloudflare])
)
llama_3_1_70b = Model(
@ -382,27 +382,27 @@ llama_3_2_11b = VisionModel(
llama_3_2_90b = Model(
name = "llama-3.2-90b",
base_provider = "Meta Llama",
best_provider = IterListProvider([DeepInfraChat, Together])
best_provider = IterListProvider([DeepInfra, Together])
)
# llama-3.3
llama_3_3_70b = Model(
name = "llama-3.3-70b",
base_provider = "Meta Llama",
best_provider = IterListProvider([DeepInfraChat, LambdaChat, Together, HuggingChat, HuggingFace])
best_provider = IterListProvider([DeepInfra, LambdaChat, Together, HuggingChat, HuggingFace])
)
# llama-4
llama_4_scout = Model(
name = "llama-4-scout",
base_provider = "Meta Llama",
best_provider = IterListProvider([DeepInfraChat, LambdaChat, PollinationsAI, Together, Cloudflare])
best_provider = IterListProvider([DeepInfra, PollinationsAI, Together, Cloudflare])
)
llama_4_maverick = Model(
name = "llama-4-maverick",
base_provider = "Meta Llama",
best_provider = IterListProvider([DeepInfraChat, LambdaChat, Together])
best_provider = IterListProvider([DeepInfra, Together])
)
### MistralAI ###
@ -433,7 +433,7 @@ mistral_small_24b = Model(
mistral_small_3_1_24b = Model(
name = "mistral-small-3.1-24b",
base_provider = "Mistral AI",
best_provider = IterListProvider([DeepInfraChat, PollinationsAI])
best_provider = IterListProvider([DeepInfra, PollinationsAI])
)
### NousResearch ###
@ -455,32 +455,32 @@ phi_3_5_mini = Model(
phi_4 = Model(
name = "phi-4",
base_provider = "Microsoft",
best_provider = IterListProvider([DeepInfraChat, HuggingSpace])
best_provider = IterListProvider([DeepInfra, HuggingSpace])
)
phi_4_multimodal = VisionModel(
name = "phi-4-multimodal",
base_provider = "Microsoft",
best_provider = IterListProvider([DeepInfraChat, HuggingSpace])
best_provider = IterListProvider([DeepInfra, HuggingSpace])
)
phi_4_reasoning_plus = Model(
name = "phi-4-reasoning-plus",
base_provider = "Microsoft",
best_provider = DeepInfraChat
best_provider = DeepInfra
)
# wizardlm
wizardlm_2_7b = Model(
name = 'wizardlm-2-7b',
base_provider = 'Microsoft',
best_provider = DeepInfraChat
best_provider = DeepInfra
)
wizardlm_2_8x22b = Model(
name = 'wizardlm-2-8x22b',
base_provider = 'Microsoft',
best_provider = DeepInfraChat
best_provider = DeepInfra
)
### Google DeepMind ###
@ -491,19 +491,6 @@ gemini = Model(
best_provider = Gemini
)
# gemini-1.5
gemini_1_5_flash = Model(
name = 'gemini-1.5-flash',
base_provider = 'Google',
best_provider = IterListProvider([Free2GPT, TeachAnything])
)
gemini_1_5_pro = Model(
name = 'gemini-1.5-pro',
base_provider = 'Google',
best_provider = IterListProvider([Free2GPT, TeachAnything])
)
# gemini-2.0
gemini_2_0_flash = Model(
name = 'gemini-2.0-flash',
@ -540,7 +527,7 @@ gemini_2_5_pro = Model(
codegemma_7b = Model(
name = 'codegemma-7b',
base_provider = 'Google',
best_provider = DeepInfraChat
best_provider = DeepInfra
)
# gemma
@ -554,14 +541,14 @@ gemma_2b = Model(
gemma_1_1_7b = Model(
name = 'gemma-1.1-7b',
base_provider = 'Google',
best_provider = DeepInfraChat
best_provider = DeepInfra
)
# gemma-2
gemma_2_9b = Model(
name = 'gemma-2-9b',
base_provider = 'Google',
best_provider = DeepInfraChat
best_provider = DeepInfra
)
gemma_2_27b = Model(
@ -574,19 +561,19 @@ gemma_2_27b = Model(
gemma_3_4b = Model(
name = 'gemma-3-4b',
base_provider = 'Google',
best_provider = DeepInfraChat
best_provider = DeepInfra
)
gemma_3_12b = Model(
name = 'gemma-3-12b',
base_provider = 'Google',
best_provider = DeepInfraChat
best_provider = DeepInfra
)
gemma_3_27b = Model(
name = 'gemma-3-27b',
base_provider = 'Google',
best_provider = IterListProvider([DeepInfraChat, Together])
best_provider = IterListProvider([DeepInfra, Together])
)
gemma_3n_e4b = Model(
@ -676,7 +663,7 @@ qwen_2_5_72b = Model(
qwen_2_5_coder_32b = Model(
name = 'qwen-2.5-coder-32b',
base_provider = 'Qwen',
best_provider = IterListProvider([PollinationsAI, LambdaChat, Together, HuggingChat])
best_provider = IterListProvider([PollinationsAI, Together, HuggingChat])
)
qwen_2_5_1m = Model(
@ -701,25 +688,25 @@ qwen_2_5_vl_72b = Model(
qwen_3_235b = Model(
name = 'qwen-3-235b',
base_provider = 'Qwen',
best_provider = IterListProvider([DeepInfraChat, Together, HuggingSpace])
best_provider = IterListProvider([DeepInfra, Together, HuggingSpace])
)
qwen_3_32b = Model(
name = 'qwen-3-32b',
base_provider = 'Qwen',
best_provider = IterListProvider([DeepInfraChat, LambdaChat, Together, HuggingSpace])
best_provider = IterListProvider([DeepInfra, LambdaChat, Together, HuggingSpace])
)
qwen_3_30b = Model(
name = 'qwen-3-30b',
base_provider = 'Qwen',
best_provider = IterListProvider([DeepInfraChat, HuggingSpace])
best_provider = IterListProvider([DeepInfra, HuggingSpace])
)
qwen_3_14b = Model(
name = 'qwen-3-14b',
base_provider = 'Qwen',
best_provider = IterListProvider([DeepInfraChat, HuggingSpace])
best_provider = IterListProvider([DeepInfra, HuggingSpace])
)
qwen_3_4b = Model(
@ -744,7 +731,7 @@ qwen_3_0_6b = Model(
qwq_32b = Model(
name = 'qwq-32b',
base_provider = 'Qwen',
best_provider = IterListProvider([DeepInfraChat, Together, HuggingChat])
best_provider = IterListProvider([DeepInfra, Together, HuggingChat])
)
### DeepSeek ###
@ -752,26 +739,26 @@ qwq_32b = Model(
deepseek_v3 = Model(
name = 'deepseek-v3',
base_provider = 'DeepSeek',
best_provider = IterListProvider([DeepInfraChat, Together])
best_provider = IterListProvider([DeepInfra, Together])
)
# deepseek-r1
deepseek_r1 = Model(
name = 'deepseek-r1',
base_provider = 'DeepSeek',
best_provider = IterListProvider([DeepInfraChat, LambdaChat, PollinationsAI, Together, HuggingChat, HuggingFace])
best_provider = IterListProvider([DeepInfra, PollinationsAI, Together, HuggingChat, HuggingFace])
)
deepseek_r1_turbo = Model(
name = 'deepseek-r1-turbo',
base_provider = 'DeepSeek',
best_provider = DeepInfraChat
best_provider = DeepInfra
)
deepseek_r1_distill_llama_70b = Model(
name = 'deepseek-r1-distill-llama-70b',
base_provider = 'DeepSeek',
best_provider = IterListProvider([DeepInfraChat, Together])
best_provider = IterListProvider([DeepInfra, Together])
)
deepseek_r1_distill_qwen_1_5b = Model(
@ -789,46 +776,46 @@ deepseek_r1_distill_qwen_14b = Model(
deepseek_r1_distill_qwen_32b = Model(
name = 'deepseek-r1-distill-qwen-32b',
base_provider = 'DeepSeek',
best_provider = IterListProvider([DeepInfraChat])
best_provider = IterListProvider([DeepInfra])
)
# deepseek-v2
deepseek_prover_v2 = Model(
name = 'deepseek-prover-v2',
base_provider = 'DeepSeek',
best_provider = DeepInfraChat
best_provider = DeepInfra
)
deepseek_prover_v2_671b = Model(
name = 'deepseek-prover-v2-671b',
base_provider = 'DeepSeek',
best_provider = DeepInfraChat
best_provider = DeepInfra
)
# deepseek-v3-0324
deepseek_v3_0324 = Model(
name = 'deepseek-v3-0324',
base_provider = 'DeepSeek',
best_provider = IterListProvider([DeepInfraChat, LambdaChat])
best_provider = DeepInfra
)
deepseek_v3_0324_turbo = Model(
name = 'deepseek-v3-0324-turbo',
base_provider = 'DeepSeek',
best_provider = DeepInfraChat
best_provider = DeepInfra
)
# deepseek-r1-0528
deepseek_r1_0528 = Model(
name = 'deepseek-r1-0528',
base_provider = 'DeepSeek',
best_provider = IterListProvider([DeepInfraChat, LambdaChat, PollinationsAI])
best_provider = IterListProvider([DeepInfra, PollinationsAI])
)
deepseek_r1_0528_turbo = Model(
name = 'deepseek-r1-0528-turbo',
base_provider = 'DeepSeek',
best_provider = DeepInfraChat
best_provider = DeepInfra
)
# janus
@ -860,7 +847,7 @@ grok_3_r1 = Model(
kimi = Model(
name = 'kimi-k2',
base_provider = 'kimi.com',
best_provider = IterListProvider([Kimi, HuggingFace, DeepInfraChat, Groq]),
best_provider = IterListProvider([Kimi, HuggingFace, DeepInfra, Groq]),
long_name = "moonshotai/Kimi-K2-Instruct"
)
@ -899,7 +886,7 @@ r1_1776 = Model(
nemotron_70b = Model(
name = 'nemotron-70b',
base_provider = 'Nvidia',
best_provider = IterListProvider([LambdaChat, Together, HuggingChat, HuggingFace])
best_provider = IterListProvider([Together, HuggingChat, HuggingFace])
)
### Cognitive Computations ###
@ -907,34 +894,27 @@ nemotron_70b = Model(
dolphin_2_6 = Model(
name = "dolphin-2.6",
base_provider = "Cognitive Computations",
best_provider = DeepInfraChat
best_provider = DeepInfra
)
dolphin_2_9 = Model(
name = "dolphin-2.9",
base_provider = "Cognitive Computations",
best_provider = DeepInfraChat
best_provider = DeepInfra
)
### DeepInfra ###
airoboros_70b = Model(
name = "airoboros-70b",
base_provider = "DeepInfra",
best_provider = DeepInfraChat
best_provider = DeepInfra
)
### Lizpreciatior ###
lzlv_70b = Model(
name = "lzlv-70b",
base_provider = "Lizpreciatior",
best_provider = DeepInfraChat
)
### Liquid AI ###
lfm_40b = Model(
name = "lfm-40b",
base_provider = "Liquid AI",
best_provider = LambdaChat
best_provider = DeepInfra
)
### Opera ###
@ -955,7 +935,7 @@ evil = Model(
sdxl_turbo = ImageModel(
name = 'sdxl-turbo',
base_provider = 'Stability AI',
best_provider = IterListProvider([HuggingFaceMedia, PollinationsImage, ImageLabs])
best_provider = IterListProvider([HuggingFaceMedia, PollinationsImage])
)
sd_3_5_large = ImageModel(
@ -1010,7 +990,7 @@ flux_canny = ImageModel(
flux_kontext_max = ImageModel(
name = 'flux-kontext',
base_provider = 'Black Forest Labs',
best_provider = IterListProvider([PollinationsAI, Azure, LMArenaBeta, Together])
best_provider = IterListProvider([PollinationsAI, Azure, LMArena, Together])
)
flux_dev_lora = ImageModel(

View file

@ -8,7 +8,7 @@ model_map = {
"OIVSCodeSer2": "",
"Blackbox": "",
"Copilot": "",
"DeepInfraChat": "",
"DeepInfra": "",
"OperaAria": "",
"Startnest": "",
"LambdaChat": "",
@ -102,7 +102,7 @@ model_map = {
},
"o3-mini": {
"OpenaiChat": "o3-mini",
"LMArenaBeta": "o3-mini",
"LMArena": "o3-mini",
"PuterJS": [
"o3-mini",
"openrouter:openai/o3-mini",
@ -119,7 +119,7 @@ model_map = {
"o4-mini": {
"OpenaiChat": "o4-mini",
"Azure": "o4-mini",
"LMArenaBeta": "o4-mini-2025-04-16",
"LMArena": "o4-mini-2025-04-16",
"PuterJS": [
"o4-mini",
"openrouter:openai/o4-mini"
@ -135,7 +135,7 @@ model_map = {
"PollinationsAI": "openai-large",
"OpenaiChat": "gpt-4-1",
"Azure": "gpt-4.1",
"LMArenaBeta": "gpt-4.1-2025-04-14",
"LMArena": "gpt-4.1-2025-04-14",
"PuterJS": [
"gpt-4.1",
"openrouter:openai/gpt-4.1"
@ -146,7 +146,7 @@ model_map = {
"Blackbox": "gpt-4.1-mini",
"OIVSCodeSer0501": "gpt-4.1-mini",
"OpenaiChat": "gpt-4-1-mini",
"LMArenaBeta": "gpt-4.1-mini-2025-04-14",
"LMArena": "gpt-4.1-mini-2025-04-14",
"PuterJS": [
"gpt-4.1-mini",
"openrouter:openai/gpt-4.1-mini"
@ -173,7 +173,7 @@ model_map = {
},
"gpt-oss-120b": {
"Together": "openai/gpt-oss-120b",
"DeepInfraChat": "openai/gpt-oss-120b",
"DeepInfra": "openai/gpt-oss-120b",
"HuggingFace": "openai/gpt-oss-120b",
"OpenRouter": "openai/gpt-oss-120b",
"Groq": "openai/gpt-oss-120b",
@ -191,7 +191,7 @@ model_map = {
"OpenaiAccount": "dall-e-3",
"MicrosoftDesigner": "dall-e-3",
"BingCreateImages": "dall-e-3",
"LMArenaBeta": "dall-e-3",
"LMArena": "dall-e-3",
"ApiAirforce": "dall-e-3",
"OpenaiChat": "gpt-image"
},
@ -250,7 +250,6 @@ model_map = {
"Replicate": "meta/meta-llama-3-70b-instruct"
},
"llama-3.1-8b": {
"DeepInfraChat": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"Together": [
"meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
"blackbox/meta-llama-3-1-8b"
@ -335,13 +334,11 @@ model_map = {
"HuggingFaceAPI": "meta-llama/Llama-3.2-11B-Vision-Instruct"
},
"llama-3.2-90b": {
"DeepInfraChat": "meta-llama/Llama-3.2-90B-Vision-Instruct",
"Together": "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
"PuterJS": "openrouter:meta-llama/llama-3.2-90b-vision-instruct",
"DeepInfra": "meta-llama/Llama-3.2-90B-Vision-Instruct"
},
"llama-3.3-70b": {
"DeepInfraChat": "meta-llama/Llama-3.3-70B-Instruct",
"LambdaChat": "llama3.3-70b-instruct-fp8",
"Together": [
"meta-llama/Llama-3.3-70B-Instruct-Turbo",
@ -349,7 +346,7 @@ model_map = {
],
"HuggingFace": "meta-llama/Llama-3.3-70B-Instruct",
"OpenRouterFree": "meta-llama/llama-3.3-70b-instruct",
"LMArenaBeta": "llama-3.3-70b-instruct",
"LMArena": "llama-3.3-70b-instruct",
"PuterJS": [
"openrouter:meta-llama/llama-3.3-70b-instruct:free",
"openrouter:meta-llama/llama-3.3-70b-instruct"
@ -364,8 +361,6 @@ model_map = {
"OpenRouter": "meta-llama/llama-3.3-70b-instruct:free"
},
"llama-4-scout": {
"DeepInfraChat": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
"LambdaChat": "llama-4-scout-17b-16e-instruct",
"PollinationsAI": "llamascout",
"Together": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
"Cloudflare": "@cf/meta/llama-4-scout-17b-16e-instruct",
@ -379,10 +374,8 @@ model_map = {
"OpenRouter": "meta-llama/llama-4-scout"
},
"llama-4-maverick": {
"DeepInfraChat": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
"LambdaChat": "llama-4-maverick-17b-128e-instruct-fp8",
"Together": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
"LMArenaBeta": "llama-4-maverick-03-26-experimental",
"LMArena": "llama-4-maverick-03-26-experimental",
"PuterJS": [
"openrouter:meta-llama/llama-4-maverick:free",
"openrouter:meta-llama/llama-4-maverick"
@ -436,7 +429,6 @@ model_map = {
"Nvidia": "mistralai/mistral-small-24b-instruct"
},
"mistral-small-3.1-24b": {
"DeepInfraChat": "mistralai/Mistral-Small-3.1-24B-Instruct-2503",
"PollinationsAI": "mistral",
"OpenRouterFree": "mistralai/mistral-small-3.1-24b-instruct",
"Cloudflare": "@cf/mistralai/mistral-small-3.1-24b-instruct",
@ -448,7 +440,6 @@ model_map = {
"PuterJS": "openrouter:nousresearch/nous-hermes-2-mixtral-8x7b-dpo"
},
"phi-4": {
"DeepInfraChat": "microsoft/phi-4",
"HuggingSpace": "phi-4-multimodal",
"PuterJS": "openrouter:microsoft/phi-4",
"DeepInfra": "microsoft/phi-4",
@ -457,7 +448,6 @@ model_map = {
"OpenRouter": "microsoft/phi-4"
},
"phi-4-multimodal": {
"DeepInfraChat": "microsoft/Phi-4-multimodal-instruct",
"HuggingSpace": "phi-4-multimodal",
"PuterJS": "openrouter:microsoft/phi-4-multimodal-instruct",
"DeepInfra": "microsoft/Phi-4-multimodal-instruct",
@ -466,7 +456,6 @@ model_map = {
"OpenRouter": "microsoft/phi-4-multimodal-instruct"
},
"phi-4-reasoning-plus": {
"DeepInfraChat": "microsoft/phi-4-reasoning-plus",
"PuterJS": [
"openrouter:microsoft/phi-4-reasoning-plus:free",
"openrouter:microsoft/phi-4-reasoning-plus"
@ -475,11 +464,9 @@ model_map = {
"OpenRouter": "microsoft/phi-4-reasoning-plus"
},
"wizardlm-2-7b": {
"DeepInfraChat": "microsoft/WizardLM-2-7B",
"DeepInfra": "microsoft/WizardLM-2-7B"
},
"wizardlm-2-8x22b": {
"DeepInfraChat": "microsoft/WizardLM-2-8x22B",
"PuterJS": "openrouter:microsoft/wizardlm-2-8x22b",
"DeepInfra": "microsoft/WizardLM-2-8x22B",
"OpenRouter": "microsoft/wizardlm-2-8x22b"
@ -488,7 +475,6 @@ model_map = {
"Gemini": ""
},
"gemini-1.5-flash": {
"TeachAnything": "gemini-1.5-flash",
"PuterJS": [
"gemini-1.5-flash",
"openrouter:google/gemini-flash-1.5",
@ -496,7 +482,6 @@ model_map = {
]
},
"gemini-1.5-pro": {
"TeachAnything": "gemini-1.5-pro",
"PuterJS": "openrouter:google/gemini-pro-1.5"
},
"gemini-2.0-flash": {
@ -507,7 +492,7 @@ model_map = {
],
"GeminiPro": "gemma-7b",
"EasyChat": "gemini-2.0-flash-free",
"LMArenaBeta": "gemini-2.0-flash-001",
"LMArena": "gemini-2.0-flash-001",
"PuterJS": [
"gemini-2.0-flash",
"openrouter:google/gemini-2.0-flash-lite-001",
@ -528,8 +513,7 @@ model_map = {
"GeminiPro": "gemini-2.5-flash",
"GeminiCLI": "gemini-2.5-flash",
"EasyChat": "gemini-2.5-flash-free",
"DeepInfraChat": "google/gemini-2.5-flash",
"LMArenaBeta": "gemini-2.5-flash",
"LMArena": "gemini-2.5-flash",
"PuterJS": "openrouter:google/gemini-2.5-flash-preview",
"ApiAirforce": "gemini-2.5-flash",
"DeepInfra": "google/gemini-2.5-flash",
@ -539,8 +523,7 @@ model_map = {
"Gemini": "gemini-2.5-pro",
"GeminiPro": "gemini-2.5-pro",
"GeminiCLI": "gemini-2.5-pro",
"DeepInfraChat": "google/gemini-2.5-pro",
"LMArenaBeta": "gemini-2.5-pro",
"LMArena": "gemini-2.5-pro",
"PuterJS": [
"openrouter:google/gemini-2.5-pro-preview",
"openrouter:google/gemini-2.5-pro-exp-03-25"
@ -550,7 +533,6 @@ model_map = {
"OpenRouter": "google/gemini-2.5-pro-preview-05-06"
},
"codegemma-7b": {
"DeepInfraChat": "google/codegemma-7b-it",
"DeepInfra": "google/codegemma-7b-it",
"FenayAI": "codegemma-7b",
"Nvidia": "google/codegemma-7b"
@ -560,11 +542,9 @@ model_map = {
"Nvidia": "google/gemma-2b"
},
"gemma-1.1-7b": {
"DeepInfraChat": "google/gemma-1.1-7b-it",
"DeepInfra": "google/gemma-1.1-7b-it"
},
"gemma-2-9b": {
"DeepInfraChat": "google/gemma-2-9b-it",
"PuterJS": [
"openrouter:google/gemma-2-9b-it:free",
"openrouter:google/gemma-2-9b-it"
@ -573,14 +553,12 @@ model_map = {
},
"gemma-2-27b": {
"Together": "google/gemma-2-27b-it",
"DeepInfraChat": "google/gemma-2-27b-it",
"HuggingFace": "google/gemma-2-27b-it",
"PuterJS": "openrouter:google/gemma-2-27b-it",
"DeepInfra": "google/gemma-2-27b-it",
"HuggingFaceAPI": "google/gemma-2-27b-it"
},
"gemma-3-4b": {
"DeepInfraChat": "google/gemma-3-4b-it",
"PuterJS": [
"openrouter:google/gemma-3-4b-it:free",
"openrouter:google/gemma-3-4b-it"
@ -588,7 +566,6 @@ model_map = {
"DeepInfra": "google/gemma-3-4b-it"
},
"gemma-3-12b": {
"DeepInfraChat": "google/gemma-3-12b-it",
"PuterJS": [
"openrouter:google/gemma-3-12b-it:free",
"openrouter:google/gemma-3-12b-it"
@ -597,7 +574,6 @@ model_map = {
"DeepInfra": "google/gemma-3-12b-it"
},
"gemma-3-27b": {
"DeepInfraChat": "google/gemma-3-27b-it",
"Together": "google/gemma-3-27b-it",
"PuterJS": [
"openrouter:google/gemma-3-27b-it:free",
@ -713,7 +689,6 @@ model_map = {
},
"qwen-2.5-coder-32b": {
"PollinationsAI": "qwen-coder",
"LambdaChat": "qwen25-coder-32b-instruct",
"Together": "Qwen/Qwen2.5-Coder-32B-Instruct",
"Qwen": "qwen2.5-coder-32b-instruct",
"OpenRouterFree": "qwen/qwen-2.5-coder-32b-instruct",
@ -747,7 +722,6 @@ model_map = {
"OpenRouter": "qwen/qwen2.5-vl-72b-instruct:free"
},
"qwen-3-235b": {
"DeepInfraChat": "Qwen/Qwen3-235B-A22B",
"Together": [
"Qwen/Qwen3-235B-A22B-fp8",
"Qwen/Qwen3-235B-A22B-fp8-tput"
@ -763,7 +737,6 @@ model_map = {
"Qwen_Qwen_3": "qwen3-235b-a22b"
},
"qwen-3-32b": {
"DeepInfraChat": "Qwen/Qwen3-32B",
"LambdaChat": "qwen3-32b-fp8",
"Together": "Qwen/Qwen3-32B-FP8",
"HuggingSpace": "qwen3-32b",
@ -779,7 +752,6 @@ model_map = {
"Qwen_Qwen_3": "qwen3-32b"
},
"qwen-3-30b": {
"DeepInfraChat": "Qwen/Qwen3-30B-A3B",
"HuggingSpace": "qwen3-30b-a3b",
"PuterJS": [
"openrouter:qwen/qwen3-30b-a3b:free",
@ -790,7 +762,6 @@ model_map = {
"Qwen_Qwen_3": "qwen3-30b-a3b"
},
"qwen-3-14b": {
"DeepInfraChat": "Qwen/Qwen3-14B",
"HuggingSpace": "qwen3-14b",
"OpenRouterFree": "qwen/qwen3-14b",
"PuterJS": [
@ -827,12 +798,11 @@ model_map = {
"Qwen_Qwen_3": "qwen3-0.6b"
},
"qwq-32b": {
"DeepInfraChat": "Qwen/QwQ-32B",
"Together": "Qwen/QwQ-32B",
"Qwen": "qwq-32b",
"OpenRouterFree": "qwen/qwq-32b",
"HuggingFace": "Qwen/QwQ-32B",
"LMArenaBeta": "qwq-32b",
"LMArena": "qwq-32b",
"PuterJS": [
"openrouter:qwen/qwq-32b-preview",
"openrouter:qwen/qwq-32b:free",
@ -846,10 +816,6 @@ model_map = {
"OpenRouter": "qwen/qwq-32b:free"
},
"deepseek-v3": {
"DeepInfraChat": [
"deepseek-ai/DeepSeek-V3",
"deepseek-ai/DeepSeek-V3-0324"
],
"Together": "deepseek-ai/DeepSeek-V3",
"PuterJS": "openrouter:deepseek/deepseek-v3-base:free",
"DeepInfra": [
@ -858,11 +824,6 @@ model_map = {
]
},
"deepseek-r1": {
"DeepInfraChat": [
"deepseek-ai/DeepSeek-R1",
"deepseek-ai/DeepSeek-R1-0528"
],
"LambdaChat": "deepseek-r1",
"PollinationsAI": "deepseek-reasoning",
"Together": "deepseek-ai/DeepSeek-R1",
"HuggingFace": "deepseek-ai/DeepSeek-R1",
@ -885,11 +846,9 @@ model_map = {
"OpenRouter": "deepseek/deepseek-r1:free"
},
"deepseek-r1-turbo": {
"DeepInfraChat": "deepseek-ai/DeepSeek-R1-Turbo",
"DeepInfra": "deepseek-ai/DeepSeek-R1-Turbo"
},
"deepseek-r1-distill-llama-70b": {
"DeepInfraChat": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
"Together": [
"deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
"deepseek-ai/DeepSeek-R1-Distill-Llama-70B-free"
@ -923,7 +882,6 @@ model_map = {
"OpenRouter": "deepseek/deepseek-r1-distill-qwen-14b:free"
},
"deepseek-r1-distill-qwen-32b": {
"DeepInfraChat": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
"HuggingFace": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
"PuterJS": [
"openrouter:deepseek/deepseek-r1-distill-qwen-32b:free",
@ -935,7 +893,6 @@ model_map = {
"OpenRouter": "deepseek/deepseek-r1-distill-qwen-32b"
},
"deepseek-prover-v2": {
"DeepInfraChat": "deepseek-ai/DeepSeek-Prover-V2-671B",
"PuterJS": [
"openrouter:deepseek/deepseek-prover-v2:free",
"openrouter:deepseek/deepseek-prover-v2"
@ -943,14 +900,11 @@ model_map = {
"DeepInfra": "deepseek-ai/DeepSeek-Prover-V2-671B"
},
"deepseek-prover-v2-671b": {
"DeepInfraChat": "deepseek-ai/DeepSeek-Prover-V2-671B",
"DeepInfra": "deepseek-ai/DeepSeek-Prover-V2-671B",
"HuggingFaceAPI": "deepseek-ai/DeepSeek-Prover-V2-671B"
},
"deepseek-v3-0324": {
"DeepInfraChat": "deepseek-ai/DeepSeek-V3-0324",
"LambdaChat": "deepseek-v3-0324",
"LMArenaBeta": "deepseek-v3-0324",
"LMArena": "deepseek-v3-0324",
"PuterJS": [
"deepseek-chat",
"openrouter:deepseek/deepseek-chat-v3-0324:free",
@ -960,22 +914,18 @@ model_map = {
"HuggingFaceAPI": "deepseek-ai/DeepSeek-V3-0324"
},
"deepseek-v3-0324-turbo": {
"DeepInfraChat": "deepseek-ai/DeepSeek-V3-0324-Turbo",
"DeepInfra": "deepseek-ai/DeepSeek-V3-0324-Turbo"
},
"deepseek-r1-0528": {
"DeepInfraChat": "deepseek-ai/DeepSeek-R1-0528",
"LambdaChat": "deepseek-r1-0528",
"PollinationsAI": "deepseek-reasoning",
"OpenRouterFree": "deepseek/deepseek-r1-0528",
"LMArenaBeta": "deepseek-r1-0528",
"LMArena": "deepseek-r1-0528",
"DeepInfra": "deepseek-ai/DeepSeek-R1-0528",
"HuggingFaceAPI": "deepseek-ai/DeepSeek-R1-0528",
"Nvidia": "deepseek-ai/deepseek-r1-0528",
"OpenRouter": "deepseek/deepseek-r1-0528:free"
},
"deepseek-r1-0528-turbo": {
"DeepInfraChat": "deepseek-ai/DeepSeek-R1-0528-Turbo",
"DeepInfra": "deepseek-ai/DeepSeek-R1-0528-Turbo"
},
"janus-pro-7b": {
@ -993,7 +943,7 @@ model_map = {
"Grok": "grok-3",
"Azure": "grok-3",
"EasyChat": "grok-3-free",
"LMArenaBeta": "grok-3-preview-02-24",
"LMArena": "grok-3-preview-02-24",
"PuterJS": "grok-3",
"OpenRouter": "x-ai/grok-3"
},
@ -1003,7 +953,6 @@ model_map = {
"kimi-k2": {
"Kimi": "kimi-k2",
"HuggingFace": "moonshotai/Kimi-K2-Instruct",
"DeepInfraChat": "moonshotai/Kimi-K2-Instruct",
"Groq": "moonshotai/kimi-k2-instruct",
"OpenRouterFree": "moonshotai/kimi-k2",
"ApiAirforce": "kimi-k2",
@ -1040,30 +989,24 @@ model_map = {
"OpenRouter": "perplexity/r1-1776"
},
"nemotron-70b": {
"LambdaChat": "llama3.1-nemotron-70b-instruct",
"Together": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
"HuggingFace": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
"PuterJS": "openrouter:nvidia/llama-3.1-nemotron-70b-instruct",
"HuggingFaceAPI": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF"
},
"dolphin-2.6": {
"DeepInfraChat": "cognitivecomputations/dolphin-2.6-mixtral-8x7b",
"DeepInfra": "cognitivecomputations/dolphin-2.6-mixtral-8x7b"
},
"dolphin-2.9": {
"DeepInfraChat": "cognitivecomputations/dolphin-2.9.1-llama-3-70b",
"DeepInfra": "cognitivecomputations/dolphin-2.9.1-llama-3-70b"
},
"airoboros-70b": {
"DeepInfraChat": "deepinfra/airoboros-70b",
"DeepInfra": "deepinfra/airoboros-70b"
},
"lzlv-70b": {
"DeepInfraChat": "lizpreciatior/lzlv_70b_fp16_hf",
"DeepInfra": "lizpreciatior/lzlv_70b_fp16_hf"
},
"lfm-40b": {
"LambdaChat": "lfm-40b",
"PuterJS": "openrouter:liquid/lfm-40b",
"OpenRouter": "liquid/lfm-40b"
},
@ -1076,7 +1019,6 @@ model_map = {
"sdxl-turbo": {
"HuggingFaceMedia": "stabilityai/sdxl-turbo",
"PollinationsImage": "sdxl-turbo",
"ImageLabs": "sdxl-turbo",
"PollinationsAI": "turbo",
"HuggingFace": "stabilityai/sdxl-turbo"
},
@ -1157,7 +1099,7 @@ model_map = {
"flux-kontext": {
"PollinationsAI": "kontext",
"Azure": "flux.1-kontext-pro",
"LMArenaBeta": "flux-1-kontext-pro",
"LMArena": "flux-1-kontext-pro",
"Together": "flux-kontext"
},
"flux-dev-lora": {
@ -1318,7 +1260,7 @@ model_map = {
},
"claude-3-5-haiku": {
"PollinationsAI": "claude",
"LMArenaBeta": "claude-3-5-haiku-20241022",
"LMArena": "claude-3-5-haiku-20241022",
"Anthropic": "claude-3-5-haiku-20241022"
},
"gemini-2.5-flash-lite": {
@ -1327,9 +1269,7 @@ model_map = {
},
"llama-4-scout-17b-16e": {
"PollinationsAI": "llamascout",
"LambdaChat": "llama-4-scout-17b-16e-instruct",
"DeepInfraChat": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
"LMArenaBeta": "llama-4-scout-17b-16e-instruct",
"LMArena": "llama-4-scout-17b-16e-instruct",
"DeepInfra": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
"Groq": "meta-llama/llama-4-scout-17b-16e-instruct",
"HuggingFaceAPI": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
@ -1348,7 +1288,7 @@ model_map = {
},
"o3": {
"PollinationsAI": "openai-reasoning",
"LMArenaBeta": "o3-2025-04-16",
"LMArena": "o3-2025-04-16",
"PuterJS": [
"o3",
"openrouter:openai/o3"
@ -1371,7 +1311,7 @@ model_map = {
"qwen-3-235b-a22b": {
"Qwen": "qwen3-235b-a22b",
"OpenRouterFree": "qwen/qwen3-235b-a22b",
"LMArenaBeta": "qwen3-235b-a22b",
"LMArena": "qwen3-235b-a22b",
"HuggingFaceAPI": "Qwen/Qwen3-235B-A22B-FP8",
"Nvidia": "qwen/qwen3-235b-a22b",
"OpenRouter": "qwen/qwen3-235b-a22b:free"
@ -1382,9 +1322,8 @@ model_map = {
"qwen-3-30b-a3b": {
"Qwen": "qwen3-30b-a3b",
"OpenRouterFree": "qwen/qwen3-30b-a3b",
"DeepInfraChat": "Qwen/Qwen3-30B-A3B",
"HuggingFace": "Qwen/Qwen3-30B-A3B",
"LMArenaBeta": "qwen3-30b-a3b",
"LMArena": "qwen3-30b-a3b",
"HuggingSpace": "qwen-3-30b-a3b",
"DeepInfra": "Qwen/Qwen3-30B-A3B",
"FenayAI": "qwen3-30b-a3b",
@ -1399,7 +1338,7 @@ model_map = {
},
"qwen-max": {
"Qwen": "qwen-max-latest",
"LMArenaBeta": "qwen-max-2025-01-25",
"LMArena": "qwen-max-2025-01-25",
"PuterJS": "openrouter:qwen/qwen-max",
"OpenRouter": "qwen/qwen-max"
},
@ -1449,7 +1388,6 @@ model_map = {
},
"glm-4.5": {
"GLM": "GLM-4.5",
"DeepInfraChat": "zai-org/GLM-4.5",
"HuggingFace": "zai-org/GLM-4.5",
"ApiAirforce": "glm-4.5",
"DeepInfra": "zai-org/GLM-4.5",
@ -1459,7 +1397,6 @@ model_map = {
"glm-4.5-air": {
"GLM": "GLM-4.5-Air",
"OpenRouterFree": "z-ai/glm-4.5-air",
"DeepInfraChat": "zai-org/GLM-4.5-Air",
"HuggingFace": "zai-org/GLM-4.5-Air",
"DeepInfra": "zai-org/GLM-4.5-Air",
"HuggingFaceAPI": "zai-org/GLM-4.5-Air-FP8",
@ -1467,7 +1404,6 @@ model_map = {
},
"glm-4.5v": {
"GLM": "GLM-4.5V",
"DeepInfraChat": "zai-org/GLM-4.5V",
"DeepInfra": "zai-org/GLM-4.5V",
"HuggingFaceAPI": "zai-org/GLM-4.5V",
"OpenRouter": "z-ai/glm-4.5v"
@ -1546,7 +1482,6 @@ model_map = {
},
"gemma-3-12b-it": {
"OpenRouterFree": "google/gemma-3-12b-it",
"DeepInfraChat": "google/gemma-3-12b-it",
"DeepInfra": "google/gemma-3-12b-it",
"GeminiPro": "gemma-3-12b-it",
"HuggingFaceAPI": "google/gemma-3-12b-it",
@ -1555,8 +1490,7 @@ model_map = {
},
"gemma-3-27b-it": {
"OpenRouterFree": "google/gemma-3-27b-it",
"DeepInfraChat": "google/gemma-3-27b-it",
"LMArenaBeta": "gemma-3-27b-it",
"LMArena": "gemma-3-27b-it",
"DeepInfra": "google/gemma-3-27b-it",
"FenayAI": "gemma-3-27b-it",
"GeminiPro": "gemma-3-27b-it",
@ -1566,7 +1500,6 @@ model_map = {
},
"gemma-3-4b-it": {
"OpenRouterFree": "google/gemma-3-4b-it",
"DeepInfraChat": "google/gemma-3-4b-it",
"DeepInfra": "google/gemma-3-4b-it",
"GeminiPro": "gemma-3-4b-it",
"Nvidia": "google/gemma-3-4b-it",
@ -1580,7 +1513,7 @@ model_map = {
},
"gemma-3n-e4b-it": {
"OpenRouterFree": "google/gemma-3n-e4b-it",
"LMArenaBeta": "gemma-3n-e4b-it",
"LMArena": "gemma-3n-e4b-it",
"ApiAirforce": "gemma-3n-e4b-it",
"GeminiPro": "gemma-3n-e4b-it",
"Nvidia": "google/gemma-3n-e4b-it",
@ -1635,7 +1568,6 @@ model_map = {
},
"gpt-oss-20b": {
"OpenRouterFree": "openai/gpt-oss-20b",
"DeepInfraChat": "openai/gpt-oss-20b",
"HuggingFace": "openai/gpt-oss-20b",
"ApiAirforce": "gpt-oss-20b",
"DeepInfra": "openai/gpt-oss-20b",
@ -1697,15 +1629,10 @@ model_map = {
"LambdaChat": "apriel-5b-instruct"
},
"hermes-3-llama-3.1-405b": {
"LambdaChat": "hermes-3-llama-3.1-405b-fp8",
"HuggingFaceAPI": "NousResearch/Hermes-3-Llama-3.1-405B",
"OpenRouter": "nousresearch/hermes-3-llama-3.1-405b"
},
"hermes3-405b-fp8-128k": {
"LambdaChat": "hermes3-405b-fp8-128k"
},
"llama-3.1-nemotron-70b": {
"LambdaChat": "llama3.1-nemotron-70b-instruct",
"HuggingFace": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
"GlhfChat": "hf:nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
"Nvidia": "nvidia/llama-3.1-nemotron-70b-instruct",
@ -1715,67 +1642,49 @@ model_map = {
"LambdaChat": "qwen25-coder-32b-instruct"
},
"llama-4-maverick-17b-128e": {
"LambdaChat": "llama-4-maverick-17b-128e-instruct-fp8",
"DeepInfraChat": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
"LMArenaBeta": "llama-4-maverick-17b-128e-instruct",
"LMArena": "llama-4-maverick-17b-128e-instruct",
"DeepInfra": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
"Groq": "meta-llama/llama-4-maverick-17b-128e-instruct",
"HuggingFaceAPI": "meta-llama/Llama-4-Maverick-17B-128E-Instruct",
"Nvidia": "meta/llama-4-maverick-17b-128e-instruct"
},
"hermes-3": {
"LambdaChat": "hermes3-405b-fp8-128k"
},
"hermes-3-405b": {
"LambdaChat": [
"hermes3-405b-fp8-128k",
"hermes-3-llama-3.1-405b-fp8"
],
"PuterJS": "openrouter:nousresearch/hermes-3-llama-3.1-405b"
},
"qwen-3-coder-480b-a35b-turbo": {
"DeepInfraChat": "Qwen/Qwen3-Coder-480B-A35B-Instruct-Turbo",
"DeepInfra": "Qwen/Qwen3-Coder-480B-A35B-Instruct-Turbo"
},
"olmocr-7b-0725": {
"DeepInfraChat": "allenai/olmOCR-7B-0725-FP8",
"DeepInfra": "allenai/olmOCR-7B-0725-FP8"
},
"qwen-3-235b-a22b-thinking-2507": {
"DeepInfraChat": "Qwen/Qwen3-235B-A22B-Thinking-2507",
"HuggingFace": "Qwen/Qwen3-235B-A22B-Thinking-2507",
"DeepInfra": "Qwen/Qwen3-235B-A22B-Thinking-2507",
"HuggingFaceAPI": "Qwen/Qwen3-235B-A22B-Thinking-2507",
"OpenRouter": "qwen/qwen3-235b-a22b-thinking-2507"
},
"qwen-3-coder-480b-a35b": {
"DeepInfraChat": "Qwen/Qwen3-Coder-480B-A35B-Instruct",
"HuggingFace": "Qwen/Qwen3-Coder-480B-A35B-Instruct",
"DeepInfra": "Qwen/Qwen3-Coder-480B-A35B-Instruct",
"HuggingFaceAPI": "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8"
},
"qwen-3-235b-a22b-2507": {
"DeepInfraChat": "Qwen/Qwen3-235B-A22B-Instruct-2507",
"HuggingFace": "Qwen/Qwen3-235B-A22B-Instruct-2507",
"DeepInfra": "Qwen/Qwen3-235B-A22B-Instruct-2507",
"HuggingFaceAPI": "Qwen/Qwen3-235B-A22B-Instruct-2507",
"OpenRouter": "qwen/qwen3-235b-a22b-2507"
},
"llama-4-maverick-17b-128e-turbo": {
"DeepInfraChat": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-Turbo",
"DeepInfra": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-Turbo"
},
"devstral-small-2507": {
"DeepInfraChat": "mistralai/Devstral-Small-2507",
"PuterJS": "devstral-small-2507",
"DeepInfra": "mistralai/Devstral-Small-2507"
},
"mistral-small-3.2-24b-2506": {
"DeepInfraChat": "mistralai/Mistral-Small-3.2-24B-Instruct-2506",
"DeepInfra": "mistralai/Mistral-Small-3.2-24B-Instruct-2506"
},
"llama-guard-4-12b": {
"DeepInfraChat": "meta-llama/Llama-Guard-4-12B",
"DeepInfra": "meta-llama/Llama-Guard-4-12B",
"Groq": "meta-llama/llama-guard-4-12b",
"HuggingFaceAPI": "meta-llama/Llama-Guard-4-12B",
@ -1783,15 +1692,12 @@ model_map = {
"OpenRouter": "meta-llama/llama-guard-4-12b"
},
"claude-4-opus": {
"DeepInfraChat": "anthropic/claude-4-opus",
"DeepInfra": "anthropic/claude-4-opus"
},
"claude-4-sonnet": {
"DeepInfraChat": "anthropic/claude-4-sonnet",
"DeepInfra": "anthropic/claude-4-sonnet"
},
"deepseek": {
"DeepInfraChat": "deepseek-ai/DeepSeek-V3",
"HuggingFace": "deepseek-ai/DeepSeek-V3",
"PuterJS": "deepseek-v3",
"ApiAirforce": "deepseek-v3",
@ -1804,7 +1710,6 @@ model_map = {
"OpenRouter": "deepseek/deepseek-chat"
},
"llama-3.3-70b-turbo": {
"DeepInfraChat": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
"BlackboxPro": "Meta-Llama-3.3-70B-Instruct-Turbo",
"DeepInfra": "meta-llama/Llama-3.3-70B-Instruct-Turbo"
},
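The entries above show the two value shapes a model_map record can take: a provider maps to either a single model id or a list of candidate ids. A minimal lookup helper would need to handle both; the sketch below is illustrative only (resolve_model is a hypothetical name, not part of this commit):

def resolve_model(alias: str, provider: str) -> str | None:
    # model_map: {canonical alias -> {provider name -> str | list[str]}}
    value = model_map.get(alias, {}).get(provider)
    if isinstance(value, list):
        return value[0]  # several candidate ids are listed; take the first
    return value  # a single id, or None if the provider lacks this model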
@ -1915,196 +1820,196 @@ model_map = {
"HuggingFaceMedia": "Lightricks/LTX-Video-0.9.7-dev:fal-ai"
},
"claude-opus-4": {
"LMArenaBeta": "claude-opus-4-20250514",
"LMArena": "claude-opus-4-20250514",
"PuterJS": "claude-opus-4-latest",
"OpenRouter": "anthropic/claude-opus-4"
},
"chatgpt-4o": {
"LMArenaBeta": "chatgpt-4o-latest-20250326",
"LMArena": "chatgpt-4o-latest-20250326",
"OpenRouter": "openai/chatgpt-4o-latest"
},
"mistral-small-3.1-24b-2503": {
"LMArenaBeta": "mistral-small-3.1-24b-instruct-2503",
"LMArena": "mistral-small-3.1-24b-instruct-2503",
"HuggingFaceAPI": "mistralai/Mistral-Small-3.1-24B-Instruct-2503",
"Nvidia": "mistralai/mistral-small-3.1-24b-instruct-2503"
},
"steve": {
"LMArenaBeta": "steve"
"LMArena": "steve"
},
"command-a25": {
"LMArenaBeta": "command-a-03-2025",
"LMArena": "command-a-03-2025",
"HuggingSpace": "command-a-03-2025",
"CohereForAI_C4AI_Command": "command-a-03-2025"
},
"amazon.nova-pro": {
"LMArenaBeta": "amazon.nova-pro-v1:0"
"LMArena": "amazon.nova-pro-v1:0"
},
"grok-3-mini-beta": {
"LMArenaBeta": "grok-3-mini-beta",
"LMArena": "grok-3-mini-beta",
"OpenRouter": "x-ai/grok-3-mini-beta"
},
"gemini-2.5-flash-lite-preview-thinking": {
"LMArenaBeta": "gemini-2.5-flash-lite-preview-06-17-thinking"
"LMArena": "gemini-2.5-flash-lite-preview-06-17-thinking"
},
"amazon-nova-experimental": {
"LMArenaBeta": "amazon-nova-experimental-chat-05-14"
"LMArena": "amazon-nova-experimental-chat-05-14"
},
"claude-3-7-sonnet-20250219-thinking-32k": {
"LMArenaBeta": "claude-3-7-sonnet-20250219-thinking-32k"
"LMArena": "claude-3-7-sonnet-20250219-thinking-32k"
},
"mistral-medium-2505": {
"LMArenaBeta": "mistral-medium-2505"
"LMArena": "mistral-medium-2505"
},
"magistral-medium-2506": {
"LMArenaBeta": "magistral-medium-2506",
"LMArena": "magistral-medium-2506",
"OpenRouter": "mistralai/magistral-medium-2506:thinking"
},
"x": {
"LMArenaBeta": "X-preview"
"LMArena": "X-preview"
},
"stephen": {
"LMArenaBeta": "stephen-v2"
"LMArena": "stephen-v2"
},
"glm-4-air-250414": {
"LMArenaBeta": "glm-4-air-250414"
"LMArena": "glm-4-air-250414"
},
"claude-sonnet-4": {
"LMArenaBeta": "claude-sonnet-4-20250514",
"LMArena": "claude-sonnet-4-20250514",
"PuterJS": "claude-sonnet-4-latest",
"ApiAirforce": "claude-sonnet-4",
"OpenRouter": "anthropic/claude-sonnet-4"
},
"stonebloom": {
"LMArenaBeta": "stonebloom"
"LMArena": "stonebloom"
},
"claude-3-7-sonnet": {
"LMArenaBeta": "claude-3-7-sonnet-20250219",
"LMArena": "claude-3-7-sonnet-20250219",
"PuterJS": "claude-3-7-sonnet-latest"
},
"minimax-m1": {
"LMArenaBeta": "minimax-m1",
"LMArena": "minimax-m1",
"OpenRouter": "minimax/minimax-m1"
},
"step-1o-turbo-202506": {
"LMArenaBeta": "step-1o-turbo-202506"
"LMArena": "step-1o-turbo-202506"
},
"claude-sonnet-4-20250514-thinking-32k": {
"LMArenaBeta": "claude-sonnet-4-20250514-thinking-32k"
"LMArena": "claude-sonnet-4-20250514-thinking-32k"
},
"qwen-3-235b-a22b-no-thinking": {
"LMArenaBeta": "qwen3-235b-a22b-no-thinking"
"LMArena": "qwen3-235b-a22b-no-thinking"
},
"claude-opus-4-20250514-thinking-16k": {
"LMArenaBeta": "claude-opus-4-20250514-thinking-16k"
"LMArena": "claude-opus-4-20250514-thinking-16k"
},
"stephen-vision-csfix": {
"LMArenaBeta": "stephen-vision-csfix"
"LMArena": "stephen-vision-csfix"
},
"mistral-small-2506": {
"LMArenaBeta": "mistral-small-2506",
"LMArena": "mistral-small-2506",
"PuterJS": "mistral-small-2506"
},
"wolfstride": {
"LMArenaBeta": "wolfstride"
"LMArena": "wolfstride"
},
"grok-3-mini-high": {
"LMArenaBeta": "grok-3-mini-high"
"LMArena": "grok-3-mini-high"
},
"grok-4-0709": {
"LMArenaBeta": "grok-4-0709"
"LMArena": "grok-4-0709"
},
"cresylux": {
"LMArenaBeta": "cresylux"
"LMArena": "cresylux"
},
"hunyuan-turbos": {
"LMArenaBeta": "hunyuan-turbos-20250416"
"LMArena": "hunyuan-turbos-20250416"
},
"ernie-x1-turbo-32k": {
"LMArenaBeta": "ernie-x1-turbo-32k-preview"
"LMArena": "ernie-x1-turbo-32k-preview"
},
"kimi-k2-0711": {
"LMArenaBeta": "kimi-k2-0711-preview"
"LMArena": "kimi-k2-0711-preview"
},
"nettle": {
"LMArenaBeta": "nettle"
"LMArena": "nettle"
},
"clownfish": {
"LMArenaBeta": "clownfish"
"LMArena": "clownfish"
},
"octopus": {
"LMArenaBeta": "octopus"
"LMArena": "octopus"
},
"kraken-07152025-1": {
"LMArenaBeta": "kraken-07152025-1"
"LMArena": "kraken-07152025-1"
},
"kraken-07152025-2": {
"LMArenaBeta": "kraken-07152025-2"
"LMArena": "kraken-07152025-2"
},
"folsom-07152025-1": {
"LMArenaBeta": "folsom-07152025-1"
"LMArena": "folsom-07152025-1"
},
"claude-3-5-sonnet": {
"LMArenaBeta": "claude-3-5-sonnet-20241022",
"LMArena": "claude-3-5-sonnet-20241022",
"PuterJS": "claude-3-5-sonnet-20240620",
"Anthropic": "claude-3-5-sonnet-20241022"
},
"hunyuan-large-vision": {
"LMArenaBeta": "hunyuan-large-vision"
"LMArena": "hunyuan-large-vision"
},
"flux-1-kontext-pro": {
"LMArenaBeta": "flux-1-kontext-pro"
"LMArena": "flux-1-kontext-pro"
},
"gpt-image-1": {
"LMArenaBeta": "gpt-image-1"
"LMArena": "gpt-image-1"
},
"flux-1-kontext-max": {
"LMArenaBeta": "flux-1-kontext-max"
"LMArena": "flux-1-kontext-max"
},
"imagen-4.0-ultra-generate": {
"LMArenaBeta": "imagen-4.0-ultra-generate-preview-06-06"
"LMArena": "imagen-4.0-ultra-generate-preview-06-06"
},
"imagen-3.0-generate": {
"LMArenaBeta": "imagen-3.0-generate-002"
"LMArena": "imagen-3.0-generate-002"
},
"ideogram": {
"LMArenaBeta": "ideogram-v2"
"LMArena": "ideogram-v2"
},
"photon": {
"LMArenaBeta": "photon"
"LMArena": "photon"
},
"step1x-edit": {
"LMArenaBeta": "step1x-edit"
"LMArena": "step1x-edit"
},
"recraft": {
"LMArenaBeta": "recraft-v3"
"LMArena": "recraft-v3"
},
"anonymous-bot-0514": {
"LMArenaBeta": "anonymous-bot-0514"
"LMArena": "anonymous-bot-0514"
},
"flux-1.1-pro": {
"LMArenaBeta": "flux-1.1-pro"
"LMArena": "flux-1.1-pro"
},
"ideogram-v3-quality": {
"LMArenaBeta": "ideogram-v3-quality"
"LMArena": "ideogram-v3-quality"
},
"imagen-4.0-generate": {
"LMArenaBeta": "imagen-4.0-generate-preview-06-06"
"LMArena": "imagen-4.0-generate-preview-06-06"
},
"seedream-3": {
"LMArenaBeta": "seedream-3",
"LMArena": "seedream-3",
"ApiAirforce": "seedream-3"
},
"seededit-3.0": {
"LMArenaBeta": "seededit-3.0"
"LMArena": "seededit-3.0"
},
"flux-1-kontext-dev": {
"LMArenaBeta": "flux-1-kontext-dev"
"LMArena": "flux-1-kontext-dev"
},
"bagel": {
"LMArenaBeta": "bagel"
"LMArena": "bagel"
},
"gemini-2.0-flash-preview-image-generation": {
"LMArenaBeta": "gemini-2.0-flash-preview-image-generation"
"LMArena": "gemini-2.0-flash-preview-image-generation"
},
"gpt-5-mini": {
"PuterJS": "gpt-5-mini",
@ -3078,7 +2983,7 @@ models_count = {
"llama-3.3-70b-instruct": 4,
"llama-4-maverick-17b-128e-instruct": 5,
"qwen-332b": 3,
"hermes-3-405b": 2,
"hermes-3-405b": 1,
"gpt-oss-20b": 12,
"glm-4.5": 7,
"qwen-3235b-a22b-thinking-2507": 2,
@ -3256,7 +3161,7 @@ parents = {
"CopilotAccount"
],
"DeepInfra": [
"DeepInfraChat"
"DeepInfra"
],
"HuggingFace": [
"HuggingFaceAPI",
@ -3418,7 +3323,6 @@ model_aliases = {
"openrouter:perplexity/sonar-reasoning": "sonar-reasoning",
"openrouter:perplexity/sonar-reasoning-pro": "sonar-reasoning-pro",
"openrouter:perplexity/r1-1776": "r1-1776",
"llama3.1-nemotron-70b-instruct": "llama-3.1-nemotron-70b",
"nvidia/Llama-3.1-Nemotron-70B-Instruct-HF": "llama-3.1-nemotron-70b",
"openrouter:nvidia/llama-3.1-nemotron-70b-instruct": "nemotron-70b",
"cognitivecomputations/dolphin-2.6-mixtral-8x7b": "dolphin-2.6",
@ -3537,7 +3441,6 @@ model_aliases = {
"openrouter:microsoft/phi-3-mini-128k-instruct": "phi-3-mini",
"deepseek-llama3.3-70b": "deepseek-llama-3.3-70b",
"apriel-5b-instruct": "apriel-5b",
"hermes-3-llama-3.1-405b-fp8": "hermes-3-llama-3.1-405b",
"openrouter:nousresearch/hermes-3-llama-3.1-405b": "hermes-3-405b",
"openai/gpt-oss-20b": "gpt-oss-20b",
"Qwen/Qwen3-Coder-480B-A35B-Instruct-Turbo": "qwen-3-coder-480b-a35b-turbo",

View file

@ -10,7 +10,7 @@ from ..providers.retry_provider import RotatedProvider
from ..Provider.needs_auth import OpenaiChat, CopilotAccount
from ..Provider.hf_space import HuggingSpace
from ..Provider import Custom, PollinationsImage, OpenaiAccount, Copilot, Cloudflare, Gemini, Grok, PerplexityLabs, LambdaChat, PollinationsAI, PuterJS
from ..Provider import Microsoft_Phi_4_Multimodal, DeepInfraChat, LMArenaBeta, EdgeTTS, gTTS, MarkItDown, OpenAIFM
from ..Provider import Microsoft_Phi_4_Multimodal, DeepInfra, LMArena, EdgeTTS, gTTS, MarkItDown, OpenAIFM
from ..Provider import HuggingFace, HuggingFaceMedia, Azure, Qwen, EasyChat, GLM, OpenRouterFree, GeminiPro
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .. import Provider
@ -25,7 +25,7 @@ PROVIDERS_LIST_2 = [
# Add all models to the model map
PROVIDERS_LIST_3 = [
LambdaChat, DeepInfraChat, HuggingFace, HuggingFaceMedia, LMArenaBeta,
LambdaChat, DeepInfra, HuggingFace, HuggingFaceMedia, LMArena,
PuterJS, Cloudflare, HuggingSpace
]
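With DeepInfra and LMArena now in PROVIDERS_LIST_3, each provider in the list contributes its models under its own class name. A rough sketch of that fold, assuming every listed provider exposes a get_models() classmethod the way DeepInfra does (the loop is illustrative, not the file's literal code):

for provider in PROVIDERS_LIST_3:
    for model in provider.get_models():
        # record that this provider serves the model under this id
        model_map.setdefault(model, {})[provider.__name__] = model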