diff --git a/etc/tool/commit.py b/etc/tool/commit.py
index 0107886e..b42d7e35 100755
--- a/etc/tool/commit.py
+++ b/etc/tool/commit.py
@@ -31,7 +31,7 @@ from g4f import debug
debug.logging = True
# Constants
-DEFAULT_MODEL = "gpt-4o"
+DEFAULT_MODEL = "claude-3.7-sonnet"
FALLBACK_MODELS = []
MAX_DIFF_SIZE = None # Set to None to disable truncation, or a number for character limit
MAX_RETRIES = 3
diff --git a/g4f/Provider/ARTA.py b/g4f/Provider/ARTA.py
index 1b6c6650..e71444cb 100644
--- a/g4f/Provider/ARTA.py
+++ b/g4f/Provider/ARTA.py
@@ -151,10 +151,12 @@ class ARTA(AsyncGeneratorProvider, ProviderModelMixin):
# Step 1: Get Authentication Token
auth_data = await cls.read_and_refresh_token(proxy)
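+        # The auth payload exposes the bearer token as "idToken"; it is sent as the Authorization header below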
+ auth_token = auth_data.get("idToken")
async with ClientSession() as session:
# Step 2: Generate Images
- image_payload = {
+        # Build the payload as a plain dict; the endpoint appears to expect URL-encoded form data rather than JSON
+ form_data = {
"prompt": prompt,
"negative_prompt": negative_prompt,
"style": model,
@@ -165,11 +167,16 @@ class ARTA(AsyncGeneratorProvider, ProviderModelMixin):
"seed": str(seed),
}
+ # Debug: Print the payload being sent
+ print(f"Sending payload: {form_data}")
+
headers = {
- "Authorization": auth_data.get("idToken"),
+ "Authorization": auth_token,
+            # No explicit Content-Type: aiohttp sets application/x-www-form-urlencoded automatically for dict data
}
- async with session.post(cls.image_generation_url, data=image_payload, headers=headers, proxy=proxy) as image_response:
+ # Try with form data instead of JSON
+ async with session.post(cls.image_generation_url, data=form_data, headers=headers, proxy=proxy) as image_response:
await raise_error(f"Failed to initiate image generation", image_response)
image_data = await image_response.json()
record_id = image_data.get("record_id")
@@ -208,4 +215,4 @@ async def raise_error(message: str, response: ClientResponse):
return
error_text = await response.text()
content_type = response.headers.get('Content-Type', 'unknown')
- raise ResponseError(f"{message}. Content-Type: {content_type}, Response: {error_text}")
\ No newline at end of file
+ raise ResponseError(f"{message}. Content-Type: {content_type}, Response: {error_text}")
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index 1f9db172..14ff67c7 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -6,7 +6,6 @@ import re
import json
import random
import string
-import base64
from pathlib import Path
from typing import Optional
from datetime import datetime, timedelta
@@ -14,13 +13,11 @@ from datetime import datetime, timedelta
from ..typing import AsyncResult, Messages, MediaListType
from ..requests.raise_for_status import raise_for_status
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .openai.har_file import get_har_files
from ..image import to_data_uri
-from ..cookies import get_cookies_dir
-from .helper import format_image_prompt, render_messages
-from ..providers.response import JsonConversation, ImageResponse
+from .helper import render_messages
+from ..providers.response import JsonConversation
from ..tools.media import merge_media
-from ..errors import RateLimitError, NoValidHarFileError
+from ..errors import RateLimitError
from .. import debug
class Conversation(JsonConversation):
@@ -43,83 +40,66 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
default_model = "blackboxai"
default_vision_model = default_model
- default_image_model = 'flux'
-
- # Free models (available without subscription)
- fallback_models = [
- default_model,
- "gpt-4o-mini",
- "DeepSeek-V3",
- "DeepSeek-R1",
- "Meta-Llama-3.3-70B-Instruct-Turbo",
- "Mistral-Small-24B-Instruct-2501",
- "DeepSeek-LLM-Chat-(67B)",
- "Qwen-QwQ-32B-Preview",
- # Image models
- "flux",
- # Trending agent modes
- 'Python Agent',
- 'HTML Agent',
- 'Builder Agent',
- 'Java Agent',
- 'JavaScript Agent',
- 'React Agent',
- 'Android Agent',
- 'Flutter Agent',
- 'Next.js Agent',
- 'AngularJS Agent',
- 'Swift Agent',
- 'MongoDB Agent',
- 'PyTorch Agent',
- 'Xcode Agent',
- 'Azure Agent',
- 'Bitbucket Agent',
- 'DigitalOcean Agent',
- 'Docker Agent',
- 'Electron Agent',
- 'Erlang Agent',
- 'FastAPI Agent',
- 'Firebase Agent',
- 'Flask Agent',
- 'Git Agent',
- 'Gitlab Agent',
- 'Go Agent',
- 'Godot Agent',
- 'Google Cloud Agent',
- 'Heroku Agent'
+
+ # OpenRouter Free
+ openrouter_models = [
+ "Deepcoder 14B Preview",
+ "DeepHermes 3 Llama 3 8B Preview",
+ "DeepSeek R1 Zero",
+ "Dolphin3.0 Mistral 24B",
+ "Dolphin3.0 R1 Mistral 24B",
+ "Flash 3", # FIX ( ◁)
+ "Gemini 2.0 Flash Experimental",
+ "Gemma 2 9B",
+ "Gemma 3 12B",
+ "Gemma 3 1B",
+ "Gemma 3 27B",
+ "Gemma 3 4B",
+ "Kimi VL A3B Thinking", # FIX (◁think▷ ◁/think▷)
+ "Llama 3.1 8B Instruct",
+ "Llama 3.1 Nemotron Ultra 253B v1",
+ "Llama 3.2 11B Vision Instruct",
+ "Llama 3.2 1B Instruct",
+ "Llama 3.2 3B Instruct",
+ "Llama 3.3 70B Instruct",
+ "Llama 3.3 Nemotron Super 49B v1",
+ "Llama 4 Maverick",
+ "Llama 4 Scout",
+ "Mistral 7B Instruct",
+ "Mistral Nemo",
+ "Mistral Small 3",
+ "Mistral Small 3.1 24B",
+ "Molmo 7B D",
+ "Moonlight 16B A3B Instruct",
+ "Qwen2.5 72B Instruct",
+ "Qwen2.5 7B Instruct",
+ "Qwen2.5 Coder 32B Instruct",
+ "Qwen2.5 VL 32B Instruct",
+ "Qwen2.5 VL 3B Instruct",
+ "Qwen2.5 VL 72B Instruct",
+ "Qwen2.5-VL 7B Instruct",
+ "Qwerky 72B",
+ "QwQ 32B",
+ "QwQ 32B Preview",
+ "QwQ 32B RpR v1",
+ "R1",
+ "R1 Distill Llama 70B",
+ "R1 Distill Qwen 14B",
+ "R1 Distill Qwen 32B",
]
-
- # Premium models (require subscription)
- premium_models = [
- "GPT-4o",
- "o1",
- "o3-mini",
- "Claude-sonnet-3.7",
- "Claude-sonnet-3.5",
- "Gemini-Flash-2.0",
- "DBRX-Instruct",
- "blackboxai-pro",
- "Gemini-PRO"
- ]
-
- # Models available in the demo account
- demo_models = [
+
+ models = [
default_model,
- "blackboxai-pro",
- "gpt-4o-mini",
- "GPT-4o",
- "o1",
"o3-mini",
+ "gpt-4.1-nano",
"Claude-sonnet-3.7",
"Claude-sonnet-3.5",
- "DeepSeek-V3",
"DeepSeek-R1",
- "DeepSeek-LLM-Chat-(67B)",
- "Meta-Llama-3.3-70B-Instruct-Turbo",
"Mistral-Small-24B-Instruct-2501",
- "Qwen-QwQ-32B-Preview",
- # Image models
- "flux",
+
+ # OpenRouter Free
+ *openrouter_models,
+
# Trending agent modes
'Python Agent',
'HTML Agent',
@@ -152,35 +132,66 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
'Heroku Agent'
]
- image_models = [default_image_model]
- vision_models = [default_vision_model, 'GPT-4o', 'o1', 'o3-mini', 'Gemini-PRO', 'Gemini Agent', 'llama-3.1-8b Agent', 'llama-3.1-70b Agent', 'llama-3.1-405 Agent', 'Gemini-Flash-2.0', 'DeepSeek-V3']
+ vision_models = [default_vision_model, 'o3-mini']
- userSelectedModel = ['GPT-4o', 'o1', 'o3-mini', 'Gemini-PRO', 'Claude-sonnet-3.7', 'Claude-sonnet-3.5', 'DeepSeek-V3', 'DeepSeek-R1', 'Meta-Llama-3.3-70B-Instruct-Turbo', 'Mistral-Small-24B-Instruct-2501', 'DeepSeek-LLM-Chat-(67B)', 'DBRX-Instruct', 'Qwen-QwQ-32B-Preview', 'Nous-Hermes-2-Mixtral-8x7B-DPO', 'Gemini-Flash-2.0']
+    userSelectedModel = ['o3-mini', 'Claude-sonnet-3.7', 'Claude-sonnet-3.5', 'DeepSeek-R1', 'Mistral-Small-24B-Instruct-2501'] + openrouter_models
# Agent mode configurations
agentMode = {
- 'GPT-4o': {'mode': True, 'id': "GPT-4o", 'name': "GPT-4o"},
- 'Gemini-PRO': {'mode': True, 'id': "Gemini-PRO", 'name': "Gemini-PRO"},
+ # OpenRouter Free
+ 'Deepcoder 14B Preview': {'mode': True, 'id': "agentica-org/deepcoder-14b-preview:free", 'name': "Deepcoder 14B Preview"},
+ 'DeepHermes 3 Llama 3 8B Preview': {'mode': True, 'id': "nousresearch/deephermes-3-llama-3-8b-preview:free", 'name': "DeepHermes 3 Llama 3 8B Preview"},
+ 'DeepSeek R1 Zero': {'mode': True, 'id': "deepseek/deepseek-r1-zero:free", 'name': "DeepSeek R1 Zero"},
+ 'Dolphin3.0 Mistral 24B': {'mode': True, 'id': "cognitivecomputations/dolphin3.0-mistral-24b:free", 'name': "Dolphin3.0 Mistral 24B"},
+ 'Dolphin3.0 R1 Mistral 24B': {'mode': True, 'id': "cognitivecomputations/dolphin3.0-r1-mistral-24b:free", 'name': "Dolphin3.0 R1 Mistral 24B"},
+ 'Flash 3': {'mode': True, 'id': "rekaai/reka-flash-3:free", 'name': "Flash 3"},
+ 'Gemini 2.0 Flash Experimental': {'mode': True, 'id': "google/gemini-2.0-flash-exp:free", 'name': "Gemini 2.0 Flash Experimental"},
+ 'Gemma 2 9B': {'mode': True, 'id': "google/gemma-2-9b-it:free", 'name': "Gemma 2 9B"},
+ 'Gemma 3 12B': {'mode': True, 'id': "google/gemma-3-12b-it:free", 'name': "Gemma 3 12B"},
+ 'Gemma 3 1B': {'mode': True, 'id': "google/gemma-3-1b-it:free", 'name': "Gemma 3 1B"},
+ 'Gemma 3 27B': {'mode': True, 'id': "google/gemma-3-27b-it:free", 'name': "Gemma 3 27B"},
+ 'Gemma 3 4B': {'mode': True, 'id': "google/gemma-3-4b-it:free", 'name': "Gemma 3 4B"},
+ 'Kimi VL A3B Thinking': {'mode': True, 'id': "moonshotai/kimi-vl-a3b-thinking:free", 'name': "Kimi VL A3B Thinking"},
+ 'Llama 3.1 8B Instruct': {'mode': True, 'id': "meta-llama/llama-3.1-8b-instruct:free", 'name': "Llama 3.1 8B Instruct"},
+ 'Llama 3.1 Nemotron Ultra 253B v1': {'mode': True, 'id': "nvidia/llama-3.1-nemotron-ultra-253b-v1:free", 'name': "Llama 3.1 Nemotron Ultra 253B v1"},
+ 'Llama 3.2 11B Vision Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-11b-vision-instruct:free", 'name': "Llama 3.2 11B Vision Instruct"},
+ 'Llama 3.2 1B Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-1b-instruct:free", 'name': "Llama 3.2 1B Instruct"},
+ 'Llama 3.2 3B Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-3b-instruct:free", 'name': "Llama 3.2 3B Instruct"},
+ 'Llama 3.3 70B Instruct': {'mode': True, 'id': "meta-llama/llama-3.3-70b-instruct:free", 'name': "Llama 3.3 70B Instruct"},
+ 'Llama 3.3 Nemotron Super 49B v1': {'mode': True, 'id': "nvidia/llama-3.3-nemotron-super-49b-v1:free", 'name': "Llama 3.3 Nemotron Super 49B v1"},
+ 'Llama 4 Maverick': {'mode': True, 'id': "meta-llama/llama-4-maverick:free", 'name': "Llama 4 Maverick"},
+ 'Llama 4 Scout': {'mode': True, 'id': "meta-llama/llama-4-scout:free", 'name': "Llama 4 Scout"},
+ 'Mistral 7B Instruct': {'mode': True, 'id': "mistralai/mistral-7b-instruct:free", 'name': "Mistral 7B Instruct"},
+ 'Mistral Nemo': {'mode': True, 'id': "mistralai/mistral-nemo:free", 'name': "Mistral Nemo"},
+ 'Mistral Small 3': {'mode': True, 'id': "mistralai/mistral-small-24b-instruct-2501:free", 'name': "Mistral Small 3"},
+ 'Mistral Small 3.1 24B': {'mode': True, 'id': "mistralai/mistral-small-3.1-24b-instruct:free", 'name': "Mistral Small 3.1 24B"},
+ 'Molmo 7B D': {'mode': True, 'id': "allenai/molmo-7b-d:free", 'name': "Molmo 7B D"},
+ 'Moonlight 16B A3B Instruct': {'mode': True, 'id': "moonshotai/moonlight-16b-a3b-instruct:free", 'name': "Moonlight 16B A3B Instruct"},
+ 'Qwen2.5 72B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-72b-instruct:free", 'name': "Qwen2.5 72B Instruct"},
+ 'Qwen2.5 7B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-7b-instruct:free", 'name': "Qwen2.5 7B Instruct"},
+ 'Qwen2.5 Coder 32B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-coder-32b-instruct:free", 'name': "Qwen2.5 Coder 32B Instruct"},
+ 'Qwen2.5 VL 32B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-32b-instruct:free", 'name': "Qwen2.5 VL 32B Instruct"},
+ 'Qwen2.5 VL 3B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-3b-instruct:free", 'name': "Qwen2.5 VL 3B Instruct"},
+ 'Qwen2.5 VL 72B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-72b-instruct:free", 'name': "Qwen2.5 VL 72B Instruct"},
+ 'Qwen2.5-VL 7B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-vl-7b-instruct:free", 'name': "Qwen2.5-VL 7B Instruct"},
+ 'Qwerky 72B': {'mode': True, 'id': "featherless/qwerky-72b:free", 'name': "Qwerky 72B"},
+ 'QwQ 32B': {'mode': True, 'id': "qwen/qwq-32b:free", 'name': "QwQ 32B"},
+ 'QwQ 32B Preview': {'mode': True, 'id': "qwen/qwq-32b-preview:free", 'name': "QwQ 32B Preview"},
+ 'QwQ 32B RpR v1': {'mode': True, 'id': "arliai/qwq-32b-arliai-rpr-v1:free", 'name': "QwQ 32B RpR v1"},
+ 'R1': {'mode': True, 'id': "deepseek/deepseek-r1:free", 'name': "R1"},
+ 'R1 Distill Llama 70B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-llama-70b:free", 'name': "R1 Distill Llama 70B"},
+ 'R1 Distill Qwen 14B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-14b:free", 'name': "R1 Distill Qwen 14B"},
+ 'R1 Distill Qwen 32B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-32b:free", 'name': "R1 Distill Qwen 32B"},
+
+ # Default
'Claude-sonnet-3.7': {'mode': True, 'id': "Claude-sonnet-3.7", 'name': "Claude-sonnet-3.7"},
'Claude-sonnet-3.5': {'mode': True, 'id': "Claude-sonnet-3.5", 'name': "Claude-sonnet-3.5"},
- 'DeepSeek-V3': {'mode': True, 'id': "deepseek-chat", 'name': "DeepSeek-V3"},
'DeepSeek-R1': {'mode': True, 'id': "deepseek-reasoner", 'name': "DeepSeek-R1"},
- 'Meta-Llama-3.3-70B-Instruct-Turbo': {'mode': True, 'id': "meta-llama/Llama-3.3-70B-Instruct-Turbo", 'name': "Meta-Llama-3.3-70B-Instruct-Turbo"},
- 'Gemini-Flash-2.0': {'mode': True, 'id': "Gemini/Gemini-Flash-2.0", 'name': "Gemini-Flash-2.0"},
'Mistral-Small-24B-Instruct-2501': {'mode': True, 'id': "mistralai/Mistral-Small-24B-Instruct-2501", 'name': "Mistral-Small-24B-Instruct-2501"},
- 'DeepSeek-LLM-Chat-(67B)': {'mode': True, 'id': "deepseek-ai/deepseek-llm-67b-chat", 'name': "DeepSeek-LLM-Chat-(67B)"},
- 'DBRX-Instruct': {'mode': True, 'id': "databricks/dbrx-instruct", 'name': "DBRX-Instruct"},
- 'Qwen-QwQ-32B-Preview': {'mode': True, 'id': "Qwen/QwQ-32B-Preview", 'name': "Qwen-QwQ-32B-Preview"},
- 'Nous-Hermes-2-Mixtral-8x7B-DPO': {'mode': True, 'id': "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", 'name': "Nous-Hermes-2-Mixtral-8x7B-DPO"},
}
# Trending agent modes
trendingAgentMode = {
- 'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"},
- "Gemini Agent": {'mode': True, 'id': 'gemini'},
- "llama-3.1-405 Agent": {'mode': True, 'id': "llama-3.1-405"},
- 'llama-3.1-70b Agent': {'mode': True, 'id': "llama-3.1-70b"},
- 'llama-3.1-8b Agent': {'mode': True, 'id': "llama-3.1-8b"},
'Python Agent': {'mode': True, 'id': "python"},
'HTML Agent': {'mode': True, 'id': "html"},
'Builder Agent': {'mode': True, 'id': "builder"},
@@ -214,180 +225,78 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
# Complete list of all models (for authorized users)
_all_models = list(dict.fromkeys([
- *fallback_models, # Include all free models
- *premium_models, # Include all premium models
- *image_models,
+        *models,  # Include all base models
*list(agentMode.keys()),
*list(trendingAgentMode.keys())
]))
-
- # Initialize models with fallback_models
- models = fallback_models
model_aliases = {
- "gpt-4o": "GPT-4o",
+ "gpt-4": default_model,
+ "gpt-4o": default_model,
+ "gpt-4o-mini": default_model,
"claude-3.7-sonnet": "Claude-sonnet-3.7",
"claude-3.5-sonnet": "Claude-sonnet-3.5",
- "deepseek-v3": "DeepSeek-V3",
"deepseek-r1": "DeepSeek-R1",
- "deepseek-chat": "DeepSeek-LLM-Chat-(67B)",
- "llama-3.3-70b": "Meta-Llama-3.3-70B-Instruct-Turbo",
- "mixtral-small-24b": "Mistral-Small-24B-Instruct-2501",
- "qwq-32b": "Qwen-QwQ-32B-Preview",
+        # OpenRouter Free
+ "deepcoder-14b": "Deepcoder 14B Preview",
+ "deephermes-3-8b": "DeepHermes 3 Llama 3 8B Preview",
+        "deepseek-r1-zero": "DeepSeek R1 Zero",
+ "dolphin-3.0-24b": "Dolphin3.0 Mistral 24B",
+ "dolphin-3.0-r1-24b": "Dolphin3.0 R1 Mistral 24B",
+ "reka-flash": "Flash 3",
+ "gemini-2.0-flash": "Gemini 2.0 Flash Experimental",
+ "gemma-2-9b": "Gemma 2 9B",
+ "gemma-3-12b": "Gemma 3 12B",
+ "gemma-3-1b": "Gemma 3 1B",
+ "gemma-3-27b": "Gemma 3 27B",
+ "gemma-3-4b": "Gemma 3 4B",
+ "kimi-vl-a3b-thinking": "Kimi VL A3B Thinking",
+ "llama-3.1-8b": "Llama 3.1 8B Instruct",
+ "nemotron-253b": "Llama 3.1 Nemotron Ultra 253B v1",
+ "llama-3.2-11b": "Llama 3.2 11B Vision Instruct",
+ "llama-3.2-1b": "Llama 3.2 1B Instruct",
+ "llama-3.2-3b": "Llama 3.2 3B Instruct",
+ "llama-3.3-70b": "Llama 3.3 70B Instruct",
+ "nemotron-49b": "Llama 3.3 Nemotron Super 49B v1",
+ "llama-4-maverick": "Llama 4 Maverick",
+ "llama-4-scout": "Llama 4 Scout",
+ "mistral-7b": "Mistral 7B Instruct",
+ "mistral-nemo": "Mistral Nemo",
+        "mistral-small-3": "Mistral Small 3",
+        "mistral-small-24b": "Mistral-Small-24B-Instruct-2501",
+ "mistral-small-3.1-24b": "Mistral Small 3.1 24B",
+ "molmo-7b": "Molmo 7B D",
+ "moonlight-16b": "Moonlight 16B A3B Instruct",
+ "qwen-2.5-72b": "Qwen2.5 72B Instruct",
+ "qwen-2.5-7b": "Qwen2.5 7B Instruct",
+ "qwen-2.5-coder-32b": "Qwen2.5 Coder 32B Instruct",
+ "qwen-2.5-vl-32b": "Qwen2.5 VL 32B Instruct",
+ "qwen-2.5-vl-3b": "Qwen2.5 VL 3B Instruct",
+ "qwen-2.5-vl-72b": "Qwen2.5 VL 72B Instruct",
+ "qwen-2.5-vl-7b": "Qwen2.5-VL 7B Instruct",
+ "qwerky-72b": "Qwerky 72B",
+        "qwq-32b": "QwQ 32B",
+        "qwq-32b-preview": "QwQ 32B Preview",
+        "qwq-32b-arliai": "QwQ 32B RpR v1",
+        "deepseek-r1-distill-llama-70b": "R1 Distill Llama 70B",
+        "deepseek-r1-distill-qwen-14b": "R1 Distill Qwen 14B",
+        "deepseek-r1-distill-qwen-32b": "R1 Distill Qwen 32B",
}
@classmethod
- async def get_models_async(cls) -> list:
+ def generate_session(cls, email: str, id_length: int = 21, days_ahead: int = 365) -> dict:
"""
- Asynchronous version of get_models that checks subscription status.
- Returns a list of available models based on subscription status.
- Premium users get the full list of models.
- Free users get fallback_models.
- Demo accounts get demo_models.
- """
- # Check if there are valid session data in HAR files
- session_data = cls._find_session_in_har_files()
-
- if not session_data:
- # For demo accounts - return demo models
- debug.log(f"Blackbox: Returning demo model list with {len(cls.demo_models)} models")
- return cls.demo_models
-
- # Check if this is a demo session
- demo_session = cls.generate_session()
- is_demo = (session_data['user'].get('email') == demo_session['user'].get('email'))
-
- if is_demo:
- # For demo accounts - return demo models
- debug.log(f"Blackbox: Returning demo model list with {len(cls.demo_models)} models")
- return cls.demo_models
-
- # For non-demo accounts, check subscription status
- if 'user' in session_data and 'email' in session_data['user']:
- subscription = await cls.check_subscription(session_data['user']['email'])
- if subscription['status'] == "PREMIUM":
- debug.log(f"Blackbox: Returning premium model list with {len(cls._all_models)} models")
- return cls._all_models
-
- # For free accounts - return free models
- debug.log(f"Blackbox: Returning free model list with {len(cls.fallback_models)} models")
- return cls.fallback_models
-
- @classmethod
- def get_models(cls) -> list:
- """
- Returns a list of available models based on authorization status.
- Authorized users get the full list of models.
- Free users get fallback_models.
- Demo accounts get demo_models.
-
- Note: This is a synchronous method that can't check subscription status,
- so it falls back to the basic premium access check.
- For more accurate results, use get_models_async when possible.
- """
- # Check if there are valid session data in HAR files
- session_data = cls._find_session_in_har_files()
-
- if not session_data:
- # For demo accounts - return demo models
- debug.log(f"Blackbox: Returning demo model list with {len(cls.demo_models)} models")
- return cls.demo_models
-
- # Check if this is a demo session
- demo_session = cls.generate_session()
- is_demo = (session_data['user'].get('email') == demo_session['user'].get('email'))
-
- if is_demo:
- # For demo accounts - return demo models
- debug.log(f"Blackbox: Returning demo model list with {len(cls.demo_models)} models")
- return cls.demo_models
-
- # For non-demo accounts, check premium access
- has_premium_access = cls._check_premium_access()
-
- if has_premium_access:
- # For premium users - all models
- debug.log(f"Blackbox: Returning premium model list with {len(cls._all_models)} models")
- return cls._all_models
-
- # For free accounts - return free models
- debug.log(f"Blackbox: Returning free model list with {len(cls.fallback_models)} models")
- return cls.fallback_models
-
- @classmethod
- async def check_subscription(cls, email: str) -> dict:
- """
- Check subscription status for a given email using the Blackbox API.
-
- Args:
- email: The email to check subscription for
-
- Returns:
- dict: Subscription status information with keys:
- - status: "PREMIUM" or "FREE"
- - customerId: Customer ID if available
- - isTrialSubscription: Whether this is a trial subscription
- """
- if not email:
- return {"status": "FREE", "customerId": None, "isTrialSubscription": False}
-
- headers = {
- 'accept': '*/*',
- 'accept-language': 'en',
- 'content-type': 'application/json',
- 'origin': 'https://www.blackbox.ai',
- 'referer': 'https://www.blackbox.ai/?ref=login-success',
- 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36'
- }
-
- try:
- async with ClientSession(headers=headers) as session:
- async with session.post(
- 'https://www.blackbox.ai/api/check-subscription',
- json={"email": email}
- ) as response:
- if response.status != 200:
- debug.log(f"Blackbox: Subscription check failed with status {response.status}")
- return {"status": "FREE", "customerId": None, "isTrialSubscription": False}
-
- result = await response.json()
- status = "PREMIUM" if result.get("hasActiveSubscription", False) else "FREE"
-
- return {
- "status": status,
- "customerId": result.get("customerId"),
- "isTrialSubscription": result.get("isTrialSubscription", False)
- }
- except Exception as e:
- debug.log(f"Blackbox: Error checking subscription: {e}")
- return {"status": "FREE", "customerId": None, "isTrialSubscription": False}
-
- @classmethod
- def _check_premium_access(cls) -> bool:
- """
- Checks for an authorized session in HAR files.
- Returns True if a valid session is found that differs from the demo.
- """
- try:
- session_data = cls._find_session_in_har_files()
- if not session_data:
- return False
-
- # Check if this is not a demo session
- demo_session = cls.generate_session()
- if (session_data['user'].get('email') != demo_session['user'].get('email')):
- return True
- return False
- except Exception as e:
- debug.log(f"Blackbox: Error checking premium access: {e}")
- return False
-
- @classmethod
- def generate_session(cls, id_length: int = 21, days_ahead: int = 365) -> dict:
- """
- Generate a dynamic session with proper ID and expiry format.
+ Generate a dynamic session with proper ID and expiry format using a specific email.
Args:
+ email: The email to use for this session
id_length: Length of the numeric ID (default: 21)
days_ahead: Number of days ahead for expiry (default: 365)
@@ -401,10 +310,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
future_date = datetime.now() + timedelta(days=days_ahead)
expiry = future_date.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
- # Decode the encoded email
- encoded_email = "Z2lzZWxlQGJsYWNrYm94LmFp" # Base64 encoded email
- email = base64.b64decode(encoded_email).decode('utf-8')
-
# Generate random image ID for the new URL format
chars = string.ascii_letters + string.digits + "-"
random_img_id = ''.join(random.choice(chars) for _ in range(48))
@@ -417,68 +322,14 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
"image": image_url,
"id": numeric_id
},
- "expires": expiry
+ "expires": expiry,
+ "isNewUser": False
}
- @classmethod
- def _find_session_in_har_files(cls) -> Optional[dict]:
- """
- Search for valid session data in HAR files.
-
- Returns:
- Optional[dict]: Session data if found, None otherwise
- """
- try:
- for file in get_har_files():
- try:
- with open(file, 'rb') as f:
- har_data = json.load(f)
-
- for entry in har_data['log']['entries']:
- # Only look at blackbox API responses
- if 'blackbox.ai/api' in entry['request']['url']:
- # Look for a response that has the right structure
- if 'response' in entry and 'content' in entry['response']:
- content = entry['response']['content']
- # Look for both regular and Google auth session formats
- if ('text' in content and
- isinstance(content['text'], str) and
- '"user"' in content['text'] and
- '"email"' in content['text'] and
- '"expires"' in content['text']):
- try:
- # Remove any HTML or other non-JSON content
- text = content['text'].strip()
- if text.startswith('{') and text.endswith('}'):
- # Replace escaped quotes
- text = text.replace('\\"', '"')
- har_session = json.loads(text)
-
- # Check if this is a valid session object
- if (isinstance(har_session, dict) and
- 'user' in har_session and
- 'email' in har_session['user'] and
- 'expires' in har_session):
-
- debug.log(f"Blackbox: Found session in HAR file: {file}")
- return har_session
- except json.JSONDecodeError as e:
- # Only print error for entries that truly look like session data
- if ('"user"' in content['text'] and
- '"email"' in content['text']):
- debug.log(f"Blackbox: Error parsing likely session data: {e}")
- except Exception as e:
- debug.log(f"Blackbox: Error reading HAR file {file}: {e}")
- return None
- except NoValidHarFileError:
- pass
- except Exception as e:
- debug.log(f"Blackbox: Error searching HAR files: {e}")
- return None
-
@classmethod
async def fetch_validated(cls, url: str = "https://www.blackbox.ai", force_refresh: bool = False) -> Optional[str]:
- cache_file = Path(get_cookies_dir()) / 'blackbox.json'
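+        # Cache the scraped token on disk so later runs can skip the page fetch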
+        cache_path = Path.home() / ".g4f" / "cache"
+ cache_file = cache_path / 'blackbox.json'
if not force_refresh and cache_file.exists():
try:
@@ -517,7 +368,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
if is_valid_context(context):
validated_value = match.group(1)
- cache_file.parent.mkdir(exist_ok=True)
+ cache_file.parent.mkdir(exist_ok=True, parents=True)
try:
with open(cache_file, 'w') as f:
json.dump({'validated_value': validated_value}, f)
@@ -592,41 +443,14 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
"title": ""
}
- # Get session data - try HAR files first, fall back to generated session
- session_data = cls._find_session_in_har_files() or cls.generate_session()
+ # Generate a new email for each request instead of using the one stored in conversation
+ chars = string.ascii_lowercase + string.digits
+ random_team = ''.join(random.choice(chars) for _ in range(8))
+ request_email = f"{random_team}@blackbox.ai"
- # Log which session type is being used
- demo_session = cls.generate_session()
- is_demo = (session_data['user'].get('email') == demo_session['user'].get('email'))
-
- if is_demo:
- debug.log("Blackbox: Using generated demo session")
- # For demo account, set default values without checking subscription
- subscription_status = {"status": "FREE", "customerId": None, "isTrialSubscription": False}
- # Check if the requested model is in demo_models
- is_premium = model in cls.demo_models
- if not is_premium:
- debug.log(f"Blackbox: Model {model} not available in demo account, falling back to default model")
- model = cls.default_model
- is_premium = True
- else:
- debug.log(f"Blackbox: Using session from HAR file (email: {session_data['user'].get('email', 'unknown')})")
- # Only check subscription for non-demo accounts
- subscription_status = {"status": "FREE", "customerId": None, "isTrialSubscription": False}
- if session_data.get('user', {}).get('email'):
- subscription_status = await cls.check_subscription(session_data['user']['email'])
- debug.log(f"Blackbox: Subscription status for {session_data['user']['email']}: {subscription_status['status']}")
-
- # Determine if user has premium access based on subscription status
- if subscription_status['status'] == "PREMIUM":
- is_premium = True
- else:
- # For free accounts, check if the requested model is in fallback_models
- is_premium = model in cls.fallback_models
- if not is_premium:
- debug.log(f"Blackbox: Model {model} not available in free account, falling back to default model")
- model = cls.default_model
- is_premium = True
+ # Generate a session with the new email
+ session_data = cls.generate_session(request_email)
+ debug.log(f"Blackbox: Using generated session with email {request_email}")
data = {
"messages": current_messages,
@@ -651,26 +475,28 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
"mobileClient": False,
"userSelectedModel": model if model in cls.userSelectedModel else None,
"validated": conversation.validated_value,
- "imageGenerationMode": model == cls.default_image_model,
+ "imageGenerationMode": False,
"webSearchModePrompt": False,
"deepSearchMode": False,
+ "designerMode": False,
"domains": None,
"vscodeClient": False,
"codeInterpreterMode": False,
"customProfile": {
+ "additionalInfo": "",
+ "enableNewChats": False,
"name": "",
"occupation": "",
- "traits": [],
- "additionalInfo": "",
- "enableNewChats": False
+ "traits": []
},
"session": session_data,
- "isPremium": is_premium,
+ "isPremium": True,
"subscriptionCache": {
- "status": subscription_status['status'],
- "customerId": subscription_status['customerId'],
- "isTrialSubscription": subscription_status['isTrialSubscription'],
- "lastChecked": int(datetime.now().timestamp() * 1000)
+ "expiryTimestamp": None,
+ "isTrialSubscription": False,
+ "lastChecked": int(datetime.now().timestamp() * 1000),
+ "status": "FREE",
+ "customerId": None
},
"beastMode": False,
"reasoningMode": False,
@@ -689,24 +515,11 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
if "You have reached your request limit for the hour" in chunk_text:
raise RateLimitError(chunk_text)
full_response.append(chunk_text)
- # Only yield chunks for non-image models
- if model != cls.default_image_model:
- yield chunk_text
+ yield chunk_text
full_response_text = ''.join(full_response)
- # For image models, check for image markdown
- if model == cls.default_image_model:
- image_url_match = re.search(r'!\[.*?\]\((.*?)\)', full_response_text)
- if image_url_match:
- image_url = image_url_match.group(1)
- yield ImageResponse(urls=[image_url], alt=format_image_prompt(messages, prompt))
- return
-
- # Handle conversation history once, in one place
+ # Handle conversation history
if return_conversation:
conversation.message_history.append({"role": "assistant", "content": full_response_text})
yield conversation
- # For image models that didn't produce an image, fall back to text response
- elif model == cls.default_image_model:
- yield full_response_text
diff --git a/g4f/Provider/Chatai.py b/g4f/Provider/Chatai.py
index 3c15745a..e53dbe56 100644
--- a/g4f/Provider/Chatai.py
+++ b/g4f/Provider/Chatai.py
@@ -35,9 +35,8 @@ class Chatai(AsyncGeneratorProvider, ProviderModelMixin):
supports_message_history = True
default_model = 'gpt-4o-mini-2024-07-18'
- models = ['gpt-4o-mini-2024-07-18'] #
-
model_aliases = {"gpt-4o-mini":default_model}
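+    # Expose only the friendly alias; get_model() resolves it to the dated default_model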
+ models = list(model_aliases.keys())
# --- ProviderModelMixin Methods ---
@classmethod
diff --git a/g4f/Provider/DDG.py b/g4f/Provider/DDG.py
index b4cd89d1..6f286c8b 100644
--- a/g4f/Provider/DDG.py
+++ b/g4f/Provider/DDG.py
@@ -1,316 +1,77 @@
from __future__ import annotations
-import time
-from aiohttp import ClientSession, ClientTimeout
import json
-import asyncio
-import random
import base64
-import hashlib
-from yarl import URL
+import time
+import random
+import asyncio
+from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages, Cookies
-from ..requests.raise_for_status import raise_for_status
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt, get_last_user_message
from ..providers.response import FinishReason, JsonConversation
-from ..errors import ModelNotSupportedError, ResponseStatusError, RateLimitError, TimeoutError, ConversationLimitError
-try:
- from bs4 import BeautifulSoup
- has_bs4 = True
-except ImportError:
- has_bs4 = False
-
-
-class DuckDuckGoSearchException(Exception):
- """Base exception class for duckduckgo_search."""
-
-class DuckDuckGoChallengeError(ResponseStatusError):
- """Raised when DuckDuckGo presents a challenge that needs to be solved."""
class Conversation(JsonConversation):
- vqd: str = None
- vqd_hash_1: str = None
+ """Conversation class for DDG provider.
+
+ Note: DDG doesn't actually support conversation history through its API,
+ so we simulate it by including the history in the user message.
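+
+    A minimal illustrative use (the provider normally builds this itself):
+        conversation = Conversation("gpt-4o-mini")
+        conversation.message_history.append({"role": "user", "content": "Hello"})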
+ """
message_history: Messages = []
- cookies: dict = {}
- fe_version: str = None
-
+
def __init__(self, model: str):
self.model = model
+ self.message_history = []
+
class DDG(AsyncGeneratorProvider, ProviderModelMixin):
label = "DuckDuckGo AI Chat"
- url = "https://duckduckgo.com/aichat"
+ url = "https://duckduckgo.com"
api_endpoint = "https://duckduckgo.com/duckchat/v1/chat"
status_url = "https://duckduckgo.com/duckchat/v1/status"
-
working = True
+ needs_auth = False
supports_stream = True
supports_system_message = True
supports_message_history = True
default_model = "gpt-4o-mini"
-
- # Model mapping from user-friendly names to API model names
- _chat_models = {
+ model_aliases = {
"gpt-4": default_model,
- "gpt-4o-mini": default_model,
+ "gpt-4o": default_model,
"llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
"claude-3-haiku": "claude-3-haiku-20240307",
- "o3-mini": "o3-mini",
- "mixtral-small-24b": "mistralai/Mistral-Small-24B-Instruct-2501",
+ "mistral-small-24b": "mistralai/Mistral-Small-24B-Instruct-2501",
}
-
- # Available models (user-friendly names)
- models = list(_chat_models.keys())
-
- last_request_time = 0
- max_retries = 3
- base_delay = 2
-
- # Class variable to store the x-fe-version across instances
- _chat_xfe = ""
-
- @staticmethod
- def sha256_base64(text: str) -> str:
- """Return the base64 encoding of the SHA256 digest of the text."""
- sha256_hash = hashlib.sha256(text.encode("utf-8")).digest()
- return base64.b64encode(sha256_hash).decode()
+ models = [default_model, "o3-mini"] + list(model_aliases.keys())
@staticmethod
- def parse_dom_fingerprint(js_text: str) -> str:
- if not has_bs4:
- # Fallback if BeautifulSoup is not available
- return "1000"
+ def generate_fe_signals():
+ """Generate a fake x-fe-signals header value"""
+ current_time = int(time.time() * 1000)
- try:
- html_snippet = js_text.split("e.innerHTML = '")[1].split("';")[0]
- offset_value = js_text.split("return String(")[1].split(" ")[0]
- soup = BeautifulSoup(html_snippet, "html.parser")
- corrected_inner_html = soup.body.decode_contents()
- inner_html_length = len(corrected_inner_html)
- fingerprint = int(offset_value) + inner_html_length
- return str(fingerprint)
- except Exception:
- # Return a fallback value if parsing fails
- return "1000"
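+        # Fabricated onboarding timeline: the event names and deltas below are
+        # assumptions and do not appear to be validated server-side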
+ signals_data = {
+ "start": current_time - 35000,
+ "events": [
+ {"name": "onboarding_impression_1", "delta": 383},
+ {"name": "onboarding_impression_2", "delta": 6004},
+ {"name": "onboarding_finish", "delta": 9690},
+ {"name": "startNewChat", "delta": 10082},
+ {"name": "initSwitchModel", "delta": 16586}
+ ],
+ "end": 35163
+ }
+
+ signals_json = json.dumps(signals_data)
+ return base64.b64encode(signals_json.encode()).decode()
@staticmethod
- def parse_server_hashes(js_text: str) -> list:
- try:
- return js_text.split('server_hashes: ["', maxsplit=1)[1].split('"]', maxsplit=1)[0].split('","')
- except Exception:
- # Return a fallback value if parsing fails
- return ["1", "2"]
-
- @classmethod
- def build_x_vqd_hash_1(cls, vqd_hash_1: str, headers: dict) -> str:
- """Build the x-vqd-hash-1 header value."""
- try:
- # If we received a valid base64 string, try to decode it
- if vqd_hash_1 and len(vqd_hash_1) > 20:
- try:
- # Try to decode and parse as JSON first
- decoded_json = json.loads(base64.b64decode(vqd_hash_1).decode())
- # If it's already a complete structure with meta, return it as is
- if isinstance(decoded_json, dict) and "meta" in decoded_json:
- return vqd_hash_1
-
- # Otherwise, extract what we can from it
- if isinstance(decoded_json, dict) and "server_hashes" in decoded_json:
- server_hashes = decoded_json.get("server_hashes", ["1", "2"])
- else:
- # Fall back to parsing from string
- decoded = base64.b64decode(vqd_hash_1).decode()
- server_hashes = cls.parse_server_hashes(decoded)
- except (json.JSONDecodeError, UnicodeDecodeError):
- # If it's not valid JSON, try to parse it as a string
- decoded = base64.b64decode(vqd_hash_1).decode()
- server_hashes = cls.parse_server_hashes(decoded)
- else:
- # Default server hashes if we can't extract them
- server_hashes = ["1", "2"]
-
- # Generate fingerprints
- dom_fingerprint = "1000" # Default value
- ua_fingerprint = headers.get("User-Agent", "") + headers.get("sec-ch-ua", "")
- ua_hash = cls.sha256_base64(ua_fingerprint)
- dom_hash = cls.sha256_base64(dom_fingerprint)
-
- # Create a challenge ID (random hex string)
- challenge_id = ''.join(random.choice('0123456789abcdef') for _ in range(40)) + 'h8jbt'
-
- # Build the complete structure including meta
- final_result = {
- "server_hashes": server_hashes,
- "client_hashes": [ua_hash, dom_hash],
- "signals": {},
- "meta": {
- "v": "1",
- "challenge_id": challenge_id,
- "origin": "https://duckduckgo.com",
- "stack": "Error\nat ke (https://duckduckgo.com/dist/wpm.chat.js:1:29526)\nat async dispatchServiceInitialVQD (https://duckduckgo.com/dist/wpm.chat.js:1:45076)"
- }
- }
-
- base64_final_result = base64.b64encode(json.dumps(final_result).encode()).decode()
- return base64_final_result
- except Exception as e:
- # If anything fails, return an empty string
- return ""
-
- @classmethod
- def validate_model(cls, model: str) -> str:
- """Validates and returns the correct model name for the API"""
- if not model:
- return cls.default_model
-
- # Check aliases first
- if model in cls.model_aliases:
- model = cls.model_aliases[model]
-
- # Check if it's a valid model name
- if model not in cls.models:
- raise ModelNotSupportedError(f"Model {model} not supported. Available models: {cls.models}")
-
- return model
-
- @classmethod
- async def sleep(cls, multiplier=1.0):
- """Implements rate limiting between requests"""
- now = time.time()
- if cls.last_request_time > 0:
- delay = max(0.0, 1.5 - (now - cls.last_request_time)) * multiplier
- if delay > 0:
- await asyncio.sleep(delay)
- cls.last_request_time = time.time()
-
- @classmethod
- async def get_default_cookies(cls, session: ClientSession) -> dict:
- """Obtains default cookies needed for API requests"""
- try:
- await cls.sleep()
- # Make initial request to get cookies
- async with session.get(cls.url) as response:
- # Set the required cookies
- cookies = {}
- cookies_dict = {'dcs': '1', 'dcm': '3'}
-
- # Add any cookies from the response
- for cookie in response.cookies.values():
- cookies[cookie.key] = cookie.value
-
- # Ensure our required cookies are set
- for name, value in cookies_dict.items():
- cookies[name] = value
- url_obj = URL(cls.url)
- session.cookie_jar.update_cookies({name: value}, url_obj)
-
- # Make a second request to the status endpoint to get any additional cookies
- headers = {
- "accept": "text/event-stream",
- "accept-language": "en",
- "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
- "origin": "https://duckduckgo.com",
- "referer": "https://duckduckgo.com/",
- }
-
- await cls.sleep()
- async with session.get(cls.status_url, headers=headers) as status_response:
- # Add any cookies from the status response
- for cookie in status_response.cookies.values():
- cookies[cookie.key] = cookie.value
- url_obj = URL(cls.url)
- session.cookie_jar.update_cookies({cookie.key: cookie.value}, url_obj)
-
- return cookies
- except Exception as e:
- # Return at least the required cookies on error
- cookies = {'dcs': '1', 'dcm': '3'}
- url_obj = URL(cls.url)
- for name, value in cookies.items():
- session.cookie_jar.update_cookies({name: value}, url_obj)
- return cookies
-
- @classmethod
- async def fetch_fe_version(cls, session: ClientSession) -> str:
- """Fetches the fe-version from the initial page load."""
- if cls._chat_xfe:
- return cls._chat_xfe
-
- try:
- url = "https://duckduckgo.com/?q=DuckDuckGo+AI+Chat&ia=chat&duckai=1"
- await cls.sleep()
- async with session.get(url) as response:
- await raise_for_status(response)
- content = await response.text()
-
- # Extract x-fe-version components
- try:
- # Try to extract the version components
- xfe1 = content.split('__DDG_BE_VERSION__="', 1)[1].split('"', 1)[0]
- xfe2 = content.split('__DDG_FE_CHAT_HASH__="', 1)[1].split('"', 1)[0]
-
- # Format it like "serp_YYYYMMDD_HHMMSS_ET-hash"
- from datetime import datetime
- current_date = datetime.now().strftime("%Y%m%d_%H%M%S")
- cls._chat_xfe = f"serp_{current_date}_ET-{xfe2}"
-
- return cls._chat_xfe
- except Exception:
- # Fallback to a default format if extraction fails
- from datetime import datetime
- current_date = datetime.now().strftime("%Y%m%d_%H%M%S")
- cls._chat_xfe = f"serp_{current_date}_ET-78c2e87e3d286691cc21"
- return cls._chat_xfe
- except Exception:
- # Fallback to a default format if request fails
- from datetime import datetime
- current_date = datetime.now().strftime("%Y%m%d_%H%M%S")
- cls._chat_xfe = f"serp_{current_date}_ET-78c2e87e3d286691cc21"
- return cls._chat_xfe
-
- @classmethod
- async def fetch_vqd_and_hash(cls, session: ClientSession, retry_count: int = 0) -> tuple[str, str]:
- """Fetches the required VQD token and hash for the chat session with retries."""
- headers = {
- "accept": "text/event-stream",
- "accept-language": "en",
- "cache-control": "no-cache",
- "pragma": "no-cache",
- "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
- "origin": "https://duckduckgo.com",
- "referer": "https://duckduckgo.com/",
- "x-vqd-accept": "1",
- }
-
- # Make sure we have cookies first
- if len(session.cookie_jar) == 0:
- await cls.get_default_cookies(session)
-
- try:
- await cls.sleep(multiplier=1.0 + retry_count * 0.5)
- async with session.get(cls.status_url, headers=headers) as response:
- await raise_for_status(response)
-
- vqd = response.headers.get("x-vqd-4", "")
- vqd_hash_1 = response.headers.get("x-vqd-hash-1", "")
-
- if vqd:
- # Return the fetched vqd and vqd_hash_1
- return vqd, vqd_hash_1
-
- response_text = await response.text()
- raise RuntimeError(f"Failed to fetch VQD token and hash: {response.status} {response_text}")
-
- except Exception as e:
- if retry_count < cls.max_retries:
- wait_time = cls.base_delay * (2 ** retry_count) * (1 + random.random())
- await asyncio.sleep(wait_time)
- return await cls.fetch_vqd_and_hash(session, retry_count + 1)
- else:
- raise RuntimeError(f"Failed to fetch VQD token and hash after {cls.max_retries} attempts: {str(e)}")
+ def generate_fe_version():
+ """Generate a fake x-fe-version header value"""
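+        # Static fallback, presumably captured from a live session; create_async_generator
+        # replaces the hash part when it can parse __DDG_FE_CHAT_HASH__ from the page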
+ return "serp_20250510_052906_ET-ed4f51dc2e106020bc4b"
@classmethod
async def create_async_generator(
@@ -318,229 +79,221 @@ class DDG(AsyncGeneratorProvider, ProviderModelMixin):
model: str,
messages: Messages,
proxy: str = None,
- timeout: int = 60,
- cookies: Cookies = None,
conversation: Conversation = None,
return_conversation: bool = True,
+ retry_count: int = 0,
**kwargs
) -> AsyncResult:
- model = cls.validate_model(model)
- retry_count = 0
-
- while retry_count <= cls.max_retries:
+ model = cls.get_model(model)
+
+ # Initialize conversation if not provided
+ if conversation is None:
+ conversation = Conversation(model)
+ # Initialize message history from the provided messages
+ conversation.message_history = messages.copy()
+ else:
+ # Update message history with the last user message
+ last_message = None
+ for msg in reversed(messages):
+ if msg["role"] == "user":
+ last_message = msg
+ break
+
+ if last_message and last_message not in conversation.message_history:
+ conversation.message_history.append(last_message)
+
+ # Base headers for all requests
+ base_headers = {
+ "accept-language": "en-US,en;q=0.9",
+ "dnt": "1",
+ "origin": "https://duckduckgo.com",
+ "referer": "https://duckduckgo.com/",
+ "sec-ch-ua": '"Chromium";v="135", "Not-A.Brand";v="8"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36",
+ }
+
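+        # Same minimal cookie pair the previous implementation treated as required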
+ cookies = {'dcs': '1', 'dcm': '3'}
+
+ # Format the conversation history as a single prompt using format_prompt
+ if len(conversation.message_history) > 1:
+ # If we have conversation history, format it as a single prompt
+ formatted_prompt = format_prompt(conversation.message_history)
+ else:
+ # If we don't have conversation history, just use the last user message
+ formatted_prompt = get_last_user_message(messages)
+
+ # Prepare the request data
+ data = {
+ "model": model,
+ "messages": [{"role": "user", "content": formatted_prompt}],
+ "canUseTools": False
+ }
+
+ # Create a new session for each request
+ async with ClientSession(cookies=cookies) as session:
+ # Step 1: Visit the main page to get initial cookies
+ main_headers = base_headers.copy()
+ main_headers.update({
+ "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
+ "priority": "u=0, i",
+ "upgrade-insecure-requests": "1",
+ })
+
try:
- session_timeout = ClientTimeout(total=timeout)
- async with ClientSession(timeout=session_timeout, cookies=cookies) as session:
- # Step 1: Ensure we have the fe_version
- if not cls._chat_xfe:
- cls._chat_xfe = await cls.fetch_fe_version(session)
+ async with session.get(f"{cls.url}/?q=DuckDuckGo+AI+Chat&ia=chat&duckai=1",
+ headers=main_headers,
+ proxy=proxy) as main_response:
+ main_response.raise_for_status()
- # Step 2: Initialize or update conversation
- if conversation is None:
- # Get initial cookies if not provided
- if not cookies:
- await cls.get_default_cookies(session)
+ # Extract fe_version from the page
+ page_content = await main_response.text()
+ fe_version = cls.generate_fe_version()
+ try:
+                    xfe2 = page_content.split('__DDG_FE_CHAT_HASH__="', 1)[1].split('"', 1)[0]
+                    fe_version = f"serp_20250510_052906_ET-{xfe2}"
+ except Exception:
+ pass
+
+ # Step 2: Get the VQD token from the status endpoint
+ status_headers = base_headers.copy()
+ status_headers.update({
+ "accept": "*/*",
+ "cache-control": "no-store",
+ "priority": "u=1, i",
+ "x-vqd-accept": "1",
+ })
+
+ async with session.get(cls.status_url,
+ headers=status_headers,
+ proxy=proxy) as status_response:
+ status_response.raise_for_status()
+
+ # Get VQD token from headers
+ vqd = status_response.headers.get("x-vqd-4", "")
+
+ if not vqd:
+ # If we couldn't get a VQD token, try to generate one
+ vqd = f"4-{random.randint(10**29, 10**30 - 1)}"
+
+ # Step 3: Send the chat request
+ chat_headers = base_headers.copy()
+ chat_headers.update({
+ "accept": "text/event-stream",
+ "content-type": "application/json",
+ "priority": "u=1, i",
+ "x-fe-signals": cls.generate_fe_signals(),
+ "x-fe-version": fe_version,
+ "x-vqd-4": vqd,
+ })
+
+ async with session.post(cls.api_endpoint,
+ json=data,
+ headers=chat_headers,
+ proxy=proxy) as response:
+ if response.status != 200:
+ error_text = await response.text()
- # Create a new conversation
- conversation = Conversation(model)
- conversation.fe_version = cls._chat_xfe
+ # If we get an ERR_INVALID_VQD error and haven't retried too many times, try again
+ if "ERR_INVALID_VQD" in error_text and retry_count < 3:
+ # Wait a bit before retrying
+ await asyncio.sleep(1)
+ # Try again with a new session
+ async for chunk in cls.create_async_generator(
+ model=model,
+ messages=messages,
+ proxy=proxy,
+ conversation=conversation,
+ return_conversation=return_conversation,
+ retry_count=retry_count + 1,
+ **kwargs
+ ):
+ yield chunk
+ return
- # Step 3: Get VQD tokens
- vqd, vqd_hash_1 = await cls.fetch_vqd_and_hash(session)
- conversation.vqd = vqd
- conversation.vqd_hash_1 = vqd_hash_1
- conversation.message_history = [{"role": "user", "content": format_prompt(messages)}]
- else:
- # Update existing conversation with new message
- last_message = get_last_user_message(messages.copy())
- conversation.message_history.append({"role": "user", "content": last_message})
+ yield f"Error: HTTP {response.status} - {error_text}"
+ return
- # Step 4: Prepare headers with proper x-vqd-hash-1
- headers = {
- "accept": "text/event-stream",
- "accept-language": "en",
- "cache-control": "no-cache",
- "content-type": "application/json",
- "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
- "origin": "https://duckduckgo.com",
- "referer": "https://duckduckgo.com/",
- "pragma": "no-cache",
- "priority": "u=1, i",
- "sec-ch-ua": '"Not:A-Brand";v="24", "Chromium";v="134"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Linux"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "same-origin",
- "x-fe-version": conversation.fe_version or cls._chat_xfe,
- "x-vqd-4": conversation.vqd,
- }
+ full_message = ""
- # For the first request, send an empty x-vqd-hash-1 header
- # This matches the behavior in the duckduckgo_search module
- headers["x-vqd-hash-1"] = ""
-
- # Step 5: Prepare the request data
- # Convert the user-friendly model name to the API model name
- api_model = cls._chat_models.get(model, model)
-
- data = {
- "model": api_model,
- "messages": conversation.message_history,
- }
-
- # Step 6: Send the request
- await cls.sleep(multiplier=1.0 + retry_count * 0.5)
- async with session.post(cls.api_endpoint, json=data, headers=headers, proxy=proxy) as response:
- # Handle 429 and 418 errors specifically
- if response.status == 429:
- response_text = await response.text()
+ async for line in response.content:
+ line_text = line.decode("utf-8").strip()
+
+ if line_text.startswith("data:"):
+ data_content = line_text[5:].strip()
- if retry_count < cls.max_retries:
- retry_count += 1
- wait_time = cls.base_delay * (2 ** retry_count) * (1 + random.random())
- await asyncio.sleep(wait_time)
+ # Handle [DONE] marker
+ if data_content == "[DONE]":
+ # Add the assistant's response to the conversation history
+ if full_message:
+ conversation.message_history.append({
+ "role": "assistant",
+ "content": full_message
+ })
- # Get fresh tokens and cookies
- cookies = await cls.get_default_cookies(session)
- continue
- else:
- raise RateLimitError(f"Rate limited after {cls.max_retries} retries")
- elif response.status == 418:
- # Check if it's a challenge error
+ # Return the conversation if requested
+ if return_conversation:
+ yield conversation
+
+ yield FinishReason("stop")
+ break
+
try:
- response_text = await response.text()
- try:
- response_json = json.loads(response_text)
+ message_data = json.loads(data_content)
+
+ # Handle error responses
+ if message_data.get("action") == "error":
+ error_type = message_data.get("type", "Unknown error")
- # Extract challenge data if available
- challenge_data = None
- if response_json.get("type") == "ERR_CHALLENGE" and "cd" in response_json:
- challenge_data = response_json["cd"]
+ # If we get an ERR_INVALID_VQD error and haven't retried too many times, try again
+ if error_type == "ERR_INVALID_VQD" and retry_count < 3:
+ # Wait a bit before retrying
+ await asyncio.sleep(1)
+ # Try again with a new session
+ async for chunk in cls.create_async_generator(
+ model=model,
+ messages=messages,
+ proxy=proxy,
+ conversation=conversation,
+ return_conversation=return_conversation,
+ retry_count=retry_count + 1,
+ **kwargs
+ ):
+ yield chunk
+ return
- if retry_count < cls.max_retries:
- retry_count += 1
- wait_time = cls.base_delay * (2 ** retry_count) * (1 + random.random())
- await asyncio.sleep(wait_time)
-
- # Reset tokens and try again with fresh session
- conversation = None
- cls._chat_xfe = ""
-
- # Get fresh cookies
- cookies = await cls.get_default_cookies(session)
-
- # If we have challenge data, try to use it
- if challenge_data and isinstance(challenge_data, dict):
- # Extract any useful information from challenge data
- # This could be used to build a better response in the future
- pass
-
- continue
- else:
- raise DuckDuckGoChallengeError(f"Challenge error after {cls.max_retries} retries")
- except json.JSONDecodeError:
- # If we can't parse the JSON, assume it's a challenge error anyway
- if retry_count < cls.max_retries:
- retry_count += 1
- wait_time = cls.base_delay * (2 ** retry_count) * (1 + random.random())
- await asyncio.sleep(wait_time)
-
- # Reset tokens and try again with fresh session
- conversation = None
- cls._chat_xfe = ""
- cookies = await cls.get_default_cookies(session)
- continue
- else:
- raise DuckDuckGoChallengeError(f"Challenge error after {cls.max_retries} retries")
- except Exception as e:
- # If any other error occurs during handling, still try to recover
- if retry_count < cls.max_retries:
- retry_count += 1
- wait_time = cls.base_delay * (2 ** retry_count) * (1 + random.random())
- await asyncio.sleep(wait_time)
-
- # Reset tokens and try again with fresh session
- conversation = None
- cls._chat_xfe = ""
- cookies = await cls.get_default_cookies(session)
- continue
- else:
- raise DuckDuckGoChallengeError(f"Challenge error after {cls.max_retries} retries: {str(e)}")
-
- # For other status codes, use the standard error handler
- await raise_for_status(response)
- reason = None
- full_message = ""
-
- # Step 7: Process the streaming response
- async for line in response.content:
- line = line.decode("utf-8").strip()
-
- if line.startswith("data:"):
- try:
- message = json.loads(line[5:].strip())
- except json.JSONDecodeError:
- continue
-
- if "action" in message and message["action"] == "error":
- error_type = message.get("type", "")
- if message.get("status") == 429:
- if error_type == "ERR_CONVERSATION_LIMIT":
- raise ConversationLimitError(error_type)
- raise RateLimitError(error_type)
- elif message.get("status") == 418 and error_type == "ERR_CHALLENGE":
- # Handle challenge error by refreshing tokens and retrying
- if retry_count < cls.max_retries:
- # Don't raise here, let the outer exception handler retry
- raise DuckDuckGoChallengeError(f"Challenge detected: {error_type}")
- raise DuckDuckGoSearchException(error_type)
-
- if "message" in message:
- if message["message"]:
- yield message["message"]
- full_message += message["message"]
- reason = "length"
- else:
- reason = "stop"
-
- # Step 8: Update conversation with response information
- # Always update the VQD tokens from the response headers
- conversation.vqd = response.headers.get("x-vqd-4", conversation.vqd)
- conversation.vqd_hash_1 = response.headers.get("x-vqd-hash-1", conversation.vqd_hash_1)
-
- # Update cookies
- conversation.cookies = {
- n: c.value
- for n, c in session.cookie_jar.filter_cookies(URL(cls.url)).items()
- }
-
- # If requested, return the updated conversation
- if return_conversation:
- conversation.message_history.append({"role": "assistant", "content": full_message})
- yield conversation
-
- if reason is not None:
- yield FinishReason(reason)
-
- # If we got here, the request was successful
- break
-
- except (RateLimitError, ResponseStatusError, DuckDuckGoChallengeError) as e:
- if ("429" in str(e) or isinstance(e, DuckDuckGoChallengeError)) and retry_count < cls.max_retries:
- retry_count += 1
- wait_time = cls.base_delay * (2 ** retry_count) * (1 + random.random())
- await asyncio.sleep(wait_time)
-
- # For challenge errors, refresh tokens and cookies
- if isinstance(e, DuckDuckGoChallengeError):
- # Reset conversation to force new token acquisition
- conversation = None
- # Clear class cache to force refresh
- cls._chat_xfe = ""
- else:
- raise
- except asyncio.TimeoutError as e:
- raise TimeoutError(f"Request timed out: {str(e)}")
+ yield f"Error: {error_type}"
+ break
+
+ # Extract message content
+ if "message" in message_data:
+ message_content = message_data.get("message", "")
+ if message_content:
+ yield message_content
+ full_message += message_content
+
+ except json.JSONDecodeError:
+ continue
except Exception as e:
- raise
+ # If we get an exception and haven't retried too many times, try again
+ if retry_count < 3:
+ # Wait a bit before retrying
+ await asyncio.sleep(1)
+ # Try again with a new session
+ async for chunk in cls.create_async_generator(
+ model=model,
+ messages=messages,
+ proxy=proxy,
+ conversation=conversation,
+ return_conversation=return_conversation,
+ retry_count=retry_count + 1,
+ **kwargs
+ ):
+ yield chunk
+ else:
+ yield f"Error: {str(e)}"
diff --git a/g4f/Provider/DeepInfraChat.py b/g4f/Provider/DeepInfraChat.py
index 05f55c59..1f6caf69 100644
--- a/g4f/Provider/DeepInfraChat.py
+++ b/g4f/Provider/DeepInfraChat.py
@@ -8,12 +8,24 @@ class DeepInfraChat(OpenaiTemplate):
working = True
default_model = 'deepseek-ai/DeepSeek-V3'
- default_vision_model = 'openbmb/MiniCPM-Llama3-V-2_5'
+ default_vision_model = 'microsoft/Phi-4-multimodal-instruct'
vision_models = [default_vision_model, 'meta-llama/Llama-3.2-90B-Vision-Instruct']
models = [
+ 'deepseek-ai/DeepSeek-Prover-V2-671B',
+ 'Qwen/Qwen3-235B-A22B',
+ 'Qwen/Qwen3-30B-A3B',
+ 'Qwen/Qwen3-32B',
+ 'Qwen/Qwen3-14B',
+ 'meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8',
+ 'meta-llama/Llama-4-Scout-17B-16E-Instruct',
+ 'microsoft/phi-4-reasoning-plus',
+ 'meta-llama/Llama-Guard-4-12B',
+ 'Qwen/QwQ-32B',
+ 'deepseek-ai/DeepSeek-V3-0324',
+ 'google/gemma-3-27b-it',
+ 'google/gemma-3-12b-it',
'meta-llama/Meta-Llama-3.1-8B-Instruct',
'meta-llama/Llama-3.3-70B-Instruct-Turbo',
- 'meta-llama/Llama-3.3-70B-Instruct',
default_model,
'mistralai/Mistral-Small-24B-Instruct-2501',
'deepseek-ai/DeepSeek-R1',
@@ -23,37 +35,48 @@ class DeepInfraChat(OpenaiTemplate):
'microsoft/phi-4',
'microsoft/WizardLM-2-8x22B',
'Qwen/Qwen2.5-72B-Instruct',
- '01-ai/Yi-34B-Chat',
'Qwen/Qwen2-72B-Instruct',
'cognitivecomputations/dolphin-2.6-mixtral-8x7b',
'cognitivecomputations/dolphin-2.9.1-llama-3-70b',
- 'databricks/dbrx-instruct',
'deepinfra/airoboros-70b',
'lizpreciatior/lzlv_70b_fp16_hf',
'microsoft/WizardLM-2-7B',
'mistralai/Mixtral-8x22B-Instruct-v0.1',
] + vision_models
model_aliases = {
+ "deepseek-prover-v2-671b": "deepseek-ai/DeepSeek-Prover-V2-671B",
+ "qwen-3-235b": "Qwen/Qwen3-235B-A22B",
+ "qwen-3-30b": "Qwen/Qwen3-30B-A3B",
+ "qwen-3-32b": "Qwen/Qwen3-32B",
+ "qwen-3-14b": "Qwen/Qwen3-14B",
+ "llama-4-maverick": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+ "llama-4-maverick-17b": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+ "llama-4-scout": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+ "llama-4-scout-17b": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
+ "phi-4-reasoning-plus": "microsoft/phi-4-reasoning-plus",
+ #"": "meta-llama/Llama-Guard-4-12B",
+ "qwq-32b": "Qwen/QwQ-32B",
+ "deepseek-v3": "deepseek-ai/DeepSeek-V3-0324",
+ "deepseek-v3-0324": "deepseek-ai/DeepSeek-V3-0324",
+ "gemma-3-27b": "google/gemma-3-27b-it",
+ "gemma-3-12b": "google/gemma-3-12b-it",
+ "phi-4-multimodal": "microsoft/Phi-4-multimodal-instruct",
"llama-3.1-8b": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"llama-3.2-90b": "meta-llama/Llama-3.2-90B-Vision-Instruct",
- "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
"llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct",
"deepseek-v3": default_model,
"mixtral-small-24b": "mistralai/Mistral-Small-24B-Instruct-2501",
- "deepseek-r1": "deepseek-ai/DeepSeek-R1-Turbo",
+ "deepseek-r1-turbo": "deepseek-ai/DeepSeek-R1-Turbo",
"deepseek-r1": "deepseek-ai/DeepSeek-R1",
- "deepseek-r1-distill-llama": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
- "deepseek-r1-distill-qwen": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
+ "deepseek-r1-distill-llama-70b": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
+ "deepseek-r1-distill-qwen-32b": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
"phi-4": "microsoft/phi-4",
"wizardlm-2-8x22b": "microsoft/WizardLM-2-8x22B",
- "yi-34b": "01-ai/Yi-34B-Chat",
"qwen-2-72b": "Qwen/Qwen2-72B-Instruct",
"dolphin-2.6": "cognitivecomputations/dolphin-2.6-mixtral-8x7b",
"dolphin-2.9": "cognitivecomputations/dolphin-2.9.1-llama-3-70b",
- "dbrx-instruct": "databricks/dbrx-instruct",
"airoboros-70b": "deepinfra/airoboros-70b",
"lzlv-70b": "lizpreciatior/lzlv_70b_fp16_hf",
"wizardlm-2-7b": "microsoft/WizardLM-2-7B",
- "mixtral-8x22b": "mistralai/Mixtral-8x22B-Instruct-v0.1",
- "minicpm-2.5": "openbmb/MiniCPM-Llama3-V-2_5",
+ "mixtral-8x22b": "mistralai/Mixtral-8x22B-Instruct-v0.1"
}
diff --git a/g4f/Provider/DuckDuckGo.py b/g4f/Provider/DuckDuckGo.py
index e860bf10..9fe1be00 100644
--- a/g4f/Provider/DuckDuckGo.py
+++ b/g4f/Provider/DuckDuckGo.py
@@ -48,4 +48,4 @@ class DuckDuckGo(AbstractProvider, ProviderModelMixin):
if cls.duck_ai is None:
cls.duck_ai = DuckAI(proxy=proxy, timeout=timeout)
model = cls.get_model(model)
- yield cls.duck_ai.chat(get_last_user_message(messages), model, timeout)
\ No newline at end of file
+ yield cls.duck_ai.chat(get_last_user_message(messages), model, timeout)
diff --git a/g4f/Provider/Dynaspark.py b/g4f/Provider/Dynaspark.py
index b8228fbd..42ee4340 100644
--- a/g4f/Provider/Dynaspark.py
+++ b/g4f/Provider/Dynaspark.py
@@ -12,7 +12,7 @@ from .helper import format_prompt
class Dynaspark(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://dynaspark.onrender.com"
login_url = None
- api_endpoint = "https://dynaspark.onrender.com/generate_response"
+ api_endpoint = "https://dynaspark.onrender.com/dsai_fuck_u_spammer"
working = True
needs_auth = False
diff --git a/g4f/Provider/Goabror.py b/g4f/Provider/Goabror.py
deleted file mode 100644
index e8088d3b..00000000
--- a/g4f/Provider/Goabror.py
+++ /dev/null
@@ -1,47 +0,0 @@
-from __future__ import annotations
-
-import json
-from aiohttp import ClientSession
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..requests.raise_for_status import raise_for_status
-from .helper import format_prompt, get_system_prompt
-
-class Goabror(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://goabror.uz"
- api_endpoint = "https://goabror.uz/api/gpt.php"
- working = True
-
- default_model = 'gpt-4'
- models = [default_model]
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- headers = {
- 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
- 'accept-language': 'en-US,en;q=0.9',
- 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36'
- }
- async with ClientSession(headers=headers) as session:
- params = {
- "user": format_prompt(messages, include_system=False),
- "system": get_system_prompt(messages),
- }
- async with session.get(f"{cls.api_endpoint}", params=params, proxy=proxy) as response:
- await raise_for_status(response)
- text_response = await response.text()
- try:
- json_response = json.loads(text_response)
- if "data" in json_response:
- yield json_response["data"]
- else:
- yield text_response
- except json.JSONDecodeError:
- yield text_response
diff --git a/g4f/Provider/Jmuz.py b/g4f/Provider/Jmuz.py
deleted file mode 100644
index 5b4c6632..00000000
--- a/g4f/Provider/Jmuz.py
+++ /dev/null
@@ -1,77 +0,0 @@
-from __future__ import annotations
-
-from ..typing import AsyncResult, Messages
-from .template import OpenaiTemplate
-
-class Jmuz(OpenaiTemplate):
- url = "https://discord.gg/Ew6JzjA2NR"
- api_base = "https://jmuz.me/gpt/api/v2"
- api_key = "prod"
- working = True
- supports_system_message = False
-
- default_model = "gpt-4o"
- model_aliases = {
- "qwq-32b": "qwq-32b-preview",
- "gemini-1.5-flash": "gemini-flash",
- "gemini-1.5-pro": "gemini-pro",
- "gemini-2.0-flash-thinking": "gemini-thinking",
- "deepseek-chat": "deepseek-v3",
- }
-
- @classmethod
- def get_models(cls, **kwargs):
- if not cls.models:
- cls.models = super().get_models(api_key=cls.api_key, api_base=cls.api_base)
- return cls.models
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- stream: bool = True,
- api_key: str = None, # Remove api_key from kwargs
- **kwargs
- ) -> AsyncResult:
- model = cls.get_model(model)
- headers = {
- "Authorization": f"Bearer {cls.api_key}",
- "Content-Type": "application/json",
- "accept": "*/*",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
- }
-
- started = False
- buffer = ""
- async for chunk in super().create_async_generator(
- model=model,
- messages=messages,
- api_base=cls.api_base,
- api_key=cls.api_key,
- stream=cls.supports_stream,
- headers=headers,
- **kwargs
- ):
- if isinstance(chunk, str):
- buffer += chunk
- if "Join for free".startswith(buffer) or buffer.startswith("Join for free"):
- if buffer.endswith("\n"):
- buffer = ""
- continue
- if "https://discord.gg/".startswith(buffer) or "https://discord.gg/" in buffer:
- if "..." in buffer:
- buffer = ""
- continue
- if "o1-preview".startswith(buffer) or buffer.startswith("o1-preview"):
- if "\n" in buffer:
- buffer = ""
- continue
- if not started:
- buffer = buffer.lstrip()
- if buffer:
- started = True
- yield buffer
- buffer = ""
- else:
- yield chunk
diff --git a/g4f/Provider/LMArenaProvider.py b/g4f/Provider/LMArenaProvider.py
index b2de322f..36131a76 100644
--- a/g4f/Provider/LMArenaProvider.py
+++ b/g4f/Provider/LMArenaProvider.py
@@ -368,4 +368,4 @@ class LMArenaProvider(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin)
if return_conversation:
yield conversation
if count == max_tokens:
- yield FinishReason("length")
\ No newline at end of file
+ yield FinishReason("length")
diff --git a/g4f/Provider/LambdaChat.py b/g4f/Provider/LambdaChat.py
index 156f2984..733a5e73 100644
--- a/g4f/Provider/LambdaChat.py
+++ b/g4f/Provider/LambdaChat.py
@@ -1,28 +1,190 @@
from __future__ import annotations
-from .hf.HuggingChat import HuggingChat
+import json
+import re
+import uuid
+from aiohttp import ClientSession, FormData
-class LambdaChat(HuggingChat):
+from ..typing import AsyncResult, Messages
+from ..requests import raise_for_status
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt, get_last_user_message
+from ..providers.response import JsonConversation, TitleGeneration, Reasoning, FinishReason
+
+class LambdaChat(AsyncGeneratorProvider, ProviderModelMixin):
label = "Lambda Chat"
- domain = "lambda.chat"
- url = f"https://{domain}"
+ url = "https://lambda.chat"
+ conversation_url = f"{url}/conversation"
+
working = True
- use_nodriver = False
- needs_auth = False
default_model = "deepseek-llama3.3-70b"
reasoning_model = "deepseek-r1"
- image_models = []
- fallback_models = [
+ models = [
default_model,
reasoning_model,
"hermes-3-llama-3.1-405b-fp8",
+ "hermes3-405b-fp8-128k",
"llama3.1-nemotron-70b-instruct",
"lfm-40b",
- "llama3.3-70b-instruct-fp8"
+ "llama3.3-70b-instruct-fp8",
+ "qwen25-coder-32b-instruct"
]
model_aliases = {
+ "deepseek-v3": default_model,
"hermes-3": "hermes-3-llama-3.1-405b-fp8",
+ "hermes-3-405b": "hermes3-405b-fp8-128k",
"nemotron-70b": "llama3.1-nemotron-70b-instruct",
- "llama-3.3-70b": "llama3.3-70b-instruct-fp8"
- }
\ No newline at end of file
+ "qwen-2.5-coder-32b": "qwen25-coder-32b-instruct"
+ }
+
+ @classmethod
+ async def create_async_generator(
+ cls, model: str, messages: Messages,
+ api_key: str = None,
+ proxy: str = None,
+ cookies: dict = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+ headers = {
+ "Origin": cls.url,
+ "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36",
+ "Accept": "*/*",
+ "Accept-Language": "en-US,en;q=0.9",
+ "Referer": cls.url,
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "Priority": "u=1, i",
+ "Pragma": "no-cache",
+ "Cache-Control": "no-cache"
+ }
+
+ # Initialize cookies if not provided
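+ # lambda.chat appears to be a HuggingFace ChatUI deployment; the "hf-chat" cookie identifies the anonymous session.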
+ if cookies is None:
+ cookies = {
+ "hf-chat": str(uuid.uuid4()) # Generate a session ID
+ }
+
+ async with ClientSession(headers=headers, cookies=cookies) as session:
+ # Step 1: Create a new conversation
+ data = {"model": model}
+ async with session.post(cls.conversation_url, json=data, proxy=proxy) as response:
+ await raise_for_status(response)
+ conversation_response = await response.json()
+ conversation_id = conversation_response["conversationId"]
+
+ # Update cookies with any new ones from the response
+ for cookie_name, cookie in response.cookies.items():
+ cookies[cookie_name] = cookie.value
+
+ # Step 2: Get data for this conversation to extract message ID
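+ # __data.json is SvelteKit's serialized page-data route; the conversation's system message (whose ID is needed to post a reply) is embedded in that payload.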
+ async with session.get(
+ f"{cls.conversation_url}/{conversation_id}/__data.json?x-sveltekit-invalidated=11",
+ proxy=proxy
+ ) as response:
+ await raise_for_status(response)
+ response_text = await response.text()
+
+ # Update cookies again
+ for cookie_name, cookie in response.cookies.items():
+ cookies[cookie_name] = cookie.value
+
+ # Parse the JSON response to find the message ID
+ message_id = None
+ try:
+ # Try to parse each line as JSON
+ for line in response_text.splitlines():
+ if not line.strip():
+ continue
+
+ try:
+ data_json = json.loads(line)
+ if "type" in data_json and data_json["type"] == "data" and "nodes" in data_json:
+ for node in data_json["nodes"]:
+ if "type" in node and node["type"] == "data" and "data" in node:
+ # Look for system message ID
+ for item in node["data"]:
+ if isinstance(item, dict) and "id" in item and "from" in item and item.get("from") == "system":
+ message_id = item["id"]
+ break
+
+ # If we found the ID, break out of the loop
+ if message_id:
+ break
+ except json.JSONDecodeError:
+ continue
+
+ # If we still don't have a message ID, try to find any UUID in the response
+ if not message_id:
+ uuid_pattern = r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
+ uuids = re.findall(uuid_pattern, response_text)
+ if uuids:
+ message_id = uuids[0]
+
+ if not message_id:
+ raise ValueError("Could not find message ID in response")
+
+ except (IndexError, KeyError, ValueError) as e:
+ raise RuntimeError(f"Failed to parse conversation data: {str(e)}")
+
+ # Step 3: Send the user message
+ user_message = get_last_user_message(messages)
+
+ # Prepare form data exactly as in the curl example
+ form_data = FormData()
+ form_data.add_field(
+ "data",
+ json.dumps({
+ "inputs": user_message,
+ "id": message_id,
+ "is_retry": False,
+ "is_continue": False,
+ "web_search": False,
+ "tools": []
+ }),
+ content_type="application/json"
+ )
+
+ async with session.post(
+ f"{cls.conversation_url}/{conversation_id}",
+ data=form_data,
+ proxy=proxy
+ ) as response:
+ await raise_for_status(response)
+
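+ # Each streamed chunk is expected to hold one complete JSON object; partial or non-JSON chunks are skipped below.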
+ async for chunk in response.content:
+ if not chunk:
+ continue
+
+ chunk_str = chunk.decode('utf-8', errors='ignore')
+
+ try:
+ data = json.loads(chunk_str)
+ except json.JSONDecodeError:
+ continue
+
+ # Handling different types of responses
+ if data.get("type") == "stream" and "token" in data:
+ # Remove null characters from the token
+ token = data["token"].replace("\u0000", "")
+ if token:
+ yield token
+ elif data.get("type") == "title":
+ yield TitleGeneration(data.get("title", ""))
+ elif data.get("type") == "reasoning":
+ subtype = data.get("subtype")
+ token = data.get("token", "").replace("\u0000", "")
+ status = data.get("status", "")
+
+ if subtype == "stream" and token:
+ yield Reasoning(token=token)
+ elif subtype == "status" and status:
+ yield Reasoning(status=status)
+ elif data.get("type") == "finalAnswer":
+ yield FinishReason("stop")
+ break
+ elif data.get("type") == "status" and data.get("status") == "keepAlive":
+ # Just a keepalive, ignore
+ continue
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index 3651b290..60c05502 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -8,189 +8,271 @@ from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_connector
from ..requests import raise_for_status
+from ..errors import RateLimitError
models = {
"claude-3-5-sonnet-20241022": {
"id": "claude-3-5-sonnet-20241022",
- "name": "Claude-3.5-Sonnet-V2",
- "model": "Claude",
+ "name": "claude-3-5-sonnet-20241022",
+ "model": "claude-3-5-sonnet-20241022",
"provider": "Anthropic",
- "maxLength": 800000,
- "tokenLimit": 200000,
- "context": "200K",
+ "maxLength": 0,
+ "tokenLimit": 0,
+ "context": 0,
+ "success_rate": 100,
+ "tps": 25.366666666666667,
},
"claude-3-5-sonnet-20241022-t": {
"id": "claude-3-5-sonnet-20241022-t",
- "name": "Claude-3.5-Sonnet-V2-T",
- "model": "Claude",
+ "name": "claude-3-5-sonnet-20241022-t",
+ "model": "claude-3-5-sonnet-20241022-t",
"provider": "Anthropic",
- "maxLength": 800000,
- "tokenLimit": 200000,
- "context": "200K",
+ "maxLength": 0,
+ "tokenLimit": 0,
+ "context": 0,
+ "success_rate": 100,
+ "tps": 39.820754716981135,
},
"claude-3-7-sonnet-20250219": {
"id": "claude-3-7-sonnet-20250219",
- "name": "Claude-3.7-Sonnet",
- "model": "Claude",
+ "name": "claude-3-7-sonnet-20250219",
+ "model": "claude-3-7-sonnet-20250219",
"provider": "Anthropic",
- "maxLength": 800000,
- "tokenLimit": 200000,
- "context": "200K",
+ "maxLength": 0,
+ "tokenLimit": 0,
+ "context": 0,
+ "success_rate": 100,
+ "tps": 47.02970297029703,
},
"claude-3-7-sonnet-20250219-t": {
"id": "claude-3-7-sonnet-20250219-t",
- "name": "Claude-3.7-Sonnet-T",
- "model": "Claude",
+ "name": "claude-3-7-sonnet-20250219-t",
+ "model": "claude-3-7-sonnet-20250219-t",
"provider": "Anthropic",
- "maxLength": 800000,
- "tokenLimit": 200000,
- "context": "200K",
- },
- "claude-3-7-sonnet-20250219-thinking": {
- "id": "claude-3-7-sonnet-20250219-thinking",
- "name": "Claude-3.7-Sonnet-Thinking",
- "model": "Claude",
- "provider": "Anthropic",
- "maxLength": 800000,
- "tokenLimit": 200000,
- "context": "200K",
- },
- "claude-3-opus-20240229": {
- "id": "claude-3-opus-20240229",
- "name": "Claude-3-Opus",
- "model": "Claude",
- "provider": "Anthropic",
- "maxLength": 800000,
- "tokenLimit": 200000,
- "context": "200K",
- },
- "claude-3-sonnet-20240229": {
- "id": "claude-3-sonnet-20240229",
- "name": "Claude-3-Sonnet",
- "model": "Claude",
- "provider": "Anthropic",
- "maxLength": 800000,
- "tokenLimit": 200000,
- "context": "200K",
- },
- "deepseek-r1": {
- "id": "deepseek-r1",
- "name": "DeepSeek-R1",
- "model": "DeepSeek-R1",
- "provider": "DeepSeek",
- "maxLength": 400000,
- "tokenLimit": 100000,
- "context": "128K",
- },
- "deepseek-r1-distill-llama-70b": {
- "id": "deepseek-r1-distill-llama-70b",
- "name": "DeepSeek-R1-70B",
- "model": "DeepSeek-R1-70B",
- "provider": "DeepSeek",
- "maxLength": 400000,
- "tokenLimit": 100000,
- "context": "128K",
+ "maxLength": 0,
+ "tokenLimit": 0,
+ "context": 0,
+ "success_rate": 100,
+ "tps": 39.04289693593315,
},
"deepseek-v3": {
"id": "deepseek-v3",
- "name": "DeepSeek-V3",
- "model": "DeepSeek-V3",
+ "name": "deepseek-v3",
+ "model": "deepseek-v3",
"provider": "DeepSeek",
- "maxLength": 400000,
- "tokenLimit": 100000,
- "context": "128K",
+ "maxLength": 0,
+ "tokenLimit": 0,
+ "context": 0,
+ "success_rate": 100,
+ "tps": 40.484657419083646,
+ },
+ "gemini-1.0-pro-latest-123": {
+ "id": "gemini-1.0-pro-latest-123",
+ "name": "gemini-1.0-pro-latest-123",
+ "model": "gemini-1.0-pro-latest-123",
+ "provider": "Google",
+ "maxLength": 0,
+ "tokenLimit": 0,
+ "context": 0,
+ "success_rate": 100,
+ "tps": 10,
},
"gemini-2.0-flash": {
"id": "gemini-2.0-flash",
- "name": "Gemini-2.0-Flash",
- "model": "Gemini",
+ "name": "gemini-2.0-flash",
+ "model": "gemini-2.0-flash",
"provider": "Google",
- "maxLength": 4000000,
- "tokenLimit": 1000000,
- "context": "1024K",
+ "maxLength": 0,
+ "tokenLimit": 0,
+ "context": 0,
+ "success_rate": 100,
+ "tps": 216.44162436548223,
+ },
+ "gemini-2.0-flash-exp": {
+ "id": "gemini-2.0-flash-exp",
+ "name": "gemini-2.0-flash-exp",
+ "model": "gemini-2.0-flash-exp",
+ "provider": "Google",
+ "maxLength": 0,
+ "tokenLimit": 0,
+ "context": 0,
+ "success_rate": 0,
+ "tps": 0,
},
"gemini-2.0-flash-thinking-exp": {
"id": "gemini-2.0-flash-thinking-exp",
- "name": "Gemini-2.0-Flash-Thinking-Exp",
- "model": "Gemini",
+ "name": "gemini-2.0-flash-thinking-exp",
+ "model": "gemini-2.0-flash-thinking-exp",
"provider": "Google",
- "maxLength": 4000000,
- "tokenLimit": 1000000,
- "context": "1024K",
+ "maxLength": 0,
+ "tokenLimit": 0,
+ "context": 0,
+ "success_rate": 0,
+ "tps": 0,
},
- "gemini-2.0-pro-exp": {
- "id": "gemini-2.0-pro-exp",
- "name": "Gemini-2.0-Pro-Exp",
- "model": "Gemini",
+ "gemini-2.5-flash-preview-04-17": {
+ "id": "gemini-2.5-flash-preview-04-17",
+ "name": "gemini-2.5-flash-preview-04-17",
+ "model": "gemini-2.5-flash-preview-04-17",
"provider": "Google",
- "maxLength": 4000000,
- "tokenLimit": 1000000,
- "context": "1024K",
+ "maxLength": 0,
+ "tokenLimit": 0,
+ "context": 0,
+ "success_rate": 100,
+ "tps": 189.84010840108402,
},
- "gpt-4o-2024-08-06": {
- "id": "gpt-4o-2024-08-06",
- "name": "GPT-4o",
- "model": "ChatGPT",
+ "gemini-2.5-pro-official": {
+ "id": "gemini-2.5-pro-official",
+ "name": "gemini-2.5-pro-official",
+ "model": "gemini-2.5-pro-official",
+ "provider": "Google",
+ "maxLength": 0,
+ "tokenLimit": 0,
+ "context": 0,
+ "success_rate": 100,
+ "tps": 91.00613496932516,
+ },
+ "gemini-2.5-pro-preview-03-25": {
+ "id": "gemini-2.5-pro-preview-03-25",
+ "name": "gemini-2.5-pro-preview-03-25",
+ "model": "gemini-2.5-pro-preview-03-25",
+ "provider": "Google",
+ "maxLength": 0,
+ "tokenLimit": 0,
+ "context": 0,
+ "success_rate": 99.05660377358491,
+ "tps": 45.050511247443765,
+ },
+ "gemini-2.5-pro-preview-05-06": {
+ "id": "gemini-2.5-pro-preview-05-06",
+ "name": "gemini-2.5-pro-preview-05-06",
+ "model": "gemini-2.5-pro-preview-05-06",
+ "provider": "Google",
+ "maxLength": 0,
+ "tokenLimit": 0,
+ "context": 0,
+ "success_rate": 100,
+ "tps": 99.29617834394904,
+ },
+ "gpt-4-turbo-2024-04-09": {
+ "id": "gpt-4-turbo-2024-04-09",
+ "name": "gpt-4-turbo-2024-04-09",
+ "model": "gpt-4-turbo-2024-04-09",
"provider": "OpenAI",
- "maxLength": 260000,
- "tokenLimit": 126000,
- "context": "128K",
+ "maxLength": 0,
+ "tokenLimit": 0,
+ "context": 0,
+ "success_rate": 100,
+ "tps": 1,
+ },
+ "gpt-4.1": {
+ "id": "gpt-4.1",
+ "name": "gpt-4.1",
+ "model": "gpt-4.1",
+ "provider": "OpenAI",
+ "maxLength": 0,
+ "tokenLimit": 0,
+ "context": 0,
+ "success_rate": 42.857142857142854,
+ "tps": 19.58032786885246,
+ },
+ "gpt-4.1-mini": {
+ "id": "gpt-4.1-mini",
+ "name": "gpt-4.1-mini",
+ "model": "gpt-4.1-mini",
+ "provider": "OpenAI",
+ "maxLength": 0,
+ "tokenLimit": 0,
+ "context": 0,
+ "success_rate": 68.75,
+ "tps": 12.677576601671309,
+ },
+ "gpt-4.1-mini-2025-04-14": {
+ "id": "gpt-4.1-mini-2025-04-14",
+ "name": "gpt-4.1-mini-2025-04-14",
+ "model": "gpt-4.1-mini-2025-04-14",
+ "provider": "OpenAI",
+ "maxLength": 0,
+ "tokenLimit": 0,
+ "context": 0,
+ "success_rate": 94.23076923076923,
+ "tps": 8.297687861271676,
+ },
+ "gpt-4o-2024-11-20": {
+ "id": "gpt-4o-2024-11-20",
+ "name": "gpt-4o-2024-11-20",
+ "model": "gpt-4o-2024-11-20",
+ "provider": "OpenAI",
+ "maxLength": 0,
+ "tokenLimit": 0,
+ "context": 0,
+ "success_rate": 100,
+ "tps": 73.3955223880597,
},
"gpt-4o-mini-2024-07-18": {
"id": "gpt-4o-mini-2024-07-18",
- "name": "GPT-4o-Mini",
- "model": "ChatGPT",
+ "name": "gpt-4o-mini-2024-07-18",
+ "model": "gpt-4o-mini-2024-07-18",
"provider": "OpenAI",
- "maxLength": 260000,
- "tokenLimit": 126000,
- "context": "128K",
- },
- "gpt-4o-mini-free": {
- "id": "gpt-4o-mini-free",
- "name": "GPT-4o-Mini-Free",
- "model": "ChatGPT",
- "provider": "OpenAI",
- "maxLength": 31200,
- "tokenLimit": 7800,
- "context": "8K",
+ "maxLength": 0,
+ "tokenLimit": 0,
+ "context": 0,
+ "success_rate": 100,
+ "tps": 26.874455100261553,
},
"grok-3": {
"id": "grok-3",
- "name": "Grok-3",
- "model": "Grok",
- "provider": "x.ai",
- "maxLength": 800000,
- "tokenLimit": 200000,
- "context": "200K",
+ "name": "grok-3",
+ "model": "grok-3",
+ "provider": "xAI",
+ "maxLength": 0,
+ "tokenLimit": 0,
+ "context": 0,
+ "success_rate": 100,
+ "tps": 51.110652663165794,
},
- "grok-3-r1": {
- "id": "grok-3-r1",
- "name": "Grok-3-Thinking",
- "model": "Grok",
- "provider": "x.ai",
- "maxLength": 800000,
- "tokenLimit": 200000,
- "context": "200K",
+ "grok-3-reason": {
+ "id": "grok-3-reason",
+ "name": "grok-3-reason",
+ "model": "grok-3-reason",
+ "provider": "xAI",
+ "maxLength": 0,
+ "tokenLimit": 0,
+ "context": 0,
+ "success_rate": 100,
+ "tps": 62.81976744186046,
},
- "o3-mini": {
- "id": "o3-mini",
- "name": "o3-mini",
- "model": "o3",
- "provider": "OpenAI",
- "maxLength": 400000,
- "tokenLimit": 100000,
- "context": "128K",
+ "o3-mini-2025-01-31": {
+ "id": "o3-mini-2025-01-31",
+ "name": "o3-mini-2025-01-31",
+ "model": "o3-mini-2025-01-31",
+ "provider": "Unknown",
+ "maxLength": 0,
+ "tokenLimit": 0,
+ "context": 0,
+ "success_rate": 100,
+ "tps": 125.31410256410257,
+ },
+ "qwen3-235b-a22b": {
+ "id": "qwen3-235b-a22b",
+ "name": "qwen3-235b-a22b",
+ "model": "qwen3-235b-a22b",
+ "provider": "Alibaba",
+ "maxLength": 0,
+ "tokenLimit": 0,
+ "context": 0,
+ "success_rate": 100,
+ "tps": 25.846153846153847,
},
}
-
class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://liaobots.site"
+ url = "https://liaobots.work"
working = True
supports_message_history = True
supports_system_message = True
- default_model = "gpt-4o-2024-08-06"
+ default_model = "grok-3"
models = list(models.keys())
model_aliases = {
# Anthropic
@@ -198,25 +280,33 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
"claude-3.5-sonnet": "claude-3-5-sonnet-20241022-t",
"claude-3.7-sonnet": "claude-3-7-sonnet-20250219",
"claude-3.7-sonnet": "claude-3-7-sonnet-20250219-t",
- "claude-3.7-sonnet-thinking": "claude-3-7-sonnet-20250219-thinking",
- "claude-3-opus": "claude-3-opus-20240229",
- "claude-3-sonnet": "claude-3-sonnet-20240229",
# DeepSeek
- "deepseek-r1": "deepseek-r1-distill-llama-70b",
+ #"deepseek-v3": "deepseek-v3",
# Google
+ "gemini-1.0-pro": "gemini-1.0-pro-latest-123",
+ "gemini-2.0-flash": "gemini-2.0-flash-exp",
"gemini-2.0-flash-thinking": "gemini-2.0-flash-thinking-exp",
- "gemini-2.0-pro": "gemini-2.0-pro-exp",
+ "gemini-2.5-flash": "gemini-2.5-flash-preview-04-17",
+ "gemini-2.5-pro": "gemini-2.5-pro-official",
+ "gemini-2.5-pro": "gemini-2.5-pro-preview-03-25",
+ "gemini-2.5-pro": "gemini-2.5-pro-preview-05-06",
# OpenAI
- "gpt-4": default_model,
- "gpt-4o": default_model,
+ "gpt-4-turbo": "gpt-4-turbo-2024-04-09",
+ "gpt-4.1-mini": "gpt-4.1-mini-2025-04-14",
+ "gpt-4": "gpt-4o-2024-11-20",
+ "gpt-4o": "gpt-4o-2024-11-20",
"gpt-4o-mini": "gpt-4o-mini-2024-07-18",
- "gpt-4o-mini": "gpt-4o-mini-free",
+
+ "o3-mini": "o3-mini-2025-01-31",
+
+ # xAI
+ "grok-3-reason": "grok-3-reason",
+
+ # Alibaba
+ "qwen-3-235b": "qwen3-235b-a22b",
}
- _auth_code = ""
+ _auth_code = None
_cookie_jar = None
@classmethod
@@ -238,92 +328,213 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
model = cls.get_model(model)
headers = {
- "referer": "https://liaobots.work/",
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "dnt": "1",
"origin": "https://liaobots.work",
- "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
+ "priority": "u=1, i",
+ "referer": "https://liaobots.work/en",
+ "sec-ch-ua": "\"Chromium\";v=\"135\", \"Not-A.Brand\";v=\"8\"",
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": "\"Linux\"",
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36"
}
+
async with ClientSession(
headers=headers,
cookie_jar=cls._cookie_jar,
connector=get_connector(connector, proxy, True)
) as session:
+ # First, get a valid auth code
+ await cls.get_auth_code(session)
+
+ # Create conversation ID
+ conversation_id = str(uuid.uuid4())
+
+ # Prepare request data
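+ # The chat API expects a "models" array of {modelId, provider} objects rather than a single "model" field.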
data = {
- "conversationId": str(uuid.uuid4()),
- "model": models[model],
+ "conversationId": conversation_id,
+ "models": [{
+ "modelId": model,
+ "provider": models[model]["provider"]
+ }],
+ "search": "false",
"messages": messages,
"key": "",
- "prompt": kwargs.get("system_message", "You are a helpful assistant."),
+ "prompt": kwargs.get("system_message", "你是 {{model}},一个由 {{provider}} 训练的大型语言模型,请仔细遵循用户的指示。")
}
- if not cls._auth_code:
- async with session.post(
- "https://liaobots.work/recaptcha/api/login",
- data={"token": "abcdefghijklmnopqrst"},
- verify_ssl=False
- ) as response:
- await raise_for_status(response)
+
+ # Try to make the chat request
try:
+ # Make the chat request with the current auth code
async with session.post(
- "https://liaobots.work/api/user",
- json={"authcode": cls._auth_code},
- verify_ssl=False
- ) as response:
- await raise_for_status(response)
- cls._auth_code = (await response.json(content_type=None))["authCode"]
- if not cls._auth_code:
- raise RuntimeError("Empty auth code")
- cls._cookie_jar = session.cookie_jar
- async with session.post(
- "https://liaobots.work/api/chat",
+ f"{cls.url}/api/chat",
json=data,
headers={"x-auth-code": cls._auth_code},
verify_ssl=False
) as response:
- await raise_for_status(response)
- async for line in response.content:
- if line.startswith(b"data: "):
- yield json.loads(line[6:]).get("content")
- except:
- async with session.post(
- "https://liaobots.work/api/user",
- json={"authcode": "jGDRFOqHcZKAo"},
- verify_ssl=False
- ) as response:
- await raise_for_status(response)
- cls._auth_code = (await response.json(content_type=None))["authCode"]
- if not cls._auth_code:
- raise RuntimeError("Empty auth code")
- cls._cookie_jar = session.cookie_jar
- async with session.post(
- "https://liaobots.work/api/chat",
- json=data,
- headers={"x-auth-code": cls._auth_code},
- verify_ssl=False
- ) as response:
- await raise_for_status(response)
- async for line in response.content:
- if line.startswith(b"data: "):
- yield json.loads(line[6:]).get("content")
+ # Check if we got a streaming response
+ content_type = response.headers.get("Content-Type", "")
+ if "text/event-stream" in content_type:
+ async for line in response.content:
+ if line.startswith(b"data: "):
+ try:
+ response_data = json.loads(line[6:])
+
+ # Check for error response
+ if response_data.get("error") is True:
+ # Raise RateLimitError for payment required or other errors
+ if "402" in str(response_data.get("res_status", "")):
+ raise RateLimitError("This model requires payment or credits")
+ else:
+ error_msg = response_data.get('message', 'Unknown error')
+ raise RateLimitError(f"Error: {error_msg}")
+
+ # Process normal response
+ if response_data.get("role") == "assistant" and "content" in response_data:
+ content = response_data.get("content")
+ yield content
+ except json.JSONDecodeError:
+ continue
+ else:
+ # Not a streaming response, might be an error or HTML
+ response_text = await response.text()
+
+ # If we got HTML, we need to bypass CAPTCHA
+ if response_text.lstrip().startswith("<"):
+ await cls.bypass_captcha(session)
+
+ # Get a fresh auth code
+ await cls.get_auth_code(session)
+
+ # Try the request again
+ async with session.post(
+ f"{cls.url}/api/chat",
+ json=data,
+ headers={"x-auth-code": cls._auth_code},
+ verify_ssl=False
+ ) as response2:
+ # Check if we got a streaming response
+ content_type = response2.headers.get("Content-Type", "")
+ if "text/event-stream" in content_type:
+ async for line in response2.content:
+ if line.startswith(b"data: "):
+ try:
+ response_data = json.loads(line[6:])
+
+ # Check for error response
+ if response_data.get("error") is True:
+ # Raise RateLimitError for payment required or other errors
+ if "402" in str(response_data.get("res_status", "")):
+ raise RateLimitError("This model requires payment or credits")
+ else:
+ error_msg = response_data.get('message', 'Unknown error')
+ raise RateLimitError(f"Error: {error_msg}")
+
+ # Process normal response
+ if response_data.get("role") == "assistant" and "content" in response_data:
+ content = response_data.get("content")
+ yield content
+ except json.JSONDecodeError:
+ continue
+ else:
+ raise RateLimitError("Failed to get streaming response")
+ else:
+ raise RateLimitError("Failed to connect to the service")
+ except Exception as e:
+ # If it's already a RateLimitError, re-raise it
+ if isinstance(e, RateLimitError):
+ raise
+ # Otherwise, wrap it in a RateLimitError
+ raise RateLimitError(f"Error processing request: {str(e)}")
@classmethod
- async def initialize_auth_code(cls, session: ClientSession) -> None:
+ async def bypass_captcha(cls, session: ClientSession) -> None:
"""
- Initialize the auth code by making the necessary login requests.
+ Bypass the CAPTCHA verification by directly making the recaptcha API request.
"""
- async with session.post(
- "https://liaobots.work/api/user",
- json={"authcode": "pTIQr4FTnVRfr"},
- verify_ssl=False
- ) as response:
- await raise_for_status(response)
- cls._auth_code = (await response.json(content_type=None))["authCode"]
- if not cls._auth_code:
- raise RuntimeError("Empty auth code")
- cls._cookie_jar = session.cookie_jar
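+ # Assumption: the backend accepts an arbitrary placeholder token and responds with cookies that mark the session as verified.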
+ try:
+ # First, try the direct recaptcha API request
+ async with session.post(
+ f"{cls.url}/recaptcha/api/login",
+ json={"token": "abcdefghijklmnopqrst"},
+ verify_ssl=False
+ ) as response:
+ if response.status == 200:
+ try:
+ response_text = await response.text()
+
+ # Try to parse as JSON
+ try:
+ response_data = json.loads(response_text)
+
+ # Check if we got a successful response
+ if response_data.get("code") == 200:
+ cls._cookie_jar = session.cookie_jar
+ except json.JSONDecodeError:
+ pass
+ except Exception:
+ pass
+ except Exception:
+ pass
@classmethod
- async def ensure_auth_code(cls, session: ClientSession) -> None:
+ async def get_auth_code(cls, session: ClientSession) -> None:
"""
- Ensure the auth code is initialized, and if not, perform the initialization.
+ Get a valid auth code by sending a request with an empty authcode.
"""
- if not cls._auth_code:
- await cls.initialize_auth_code(session)
+ try:
+ # Send request with empty authcode to get a new one
+ auth_request_data = {
+ "authcode": "",
+ "recommendUrl": "https://liaobots.work/zh"
+ }
+
+ async with session.post(
+ f"{cls.url}/api/user",
+ json=auth_request_data,
+ verify_ssl=False
+ ) as response:
+ if response.status == 200:
+ response_text = await response.text()
+
+ try:
+ response_data = json.loads(response_text)
+
+ if "authCode" in response_data:
+ cls._auth_code = response_data["authCode"]
+ cls._cookie_jar = session.cookie_jar
+ return
+ except json.JSONDecodeError:
+ # If we got HTML, it might be the CAPTCHA page
+ if response_text.lstrip().startswith("<"):
+ await cls.bypass_captcha(session)
+
+ # Try again after bypassing CAPTCHA
+ async with session.post(
+ f"{cls.url}/api/user",
+ json=auth_request_data,
+ verify_ssl=False
+ ) as response2:
+ if response2.status == 200:
+ response_text2 = await response2.text()
+
+ try:
+ response_data2 = json.loads(response_text2)
+
+ if "authCode" in response_data2:
+ cls._auth_code = response_data2["authCode"]
+ cls._cookie_jar = session.cookie_jar
+ return
+ except json.JSONDecodeError:
+ pass
+ except Exception:
+ pass
+
+ # If we're here, we couldn't get a valid auth code
+ # Set a default one as a fallback
+ cls._auth_code = "DvS3A5GTE9f0D" # Fallback to one of the provided auth codes
diff --git a/g4f/Provider/OIVSCode.py b/g4f/Provider/OIVSCode.py
deleted file mode 100644
index 6110a339..00000000
--- a/g4f/Provider/OIVSCode.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from __future__ import annotations
-
-from .template import OpenaiTemplate
-
-class OIVSCode(OpenaiTemplate):
- label = "OI VSCode Server"
- url = "https://oi-vscode-server.onrender.com"
- api_base = "https://oi-vscode-server-2.onrender.com/v1"
-
- working = True
- needs_auth = False
- supports_stream = True
- supports_system_message = True
- supports_message_history = True
-
- default_model = "gpt-4o-mini-2024-07-18"
- default_vision_model = default_model
- vision_models = [default_model, "gpt-4o-mini"]
- models = vision_models + ["deepseek-ai/DeepSeek-V3"]
-
- model_aliases = {
- "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
- "deepseek-v3": "deepseek-ai/DeepSeek-V3"
- }
diff --git a/g4f/Provider/PerplexityLabs.py b/g4f/Provider/PerplexityLabs.py
index 08c6e44c..988d172b 100644
--- a/g4f/Provider/PerplexityLabs.py
+++ b/g4f/Provider/PerplexityLabs.py
@@ -81,15 +81,31 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
await ws.send_str("3")
continue
try:
- if last_message == 0 and model == cls.default_model:
- yield ""
- data = json.loads(message[2:])[1]
- yield data["output"][last_message:]
- last_message = len(data["output"])
- if data["final"]:
- if data["citations"]:
- yield Sources(data["citations"])
- yield FinishReason("stop")
- break
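+ # Only frames prefixed "42" carry Socket.IO event payloads (Engine.IO message type 4 + Socket.IO event type 2); everything else is transport chatter such as pings.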
+ if not message.startswith("42"):
+ continue
+
+ parsed_data = json.loads(message[2:])
+ message_type = parsed_data[0]
+ data = parsed_data[1]
+
+ # Handle error responses
+ if message_type.endswith("_query_progress") and data.get("status") == "failed":
+ error_message = data.get("text", "Unknown API error")
+ raise ResponseError(f"API Error: {error_message}")
+
+ # Handle normal responses
+ if "output" in data:
+ if last_message == 0 and model == cls.default_model:
+ yield ""
+ yield data["output"][last_message:]
+ last_message = len(data["output"])
+ if data["final"]:
+ if data["citations"]:
+ yield Sources(data["citations"])
+ yield FinishReason("stop")
+ break
+ except ResponseError:
+ # Re-raise ResponseError directly, preserving the original traceback
+ raise
except Exception as e:
- raise ResponseError(f"Message: {message}") from e
+ raise ResponseError(f"Error processing message: {message}") from e
diff --git a/g4f/Provider/PollinationsAI.py b/g4f/Provider/PollinationsAI.py
index d7d959e9..2b5b3697 100644
--- a/g4f/Provider/PollinationsAI.py
+++ b/g4f/Provider/PollinationsAI.py
@@ -51,33 +51,55 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
image_models = [default_image_model]
audio_models = {default_audio_model: []}
extra_image_models = ["flux-pro", "flux-dev", "flux-schnell", "midjourney", "dall-e-3", "turbo"]
- vision_models = [default_vision_model, "gpt-4o-mini", "openai", "openai-large", "searchgpt"]
+ vision_models = [default_vision_model, "gpt-4o-mini", "openai", "openai-large", "openai-reasoning", "searchgpt"]
_models_loaded = False
# https://github.com/pollinations/pollinations/blob/master/text.pollinations.ai/generateTextPortkey.js#L15
model_aliases = {
### Text Models ###
"gpt-4o-mini": "openai",
+ "gpt-4.1": "openai",
+ "gpt-4.1-mini": "openai",
+ "gpt-4.1-nano": "openai-fast",
+ "gpt-4.1-nano": "openai-small",
"gpt-4": "openai-large",
"gpt-4o": "openai-large",
- "gpt-4.1": "openai",
- "gpt-4.1-nano": "openai",
- "gpt-4.1-mini": "openai-large",
- "gpt-4.1-xlarge": "openai-xlarge",
+ "gpt-4.1": "openai-large",
+ "gpt-4.1": "openai-xlarge",
"o4-mini": "openai-reasoning",
+ "gpt-4.1-mini": "openai-roblox",
+ "gpt-4.1-mini": "roblox-rp",
+ "command-r-plus-08-2024": "command-r",
+ "gemini-2.5-flash": "gemini",
+ "gemini-2.0-flash-thinking": "gemini-thinking",
"qwen-2.5-coder-32b": "qwen-coder",
"llama-3.3-70b": "llama",
"llama-4-scout": "llamascout",
- "mistral-nemo": "mistral",
- "llama-3.1-8b": "llamalight",
- "llama-3.3-70b": "llama-scaleway",
- "phi-4": "phi",
+ "llama-4-scout-17b": "llamascout",
+ "mistral-small-3.1-24b": "mistral",
"deepseek-r1": "deepseek-reasoning-large",
"deepseek-r1-distill-llama-70b": "deepseek-reasoning-large",
+ "deepseek-r1-distill-llama-70b": "deepseek-r1-llama",
+ #"mistral-small-3.1-24b": "unity", # Personas
+ #"mirexa": "mirexa", # Personas
+ #"midijourney": "midijourney", # Personas
+ #"rtist": "rtist", # Personas
+ #"searchgpt": "searchgpt",
+ #"evil": "evil", # Personas
+ "deepseek-r1": "deepseek-reasoning",
"deepseek-r1-distill-qwen-32b": "deepseek-reasoning",
+ "phi-4": "phi",
+ #"pixtral-12b": "pixtral",
+ #"hormoz-8b": "hormoz",
+ "qwq-32b": "qwen-qwq",
+ #"hypnosis-tracy-7b": "hypnosis-tracy", # Personas
+ #"mistral-?": "sur", # Personas
"deepseek-v3": "deepseek",
- "llama-3.2-11b": "llama-vision",
+ "deepseek-v3-0324": "deepseek",
+ #"bidara": "bidara", # Personas
+
+ ### Audio Models ###
"gpt-4o-audio": "openai-audio",
- "gpt-4o-audio-preview": "openai-audio",
+ #"gpt-4o-audio-preview": "openai-audio",
### Image Models ###
"sdxl-turbo": "turbo",
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 5321e465..e029435d 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -35,13 +35,11 @@ except ImportError as e:
debug.error("Audio providers not loaded:", e)
try:
- from .AllenAI import AllenAI
from .ARTA import ARTA
from .Blackbox import Blackbox
from .Chatai import Chatai
from .ChatGLM import ChatGLM
from .ChatGpt import ChatGpt
- from .ChatGptEs import ChatGptEs
from .Cloudflare import Cloudflare
from .Copilot import Copilot
from .DDG import DDG
@@ -53,16 +51,11 @@ except ImportError as e:
try:
from .Free2GPT import Free2GPT
from .FreeGpt import FreeGpt
- from .FreeRouter import FreeRouter
from .GizAI import GizAI
- from .Glider import Glider
- from .Goabror import Goabror
from .ImageLabs import ImageLabs
- from .Jmuz import Jmuz
from .LambdaChat import LambdaChat
from .Liaobots import Liaobots
from .LMArenaProvider import LMArenaProvider
- from .OIVSCode import OIVSCode
except ImportError as e:
debug.error("Providers not loaded (F-L):", e)
try:
diff --git a/g4f/Provider/hf_space/BlackForestLabs_Flux1Schnell.py b/g4f/Provider/hf_space/BlackForestLabs_Flux1Schnell.py
deleted file mode 100644
index 374f5cd6..00000000
--- a/g4f/Provider/hf_space/BlackForestLabs_Flux1Schnell.py
+++ /dev/null
@@ -1,75 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-import json
-
-from ...typing import AsyncResult, Messages
-from ...providers.response import ImageResponse
-from ...errors import ResponseError
-from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..helper import format_image_prompt
-from .raise_for_status import raise_for_status
-
-class BlackForestLabs_Flux1Schnell(AsyncGeneratorProvider, ProviderModelMixin):
- label = "BlackForestLabs Flux-1-Schnell"
- url = "https://black-forest-labs-flux-1-schnell.hf.space"
- api_endpoint = "https://black-forest-labs-flux-1-schnell.hf.space/call/infer"
-
- working = True
-
- default_model = "black-forest-labs-flux-1-schnell"
- default_image_model = default_model
- model_aliases = {"flux-schnell": default_image_model, "flux": default_image_model}
- image_models = list(model_aliases.keys())
- models = image_models
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- prompt: str = None,
- width: int = 768,
- height: int = 768,
- num_inference_steps: int = 2,
- seed: int = 0,
- randomize_seed: bool = True,
- **kwargs
- ) -> AsyncResult:
- width = max(32, width - (width % 8))
- height = max(32, height - (height % 8))
- prompt = format_image_prompt(messages, prompt)
- payload = {
- "data": [
- prompt,
- seed,
- randomize_seed,
- width,
- height,
- num_inference_steps
- ]
- }
- async with ClientSession() as session:
- async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
- await raise_for_status(response)
- response_data = await response.json()
- event_id = response_data['event_id']
- while True:
- async with session.get(f"{cls.api_endpoint}/{event_id}", proxy=proxy) as status_response:
- await raise_for_status(status_response)
- while not status_response.content.at_eof():
- event = await status_response.content.readuntil(b'\n\n')
- if event.startswith(b'event:'):
- event_parts = event.split(b'\ndata: ')
- if len(event_parts) < 2:
- continue
- event_type = event_parts[0].split(b': ')[1]
- data = event_parts[1]
- if event_type == b'error':
- raise ResponseError(f"Error generating image: {data.decode(errors='ignore')}")
- elif event_type == b'complete':
- json_data = json.loads(data)
- image_url = json_data[0]['url']
- yield ImageResponse(images=[image_url], alt=prompt)
- return
diff --git a/g4f/Provider/hf_space/CohereForAI_C4AI_Command.py b/g4f/Provider/hf_space/CohereForAI_C4AI_Command.py
index 465c717d..b7012156 100644
--- a/g4f/Provider/hf_space/CohereForAI_C4AI_Command.py
+++ b/g4f/Provider/hf_space/CohereForAI_C4AI_Command.py
@@ -17,14 +17,15 @@ class CohereForAI_C4AI_Command(AsyncGeneratorProvider, ProviderModelMixin):
working = True
default_model = "command-a-03-2025"
- model_aliases = {
- "command-a": default_model,
- "command-r-plus": "command-r-plus-08-2024",
- "command-r": "command-r-08-2024",
- "command-r": "command-r",
- "command-r7b": "command-r7b-12-2024",
- }
- models = list(model_aliases.keys())
+ models = [
+ default_model,
+ "command-r-plus-08-2024",
+ "command-r-08-2024",
+ "command-r-plus",
+ "command-r",
+ "command-r7b-12-2024",
+ "command-r7b-arabic-02-2025",
+ ]
@classmethod
def get_model(cls, model: str, **kwargs) -> str:
diff --git a/g4f/Provider/hf_space/Microsoft_Phi_4.py b/g4f/Provider/hf_space/Microsoft_Phi_4_Multimodal.py
similarity index 98%
rename from g4f/Provider/hf_space/Microsoft_Phi_4.py
rename to g4f/Provider/hf_space/Microsoft_Phi_4_Multimodal.py
index 6cbe1306..6ea0afb9 100644
--- a/g4f/Provider/hf_space/Microsoft_Phi_4.py
+++ b/g4f/Provider/hf_space/Microsoft_Phi_4_Multimodal.py
@@ -15,7 +15,7 @@ from ... import debug
from .DeepseekAI_JanusPro7b import get_zerogpu_token
from .raise_for_status import raise_for_status
-class Microsoft_Phi_4(AsyncGeneratorProvider, ProviderModelMixin):
+class Microsoft_Phi_4_Multimodal(AsyncGeneratorProvider, ProviderModelMixin):
label = "Microsoft Phi-4"
space = "microsoft/phi-4-multimodal"
url = f"https://huggingface.co/spaces/{space}"
@@ -29,9 +29,9 @@ class Microsoft_Phi_4(AsyncGeneratorProvider, ProviderModelMixin):
default_model = "phi-4-multimodal"
default_vision_model = default_model
- model_aliases = {"phi-4": default_vision_model}
- vision_models = list(model_aliases.keys())
+ vision_models = [default_vision_model]
models = vision_models
+ model_aliases = {"phi-4": default_vision_model}
@classmethod
def run(cls, method: str, session: StreamSession, prompt: str, conversation: JsonConversation, media: list = None):
diff --git a/g4f/Provider/hf_space/Qwen_Qwen_3.py b/g4f/Provider/hf_space/Qwen_Qwen_3.py
index 3b8550ec..d95d622c 100644
--- a/g4f/Provider/hf_space/Qwen_Qwen_3.py
+++ b/g4f/Provider/hf_space/Qwen_Qwen_3.py
@@ -31,7 +31,15 @@ class Qwen_Qwen_3(AsyncGeneratorProvider, ProviderModelMixin):
"qwen3-1.7b",
"qwen3-0.6b",
}
- model_aliases = {model: model for model in models}
+ model_aliases = {
+ "qwen-3-235b": default_model,
+ "qwen-3-32b": "qwen3-32b",
+ "qwen-3-30b": "qwen3-30b-a3b",
+ "qwen-3-14b": "qwen3-14b",
+ "qwen-3-4b": "qwen3-4b",
+ "qwen-3-1.7b": "qwen3-1.7b",
+ "qwen-3-0.6b": "qwen3-0.6b",
+ }
@classmethod
async def create_async_generator(
diff --git a/g4f/Provider/hf_space/Voodoohop_Flux1Schnell.py b/g4f/Provider/hf_space/Voodoohop_Flux1Schnell.py
index 552dff31..800f2a97 100644
--- a/g4f/Provider/hf_space/Voodoohop_Flux1Schnell.py
+++ b/g4f/Provider/hf_space/Voodoohop_Flux1Schnell.py
@@ -19,7 +19,10 @@ class Voodoohop_Flux1Schnell(AsyncGeneratorProvider, ProviderModelMixin):
default_model = "voodoohop-flux-1-schnell"
default_image_model = default_model
- model_aliases = {"flux-schnell": default_model, "flux": default_model}
+ model_aliases = {
+ "flux-schnell": default_image_model,
+ "flux": default_image_model
+ }
image_models = list(model_aliases.keys())
models = image_models
diff --git a/g4f/Provider/hf_space/__init__.py b/g4f/Provider/hf_space/__init__.py
index d24565f1..6cc7ed1b 100644
--- a/g4f/Provider/hf_space/__init__.py
+++ b/g4f/Provider/hf_space/__init__.py
@@ -7,10 +7,9 @@ from ...errors import ResponseError
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .BlackForestLabs_Flux1Dev import BlackForestLabs_Flux1Dev
-from .BlackForestLabs_Flux1Schnell import BlackForestLabs_Flux1Schnell
from .CohereForAI_C4AI_Command import CohereForAI_C4AI_Command
from .DeepseekAI_JanusPro7b import DeepseekAI_JanusPro7b
-from .Microsoft_Phi_4 import Microsoft_Phi_4
+from .Microsoft_Phi_4_Multimodal import Microsoft_Phi_4_Multimodal
from .Qwen_QVQ_72B import Qwen_QVQ_72B
from .Qwen_Qwen_2_5 import Qwen_Qwen_2_5
from .Qwen_Qwen_2_5M import Qwen_Qwen_2_5M
@@ -30,10 +29,9 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
default_vision_model = Qwen_QVQ_72B.default_model
providers = [
BlackForestLabs_Flux1Dev,
- BlackForestLabs_Flux1Schnell,
CohereForAI_C4AI_Command,
DeepseekAI_JanusPro7b,
- Microsoft_Phi_4,
+ Microsoft_Phi_4_Multimodal,
Qwen_QVQ_72B,
Qwen_Qwen_2_5,
Qwen_Qwen_2_5M,
diff --git a/g4f/Provider/needs_auth/BlackboxPro.py b/g4f/Provider/needs_auth/BlackboxPro.py
new file mode 100644
index 00000000..627fe7d3
--- /dev/null
+++ b/g4f/Provider/needs_auth/BlackboxPro.py
@@ -0,0 +1,1355 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import os
+import re
+import json
+import random
+import string
+from pathlib import Path
+from typing import Optional
+from datetime import datetime, timedelta
+
+from ...typing import AsyncResult, Messages, MediaListType
+from ...requests.raise_for_status import raise_for_status
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..openai.har_file import get_har_files
+from ...image import to_data_uri
+from ...cookies import get_cookies_dir
+from ..helper import format_image_prompt, render_messages
+from ...providers.response import JsonConversation, ImageResponse
+from ...tools.media import merge_media
+from ...errors import RateLimitError, NoValidHarFileError
+from ... import debug
+
+class Conversation(JsonConversation):
+ validated_value: str = None
+ chat_id: str = None
+ message_history: Messages = []
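+ # Caution: as a class-level default, this list is shared across instances until explicitly reassigned.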
+
+ def __init__(self, model: str):
+ self.model = model
+
+class BlackboxPro(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Blackbox AI Pro"
+ url = "https://www.blackbox.ai"
+ login_url = None
+ api_endpoint = "https://www.blackbox.ai/api/chat"
+
+ working = True
+ needs_auth = True
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = "blackboxai"
+ default_vision_model = default_model
+ default_image_model = 'flux'
+
+ # OpenRouter Free
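+ # Note: these are OpenRouter display names, not API model IDs.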
+ openrouter_free_models = [
+ "Deepcoder 14B Preview",
+ "DeepHermes 3 Llama 3 8B Preview",
+ "DeepSeek R1 Zero",
+ "DeepSeek V3",
+ "DeepSeek V3 0324",
+ "DeepSeek V3 Base",
+ "Dolphin3.0 Mistral 24B",
+ "Dolphin3.0 R1 Mistral 24B",
+ "Flash 3",
+ "Gemini 2.0 Flash Experimental",
+ "Gemini 2.0 Flash Thinking Experimental",
+ "Gemini 2.0 Flash Thinking Experimental 01-21",
+ "Gemma 2 9B",
+ "Gemma 3 12B",
+ "Gemma 3 1B",
+ "Gemma 3 27B",
+ "Gemma 3 4B",
+ "Kimi VL A3B Thinking",
+ "LearnLM 1.5 Pro Experimental",
+ "Llama 3.1 8B Instruct",
+ "Llama 3.1 Nemotron 70B Instruct",
+ "Llama 3.1 Nemotron Nano 8B v1",
+ "Llama 3.1 Nemotron Ultra 253B v1",
+ "Llama 3.2 11B Vision Instruct",
+ "Llama 3.2 1B Instruct",
+ "Llama 3.2 3B Instruct",
+ "Llama 3.3 70B Instruct",
+ "Llama 3.3 Nemotron Super 49B v1",
+ "Llama 4 Maverick",
+ "Llama 4 Scout",
+ "Mistral 7B Instruct",
+ "Mistral Nemo",
+ "Mistral Small 3",
+ "Mistral Small 3.1 24B",
+ "Molmo 7B D",
+ "Moonlight 16B A3B Instruct",
+ "OlympicCoder 32B",
+ "OlympicCoder 7B",
+ "Qwen2.5 72B Instruct",
+ "Qwen2.5 7B Instruct",
+ "Qwen2.5 Coder 32B Instruct",
+ "Qwen2.5 VL 32B Instruct",
+ "Qwen2.5 VL 3B Instruct",
+ "Qwen2.5 VL 72B Instruct",
+ "Qwen2.5-VL 7B Instruct",
+ "Qwerky 72B",
+ "QwQ 32B",
+ "QwQ 32B Preview",
+ "QwQ 32B RpR v1",
+ "R1",
+ "R1 Distill Llama 70B",
+ "R1 Distill Qwen 14B",
+ "R1 Distill Qwen 32B",
+ "Rogue Rose 103B v0.2",
+ "UI-TARS 72B",
+ "Zephyr 7B",
+ ]
+
+ # Free models (available without subscription)
+ fallback_models = [
+ default_model,
+ "gpt-4o-mini",
+ "DeepSeek-V3",
+ "DeepSeek-R1",
+ "Meta-Llama-3.3-70B-Instruct-Turbo",
+ "Mistral-Small-24B-Instruct-2501",
+ "DeepSeek-LLM-Chat-(67B)",
+ "Qwen-QwQ-32B-Preview",
+
+ # OpenRouter Free
+ *openrouter_free_models,
+
+ # Image models
+ "flux",
+
+ # Trending agent modes
+ 'Python Agent',
+ 'HTML Agent',
+ 'Builder Agent',
+ 'Java Agent',
+ 'JavaScript Agent',
+ 'React Agent',
+ 'Android Agent',
+ 'Flutter Agent',
+ 'Next.js Agent',
+ 'AngularJS Agent',
+ 'Swift Agent',
+ 'MongoDB Agent',
+ 'PyTorch Agent',
+ 'Xcode Agent',
+ 'Azure Agent',
+ 'Bitbucket Agent',
+ 'DigitalOcean Agent',
+ 'Docker Agent',
+ 'Electron Agent',
+ 'Erlang Agent',
+ 'FastAPI Agent',
+ 'Firebase Agent',
+ 'Flask Agent',
+ 'Git Agent',
+ 'Gitlab Agent',
+ 'Go Agent',
+ 'Godot Agent',
+ 'Google Cloud Agent',
+ 'Heroku Agent',
+ ]
+
+ # Premium models (require subscription)
+ premium_models = [
+ "GPT-4o",
+ "o1",
+ "o3-mini",
+ "Claude-sonnet-3.7",
+ "Claude-sonnet-3.5",
+ "Gemini-Flash-2.0",
+ "DBRX-Instruct",
+ "blackboxai-pro",
+ "Gemini-PRO",
+ ]
+
+ # Premium/Pro models (require subscription) (OpenRouter)
+ openrouter_pro_models = [
+ "Aion-1.0",
+ "Aion-1.0-Mini",
+ "Aion-RP 1.0 (8B)",
+ "Airoboros 70B",
+ "Anubis Pro 105B V1",
+ "Arctic Instruct",
+ "Auto Router",
+ "Bagel 34B v0.2",
+ "Capybara 34B",
+ "Capybara 7B",
+ "ChatGPT-4o",
+ "Chronos Hermes 13B v2",
+ "Cinematika 7B (alpha)",
+ "Claude 3 Haiku",
+ "Claude 3 Haiku (self-moderated)",
+ "Claude 3 Opus",
+ "Claude 3 Opus (self-moderated)",
+ "Claude 3 Sonnet",
+ "Claude 3 Sonnet (self-moderated)",
+ "Claude 3.5 Haiku",
+ "Claude 3.5 Haiku (2024-10-22)",
+ "Claude 3.5 Haiku (2024-10-22) (self-moderated)",
+ "Claude 3.5 Haiku (self-moderated)",
+ "Claude 3.5 Sonnet",
+ "Claude 3.5 Sonnet (2024-06-20)",
+ "Claude 3.5 Sonnet (2024-06-20) (self-moderated)",
+ "Claude 3.5 Sonnet (self-moderated)",
+ "Claude 3.7 Sonnet",
+ "Claude 3.7 Sonnet (self-moderated)",
+ "Claude 3.7 Sonnet (thinking)",
+ "Claude Instant v1",
+ "Claude Instant v1.0",
+ "Claude Instant v1.1",
+ "Claude v1",
+ "Claude v1.2",
+ "Claude v2",
+ "Claude v2 (self-moderated)",
+ "Claude v2.0",
+ "Claude v2.0 (self-moderated)",
+ "Claude v2.1",
+ "Claude v2.1 (self-moderated)",
+ "CodeLlama 34B Instruct",
+ "CodeLlama 34B v2",
+ "CodeLlama 70B Instruct",
+ "CodeLLaMa 7B Instruct Solidity",
+ "Codestral 2501",
+ "Codestral Mamba",
+ "Command",
+ "Command A",
+ "Command R",
+ "Command R (03-2024)",
+ "Command R (08-2024)",
+ "Command R+",
+ "Command R+ (04-2024)",
+ "Command R+ (08-2024)",
+ "Command R7B (12-2024)",
+ "DBRX 132B Instruct",
+ "DeepSeek R1",
+ "DeepSeek V2.5",
+ "DeepSeek V3",
+ "DeepSeek V3 0324",
+ "DeepSeek-Coder-V2",
+ "Dolphin 2.6 Mixtral 8x7B \uD83D\uDC2C",
+ "Dolphin 2.9.2 Mixtral 8x22B \uD83D\uDC2C",
+ "Dolphin Llama 3 70B \uD83D\uDC2C",
+ "Eagle 7B",
+ "EVA Llama 3.33 70B",
+ "EVA Qwen2.5 14B",
+ "EVA Qwen2.5 32B",
+ "EVA Qwen2.5 72B",
+ "Fimbulvetr 11B v2",
+ "FireLLaVA 13B",
+ "Gemini 1.5 Flash ",
+ "Gemini 1.5 Flash 8B",
+ "Gemini 1.5 Flash 8B Experimental",
+ "Gemini 1.5 Flash Experimental",
+ "Gemini 1.5 Pro",
+ "Gemini 1.5 Pro Experimental",
+ "Gemini 2.0 Flash",
+ "Gemini 2.0 Flash Lite",
+ "Gemini 2.5 Pro",
+ "Gemini Experimental 1114",
+ "Gemini Experimental 1121",
+ "Gemini Pro 1.0",
+ "Gemini Pro Vision 1.0",
+ "Gemma 2 27B",
+ "Gemma 2 9B",
+ "Gemma 3 12B",
+ "Gemma 3 27B",
+ "Gemma 3 4B",
+ "Gemma 7B",
+ "Goliath 120B",
+ "GPT-3.5 Turbo",
+ "GPT-3.5 Turbo (older v0301)",
+ "GPT-3.5 Turbo (older v0613)",
+ "GPT-3.5 Turbo 16k",
+ "GPT-3.5 Turbo 16k",
+ "GPT-3.5 Turbo 16k (older v1106)",
+ "GPT-3.5 Turbo Instruct",
+ "GPT-4",
+ "GPT-4 (older v0314)",
+ "GPT-4 32k",
+ "GPT-4 32k (older v0314)",
+ "GPT-4 Turbo",
+ "GPT-4 Turbo (older v1106)",
+ "GPT-4 Turbo Preview",
+ "GPT-4 Vision",
+ "GPT-4.1",
+ "GPT-4.1 Mini",
+ "GPT-4.1 Nano",
+ "GPT-4.5 (Preview)",
+ "GPT-4o",
+ "GPT-4o (2024-05-13)",
+ "GPT-4o (2024-08-06)",
+ "GPT-4o (2024-11-20)",
+ "GPT-4o (extended)",
+ "GPT-4o Search Preview",
+ "GPT-4o-mini",
+ "GPT-4o-mini (2024-07-18)",
+ "GPT-4o-mini Search Preview",
+ "Grok 2",
+ "Grok 2 1212",
+ "Grok 2 mini",
+ "Grok 2 Vision 1212",
+ "Grok 3",
+ "Grok 3 Mini Beta",
+ "Grok Beta",
+ "Grok Vision Beta",
+ "Hermes 13B",
+ "Hermes 2 Mistral 7B DPO",
+ "Hermes 2 Mixtral 8x7B DPO",
+ "Hermes 2 Mixtral 8x7B SFT",
+ "Hermes 2 Pro - Llama-3 8B",
+ "Hermes 2 Theta 8B",
+ "Hermes 2 Vision 7B (alpha)",
+ "Hermes 2 Yi 34B",
+ "Hermes 3 405B Instruct",
+ "Hermes 3 70B Instruct",
+ "Hermes 70B",
+ "Inflection 3 Pi",
+ "Inflection 3 Productivity",
+ "Jamba 1.5 Large",
+ "Jamba 1.5 Mini",
+ "Jamba 1.6 Large",
+ "Jamba Instruct",
+ "Jamba Mini 1.6",
+ "L3.3 Electra R1 70B",
+ "LFM 3B",
+ "LFM 40B MoE",
+ "LFM 7B",
+ "Llama 2 13B Chat",
+ "Llama 2 70B Chat",
+ "Llama 3 70B (Base)",
+ "Llama 3 70B Instruct",
+ "Llama 3 8B (Base)",
+ "Llama 3 8B Instruct",
+ "Llama 3 8B Lunaris",
+ "Llama 3 Euryale 70B v2.1",
+ "Llama 3 Lumimaid 70B",
+ "Llama 3 Lumimaid 8B",
+ "Llama 3 Lumimaid 8B (extended)",
+ "Llama 3 Soliloquy 7B v3 32K",
+ "Llama 3 Soliloquy 8B v2",
+ "Llama 3 Stheno 8B v3.3 32K",
+ "Llama 3.1 405B (base)",
+ "Llama 3.1 405B Instruct",
+ "Llama 3.1 70B Hanami x1",
+ "Llama 3.1 70B Instruct",
+ "Llama 3.1 8B Instruct",
+ "Llama 3.1 Euryale 70B v2.2",
+ "Llama 3.1 Nemotron 70B Instruct",
+ "Llama 3.1 Swallow 70B Instruct V0.3",
+ "Llama 3.1 Swallow 8B Instruct V0.3",
+ "Llama 3.1 Tulu 3 405B",
+ "Llama 3.2 11B Vision Instruct",
+ "Llama 3.2 1B Instruct",
+ "Llama 3.2 3B Instruct",
+ "Llama 3.2 90B Vision Instruct",
+ "Llama 3.3 70B Instruct",
+ "Llama 3.3 Euryale 70B",
+ "Llama 4 Maverick",
+ "Llama 4 Scout",
+ "Llama Guard 3 8B",
+ "LlamaGuard 2 8B",
+ "LLaVA 13B",
+ "LLaVA v1.6 34B",
+ "Llemma 7b",
+ "Lumimaid v0.2 70B",
+ "Lumimaid v0.2 8B",
+ "lzlv 70B",
+ "Mag Mell R1 12B",
+ "Magnum 72B",
+ "Magnum v2 72B",
+ "Magnum v4 72B",
+ "Midnight Rose 70B",
+ "MiniMax-01",
+ "Ministral 3B",
+ "Ministral 8B",
+ "Ministral 8B",
+ "Mistral 7B Instruct",
+ "Mistral 7B Instruct v0.1",
+ "Mistral 7B Instruct v0.2",
+ "Mistral 7B Instruct v0.3",
+ "Mistral Large",
+ "Mistral Large 2407",
+ "Mistral Large 2411",
+ "Mistral Medium",
+ "Mistral Nemo",
+ "Mistral Nemo 12B Celeste",
+ "Mistral Nemo Inferor 12B",
+ "Mistral OpenOrca 7B",
+ "Mistral Small",
+ "Mistral Small 3",
+ "Mistral Small 3.1 24B",
+ "Mistral Tiny",
+ "Mixtral 8x22B (base)",
+ "Mixtral 8x22B Instruct",
+ "Mixtral 8x7B Instruct",
+ "Mythalion 13B",
+ "MythoMax 13B",
+ "MythoMist 7B",
+ "Nemotron-4 340B Instruct",
+ "Neural Chat 7B v3.1",
+ "Noromaid 20B",
+ "Noromaid Mixtral 8x7B Instruct",
+ "Nova Lite 1.0",
+ "Nova Micro 1.0",
+ "Nova Pro 1.0",
+ "o1",
+ "o1-mini",
+ "o1-mini (2024-09-12)",
+ "o1-preview",
+ "o1-preview (2024-09-12)",
+ "o1-pro",
+ "o3 Mini",
+ "o3 Mini High",
+ "Olmo 2 32B Instruct",
+ "OLMo 7B Instruct",
+ "OpenChat 3.5 7B",
+ "OpenChat 3.6 8B",
+ "OpenHands LM 32B V0.1",
+ "OpenHermes 2 Mistral 7B",
+ "OpenHermes 2.5 Mistral 7B",
+ "Optimus Alpha",
+ "PaLM 2 Chat",
+ "PaLM 2 Chat 32k",
+ "PaLM 2 Code Chat",
+ "PaLM 2 Code Chat 32k",
+ "Phi 4",
+ "Phi 4 Multimodal Instruct",
+ "Phi-3 Medium 128K Instruct",
+ "Phi-3 Medium 4K Instruct",
+ "Phi-3 Mini 128K Instruct",
+ "Phi-3.5 Mini 128K Instruct",
+ "Pixtral 12B",
+ "Pixtral Large 2411",
+ "Psyfighter 13B",
+ "Psyfighter v2 13B",
+ "Quasar Alpha",
+ "Qwen 1.5 110B Chat",
+ "Qwen 1.5 14B Chat",
+ "Qwen 1.5 32B Chat",
+ "Qwen 1.5 4B Chat",
+ "Qwen 1.5 72B Chat",
+ "Qwen 1.5 7B Chat",
+ "Qwen 2 72B Instruct",
+ "Qwen 2 7B Instruct",
+ "Qwen VL Max",
+ "Qwen VL Plus",
+ "Qwen-Max ",
+ "Qwen-Plus",
+ "Qwen-Turbo",
+ "Qwen2.5 32B Instruct",
+ "Qwen2.5 72B Instruct",
+ "Qwen2.5 7B Instruct",
+ "Qwen2.5 Coder 32B Instruct",
+ "Qwen2.5 VL 32B Instruct",
+ "Qwen2.5 VL 72B Instruct",
+ "Qwen2.5-VL 72B Instruct",
+ "Qwen2.5-VL 7B Instruct",
+ "QwQ 32B",
+ "QwQ 32B Preview",
+ "R1",
+ "R1 Distill Llama 70B",
+ "R1 Distill Llama 8B",
+ "R1 Distill Qwen 1.5B",
+ "R1 Distill Qwen 14B",
+ "R1 Distill Qwen 32B",
+ "Reflection 70B",
+ "ReMM SLERP 13B",
+ "Rocinante 12B",
+ "RWKV v5 3B AI Town",
+ "RWKV v5 World 3B",
+ "Saba",
+ "Skyfall 36B V2",
+ "SorcererLM 8x22B",
+ "Starcannon 12B",
+ "StarCoder2 15B Instruct",
+ "StripedHyena Hessian 7B (base)",
+ "StripedHyena Nous 7B",
+ "Synthia 70B",
+ "Toppy M 7B",
+ "Typhoon2 70B Instruct",
+ "Typhoon2 8B Instruct",
+ "Unslopnemo 12B",
+ "Wayfarer Large 70B Llama 3.3",
+ "Weaver (alpha)",
+ "WizardLM-2 7B",
+ "WizardLM-2 8x22B",
+ "Xwin 70B",
+ "Yi 1.5 34B Chat",
+ "Yi 34B (base)",
+ "Yi 34B 200K",
+ "Yi 34B Chat",
+ "Yi 6B (base)",
+ "Yi Large",
+ "Yi Large FC",
+ "Yi Large Turbo",
+ "Yi Vision",
+ "Zephyr 141B-A35B",
+ ]
+
+ image_models = [default_image_model]
+ vision_models = [default_vision_model, 'GPT-4o', 'o1', 'o3-mini', 'Gemini-PRO', 'Gemini Agent', 'llama-3.1-8b Agent', 'llama-3.1-70b Agent', 'llama-3.1-405 Agent', 'Gemini-Flash-2.0', 'DeepSeek-V3']
+
+ userSelectedModel = ['GPT-4o', 'o1', 'o3-mini', 'Gemini-PRO', 'Claude-sonnet-3.7', 'Claude-sonnet-3.5', 'DeepSeek-V3', 'DeepSeek-R1', 'Meta-Llama-3.3-70B-Instruct-Turbo', 'Mistral-Small-24B-Instruct-2501', 'DeepSeek-LLM-Chat-(67B)', 'DBRX-Instruct', 'Qwen-QwQ-32B-Preview', 'Nous-Hermes-2-Mixtral-8x7B-DPO', 'Gemini-Flash-2.0'] + openrouter_pro_models
+
+ # Agent mode configurations
+ agentMode = {
+ # Free (OpenRouter)
+ 'Deepcoder 14B Preview': {'mode': True, 'id': "agentica-org/deepcoder-14b-preview:free", 'name': "Deepcoder 14B Preview"},
+ 'DeepHermes 3 Llama 3 8B Preview': {'mode': True, 'id': "nousresearch/deephermes-3-llama-3-8b-preview:free", 'name': "DeepHermes 3 Llama 3 8B Preview"},
+ 'DeepSeek R1 Zero': {'mode': True, 'id': "deepseek/deepseek-r1-zero:free", 'name': "DeepSeek R1 Zero"},
+ 'DeepSeek V3': {'mode': True, 'id': "deepseek/deepseek-chat:free", 'name': "DeepSeek V3"},
+ 'DeepSeek V3 0324': {'mode': True, 'id': "deepseek/deepseek-chat-v3-0324:free", 'name': "DeepSeek V3 0324"},
+ 'DeepSeek V3 Base': {'mode': True, 'id': "deepseek/deepseek-v3-base:free", 'name': "DeepSeek V3 Base"},
+ 'Dolphin3.0 Mistral 24B': {'mode': True, 'id': "cognitivecomputations/dolphin3.0-mistral-24b:free", 'name': "Dolphin3.0 Mistral 24B"},
+ 'Dolphin3.0 R1 Mistral 24B': {'mode': True, 'id': "cognitivecomputations/dolphin3.0-r1-mistral-24b:free", 'name': "Dolphin3.0 R1 Mistral 24B"},
+ 'Flash 3': {'mode': True, 'id': "rekaai/reka-flash-3:free", 'name': "Flash 3"},
+ 'Gemini 2.0 Flash Experimental': {'mode': True, 'id': "google/gemini-2.0-flash-exp:free", 'name': "Gemini 2.0 Flash Experimental"},
+ 'Gemini 2.0 Flash Thinking Experimental': {'mode': True, 'id': "google/gemini-2.0-flash-thinking-exp-1219:free", 'name': "Gemini 2.0 Flash Thinking Experimental"},
+ 'Gemini 2.0 Flash Thinking Experimental 01-21': {'mode': True, 'id': "google/gemini-2.0-flash-thinking-exp:free", 'name': "Gemini 2.0 Flash Thinking Experimental 01-21"},
+ 'Gemma 2 9B': {'mode': True, 'id': "google/gemma-2-9b-it:free", 'name': "Gemma 2 9B"},
+ 'Gemma 3 12B': {'mode': True, 'id': "google/gemma-3-12b-it:free", 'name': "Gemma 3 12B"},
+ 'Gemma 3 1B': {'mode': True, 'id': "google/gemma-3-1b-it:free", 'name': "Gemma 3 1B"},
+ 'Gemma 3 27B': {'mode': True, 'id': "google/gemma-3-27b-it:free", 'name': "Gemma 3 27B"},
+ 'Gemma 3 4B': {'mode': True, 'id': "google/gemma-3-4b-it:free", 'name': "Gemma 3 4B"},
+ 'Kimi VL A3B Thinking': {'mode': True, 'id': "moonshotai/kimi-vl-a3b-thinking:free", 'name': "Kimi VL A3B Thinking"},
+ 'LearnLM 1.5 Pro Experimental': {'mode': True, 'id': "google/learnlm-1.5-pro-experimental:free", 'name': "LearnLM 1.5 Pro Experimental"},
+ 'Llama 3.1 8B Instruct': {'mode': True, 'id': "meta-llama/llama-3.1-8b-instruct:free", 'name': "Llama 3.1 8B Instruct"},
+ 'Llama 3.1 Nemotron 70B Instruct': {'mode': True, 'id': "nvidia/llama-3.1-nemotron-70b-instruct:free", 'name': "Llama 3.1 Nemotron 70B Instruct"},
+ 'Llama 3.1 Nemotron Nano 8B v1': {'mode': True, 'id': "nvidia/llama-3.1-nemotron-nano-8b-v1:free", 'name': "Llama 3.1 Nemotron Nano 8B v1"},
+ 'Llama 3.1 Nemotron Ultra 253B v1': {'mode': True, 'id': "nvidia/llama-3.1-nemotron-ultra-253b-v1:free", 'name': "Llama 3.1 Nemotron Ultra 253B v1"},
+ 'Llama 3.2 11B Vision Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-11b-vision-instruct:free", 'name': "Llama 3.2 11B Vision Instruct"},
+ 'Llama 3.2 1B Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-1b-instruct:free", 'name': "Llama 3.2 1B Instruct"},
+ 'Llama 3.2 3B Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-3b-instruct:free", 'name': "Llama 3.2 3B Instruct"},
+ 'Llama 3.3 70B Instruct': {'mode': True, 'id': "meta-llama/llama-3.3-70b-instruct:free", 'name': "Llama 3.3 70B Instruct"},
+ 'Llama 3.3 Nemotron Super 49B v1': {'mode': True, 'id': "nvidia/llama-3.3-nemotron-super-49b-v1:free", 'name': "Llama 3.3 Nemotron Super 49B v1"},
+ 'Llama 4 Maverick': {'mode': True, 'id': "meta-llama/llama-4-maverick:free", 'name': "Llama 4 Maverick"},
+ 'Llama 4 Scout': {'mode': True, 'id': "meta-llama/llama-4-scout:free", 'name': "Llama 4 Scout"},
+ 'Mistral 7B Instruct': {'mode': True, 'id': "mistralai/mistral-7b-instruct:free", 'name': "Mistral 7B Instruct"},
+ 'Mistral Nemo': {'mode': True, 'id': "mistralai/mistral-nemo:free", 'name': "Mistral Nemo"},
+ 'Mistral Small 3': {'mode': True, 'id': "mistralai/mistral-small-24b-instruct-2501:free", 'name': "Mistral Small 3"},
+ 'Mistral Small 3.1 24B': {'mode': True, 'id': "mistralai/mistral-small-3.1-24b-instruct:free", 'name': "Mistral Small 3.1 24B"},
+ 'Molmo 7B D': {'mode': True, 'id': "allenai/molmo-7b-d:free", 'name': "Molmo 7B D"},
+ 'Moonlight 16B A3B Instruct': {'mode': True, 'id': "moonshotai/moonlight-16b-a3b-instruct:free", 'name': "Moonlight 16B A3B Instruct"},
+ 'OlympicCoder 32B': {'mode': True, 'id': "open-r1/olympiccoder-32b:free", 'name': "OlympicCoder 32B"},
+ 'OlympicCoder 7B': {'mode': True, 'id': "open-r1/olympiccoder-7b:free", 'name': "OlympicCoder 7B"},
+ 'Qwen2.5 72B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-72b-instruct:free", 'name': "Qwen2.5 72B Instruct"},
+ 'Qwen2.5 7B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-7b-instruct:free", 'name': "Qwen2.5 7B Instruct"},
+ 'Qwen2.5 Coder 32B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-coder-32b-instruct:free", 'name': "Qwen2.5 Coder 32B Instruct"},
+ 'Qwen2.5 VL 32B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-32b-instruct:free", 'name': "Qwen2.5 VL 32B Instruct"},
+ 'Qwen2.5 VL 3B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-3b-instruct:free", 'name': "Qwen2.5 VL 3B Instruct"},
+ 'Qwen2.5 VL 72B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-72b-instruct:free", 'name': "Qwen2.5 VL 72B Instruct"},
+ 'Qwen2.5-VL 7B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-vl-7b-instruct:free", 'name': "Qwen2.5-VL 7B Instruct"},
+ 'Qwerky 72B': {'mode': True, 'id': "featherless/qwerky-72b:free", 'name': "Qwerky 72B"},
+ 'QwQ 32B': {'mode': True, 'id': "qwen/qwq-32b:free", 'name': "QwQ 32B"},
+ 'QwQ 32B Preview': {'mode': True, 'id': "qwen/qwq-32b-preview:free", 'name': "QwQ 32B Preview"},
+ 'QwQ 32B RpR v1': {'mode': True, 'id': "arliai/qwq-32b-arliai-rpr-v1:free", 'name': "QwQ 32B RpR v1"},
+ 'R1': {'mode': True, 'id': "deepseek/deepseek-r1:free", 'name': "R1"},
+ 'R1 Distill Llama 70B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-llama-70b:free", 'name': "R1 Distill Llama 70B"},
+ 'R1 Distill Qwen 14B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-14b:free", 'name': "R1 Distill Qwen 14B"},
+ 'R1 Distill Qwen 32B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-32b:free", 'name': "R1 Distill Qwen 32B"},
+ 'Rogue Rose 103B v0.2': {'mode': True, 'id': "sophosympatheia/rogue-rose-103b-v0.2:free", 'name': "Rogue Rose 103B v0.2"},
+ 'UI-TARS 72B ': {'mode': True, 'id': "bytedance-research/ui-tars-72b:free", 'name': "UI-TARS 72B "},
+ 'Zephyr 7B': {'mode': True, 'id': "huggingfaceh4/zephyr-7b-beta:free", 'name': "Zephyr 7B"},
+
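+ # NOTE: names that repeat between this free section and the Pro section below
+ # (e.g. 'Gemma 2 9B', 'R1') resolve to the later, non-':free' entry, since
+ # later duplicate keys win in a Python dict literal.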
+ # Pro
+ 'Aion-1.0': {'mode': True, 'id': "aion-labs/aion-1.0", 'name': "Aion-1.0"},
+ 'Aion-1.0-Mini': {'mode': True, 'id': "aion-labs/aion-1.0-mini", 'name': "Aion-1.0-Mini"},
+ 'Aion-RP 1.0 (8B)': {'mode': True, 'id': "aion-labs/aion-rp-llama-3.1-8b", 'name': "Aion-RP 1.0 (8B)"},
+ 'Airoboros 70B': {'mode': True, 'id': "jondurbin/airoboros-l2-70b", 'name': "Airoboros 70B"},
+ 'Anubis Pro 105B V1': {'mode': True, 'id': "thedrummer/anubis-pro-105b-v1", 'name': "Anubis Pro 105B V1"},
+ 'Arctic Instruct': {'mode': True, 'id': "snowflake/snowflake-arctic-instruct", 'name': "Arctic Instruct"},
+ 'Auto Router': {'mode': True, 'id': "openrouter/auto", 'name': "Auto Router"},
+ 'Bagel 34B v0.2': {'mode': True, 'id': "jondurbin/bagel-34b", 'name': "Bagel 34B v0.2"},
+ 'Capybara 34B': {'mode': True, 'id': "nousresearch/nous-capybara-34b", 'name': "Capybara 34B"},
+ 'Capybara 7B': {'mode': True, 'id': "nousresearch/nous-capybara-7b", 'name': "Capybara 7B"},
+ 'ChatGPT-4o': {'mode': True, 'id': "openai/chatgpt-4o-latest", 'name': "ChatGPT-4o"},
+ 'Chronos Hermes 13B v2': {'mode': True, 'id': "austism/chronos-hermes-13b", 'name': "Chronos Hermes 13B v2"},
+ 'Cinematika 7B (alpha)': {'mode': True, 'id': "openrouter/cinematika-7b", 'name': "Cinematika 7B (alpha)"},
+ 'Claude 3 Haiku': {'mode': True, 'id': "anthropic/claude-3-haiku", 'name': "Claude 3 Haiku"},
+ 'Claude 3 Haiku (self-moderated)': {'mode': True, 'id': "anthropic/claude-3-haiku:beta", 'name': "Claude 3 Haiku (self-moderated)"},
+ 'Claude 3 Opus': {'mode': True, 'id': "anthropic/claude-3-opus", 'name': "Claude 3 Opus"},
+ 'Claude 3 Opus (self-moderated)': {'mode': True, 'id': "anthropic/claude-3-opus:beta", 'name': "Claude 3 Opus (self-moderated)"},
+ 'Claude 3 Sonnet': {'mode': True, 'id': "anthropic/claude-3-sonnet", 'name': "Claude 3 Sonnet"},
+ 'Claude 3 Sonnet (self-moderated)': {'mode': True, 'id': "anthropic/claude-3-sonnet:beta", 'name': "Claude 3 Sonnet (self-moderated)"},
+ 'Claude 3.5 Haiku': {'mode': True, 'id': "anthropic/claude-3.5-haiku", 'name': "Claude 3.5 Haiku"},
+ 'Claude 3.5 Haiku (2024-10-22)': {'mode': True, 'id': "anthropic/claude-3.5-haiku-20241022", 'name': "Claude 3.5 Haiku (2024-10-22)"},
+ 'Claude 3.5 Haiku (2024-10-22) (self-moderated)': {'mode': True, 'id': "anthropic/claude-3.5-haiku-20241022:beta", 'name': "Claude 3.5 Haiku (2024-10-22) (self-moderated)"},
+ 'Claude 3.5 Haiku (self-moderated)': {'mode': True, 'id': "anthropic/claude-3.5-haiku:beta", 'name': "Claude 3.5 Haiku (self-moderated)"},
+ 'Claude 3.5 Sonnet': {'mode': True, 'id': "anthropic/claude-3.5-sonnet", 'name': "Claude 3.5 Sonnet"},
+ 'Claude 3.5 Sonnet (2024-06-20)': {'mode': True, 'id': "anthropic/claude-3.5-sonnet-20240620", 'name': "Claude 3.5 Sonnet (2024-06-20)"},
+ 'Claude 3.5 Sonnet (2024-06-20) (self-moderated)': {'mode': True, 'id': "anthropic/claude-3.5-sonnet-20240620:beta", 'name': "Claude 3.5 Sonnet (2024-06-20) (self-moderated)"},
+ 'Claude 3.5 Sonnet (self-moderated)': {'mode': True, 'id': "anthropic/claude-3.5-sonnet:beta", 'name': "Claude 3.5 Sonnet (self-moderated)"},
+ 'Claude 3.7 Sonnet': {'mode': True, 'id': "anthropic/claude-3.7-sonnet", 'name': "Claude 3.7 Sonnet"},
+ 'Claude 3.7 Sonnet (self-moderated)': {'mode': True, 'id': "anthropic/claude-3.7-sonnet:beta", 'name': "Claude 3.7 Sonnet (self-moderated)"},
+ 'Claude 3.7 Sonnet (thinking)': {'mode': True, 'id': "anthropic/claude-3.7-sonnet:thinking", 'name': "Claude 3.7 Sonnet (thinking)"},
+ 'Claude Instant v1': {'mode': True, 'id': "anthropic/claude-instant-1", 'name': "Claude Instant v1"},
+ 'Claude Instant v1.0': {'mode': True, 'id': "anthropic/claude-instant-1.0", 'name': "Claude Instant v1.0"},
+ 'Claude Instant v1.1': {'mode': True, 'id': "anthropic/claude-instant-1.1", 'name': "Claude Instant v1.1"},
+ 'Claude v1': {'mode': True, 'id': "anthropic/claude-1", 'name': "Claude v1"},
+ 'Claude v1.2': {'mode': True, 'id': "anthropic/claude-1.2", 'name': "Claude v1.2"},
+ 'Claude v2': {'mode': True, 'id': "anthropic/claude-2", 'name': "Claude v2"},
+ 'Claude v2 (self-moderated)': {'mode': True, 'id': "anthropic/claude-2:beta", 'name': "Claude v2 (self-moderated)"},
+ 'Claude v2.0': {'mode': True, 'id': "anthropic/claude-2.0", 'name': "Claude v2.0"},
+ 'Claude v2.0 (self-moderated)': {'mode': True, 'id': "anthropic/claude-2.0:beta", 'name': "Claude v2.0 (self-moderated)"},
+ 'Claude v2.1': {'mode': True, 'id': "anthropic/claude-2.1", 'name': "Claude v2.1"},
+ 'Claude v2.1 (self-moderated)': {'mode': True, 'id': "anthropic/claude-2.1:beta", 'name': "Claude v2.1 (self-moderated)"},
+ 'CodeLlama 34B Instruct': {'mode': True, 'id': "meta-llama/codellama-34b-instruct", 'name': "CodeLlama 34B Instruct"},
+ 'CodeLlama 34B v2': {'mode': True, 'id': "phind/phind-codellama-34b", 'name': "CodeLlama 34B v2"},
+ 'CodeLlama 70B Instruct': {'mode': True, 'id': "meta-llama/codellama-70b-instruct", 'name': "CodeLlama 70B Instruct"},
+ 'CodeLLaMa 7B Instruct Solidity': {'mode': True, 'id': "alfredpros/codellama-7b-instruct-solidity", 'name': "CodeLLaMa 7B Instruct Solidity"},
+ 'Codestral 2501': {'mode': True, 'id': "mistralai/codestral-2501", 'name': "Codestral 2501"},
+ 'Codestral Mamba': {'mode': True, 'id': "mistralai/codestral-mamba", 'name': "Codestral Mamba"},
+ 'Command': {'mode': True, 'id': "cohere/command", 'name': "Command"},
+ 'Command A': {'mode': True, 'id': "cohere/command-a", 'name': "Command A"},
+ 'Command R': {'mode': True, 'id': "cohere/command-r", 'name': "Command R"},
+ 'Command R (03-2024)': {'mode': True, 'id': "cohere/command-r-03-2024", 'name': "Command R (03-2024)"},
+ 'Command R (08-2024)': {'mode': True, 'id': "cohere/command-r-08-2024", 'name': "Command R (08-2024)"},
+ 'Command R+': {'mode': True, 'id': "cohere/command-r-plus", 'name': "Command R+"},
+ 'Command R+ (04-2024)': {'mode': True, 'id': "cohere/command-r-plus-04-2024", 'name': "Command R+ (04-2024)"},
+ 'Command R+ (08-2024)': {'mode': True, 'id': "cohere/command-r-plus-08-2024", 'name': "Command R+ (08-2024)"},
+ 'Command R7B (12-2024)': {'mode': True, 'id': "cohere/command-r7b-12-2024", 'name': "Command R7B (12-2024)"},
+ 'DBRX 132B Instruct': {'mode': True, 'id': "databricks/dbrx-instruct", 'name': "DBRX 132B Instruct"},
+ 'DeepSeek R1': {'mode': True, 'id': "deepseek/deepseek-r1", 'name': "DeepSeek R1"},
+ 'DeepSeek V2.5': {'mode': True, 'id': "deepseek/deepseek-chat-v2.5", 'name': "DeepSeek V2.5"},
+ 'DeepSeek V3': {'mode': True, 'id': "deepseek/deepseek-chat", 'name': "DeepSeek V3"},
+ 'DeepSeek V3 0324': {'mode': True, 'id': "deepseek/deepseek-chat-v3-0324", 'name': "DeepSeek V3 0324"},
+ 'DeepSeek-Coder-V2': {'mode': True, 'id': "deepseek/deepseek-coder", 'name': "DeepSeek-Coder-V2"},
+ 'Dolphin 2.6 Mixtral 8x7B \U0001F42C': {'mode': True, 'id': "cognitivecomputations/dolphin-mixtral-8x7b", 'name': "Dolphin 2.6 Mixtral 8x7B \U0001F42C"},
+ 'Dolphin 2.9.2 Mixtral 8x22B \U0001F42C': {'mode': True, 'id': "cognitivecomputations/dolphin-mixtral-8x22b", 'name': "Dolphin 2.9.2 Mixtral 8x22B \U0001F42C"},
+ 'Dolphin Llama 3 70B \U0001F42C': {'mode': True, 'id': "cognitivecomputations/dolphin-llama-3-70b", 'name': "Dolphin Llama 3 70B \U0001F42C"},
+ 'Eagle 7B': {'mode': True, 'id': "recursal/eagle-7b", 'name': "Eagle 7B"},
+ 'EVA Llama 3.33 70B': {'mode': True, 'id': "eva-unit-01/eva-llama-3.33-70b", 'name': "EVA Llama 3.33 70B"},
+ 'EVA Qwen2.5 14B': {'mode': True, 'id': "eva-unit-01/eva-qwen-2.5-14b", 'name': "EVA Qwen2.5 14B"},
+ 'EVA Qwen2.5 32B': {'mode': True, 'id': "eva-unit-01/eva-qwen-2.5-32b", 'name': "EVA Qwen2.5 32B"},
+ 'EVA Qwen2.5 72B': {'mode': True, 'id': "eva-unit-01/eva-qwen-2.5-72b", 'name': "EVA Qwen2.5 72B"},
+ 'Fimbulvetr 11B v2': {'mode': True, 'id': "sao10k/fimbulvetr-11b-v2", 'name': "Fimbulvetr 11B v2"},
+ 'FireLLaVA 13B': {'mode': True, 'id': "fireworks/firellava-13b", 'name': "FireLLaVA 13B"},
+ 'Gemini 1.5 Flash': {'mode': True, 'id': "google/gemini-flash-1.5", 'name': "Gemini 1.5 Flash"},
+ 'Gemini 1.5 Flash 8B': {'mode': True, 'id': "google/gemini-flash-1.5-8b", 'name': "Gemini 1.5 Flash 8B"},
+ 'Gemini 1.5 Flash 8B Experimental': {'mode': True, 'id': "google/gemini-flash-1.5-8b-exp", 'name': "Gemini 1.5 Flash 8B Experimental"},
+ 'Gemini 1.5 Flash Experimental': {'mode': True, 'id': "google/gemini-flash-1.5-exp", 'name': "Gemini 1.5 Flash Experimental"},
+ 'Gemini 1.5 Pro': {'mode': True, 'id': "google/gemini-pro-1.5", 'name': "Gemini 1.5 Pro"},
+ 'Gemini 1.5 Pro Experimental': {'mode': True, 'id': "google/gemini-pro-1.5-exp", 'name': "Gemini 1.5 Pro Experimental"},
+ 'Gemini 2.0 Flash': {'mode': True, 'id': "google/gemini-2.0-flash-001", 'name': "Gemini 2.0 Flash"},
+ 'Gemini 2.0 Flash Lite': {'mode': True, 'id': "google/gemini-2.0-flash-lite-001", 'name': "Gemini 2.0 Flash Lite"},
+ 'Gemini 2.5 Pro': {'mode': True, 'id': "google/gemini-2.5-pro-preview-03-25", 'name': "Gemini 2.5 Pro"},
+ 'Gemini Experimental 1114': {'mode': True, 'id': "google/gemini-exp-1114", 'name': "Gemini Experimental 1114"},
+ 'Gemini Experimental 1121': {'mode': True, 'id': "google/gemini-exp-1121", 'name': "Gemini Experimental 1121"},
+ 'Gemini Pro 1.0': {'mode': True, 'id': "google/gemini-pro", 'name': "Gemini Pro 1.0"},
+ 'Gemini Pro Vision 1.0': {'mode': True, 'id': "google/gemini-pro-vision", 'name': "Gemini Pro Vision 1.0"},
+ 'Gemma 2 27B': {'mode': True, 'id': "google/gemma-2-27b-it", 'name': "Gemma 2 27B"},
+ 'Gemma 2 9B': {'mode': True, 'id': "google/gemma-2-9b-it", 'name': "Gemma 2 9B"},
+ 'Gemma 3 12B': {'mode': True, 'id': "google/gemma-3-12b-it", 'name': "Gemma 3 12B"},
+ 'Gemma 3 27B': {'mode': True, 'id': "google/gemma-3-27b-it", 'name': "Gemma 3 27B"},
+ 'Gemma 3 4B': {'mode': True, 'id': "google/gemma-3-4b-it", 'name': "Gemma 3 4B"},
+ 'Gemma 7B': {'mode': True, 'id': "google/gemma-7b-it", 'name': "Gemma 7B"},
+ 'Goliath 120B': {'mode': True, 'id': "alpindale/goliath-120b", 'name': "Goliath 120B"},
+ 'GPT-3.5 Turbo': {'mode': True, 'id': "openai/gpt-3.5-turbo", 'name': "GPT-3.5 Turbo"},
+ 'GPT-3.5 Turbo (older v0301)': {'mode': True, 'id': "openai/gpt-3.5-turbo-0301", 'name': "GPT-3.5 Turbo (older v0301)"},
+ 'GPT-3.5 Turbo (older v0613)': {'mode': True, 'id': "openai/gpt-3.5-turbo-0613", 'name': "GPT-3.5 Turbo (older v0613)"},
+ 'GPT-3.5 Turbo 16k': {'mode': True, 'id': "openai/gpt-3.5-turbo-16k", 'name': "GPT-3.5 Turbo 16k"},
+ 'GPT-3.5 Turbo 16k (older v0125)': {'mode': True, 'id': "openai/gpt-3.5-turbo-0125", 'name': "GPT-3.5 Turbo 16k (older v0125)"},  # 0125 snapshot; distinct key so it does not shadow the -16k entry above
+ 'GPT-3.5 Turbo 16k (older v1106)': {'mode': True, 'id': "openai/gpt-3.5-turbo-1106", 'name': "GPT-3.5 Turbo 16k (older v1106)"},
+ 'GPT-3.5 Turbo Instruct': {'mode': True, 'id': "openai/gpt-3.5-turbo-instruct", 'name': "GPT-3.5 Turbo Instruct"},
+ 'GPT-4': {'mode': True, 'id': "openai/gpt-4", 'name': "GPT-4"},
+ 'GPT-4 (older v0314)': {'mode': True, 'id': "openai/gpt-4-0314", 'name': "GPT-4 (older v0314)"},
+ 'GPT-4 32k': {'mode': True, 'id': "openai/gpt-4-32k", 'name': "GPT-4 32k"},
+ 'GPT-4 32k (older v0314)': {'mode': True, 'id': "openai/gpt-4-32k-0314", 'name': "GPT-4 32k (older v0314)"},
+ 'GPT-4 Turbo': {'mode': True, 'id': "openai/gpt-4-turbo", 'name': "GPT-4 Turbo"},
+ 'GPT-4 Turbo (older v1106)': {'mode': True, 'id': "openai/gpt-4-1106-preview", 'name': "GPT-4 Turbo (older v1106)"},
+ 'GPT-4 Turbo Preview': {'mode': True, 'id': "openai/gpt-4-turbo-preview", 'name': "GPT-4 Turbo Preview"},
+ 'GPT-4 Vision': {'mode': True, 'id': "openai/gpt-4-vision-preview", 'name': "GPT-4 Vision"},
+ 'GPT-4.1': {'mode': True, 'id': "openai/gpt-4.1", 'name': "GPT-4.1"},
+ 'GPT-4.1 Mini': {'mode': True, 'id': "openai/gpt-4.1-mini", 'name': "GPT-4.1 Mini"},
+ 'GPT-4.1 Nano': {'mode': True, 'id': "openai/gpt-4.1-nano", 'name': "GPT-4.1 Nano"},
+ 'GPT-4.5 (Preview)': {'mode': True, 'id': "openai/gpt-4.5-preview", 'name': "GPT-4.5 (Preview)"},
+ 'GPT-4o': {'mode': True, 'id': "openai/gpt-4o", 'name': "GPT-4o"},
+ 'GPT-4o (2024-05-13)': {'mode': True, 'id': "openai/gpt-4o-2024-05-13", 'name': "GPT-4o (2024-05-13)"},
+ 'GPT-4o (2024-08-06)': {'mode': True, 'id': "openai/gpt-4o-2024-08-06", 'name': "GPT-4o (2024-08-06)"},
+ 'GPT-4o (2024-11-20)': {'mode': True, 'id': "openai/gpt-4o-2024-11-20", 'name': "GPT-4o (2024-11-20)"},
+ 'GPT-4o (extended)': {'mode': True, 'id': "openai/gpt-4o:extended", 'name': "GPT-4o (extended)"},
+ 'GPT-4o Search Preview': {'mode': True, 'id': "openai/gpt-4o-search-preview", 'name': "GPT-4o Search Preview"},
+ 'GPT-4o-mini': {'mode': True, 'id': "openai/gpt-4o-mini", 'name': "GPT-4o-mini"},
+ 'GPT-4o-mini (2024-07-18)': {'mode': True, 'id': "openai/gpt-4o-mini-2024-07-18", 'name': "GPT-4o-mini (2024-07-18)"},
+ 'GPT-4o-mini Search Preview': {'mode': True, 'id': "openai/gpt-4o-mini-search-preview", 'name': "GPT-4o-mini Search Preview"},
+ 'Grok 2': {'mode': True, 'id': "x-ai/grok-2", 'name': "Grok 2"},
+ 'Grok 2 1212': {'mode': True, 'id': "x-ai/grok-2-1212", 'name': "Grok 2 1212"},
+ 'Grok 2 mini': {'mode': True, 'id': "x-ai/grok-2-mini", 'name': "Grok 2 mini"},
+ 'Grok 2 Vision 1212': {'mode': True, 'id': "x-ai/grok-2-vision-1212", 'name': "Grok 2 Vision 1212"},
+ 'Grok 3': {'mode': True, 'id': "x-ai/grok-3-beta", 'name': "Grok 3"},
+ 'Grok 3 Mini Beta': {'mode': True, 'id': "x-ai/grok-3-mini-beta", 'name': "Grok 3 Mini Beta"},
+ 'Grok Beta': {'mode': True, 'id': "x-ai/grok-beta", 'name': "Grok Beta"},
+ 'Grok Vision Beta': {'mode': True, 'id': "x-ai/grok-vision-beta", 'name': "Grok Vision Beta"},
+ 'Hermes 13B': {'mode': True, 'id': "nousresearch/nous-hermes-llama2-13b", 'name': "Hermes 13B"},
+ 'Hermes 2 Mistral 7B DPO': {'mode': True, 'id': "nousresearch/nous-hermes-2-mistral-7b-dpo", 'name': "Hermes 2 Mistral 7B DPO"},
+ 'Hermes 2 Mixtral 8x7B DPO': {'mode': True, 'id': "nousresearch/nous-hermes-2-mixtral-8x7b-dpo", 'name': "Hermes 2 Mixtral 8x7B DPO"},
+ 'Hermes 2 Mixtral 8x7B SFT': {'mode': True, 'id': "nousresearch/nous-hermes-2-mixtral-8x7b-sft", 'name': "Hermes 2 Mixtral 8x7B SFT"},
+ 'Hermes 2 Pro - Llama-3 8B': {'mode': True, 'id': "nousresearch/hermes-2-pro-llama-3-8b", 'name': "Hermes 2 Pro - Llama-3 8B"},
+ 'Hermes 2 Theta 8B': {'mode': True, 'id': "nousresearch/hermes-2-theta-llama-3-8b", 'name': "Hermes 2 Theta 8B"},
+ 'Hermes 2 Vision 7B (alpha)': {'mode': True, 'id': "nousresearch/nous-hermes-2-vision-7b", 'name': "Hermes 2 Vision 7B (alpha)"},
+ 'Hermes 2 Yi 34B': {'mode': True, 'id': "nousresearch/nous-hermes-yi-34b", 'name': "Hermes 2 Yi 34B"},
+ 'Hermes 3 405B Instruct': {'mode': True, 'id': "nousresearch/hermes-3-llama-3.1-405b", 'name': "Hermes 3 405B Instruct"},
+ 'Hermes 3 70B Instruct': {'mode': True, 'id': "nousresearch/hermes-3-llama-3.1-70b", 'name': "Hermes 3 70B Instruct"},
+ 'Hermes 70B': {'mode': True, 'id': "nousresearch/nous-hermes-llama2-70b", 'name': "Hermes 70B"},
+ 'Inflection 3 Pi': {'mode': True, 'id': "inflection/inflection-3-pi", 'name': "Inflection 3 Pi"},
+ 'Inflection 3 Productivity': {'mode': True, 'id': "inflection/inflection-3-productivity", 'name': "Inflection 3 Productivity"},
+ 'Jamba 1.5 Large': {'mode': True, 'id': "ai21/jamba-1-5-large", 'name': "Jamba 1.5 Large"},
+ 'Jamba 1.5 Mini': {'mode': True, 'id': "ai21/jamba-1-5-mini", 'name': "Jamba 1.5 Mini"},
+ 'Jamba 1.6 Large': {'mode': True, 'id': "ai21/jamba-1.6-large", 'name': "Jamba 1.6 Large"},
+ 'Jamba Instruct': {'mode': True, 'id': "ai21/jamba-instruct", 'name': "Jamba Instruct"},
+ 'Jamba Mini 1.6': {'mode': True, 'id': "ai21/jamba-1.6-mini", 'name': "Jamba Mini 1.6"},
+ 'L3.3 Electra R1 70B': {'mode': True, 'id': "steelskull/l3.3-electra-r1-70b", 'name': "L3.3 Electra R1 70B"},
+ 'LFM 3B': {'mode': True, 'id': "liquid/lfm-3b", 'name': "LFM 3B"},
+ 'LFM 40B MoE': {'mode': True, 'id': "liquid/lfm-40b", 'name': "LFM 40B MoE"},
+ 'LFM 7B': {'mode': True, 'id': "liquid/lfm-7b", 'name': "LFM 7B"},
+ 'Llama 2 13B Chat': {'mode': True, 'id': "meta-llama/llama-2-13b-chat", 'name': "Llama 2 13B Chat"},
+ 'Llama 2 70B Chat': {'mode': True, 'id': "meta-llama/llama-2-70b-chat", 'name': "Llama 2 70B Chat"},
+ 'Llama 3 70B (Base)': {'mode': True, 'id': "meta-llama/llama-3-70b", 'name': "Llama 3 70B (Base)"},
+ 'Llama 3 70B Instruct': {'mode': True, 'id': "meta-llama/llama-3-70b-instruct", 'name': "Llama 3 70B Instruct"},
+ 'Llama 3 8B (Base)': {'mode': True, 'id': "meta-llama/llama-3-8b", 'name': "Llama 3 8B (Base)"},
+ 'Llama 3 8B Instruct': {'mode': True, 'id': "meta-llama/llama-3-8b-instruct", 'name': "Llama 3 8B Instruct"},
+ 'Llama 3 8B Lunaris': {'mode': True, 'id': "sao10k/l3-lunaris-8b", 'name': "Llama 3 8B Lunaris"},
+ 'Llama 3 Euryale 70B v2.1': {'mode': True, 'id': "sao10k/l3-euryale-70b", 'name': "Llama 3 Euryale 70B v2.1"},
+ 'Llama 3 Lumimaid 70B': {'mode': True, 'id': "neversleep/llama-3-lumimaid-70b", 'name': "Llama 3 Lumimaid 70B"},
+ 'Llama 3 Lumimaid 8B': {'mode': True, 'id': "neversleep/llama-3-lumimaid-8b", 'name': "Llama 3 Lumimaid 8B"},
+ 'Llama 3 Lumimaid 8B (extended)': {'mode': True, 'id': "neversleep/llama-3-lumimaid-8b:extended", 'name': "Llama 3 Lumimaid 8B (extended)"},
+ 'Llama 3 Soliloquy 7B v3 32K': {'mode': True, 'id': "lynn/soliloquy-v3", 'name': "Llama 3 Soliloquy 7B v3 32K"},
+ 'Llama 3 Soliloquy 8B v2': {'mode': True, 'id': "lynn/soliloquy-l3", 'name': "Llama 3 Soliloquy 8B v2"},
+ 'Llama 3 Stheno 8B v3.3 32K': {'mode': True, 'id': "sao10k/l3-stheno-8b", 'name': "Llama 3 Stheno 8B v3.3 32K"},
+ 'Llama 3.1 405B (base)': {'mode': True, 'id': "meta-llama/llama-3.1-405b", 'name': "Llama 3.1 405B (base)"},
+ 'Llama 3.1 405B Instruct': {'mode': True, 'id': "meta-llama/llama-3.1-405b-instruct", 'name': "Llama 3.1 405B Instruct"},
+ 'Llama 3.1 70B Hanami x1': {'mode': True, 'id': "sao10k/l3.1-70b-hanami-x1", 'name': "Llama 3.1 70B Hanami x1"},
+ 'Llama 3.1 70B Instruct': {'mode': True, 'id': "meta-llama/llama-3.1-70b-instruct", 'name': "Llama 3.1 70B Instruct"},
+ 'Llama 3.1 8B Instruct': {'mode': True, 'id': "meta-llama/llama-3.1-8b-instruct", 'name': "Llama 3.1 8B Instruct"},
+ 'Llama 3.1 Euryale 70B v2.2': {'mode': True, 'id': "sao10k/l3.1-euryale-70b", 'name': "Llama 3.1 Euryale 70B v2.2"},
+ 'Llama 3.1 Nemotron 70B Instruct': {'mode': True, 'id': "nvidia/llama-3.1-nemotron-70b-instruct", 'name': "Llama 3.1 Nemotron 70B Instruct"},
+ 'Llama 3.1 Swallow 70B Instruct V0.3': {'mode': True, 'id': "tokyotech-llm/llama-3.1-swallow-70b-instruct-v0.3", 'name': "Llama 3.1 Swallow 70B Instruct V0.3"},
+ 'Llama 3.1 Swallow 8B Instruct V0.3': {'mode': True, 'id': "tokyotech-llm/llama-3.1-swallow-8b-instruct-v0.3", 'name': "Llama 3.1 Swallow 8B Instruct V0.3"},
+ 'Llama 3.1 Tulu 3 405B': {'mode': True, 'id': "allenai/llama-3.1-tulu-3-405b", 'name': "Llama 3.1 Tulu 3 405B"},
+ 'Llama 3.2 11B Vision Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-11b-vision-instruct", 'name': "Llama 3.2 11B Vision Instruct"},
+ 'Llama 3.2 1B Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-1b-instruct", 'name': "Llama 3.2 1B Instruct"},
+ 'Llama 3.2 3B Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-3b-instruct", 'name': "Llama 3.2 3B Instruct"},
+ 'Llama 3.2 90B Vision Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-90b-vision-instruct", 'name': "Llama 3.2 90B Vision Instruct"},
+ 'Llama 3.3 70B Instruct': {'mode': True, 'id': "meta-llama/llama-3.3-70b-instruct", 'name': "Llama 3.3 70B Instruct"},
+ 'Llama 3.3 Euryale 70B': {'mode': True, 'id': "sao10k/l3.3-euryale-70b", 'name': "Llama 3.3 Euryale 70B"},
+ 'Llama 4 Maverick': {'mode': True, 'id': "meta-llama/llama-4-maverick", 'name': "Llama 4 Maverick"},
+ 'Llama 4 Scout': {'mode': True, 'id': "meta-llama/llama-4-scout", 'name': "Llama 4 Scout"},
+ 'Llama Guard 3 8B': {'mode': True, 'id': "meta-llama/llama-guard-3-8b", 'name': "Llama Guard 3 8B"},
+ 'LlamaGuard 2 8B': {'mode': True, 'id': "meta-llama/llama-guard-2-8b", 'name': "LlamaGuard 2 8B"},
+ 'LLaVA 13B': {'mode': True, 'id': "liuhaotian/llava-13b", 'name': "LLaVA 13B"},
+ 'LLaVA v1.6 34B': {'mode': True, 'id': "liuhaotian/llava-yi-34b", 'name': "LLaVA v1.6 34B"},
+ 'Llemma 7b': {'mode': True, 'id': "eleutherai/llemma_7b", 'name': "Llemma 7b"},
+ 'Lumimaid v0.2 70B': {'mode': True, 'id': "neversleep/llama-3.1-lumimaid-70b", 'name': "Lumimaid v0.2 70B"},
+ 'Lumimaid v0.2 8B': {'mode': True, 'id': "neversleep/llama-3.1-lumimaid-8b", 'name': "Lumimaid v0.2 8B"},
+ 'lzlv 70B': {'mode': True, 'id': "lizpreciatior/lzlv-70b-fp16-hf", 'name': "lzlv 70B"},
+ 'Mag Mell R1 12B': {'mode': True, 'id': "inflatebot/mn-mag-mell-r1", 'name': "Mag Mell R1 12B"},
+ 'Magnum 72B': {'mode': True, 'id': "alpindale/magnum-72b", 'name': "Magnum 72B"},
+ 'Magnum v2 72B': {'mode': True, 'id': "anthracite-org/magnum-v2-72b", 'name': "Magnum v2 72B"},
+ 'Magnum v4 72B': {'mode': True, 'id': "anthracite-org/magnum-v4-72b", 'name': "Magnum v4 72B"},
+ 'Midnight Rose 70B': {'mode': True, 'id': "sophosympatheia/midnight-rose-70b", 'name': "Midnight Rose 70B"},
+ 'MiniMax-01': {'mode': True, 'id': "minimax/minimax-01", 'name': "MiniMax-01"},
+ 'Ministral 3B': {'mode': True, 'id': "mistralai/ministral-3b", 'name': "Ministral 3B"},
+ 'Ministral 8B': {'mode': True, 'id': "mistralai/ministral-8b", 'name': "Ministral 8B"},
+ 'Mistral 7B Instruct': {'mode': True, 'id': "mistralai/mistral-7b-instruct", 'name': "Mistral 7B Instruct"},
+ 'Mistral 7B Instruct v0.1': {'mode': True, 'id': "mistralai/mistral-7b-instruct-v0.1", 'name': "Mistral 7B Instruct v0.1"},
+ 'Mistral 7B Instruct v0.2': {'mode': True, 'id': "mistralai/mistral-7b-instruct-v0.2", 'name': "Mistral 7B Instruct v0.2"},
+ 'Mistral 7B Instruct v0.3': {'mode': True, 'id': "mistralai/mistral-7b-instruct-v0.3", 'name': "Mistral 7B Instruct v0.3"},
+ 'Mistral Large': {'mode': True, 'id': "mistralai/mistral-large", 'name': "Mistral Large"},
+ 'Mistral Large 2407': {'mode': True, 'id': "mistralai/mistral-large-2407", 'name': "Mistral Large 2407"},
+ 'Mistral Large 2411': {'mode': True, 'id': "mistralai/mistral-large-2411", 'name': "Mistral Large 2411"},
+ 'Mistral Medium': {'mode': True, 'id': "mistralai/mistral-medium", 'name': "Mistral Medium"},
+ 'Mistral Nemo': {'mode': True, 'id': "mistralai/mistral-nemo", 'name': "Mistral Nemo"},
+ 'Mistral Nemo 12B Celeste': {'mode': True, 'id': "nothingiisreal/mn-celeste-12b", 'name': "Mistral Nemo 12B Celeste"},
+ 'Mistral Nemo Inferor 12B': {'mode': True, 'id': "infermatic/mn-inferor-12b", 'name': "Mistral Nemo Inferor 12B"},
+ 'Mistral OpenOrca 7B': {'mode': True, 'id': "open-orca/mistral-7b-openorca", 'name': "Mistral OpenOrca 7B"},
+ 'Mistral Small': {'mode': True, 'id': "mistralai/mistral-small", 'name': "Mistral Small"},
+ 'Mistral Small 3': {'mode': True, 'id': "mistralai/mistral-small-24b-instruct-2501", 'name': "Mistral Small 3"},
+ 'Mistral Small 3.1 24B': {'mode': True, 'id': "mistralai/mistral-small-3.1-24b-instruct", 'name': "Mistral Small 3.1 24B"},
+ 'Mistral Tiny': {'mode': True, 'id': "mistralai/mistral-tiny", 'name': "Mistral Tiny"},
+ 'Mixtral 8x22B (base)': {'mode': True, 'id': "mistralai/mixtral-8x22b", 'name': "Mixtral 8x22B (base)"},
+ 'Mixtral 8x22B Instruct': {'mode': True, 'id': "mistralai/mixtral-8x22b-instruct", 'name': "Mixtral 8x22B Instruct"},
+ 'Mixtral 8x7B Instruct': {'mode': True, 'id': "mistralai/mixtral-8x7b-instruct", 'name': "Mixtral 8x7B Instruct"},
+ 'Mythalion 13B': {'mode': True, 'id': "pygmalionai/mythalion-13b", 'name': "Mythalion 13B"},
+ 'MythoMax 13B': {'mode': True, 'id': "gryphe/mythomax-l2-13b", 'name': "MythoMax 13B"},
+ 'MythoMist 7B': {'mode': True, 'id': "gryphe/mythomist-7b", 'name': "MythoMist 7B"},
+ 'Nemotron-4 340B Instruct': {'mode': True, 'id': "nvidia/nemotron-4-340b-instruct", 'name': "Nemotron-4 340B Instruct"},
+ 'Neural Chat 7B v3.1': {'mode': True, 'id': "intel/neural-chat-7b", 'name': "Neural Chat 7B v3.1"},
+ 'Noromaid 20B': {'mode': True, 'id': "neversleep/noromaid-20b", 'name': "Noromaid 20B"},
+ 'Noromaid Mixtral 8x7B Instruct': {'mode': True, 'id': "neversleep/noromaid-mixtral-8x7b-instruct", 'name': "Noromaid Mixtral 8x7B Instruct"},
+ 'Nova Lite 1.0': {'mode': True, 'id': "amazon/nova-lite-v1", 'name': "Nova Lite 1.0"},
+ 'Nova Micro 1.0': {'mode': True, 'id': "amazon/nova-micro-v1", 'name': "Nova Micro 1.0"},
+ 'Nova Pro 1.0': {'mode': True, 'id': "amazon/nova-pro-v1", 'name': "Nova Pro 1.0"},
+ 'o1': {'mode': True, 'id': "openai/o1", 'name': "o1"},
+ 'o1-mini': {'mode': True, 'id': "openai/o1-mini", 'name': "o1-mini"},
+ 'o1-mini (2024-09-12)': {'mode': True, 'id': "openai/o1-mini-2024-09-12", 'name': "o1-mini (2024-09-12)"},
+ 'o1-preview': {'mode': True, 'id': "openai/o1-preview", 'name': "o1-preview"},
+ 'o1-preview (2024-09-12)': {'mode': True, 'id': "openai/o1-preview-2024-09-12", 'name': "o1-preview (2024-09-12)"},
+ 'o1-pro': {'mode': True, 'id': "openai/o1-pro", 'name': "o1-pro"},
+ 'o3 Mini': {'mode': True, 'id': "openai/o3-mini", 'name': "o3 Mini"},
+ 'o3 Mini High': {'mode': True, 'id': "openai/o3-mini-high", 'name': "o3 Mini High"},
+ 'Olmo 2 32B Instruct': {'mode': True, 'id': "allenai/olmo-2-0325-32b-instruct", 'name': "Olmo 2 32B Instruct"},
+ 'OLMo 7B Instruct': {'mode': True, 'id': "allenai/olmo-7b-instruct", 'name': "OLMo 7B Instruct"},
+ 'OpenChat 3.5 7B': {'mode': True, 'id': "openchat/openchat-7b", 'name': "OpenChat 3.5 7B"},
+ 'OpenChat 3.6 8B': {'mode': True, 'id': "openchat/openchat-8b", 'name': "OpenChat 3.6 8B"},
+ 'OpenHands LM 32B V0.1': {'mode': True, 'id': "all-hands/openhands-lm-32b-v0.1", 'name': "OpenHands LM 32B V0.1"},
+ 'OpenHermes 2 Mistral 7B': {'mode': True, 'id': "teknium/openhermes-2-mistral-7b", 'name': "OpenHermes 2 Mistral 7B"},
+ 'OpenHermes 2.5 Mistral 7B': {'mode': True, 'id': "teknium/openhermes-2.5-mistral-7b", 'name': "OpenHermes 2.5 Mistral 7B"},
+ 'Optimus Alpha': {'mode': True, 'id': "openrouter/optimus-alpha", 'name': "Optimus Alpha"},
+ 'PaLM 2 Chat': {'mode': True, 'id': "google/palm-2-chat-bison", 'name': "PaLM 2 Chat"},
+ 'PaLM 2 Chat 32k': {'mode': True, 'id': "google/palm-2-chat-bison-32k", 'name': "PaLM 2 Chat 32k"},
+ 'PaLM 2 Code Chat': {'mode': True, 'id': "google/palm-2-codechat-bison", 'name': "PaLM 2 Code Chat"},
+ 'PaLM 2 Code Chat 32k': {'mode': True, 'id': "google/palm-2-codechat-bison-32k", 'name': "PaLM 2 Code Chat 32k"},
+ 'Phi 4': {'mode': True, 'id': "microsoft/phi-4", 'name': "Phi 4"},
+ 'Phi 4 Multimodal Instruct': {'mode': True, 'id': "microsoft/phi-4-multimodal-instruct", 'name': "Phi 4 Multimodal Instruct"},
+ 'Phi-3 Medium 128K Instruct': {'mode': True, 'id': "microsoft/phi-3-medium-128k-instruct", 'name': "Phi-3 Medium 128K Instruct"},
+ 'Phi-3 Medium 4K Instruct': {'mode': True, 'id': "microsoft/phi-3-medium-4k-instruct", 'name': "Phi-3 Medium 4K Instruct"},
+ 'Phi-3 Mini 128K Instruct': {'mode': True, 'id': "microsoft/phi-3-mini-128k-instruct", 'name': "Phi-3 Mini 128K Instruct"},
+ 'Phi-3.5 Mini 128K Instruct': {'mode': True, 'id': "microsoft/phi-3.5-mini-128k-instruct", 'name': "Phi-3.5 Mini 128K Instruct"},
+ 'Pixtral 12B': {'mode': True, 'id': "mistralai/pixtral-12b", 'name': "Pixtral 12B"},
+ 'Pixtral Large 2411': {'mode': True, 'id': "mistralai/pixtral-large-2411", 'name': "Pixtral Large 2411"},
+ 'Psyfighter 13B': {'mode': True, 'id': "jebcarter/psyfighter-13b", 'name': "Psyfighter 13B"},
+ 'Psyfighter v2 13B': {'mode': True, 'id': "koboldai/psyfighter-13b-2", 'name': "Psyfighter v2 13B"},
+ 'Quasar Alpha': {'mode': True, 'id': "openrouter/quasar-alpha", 'name': "Quasar Alpha"},
+ 'Qwen 1.5 110B Chat': {'mode': True, 'id': "qwen/qwen-110b-chat", 'name': "Qwen 1.5 110B Chat"},
+ 'Qwen 1.5 14B Chat': {'mode': True, 'id': "qwen/qwen-14b-chat", 'name': "Qwen 1.5 14B Chat"},
+ 'Qwen 1.5 32B Chat': {'mode': True, 'id': "qwen/qwen-32b-chat", 'name': "Qwen 1.5 32B Chat"},
+ 'Qwen 1.5 4B Chat': {'mode': True, 'id': "qwen/qwen-4b-chat", 'name': "Qwen 1.5 4B Chat"},
+ 'Qwen 1.5 72B Chat': {'mode': True, 'id': "qwen/qwen-72b-chat", 'name': "Qwen 1.5 72B Chat"},
+ 'Qwen 1.5 7B Chat': {'mode': True, 'id': "qwen/qwen-7b-chat", 'name': "Qwen 1.5 7B Chat"},
+ 'Qwen 2 72B Instruct': {'mode': True, 'id': "qwen/qwen-2-72b-instruct", 'name': "Qwen 2 72B Instruct"},
+ 'Qwen 2 7B Instruct': {'mode': True, 'id': "qwen/qwen-2-7b-instruct", 'name': "Qwen 2 7B Instruct"},
+ 'Qwen VL Max': {'mode': True, 'id': "qwen/qwen-vl-max", 'name': "Qwen VL Max"},
+ 'Qwen VL Plus': {'mode': True, 'id': "qwen/qwen-vl-plus", 'name': "Qwen VL Plus"},
+ 'Qwen-Max': {'mode': True, 'id': "qwen/qwen-max", 'name': "Qwen-Max"},
+ 'Qwen-Plus': {'mode': True, 'id': "qwen/qwen-plus", 'name': "Qwen-Plus"},
+ 'Qwen-Turbo': {'mode': True, 'id': "qwen/qwen-turbo", 'name': "Qwen-Turbo"},
+ 'Qwen2.5 32B Instruct': {'mode': True, 'id': "qwen/qwen2.5-32b-instruct", 'name': "Qwen2.5 32B Instruct"},
+ 'Qwen2.5 72B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-72b-instruct", 'name': "Qwen2.5 72B Instruct"},
+ 'Qwen2.5 7B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-7b-instruct", 'name': "Qwen2.5 7B Instruct"},
+ 'Qwen2.5 Coder 32B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-coder-32b-instruct", 'name': "Qwen2.5 Coder 32B Instruct"},
+ 'Qwen2.5 VL 32B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-32b-instruct", 'name': "Qwen2.5 VL 32B Instruct"},
+ 'Qwen2.5 VL 72B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-72b-instruct", 'name': "Qwen2.5 VL 72B Instruct"},
+ 'Qwen2.5-VL 72B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-vl-72b-instruct", 'name': "Qwen2.5-VL 72B Instruct"},
+ 'Qwen2.5-VL 7B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-vl-7b-instruct", 'name': "Qwen2.5-VL 7B Instruct"},
+ 'QwQ 32B': {'mode': True, 'id': "qwen/qwq-32b", 'name': "QwQ 32B"},
+ 'QwQ 32B Preview': {'mode': True, 'id': "qwen/qwq-32b-preview", 'name': "QwQ 32B Preview"},
+ 'R1': {'mode': True, 'id': "deepseek/deepseek-r1", 'name': "R1"},
+ 'R1 Distill Llama 70B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-llama-70b", 'name': "R1 Distill Llama 70B"},
+ 'R1 Distill Llama 8B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-llama-8b", 'name': "R1 Distill Llama 8B"},
+ 'R1 Distill Qwen 1.5B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-1.5b", 'name': "R1 Distill Qwen 1.5B"},
+ 'R1 Distill Qwen 14B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-14b", 'name': "R1 Distill Qwen 14B"},
+ 'R1 Distill Qwen 32B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-32b", 'name': "R1 Distill Qwen 32B"},
+ 'Reflection 70B': {'mode': True, 'id': "mattshumer/reflection-70b", 'name': "Reflection 70B"},
+ 'ReMM SLERP 13B': {'mode': True, 'id': "undi95/remm-slerp-l2-13b", 'name': "ReMM SLERP 13B"},
+ 'Rocinante 12B': {'mode': True, 'id': "thedrummer/rocinante-12b", 'name': "Rocinante 12B"},
+ 'RWKV v5 3B AI Town': {'mode': True, 'id': "recursal/rwkv-5-3b-ai-town", 'name': "RWKV v5 3B AI Town"},
+ 'RWKV v5 World 3B': {'mode': True, 'id': "rwkv/rwkv-5-world-3b", 'name': "RWKV v5 World 3B"},
+ 'Saba': {'mode': True, 'id': "mistralai/mistral-saba", 'name': "Saba"},
+ 'Skyfall 36B V2': {'mode': True, 'id': "thedrummer/skyfall-36b-v2", 'name': "Skyfall 36B V2"},
+ 'SorcererLM 8x22B': {'mode': True, 'id': "raifle/sorcererlm-8x22b", 'name': "SorcererLM 8x22B"},
+ 'Starcannon 12B': {'mode': True, 'id': "aetherwiing/mn-starcannon-12b", 'name': "Starcannon 12B"},
+ 'StarCoder2 15B Instruct': {'mode': True, 'id': "bigcode/starcoder2-15b-instruct", 'name': "StarCoder2 15B Instruct"},
+ 'StripedHyena Hessian 7B (base)': {'mode': True, 'id': "togethercomputer/stripedhyena-hessian-7b", 'name': "StripedHyena Hessian 7B (base)"},
+ 'StripedHyena Nous 7B': {'mode': True, 'id': "togethercomputer/stripedhyena-nous-7b", 'name': "StripedHyena Nous 7B"},
+ 'Synthia 70B': {'mode': True, 'id': "migtissera/synthia-70b", 'name': "Synthia 70B"},
+ 'Toppy M 7B': {'mode': True, 'id': "undi95/toppy-m-7b", 'name': "Toppy M 7B"},
+ 'Typhoon2 70B Instruct': {'mode': True, 'id': "scb10x/llama3.1-typhoon2-70b-instruct", 'name': "Typhoon2 70B Instruct"},
+ 'Typhoon2 8B Instruct': {'mode': True, 'id': "scb10x/llama3.1-typhoon2-8b-instruct", 'name': "Typhoon2 8B Instruct"},
+ 'Unslopnemo 12B': {'mode': True, 'id': "thedrummer/unslopnemo-12b", 'name': "Unslopnemo 12B"},
+ 'Wayfarer Large 70B Llama 3.3': {'mode': True, 'id': "latitudegames/wayfarer-large-70b-llama-3.3", 'name': "Wayfarer Large 70B Llama 3.3"},
+ 'Weaver (alpha)': {'mode': True, 'id': "mancer/weaver", 'name': "Weaver (alpha)"},
+ 'WizardLM-2 7B': {'mode': True, 'id': "microsoft/wizardlm-2-7b", 'name': "WizardLM-2 7B"},
+ 'WizardLM-2 8x22B': {'mode': True, 'id': "microsoft/wizardlm-2-8x22b", 'name': "WizardLM-2 8x22B"},
+ 'Xwin 70B': {'mode': True, 'id': "xwin-lm/xwin-lm-70b", 'name': "Xwin 70B"},
+ 'Yi 1.5 34B Chat': {'mode': True, 'id': "01-ai/yi-1.5-34b-chat", 'name': "Yi 1.5 34B Chat"},
+ 'Yi 34B (base)': {'mode': True, 'id': "01-ai/yi-34b", 'name': "Yi 34B (base)"},
+ 'Yi 34B 200K': {'mode': True, 'id': "01-ai/yi-34b-200k", 'name': "Yi 34B 200K"},
+ 'Yi 34B Chat': {'mode': True, 'id': "01-ai/yi-34b-chat", 'name': "Yi 34B Chat"},
+ 'Yi 6B (base)': {'mode': True, 'id': "01-ai/yi-6b", 'name': "Yi 6B (base)"},
+ 'Yi Large': {'mode': True, 'id': "01-ai/yi-large", 'name': "Yi Large"},
+ 'Yi Large FC': {'mode': True, 'id': "01-ai/yi-large-fc", 'name': "Yi Large FC"},
+ 'Yi Large Turbo': {'mode': True, 'id': "01-ai/yi-large-turbo", 'name': "Yi Large Turbo"},
+ 'Yi Vision': {'mode': True, 'id': "01-ai/yi-vision", 'name': "Yi Vision"},
+ 'Zephyr 141B-A35B': {'mode': True, 'id': "huggingfaceh4/zephyr-orpo-141b-a35b", 'name': "Zephyr 141B-A35B"},
+
+ # Default
+ 'GPT-4o': {'mode': True, 'id': "GPT-4o", 'name': "GPT-4o"},
+ 'Gemini-PRO': {'mode': True, 'id': "Gemini-PRO", 'name': "Gemini-PRO"},
+ 'Claude-sonnet-3.7': {'mode': True, 'id': "Claude-sonnet-3.7", 'name': "Claude-sonnet-3.7"},
+ 'Claude-sonnet-3.5': {'mode': True, 'id': "Claude-sonnet-3.5", 'name': "Claude-sonnet-3.5"},
+ 'DeepSeek-V3': {'mode': True, 'id': "deepseek-chat", 'name': "DeepSeek-V3"},
+ 'DeepSeek-R1': {'mode': True, 'id': "deepseek-reasoner", 'name': "DeepSeek-R1"},
+ 'Meta-Llama-3.3-70B-Instruct-Turbo': {'mode': True, 'id': "meta-llama/Llama-3.3-70B-Instruct-Turbo", 'name': "Meta-Llama-3.3-70B-Instruct-Turbo"},
+ 'Gemini-Flash-2.0': {'mode': True, 'id': "Gemini/Gemini-Flash-2.0", 'name': "Gemini-Flash-2.0"},
+ 'Mistral-Small-24B-Instruct-2501': {'mode': True, 'id': "mistralai/Mistral-Small-24B-Instruct-2501", 'name': "Mistral-Small-24B-Instruct-2501"},
+ 'DeepSeek-LLM-Chat-(67B)': {'mode': True, 'id': "deepseek-ai/deepseek-llm-67b-chat", 'name': "DeepSeek-LLM-Chat-(67B)"},
+ 'DBRX-Instruct': {'mode': True, 'id': "databricks/dbrx-instruct", 'name': "DBRX-Instruct"},
+ 'Qwen-QwQ-32B-Preview': {'mode': True, 'id': "Qwen/QwQ-32B-Preview", 'name': "Qwen-QwQ-32B-Preview"},
+ 'Nous-Hermes-2-Mixtral-8x7B-DPO': {'mode': True, 'id': "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", 'name': "Nous-Hermes-2-Mixtral-8x7B-DPO"},
+ }
+
+ # Trending agent modes
+ trendingAgentMode = {
+ 'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"},
+ "Gemini Agent": {'mode': True, 'id': 'gemini'},
+ "llama-3.1-405 Agent": {'mode': True, 'id': "llama-3.1-405"},
+ 'llama-3.1-70b Agent': {'mode': True, 'id': "llama-3.1-70b"},
+ 'llama-3.1-8b Agent': {'mode': True, 'id': "llama-3.1-8b"},
+ 'Python Agent': {'mode': True, 'id': "python"},
+ 'HTML Agent': {'mode': True, 'id': "html"},
+ 'Builder Agent': {'mode': True, 'id': "builder"},
+ 'Java Agent': {'mode': True, 'id': "java"},
+ 'JavaScript Agent': {'mode': True, 'id': "javascript"},
+ 'React Agent': {'mode': True, 'id': "react"},
+ 'Android Agent': {'mode': True, 'id': "android"},
+ 'Flutter Agent': {'mode': True, 'id': "flutter"},
+ 'Next.js Agent': {'mode': True, 'id': "next.js"},
+ 'AngularJS Agent': {'mode': True, 'id': "angularjs"},
+ 'Swift Agent': {'mode': True, 'id': "swift"},
+ 'MongoDB Agent': {'mode': True, 'id': "mongodb"},
+ 'PyTorch Agent': {'mode': True, 'id': "pytorch"},
+ 'Xcode Agent': {'mode': True, 'id': "xcode"},
+ 'Azure Agent': {'mode': True, 'id': "azure"},
+ 'Bitbucket Agent': {'mode': True, 'id': "bitbucket"},
+ 'DigitalOcean Agent': {'mode': True, 'id': "digitalocean"},
+ 'Docker Agent': {'mode': True, 'id': "docker"},
+ 'Electron Agent': {'mode': True, 'id': "electron"},
+ 'Erlang Agent': {'mode': True, 'id': "erlang"},
+ 'FastAPI Agent': {'mode': True, 'id': "fastapi"},
+ 'Firebase Agent': {'mode': True, 'id': "firebase"},
+ 'Flask Agent': {'mode': True, 'id': "flask"},
+ 'Git Agent': {'mode': True, 'id': "git"},
+ 'Gitlab Agent': {'mode': True, 'id': "gitlab"},
+ 'Go Agent': {'mode': True, 'id': "go"},
+ 'Godot Agent': {'mode': True, 'id': "godot"},
+ 'Google Cloud Agent': {'mode': True, 'id': "googlecloud"},
+ 'Heroku Agent': {'mode': True, 'id': "heroku"},
+ }
+
+ # Complete list of all models (for authorized users)
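+ # dict.fromkeys preserves first-seen order while dropping duplicates,
+ # so names shared between the free and premium lists appear only once.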
+ _all_models = list(dict.fromkeys([
+ *fallback_models, # Include all free models
+ *premium_models, # Include all premium models
+ *openrouter_pro_models, # Include all OpenRouter Pro models
+ *image_models,
+ *list(agentMode.keys()),
+ *list(trendingAgentMode.keys())
+ ]))
+
+ # Initialize models with fallback_models
+ models = fallback_models
+
+ @classmethod
+ async def get_models_async(cls) -> list:
+ """
+ Asynchronous version of get_models that checks subscription status.
+ Returns a list of available models based on subscription status.
+ Premium users get the full list of models.
+ Free users get fallback_models.
+ """
+ # Check if there is valid session data in HAR files
+ session_data = cls._find_session_in_har_files()
+
+ if not session_data:
+ # For users without HAR files - return free models
+ debug.log(f"BlackboxPro: Returning free model list with {len(cls.fallback_models)} models")
+ return cls.fallback_models
+
+ # For accounts with HAR files, check subscription status
+ if 'user' in session_data and 'email' in session_data['user']:
+ subscription = await cls.check_subscription(session_data['user']['email'])
+ if subscription['status'] == "PREMIUM":
+ debug.log(f"BlackboxPro: Returning premium model list with {len(cls._all_models)} models")
+ return cls._all_models
+
+ # For free accounts - return free models
+ debug.log(f"BlackboxPro: Returning free model list with {len(cls.fallback_models)} models")
+ return cls.fallback_models
+
+ @classmethod
+ def get_models(cls) -> list:
+ """
+ Returns a list of available models based on authorization status.
+ Authorized users get the full list of models.
+ Free users get fallback_models.
+
+ Note: This is a synchronous method that can't check subscription status,
+ so it falls back to the basic premium access check.
+ For more accurate results, use get_models_async when possible.
+ """
+ # Check if there is valid session data in HAR files
+ session_data = cls._find_session_in_har_files()
+
+ if not session_data:
+ # For users without HAR files - return free models
+ debug.log(f"BlackboxPro: Returning free model list with {len(cls.fallback_models)} models")
+ return cls.fallback_models
+
+ # For accounts with HAR files, check premium access
+ has_premium_access = cls._check_premium_access()
+
+ if has_premium_access:
+ # For premium users - all models
+ debug.log(f"BlackboxPro: Returning premium model list with {len(cls._all_models)} models")
+ return cls._all_models
+
+ # For free accounts - return free models
+ debug.log(f"BlackboxPro: Returning free model list with {len(cls.fallback_models)} models")
+ return cls.fallback_models
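+
+ # Usage sketch (illustrative): prefer get_models_async when an event loop is
+ # available, e.g. `models = await cls.get_models_async()`; the sync variant
+ # only checks that a HAR session exists, not the actual subscription tier.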
+
+ @classmethod
+ async def check_subscription(cls, email: str) -> dict:
+ """
+ Check subscription status for a given email using the Blackbox API.
+
+ Args:
+ email: The email to check subscription for
+
+ Returns:
+ dict: Subscription status information with keys:
+ - status: "PREMIUM" or "FREE"
+ - customerId: Customer ID if available
+ - isTrialSubscription: Whether this is a trial subscription
+ """
+ if not email:
+ return {"status": "FREE", "customerId": None, "isTrialSubscription": False, "lastChecked": None}
+
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en',
+ 'content-type': 'application/json',
+ 'origin': 'https://www.blackbox.ai',
+ 'referer': 'https://www.blackbox.ai/?ref=login-success',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36'
+ }
+
+ try:
+ async with ClientSession(headers=headers) as session:
+ async with session.post(
+ 'https://www.blackbox.ai/api/check-subscription',
+ json={"email": email}
+ ) as response:
+ if response.status != 200:
+ debug.log(f"BlackboxPro: Subscription check failed with status {response.status}")
+ return {"status": "FREE", "customerId": None, "isTrialSubscription": False, "lastChecked": None}
+
+ result = await response.json()
+ status = "PREMIUM" if result.get("hasActiveSubscription", False) else "FREE"
+
+ return {
+ "status": status,
+ "customerId": result.get("customerId"),
+ "isTrialSubscription": result.get("isTrialSubscription", False),
+ "lastChecked": result.get("lastChecked")
+ }
+ except Exception as e:
+ debug.log(f"BlackboxPro: Error checking subscription: {e}")
+ return {"status": "FREE", "customerId": None, "isTrialSubscription": False, "lastChecked": None}
+
+ @classmethod
+ def _check_premium_access(cls) -> bool:
+ """
+ Checks for an authorized session in HAR files.
+ Returns True if a valid session is found.
+ """
+ try:
+ session_data = cls._find_session_in_har_files()
+ if not session_data:
+ return False
+
+ # Any session found in HAR files is treated as premium access here;
+ # the real subscription check happens in get_models_async
+ return True
+ except Exception as e:
+ debug.log(f"BlackboxPro: Error checking premium access: {e}")
+ return False
+
+ @classmethod
+ def _find_session_in_har_files(cls) -> Optional[dict]:
+ """
+ Search for valid session data in HAR files.
+
+ Returns:
+ Optional[dict]: Session data if found, None otherwise
+ """
+ try:
+ for file in get_har_files():
+ try:
+ with open(file, 'rb') as f:
+ har_data = json.load(f)
+
+ for entry in har_data['log']['entries']:
+ # Only look at blackbox API responses
+ if 'blackbox.ai/api' in entry['request']['url']:
+ # Look for a response that has the right structure
+ if 'response' in entry and 'content' in entry['response']:
+ content = entry['response']['content']
+ # Look for both regular and Google auth session formats
+ if ('text' in content and
+ isinstance(content['text'], str) and
+ '"user"' in content['text'] and
+ '"email"' in content['text'] and
+ '"expires"' in content['text']):
+ try:
+ # Trim whitespace and only attempt to parse spans that look like a JSON object
+ text = content['text'].strip()
+ if text.startswith('{') and text.endswith('}'):
+ # Replace escaped quotes
+ text = text.replace('\\"', '"')
+ har_session = json.loads(text)
+
+ # Check if this is a valid session object
+ if (isinstance(har_session, dict) and
+ 'user' in har_session and
+ 'email' in har_session['user'] and
+ 'expires' in har_session):
+
+ debug.log(f"BlackboxPro: Found session in HAR file: {file}")
+ return har_session
+ except json.JSONDecodeError as e:
+ # Only log an error for entries that truly look like session data
+ if ('"user"' in content['text'] and
+ '"email"' in content['text']):
+ debug.log(f"BlackboxPro: Error parsing likely session data: {e}")
+ except Exception as e:
+ debug.log(f"BlackboxPro: Error reading HAR file {file}: {e}")
+ return None
+ except NoValidHarFileError:
+ pass
+ except Exception as e:
+ debug.log(f"BlackboxPro: Error searching HAR files: {e}")
+ return None
+
+ @classmethod
+ async def fetch_validated(cls, url: str = "https://www.blackbox.ai", force_refresh: bool = False) -> Optional[str]:
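+ """
+ Retrieve the 'validated' UUID the chat API expects by scanning the site's
+ Next.js chunk files, caching the result in blackbox.json; pass
+ force_refresh=True to bypass the cache.
+ """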
+ cache_file = Path(get_cookies_dir()) / 'blackbox.json'
+
+ if not force_refresh and cache_file.exists():
+ try:
+ with open(cache_file, 'r') as f:
+ data = json.load(f)
+ if data.get('validated_value'):
+ return data['validated_value']
+ except Exception as e:
+ debug.log(f"BlackboxPro: Error reading cache: {e}")
+
+ js_file_pattern = r'static/chunks/\d{4}-[a-fA-F0-9]+\.js'
+ uuid_pattern = r'["\']([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})["\']'
+
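+ # Heuristic: accept a UUID only if the ±10 chars around it look like a JS
+ # assignment (some letter followed by '='), filtering out incidental UUIDs
+ # that appear elsewhere in the static bundles.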
+ def is_valid_context(text: str) -> bool:
+ return any(char + '=' in text for char in 'abcdefghijklmnopqrstuvwxyz')
+
+ async with ClientSession() as session:
+ try:
+ async with session.get(url) as response:
+ if response.status != 200:
+ return None
+
+ page_content = await response.text()
+ js_files = re.findall(js_file_pattern, page_content)
+
+ for js_file in js_files:
+ js_url = f"{url}/_next/{js_file}"
+ async with session.get(js_url) as js_response:
+ if js_response.status == 200:
+ js_content = await js_response.text()
+ for match in re.finditer(uuid_pattern, js_content):
+ start = max(0, match.start() - 10)
+ end = min(len(js_content), match.end() + 10)
+ context = js_content[start:end]
+
+ if is_valid_context(context):
+ validated_value = match.group(1)
+
+ cache_file.parent.mkdir(exist_ok=True)
+ try:
+ with open(cache_file, 'w') as f:
+ json.dump({'validated_value': validated_value}, f)
+ except Exception as e:
+ debug.log(f"BlackboxPro: Error writing cache: {e}")
+
+ return validated_value
+
+ except Exception as e:
+ debug.log(f"BlackboxPro: Error retrieving validated_value: {e}")
+
+ return None
+
+ @classmethod
+ def generate_id(cls, length: int = 7) -> str:
+ chars = string.ascii_letters + string.digits
+ return ''.join(random.choice(chars) for _ in range(length))
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ prompt: str = None,
+ proxy: str = None,
+ media: MediaListType = None,
+ top_p: float = None,
+ temperature: float = None,
+ max_tokens: int = None,
+ conversation: Conversation = None,
+ return_conversation: bool = True,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'content-type': 'application/json',
+ 'origin': 'https://www.blackbox.ai',
+ 'referer': 'https://www.blackbox.ai/',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
+ }
+
+ async with ClientSession(headers=headers) as session:
+ if conversation is None or not hasattr(conversation, "chat_id"):
+ conversation = Conversation(model)
+ conversation.validated_value = await cls.fetch_validated()
+ conversation.chat_id = cls.generate_id()
+ conversation.message_history = []
+
+ current_messages = []
+ for i, msg in enumerate(render_messages(messages)):
+ msg_id = conversation.chat_id if i == 0 and msg["role"] == "user" else cls.generate_id()
+ current_msg = {
+ "id": msg_id,
+ "content": msg["content"],
+ "role": msg["role"]
+ }
+ current_messages.append(current_msg)
+
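+            # Attach any uploaded images to the last message as data URIs,
+            # mirroring the web client's imagesData structure.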
+ media = list(merge_media(media, messages))
+ if media:
+ current_messages[-1]['data'] = {
+ "imagesData": [
+ {
+ "filePath": f"/{image_name}",
+ "contents": to_data_uri(image)
+ }
+ for image, image_name in media
+ ],
+ "fileText": "",
+ "title": ""
+ }
+
+ # Get session data from HAR files
+ session_data = cls._find_session_in_har_files()
+
+            # Require a logged-in session from the HAR files; fail fast with a
+            # clear error instead of letting the API reject the request later.
+            if not session_data:
+                debug.log("BlackboxPro: No valid session found in HAR files")
+                raise NoValidHarFileError("No valid Blackbox session found. Please log in to Blackbox AI in your browser first.")
+
+            debug.log(f"BlackboxPro: Using session from HAR file (email: {session_data.get('user', {}).get('email', 'unknown')})")
+
+ # Check subscription status
+ subscription_status = {"status": "FREE", "customerId": None, "isTrialSubscription": False, "lastChecked": None}
+ if session_data.get('user', {}).get('email'):
+ subscription_status = await cls.check_subscription(session_data['user']['email'])
+ debug.log(f"BlackboxPro: Subscription status for {session_data['user']['email']}: {subscription_status['status']}")
+
+ # Determine if user has premium access based on subscription status
+ if subscription_status['status'] == "PREMIUM":
+ is_premium = True
+ else:
+                # For free accounts, only models in fallback_models are available
+                is_premium = model in cls.fallback_models
+                if not is_premium:
+                    debug.log(f"BlackboxPro: Model {model} not available on a free account, falling back to the default model")
+                    model = cls.default_model
+                    is_premium = True  # the default model is available to free accounts
+
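+            # The payload mirrors the web client: most flags are fixed defaults;
+            # only model routing, session and subscription fields vary per request.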
+ data = {
+ "messages": current_messages,
+                "agentMode": cls.agentMode.get(model, {}),
+ "id": conversation.chat_id,
+ "previewToken": None,
+ "userId": None,
+ "codeModelMode": True,
+                "trendingAgentMode": cls.trendingAgentMode.get(model, {}),
+ "isMicMode": False,
+ "userSystemPrompt": None,
+ "maxTokens": max_tokens,
+ "playgroundTopP": top_p,
+ "playgroundTemperature": temperature,
+ "isChromeExt": False,
+ "githubToken": "",
+ "clickedAnswer2": False,
+ "clickedAnswer3": False,
+ "clickedForceWebSearch": False,
+ "visitFromDelta": False,
+ "isMemoryEnabled": False,
+ "mobileClient": False,
+ "userSelectedModel": model if model in cls.userSelectedModel else None,
+ "validated": conversation.validated_value,
+ "imageGenerationMode": model == cls.default_image_model,
+ "webSearchModePrompt": False,
+ "deepSearchMode": False,
+ "designerMode": False,
+ "domains": None,
+ "vscodeClient": False,
+ "codeInterpreterMode": False,
+ "customProfile": {
+ "name": "",
+ "occupation": "",
+ "traits": [],
+ "additionalInfo": "",
+ "enableNewChats": False
+ },
+ "session": session_data,
+ "isPremium": is_premium,
+ "subscriptionCache": {
+ "status": subscription_status['status'],
+ "customerId": subscription_status['customerId'],
+ "isTrialSubscription": subscription_status['isTrialSubscription'],
+ "lastChecked": int(datetime.now().timestamp() * 1000)
+ },
+ "beastMode": False,
+ "reasoningMode": False,
+ "webSearchMode": False
+ }
+
+            # Send the request and stream the response back to the caller
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ await raise_for_status(response)
+
+                # Buffer the full text while streaming, for history and image parsing
+ full_response = []
+ async for chunk in response.content.iter_any():
+ if chunk:
+ chunk_text = chunk.decode()
+ if "You have reached your request limit for the hour" in chunk_text:
+ raise RateLimitError(chunk_text)
+ full_response.append(chunk_text)
+ # Only yield chunks for non-image models
+ if model != cls.default_image_model:
+ yield chunk_text
+
+ full_response_text = ''.join(full_response)
+
+ # For image models, check for image markdown
+ if model == cls.default_image_model:
+ image_url_match = re.search(r'!\[.*?\]\((.*?)\)', full_response_text)
+ if image_url_match:
+ image_url = image_url_match.group(1)
+ yield ImageResponse(urls=[image_url], alt=format_image_prompt(messages, prompt))
+ return
+
+                # For image models that produced no image markdown, fall back to the text response
+                if model == cls.default_image_model:
+                    yield full_response_text
+
+                # Handle conversation history once, in one place
+                if return_conversation:
+                    conversation.message_history.append({"role": "assistant", "content": full_response_text})
+                    yield conversation
diff --git a/g4f/Provider/needs_auth/GigaChat.py b/g4f/Provider/needs_auth/GigaChat.py
index d5066f46..33491f3b 100644
--- a/g4f/Provider/needs_auth/GigaChat.py
+++ b/g4f/Provider/needs_auth/GigaChat.py
@@ -64,9 +64,8 @@ class GigaChat(AsyncGeneratorProvider, ProviderModelMixin):
supports_system_message = True
supports_stream = True
needs_auth = True
- default_model = "GigaChat:latest"
- models = [default_model, "GigaChat-Plus", "GigaChat-Pro"]
- model_aliases = {"gigachat": default_model}
+ default_model = "GigaChat"
+ models = ["GigaChat-2", "GigaChat-2-Pro", "GigaChat-2-Max", default_model, "GigaChat-Pro", "GigaChat-Max"]
@classmethod
async def create_async_generator(
diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py
index 81cbf9a2..982982bb 100644
--- a/g4f/Provider/needs_auth/__init__.py
+++ b/g4f/Provider/needs_auth/__init__.py
@@ -1,5 +1,6 @@
from .Anthropic import Anthropic
from .BingCreateImages import BingCreateImages
+from .BlackboxPro import BlackboxPro
from .CablyAI import CablyAI
from .Cerebras import Cerebras
from .CopilotAccount import CopilotAccount
diff --git a/g4f/Provider/AllenAI.py b/g4f/Provider/not_working/AllenAI.py
similarity index 94%
rename from g4f/Provider/AllenAI.py
rename to g4f/Provider/not_working/AllenAI.py
index 5b99234f..dcdb90fb 100644
--- a/g4f/Provider/AllenAI.py
+++ b/g4f/Provider/not_working/AllenAI.py
@@ -2,13 +2,13 @@ from __future__ import annotations
import json
from uuid import uuid4
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages, MediaListType
-from ..image import to_bytes, is_accepted_format, to_data_uri
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..requests.raise_for_status import raise_for_status
-from ..providers.response import FinishReason, JsonConversation
-from .helper import format_prompt, get_last_user_message, format_image_prompt
-from ..tools.media import merge_media
+from ...typing import AsyncResult, Messages, MediaListType
+from ...image import to_bytes, is_accepted_format, to_data_uri
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...requests.raise_for_status import raise_for_status
+from ...providers.response import FinishReason, JsonConversation
+from ..helper import format_prompt, get_last_user_message, format_image_prompt
+from ...tools.media import merge_media
class Conversation(JsonConversation):
@@ -29,7 +29,7 @@ class AllenAI(AsyncGeneratorProvider, ProviderModelMixin):
login_url = None
api_endpoint = "https://olmo-api.allen.ai/v4/message/stream"
- working = True
+ working = False
needs_auth = False
use_nodriver = False
supports_stream = True
diff --git a/g4f/Provider/ChatGptEs.py b/g4f/Provider/not_working/ChatGptEs.py
similarity index 95%
rename from g4f/Provider/ChatGptEs.py
rename to g4f/Provider/not_working/ChatGptEs.py
index 90866464..4ec41390 100644
--- a/g4f/Provider/ChatGptEs.py
+++ b/g4f/Provider/not_working/ChatGptEs.py
@@ -10,16 +10,16 @@ try:
except ImportError:
has_curl_cffi = False
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-from ..errors import MissingRequirementsError
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+from ...errors import MissingRequirementsError
class ChatGptEs(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://chatgpt.es"
api_endpoint = "https://chatgpt.es/wp-admin/admin-ajax.php"
- working = True
+ working = False
supports_stream = True
supports_system_message = False
supports_message_history = False
diff --git a/g4f/Provider/FreeRouter.py b/g4f/Provider/not_working/FreeRouter.py
similarity index 77%
rename from g4f/Provider/FreeRouter.py
rename to g4f/Provider/not_working/FreeRouter.py
index f7283895..7e1a5554 100644
--- a/g4f/Provider/FreeRouter.py
+++ b/g4f/Provider/not_working/FreeRouter.py
@@ -1,9 +1,9 @@
from __future__ import annotations
-from .template import OpenaiTemplate
+from ..template import OpenaiTemplate
class FreeRouter(OpenaiTemplate):
label = "CablyAI FreeRouter"
url = "https://freerouter.cablyai.com"
api_base = "https://freerouter.cablyai.com/v1"
- working = True
\ No newline at end of file
+ working = True
diff --git a/g4f/Provider/Glider.py b/g4f/Provider/not_working/Glider.py
similarity index 89%
rename from g4f/Provider/Glider.py
rename to g4f/Provider/not_working/Glider.py
index 61f8c841..37295fe1 100644
--- a/g4f/Provider/Glider.py
+++ b/g4f/Provider/not_working/Glider.py
@@ -1,12 +1,12 @@
from __future__ import annotations
-from .template import OpenaiTemplate
+from ..template import OpenaiTemplate
class Glider(OpenaiTemplate):
label = "Glider"
url = "https://glider.so"
api_endpoint = "https://glider.so/api/chat"
- working = True
+ working = False
default_model = 'chat-llama-3-1-70b'
models = [
@@ -21,4 +21,4 @@ class Glider(OpenaiTemplate):
"llama-3.1-8b": "chat-llama-3-1-8b",
"llama-3.2-3b": "chat-llama-3-2-3b",
"deepseek-r1": "deepseek-ai/DeepSeek-R1",
- }
\ No newline at end of file
+ }
diff --git a/g4f/Provider/not_working/RubiksAI.py b/g4f/Provider/not_working/RubiksAI.py
index 0f3610b4..4fce1ffe 100644
--- a/g4f/Provider/not_working/RubiksAI.py
+++ b/g4f/Provider/not_working/RubiksAI.py
@@ -17,7 +17,7 @@ class RubiksAI(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://rubiks.ai"
api_endpoint = "https://rubiks.ai/search/api/"
- working = True
+ working = False
supports_stream = True
supports_system_message = True
supports_message_history = True
diff --git a/g4f/Provider/not_working/__init__.py b/g4f/Provider/not_working/__init__.py
index 174d5557..432837ff 100644
--- a/g4f/Provider/not_working/__init__.py
+++ b/g4f/Provider/not_working/__init__.py
@@ -5,15 +5,19 @@ from .AiChats import AiChats
from .Airforce import Airforce
from .AutonomousAI import AutonomousAI
from .AIUncensored import AIUncensored
+from .AllenAI import AllenAI
from .AmigoChat import AmigoChat
from .Aura import Aura
from .Chatgpt4o import Chatgpt4o
from .Chatgpt4Online import Chatgpt4Online
+from .ChatGptEs import ChatGptEs
from .ChatgptFree import ChatgptFree
from .ChatGptt import ChatGptt
from .DarkAI import DarkAI
from .FlowGpt import FlowGpt
from .FreeNetfly import FreeNetfly
+from .FreeRouter import FreeRouter
+from .Glider import Glider
from .GPROChat import GPROChat
from .Koala import Koala
from .MagickPen import MagickPen
diff --git a/g4f/models.py b/g4f/models.py
index 5c7f9680..9d5fed38 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -5,11 +5,10 @@ from dataclasses import dataclass
from .Provider import IterListProvider, ProviderType
from .Provider import (
### No Auth Required ###
- AllenAI,
ARTA,
Blackbox,
+ Chatai,
ChatGLM,
- ChatGptEs,
Cloudflare,
Copilot,
DDG,
@@ -20,13 +19,9 @@ from .Provider import (
HuggingSpace,
Grok,
DeepseekAI_JanusPro7b,
- Glider,
- Goabror,
ImageLabs,
- Jmuz,
LambdaChat,
Liaobots,
- OIVSCode,
PerplexityLabs,
Pi,
PollinationsAI,
@@ -41,7 +36,6 @@ from .Provider import (
CopilotAccount,
Gemini,
GeminiPro,
- GigaChat,
HailuoAI,
HuggingChat,
HuggingFace,
@@ -89,21 +83,17 @@ default = Model(
name = "",
base_provider = "",
best_provider = IterListProvider([
- DDG,
Blackbox,
+ DDG,
Copilot,
DeepInfraChat,
- AllenAI,
PollinationsAI,
TypeGPT,
- OIVSCode,
- ChatGptEs,
Free2GPT,
FreeGpt,
- Glider,
Dynaspark,
+ Chatai,
OpenaiChat,
- Jmuz,
Cloudflare,
])
)
@@ -113,12 +103,10 @@ default_vision = VisionModel(
base_provider = "",
best_provider = IterListProvider([
Blackbox,
- OIVSCode,
TypeGPT,
DeepInfraChat,
PollinationsAI,
Dynaspark,
- AllenAI,
HuggingSpace,
GeminiPro,
HuggingFaceAPI,
@@ -133,30 +121,49 @@ default_vision = VisionModel(
##########################
### OpenAI ###
-# gpt-3.5
-gpt_3_5_turbo = Model(
- name = 'gpt-3.5-turbo',
- base_provider = 'OpenAI'
-)
-
# gpt-4
gpt_4 = Model(
name = 'gpt-4',
base_provider = 'OpenAI',
- best_provider = IterListProvider([DDG, Jmuz, ChatGptEs, PollinationsAI, Yqcloud, Goabror, Copilot, OpenaiChat, Liaobots])
+ best_provider = IterListProvider([Blackbox, DDG, PollinationsAI, Copilot, Yqcloud, Liaobots, OpenaiChat])
+)
+
+gpt_4_turbo = Model(
+ name = 'gpt-4-turbo',
+ base_provider = 'OpenAI',
+ best_provider = Liaobots
+)
+
+# gpt-4.1
+gpt_4_1 = Model(
+ name = 'gpt-4.1',
+ base_provider = 'OpenAI',
+ best_provider = IterListProvider([PollinationsAI, Liaobots])
+)
+
+gpt_4_1_mini = Model(
+ name = 'gpt-4.1-mini',
+ base_provider = 'OpenAI',
+ best_provider = IterListProvider([PollinationsAI, Liaobots])
+)
+
+gpt_4_1_nano = Model(
+ name = 'gpt-4.1-nano',
+ base_provider = 'OpenAI',
+ best_provider = IterListProvider([Blackbox, PollinationsAI])
)
# gpt-4o
gpt_4o = VisionModel(
name = 'gpt-4o',
base_provider = 'OpenAI',
- best_provider = IterListProvider([Blackbox, Jmuz, ChatGptEs, PollinationsAI, Liaobots, OpenaiChat])
+ best_provider = IterListProvider([Blackbox, PollinationsAI, Liaobots, OpenaiChat])
)
gpt_4o_mini = Model(
name = 'gpt-4o-mini',
base_provider = 'OpenAI',
- best_provider = IterListProvider([DDG, Blackbox, ChatGptEs, TypeGPT, PollinationsAI, OIVSCode, Liaobots, Jmuz, OpenaiChat])
+ best_provider = IterListProvider([Blackbox, DDG, PollinationsAI, TypeGPT, Chatai, Liaobots, OpenaiChat])
)
gpt_4o_audio = AudioModel(
@@ -169,7 +176,7 @@ gpt_4o_audio = AudioModel(
o1 = Model(
name = 'o1',
base_provider = 'OpenAI',
- best_provider = IterListProvider([Blackbox, Copilot, OpenaiAccount])
+ best_provider = IterListProvider([Copilot, OpenaiAccount])
)
o1_mini = Model(
@@ -185,11 +192,11 @@ o3_mini = Model(
best_provider = IterListProvider([DDG, Blackbox, Liaobots])
)
-### GigaChat ###
-gigachat = Model(
- name = 'gigachat',
- base_provider = 'gigachat',
- best_provider = GigaChat
+# o4
+o4_mini = Model(
+ name = 'o4-mini',
+ base_provider = 'OpenAI',
+ best_provider = PollinationsAI
)
### Meta ###
@@ -199,85 +206,92 @@ meta = Model(
best_provider = MetaAI
)
-### llama 2-4 ###
+# llama 2
llama_2_7b = Model(
name = "llama-2-7b",
base_provider = "Meta Llama",
best_provider = Cloudflare
)
+# llama 3
llama_3_8b = Model(
name = "llama-3-8b",
base_provider = "Meta Llama",
- best_provider = IterListProvider([Jmuz, Cloudflare])
+ best_provider = Cloudflare
)
-llama_3_70b = Model(
- name = "llama-3-70b",
- base_provider = "Meta Llama",
- best_provider = Jmuz
-)
+# llama 3.1
llama_3_1_8b = Model(
name = "llama-3.1-8b",
base_provider = "Meta Llama",
- best_provider = IterListProvider([DeepInfraChat, Glider, PollinationsAI, AllenAI, Jmuz, Cloudflare])
-)
-
-llama_3_1_70b = Model(
- name = "llama-3.1-70b",
- base_provider = "Meta Llama",
- best_provider = IterListProvider([Glider, AllenAI, Jmuz])
-)
-
-llama_3_1_405b = Model(
- name = "llama-3.1-405b",
- base_provider = "Meta Llama",
- best_provider = IterListProvider([AllenAI, Jmuz])
+ best_provider = IterListProvider([Blackbox, DeepInfraChat, Cloudflare])
)
+# llama 3.2
llama_3_2_1b = Model(
name = "llama-3.2-1b",
base_provider = "Meta Llama",
- best_provider = Cloudflare
+ best_provider = IterListProvider([Blackbox, Cloudflare])
)
llama_3_2_3b = Model(
name = "llama-3.2-3b",
base_provider = "Meta Llama",
- best_provider = Glider
+ best_provider = IterListProvider([Blackbox])
)
llama_3_2_11b = VisionModel(
name = "llama-3.2-11b",
base_provider = "Meta Llama",
- best_provider = IterListProvider([Jmuz, HuggingChat, HuggingFace])
+ best_provider = IterListProvider([Blackbox, HuggingChat, HuggingFace])
)
llama_3_2_90b = Model(
name = "llama-3.2-90b",
base_provider = "Meta Llama",
- best_provider = IterListProvider([DeepInfraChat, Jmuz])
+ best_provider = DeepInfraChat
)
+# llama 3.3
llama_3_3_70b = Model(
name = "llama-3.3-70b",
base_provider = "Meta Llama",
- best_provider = IterListProvider([Blackbox, DDG, DeepInfraChat, LambdaChat, PollinationsAI, Jmuz, HuggingChat, HuggingFace])
+ best_provider = IterListProvider([Blackbox, DDG, DeepInfraChat, LambdaChat, PollinationsAI, HuggingChat, HuggingFace])
)
+# llama 4
llama_4_scout = Model(
name = "llama-4-scout",
base_provider = "Meta Llama",
- best_provider = IterListProvider([Cloudflare, PollinationsAI])
+ best_provider = IterListProvider([Blackbox, PollinationsAI, Cloudflare])
+)
+
+llama_4_scout_17b = Model(
+ name = "llama-4-scout-17b",
+ base_provider = "Meta Llama",
+ best_provider = IterListProvider([DeepInfraChat, PollinationsAI])
+)
+
+llama_4_maverick = Model(
+ name = "llama-4-maverick",
+ base_provider = "Meta Llama",
+ best_provider = IterListProvider([Blackbox, DeepInfraChat])
+)
+
+llama_4_maverick_17b = Model(
+ name = "llama-4-maverick-17b",
+ base_provider = "Meta Llama",
+ best_provider = DeepInfraChat
)
### Mistral ###
-mixtral_8x7b = Model(
- name = "mixtral-8x7b",
+mistral_7b = Model(
+ name = "mistral-7b",
base_provider = "Mistral",
- best_provider = Jmuz
+ best_provider = Blackbox
)
+
mixtral_8x22b = Model(
name = "mixtral-8x22b",
base_provider = "Mistral",
@@ -287,15 +301,21 @@ mixtral_8x22b = Model(
mistral_nemo = Model(
name = "mistral-nemo",
base_provider = "Mistral",
- best_provider = IterListProvider([PollinationsAI, HuggingChat, HuggingFace])
+ best_provider = IterListProvider([Blackbox, HuggingChat, HuggingFace])
)
-mixtral_small_24b = Model(
- name = "mixtral-small-24b",
+mistral_small_24b = Model(
+ name = "mistral-small-24b",
base_provider = "Mistral",
best_provider = IterListProvider([Blackbox, DDG, DeepInfraChat])
)
+mistral_small_3_1_24b = Model(
+ name = "mistral-small-3.1-24b",
+ base_provider = "Mistral",
+ best_provider = IterListProvider([Blackbox, PollinationsAI])
+)
+
### NousResearch ###
hermes_3 = Model(
name = "hermes-3",
@@ -303,6 +323,18 @@ hermes_3 = Model(
best_provider = LambdaChat
)
+hermes_3_405b = Model(
+ name = "hermes-3-405b",
+ base_provider = "NousResearch",
+ best_provider = LambdaChat
+)
+
+deephermes_3_8b = Model(
+ name = "deephermes-3-8b",
+ base_provider = "NousResearch",
+ best_provider = Blackbox
+)
+
### Microsoft ###
# phi
phi_3_5_mini = Model(
@@ -317,6 +349,19 @@ phi_4 = Model(
best_provider = IterListProvider([DeepInfraChat, PollinationsAI, HuggingSpace])
)
+phi_4_multimodal = VisionModel(
+ name = "phi-4-multimodal",
+ base_provider = "Microsoft",
+ best_provider = IterListProvider([DeepInfraChat, HuggingSpace])
+)
+
+phi_4_reasoning_plus = Model(
+ name = "phi-4-reasoning-plus",
+ base_provider = "Microsoft",
+ best_provider = DeepInfraChat
+)
+
# wizardlm
wizardlm_2_7b = Model(
name = 'wizardlm-2-7b',
@@ -327,7 +372,7 @@ wizardlm_2_7b = Model(
wizardlm_2_8x22b = Model(
name = 'wizardlm-2-8x22b',
base_provider = 'Microsoft',
- best_provider = IterListProvider([DeepInfraChat, Jmuz])
+ best_provider = DeepInfraChat
)
### Google DeepMind ###
@@ -338,37 +383,37 @@ gemini = Model(
best_provider = Gemini
)
-# gemini-exp
-gemini_exp = Model(
- name = 'gemini-exp',
- base_provider = 'Google',
- best_provider = Jmuz
+# gemini-1.0
+gemini_1_0_pro = Model(
+ name = 'gemini-1.0-pro',
+ base_provider = 'Google DeepMind',
+ best_provider = Liaobots
)
# gemini-1.5
gemini_1_5_flash = Model(
name = 'gemini-1.5-flash',
base_provider = 'Google DeepMind',
- best_provider = IterListProvider([Free2GPT, FreeGpt, TeachAnything, Websim, Dynaspark, Jmuz, GeminiPro])
+ best_provider = IterListProvider([Free2GPT, FreeGpt, TeachAnything, Websim, Dynaspark, GeminiPro])
)
gemini_1_5_pro = Model(
name = 'gemini-1.5-pro',
base_provider = 'Google DeepMind',
- best_provider = IterListProvider([Free2GPT, FreeGpt, TeachAnything, Websim, Jmuz, GeminiPro])
+ best_provider = IterListProvider([Free2GPT, FreeGpt, TeachAnything, Websim, GeminiPro])
)
# gemini-2.0
gemini_2_0_flash = Model(
name = 'gemini-2.0-flash',
base_provider = 'Google DeepMind',
- best_provider = IterListProvider([Dynaspark, GeminiPro, Gemini])
+ best_provider = IterListProvider([Blackbox, Dynaspark, GeminiPro, Gemini, Liaobots])
)
gemini_2_0_flash_thinking = Model(
name = 'gemini-2.0-flash-thinking',
base_provider = 'Google DeepMind',
- best_provider = Gemini
+ best_provider = IterListProvider([PollinationsAI, Liaobots, Gemini])
)
gemini_2_0_flash_thinking_with_apps = Model(
@@ -377,19 +422,63 @@ gemini_2_0_flash_thinking_with_apps = Model(
best_provider = Gemini
)
+# gemini-2.5
+gemini_2_5_flash = Model(
+ name = 'gemini-2.5-flash',
+ base_provider = 'Google DeepMind',
+ best_provider = PollinationsAI
+)
+
+gemini_2_5_pro = Model(
+ name = 'gemini-2.5-pro',
+ base_provider = 'Google DeepMind',
+ best_provider = Liaobots
+)
+
+# gemma
+gemma_2_9b = Model(
+ name = 'gemma-2-9b',
+ base_provider = 'Google DeepMind',
+ best_provider = Blackbox
+)
+
+gemma_3_12b = Model(
+ name = 'gemma-3-12b',
+ base_provider = 'Google DeepMind',
+ best_provider = IterListProvider([Blackbox, DeepInfraChat])
+)
+
+gemma_3_1b = Model(
+ name = 'gemma-3-1b',
+ base_provider = 'Google DeepMind',
+ best_provider = Blackbox
+)
+
+gemma_3_27b = Model(
+ name = 'gemma-3-27b',
+ base_provider = 'Google DeepMind',
+ best_provider = IterListProvider([Blackbox, DeepInfraChat])
+)
+
+gemma_3_4b = Model(
+ name = 'gemma-3-4b',
+ base_provider = 'Google DeepMind',
+ best_provider = Blackbox
+)
+
### Anthropic ###
# claude 3
claude_3_haiku = Model(
name = 'claude-3-haiku',
base_provider = 'Anthropic',
- best_provider = IterListProvider([DDG, Jmuz])
+ best_provider = DDG
)
# claude 3.5
claude_3_5_sonnet = Model(
name = 'claude-3.5-sonnet',
base_provider = 'Anthropic',
- best_provider = IterListProvider([Blackbox, Jmuz, Liaobots])
+ best_provider = IterListProvider([Blackbox, Liaobots])
)
# claude 3.7
@@ -406,15 +495,15 @@ reka_core = Model(
best_provider = Reka
)
-### Blackbox AI ###
-blackboxai = Model(
- name = 'blackboxai',
- base_provider = 'Blackbox AI',
+reka_flash = Model(
+ name = 'reka-flash',
+ base_provider = 'Reka AI',
best_provider = Blackbox
)
-blackboxai_pro = Model(
- name = 'blackboxai-pro',
+### Blackbox AI ###
+blackboxai = Model(
+ name = 'blackboxai',
base_provider = 'Blackbox AI',
best_provider = Blackbox
)
@@ -426,20 +515,38 @@ command_r = Model(
best_provider = HuggingSpace
)
+command_r_plus_08_2024 = Model(
+ name = 'command-r-plus-08-2024',
+ base_provider = 'CohereForAI',
+ best_provider = IterListProvider([PollinationsAI, HuggingSpace])
+)
+
+command_r_08_2024 = Model(
+ name = 'command-r-08-2024',
+ base_provider = 'CohereForAI',
+ best_provider = HuggingSpace
+)
+
command_r_plus = Model(
name = 'command-r-plus',
base_provider = 'CohereForAI',
best_provider = IterListProvider([HuggingSpace, HuggingChat])
)
-command_r7b = Model(
- name = 'command-r7b',
+command_r7b_12_2024 = Model(
+ name = 'command-r7b-12-2024',
base_provider = 'CohereForAI',
best_provider = HuggingSpace
)
-command_a = Model(
- name = 'command-a',
+command_r7b_arabic_02_2025 = Model(
+ name = 'command-r7b-arabic-02-2025',
+ base_provider = 'CohereForAI',
+ best_provider = HuggingSpace
+)
+
+command_a_03_2025 = Model(
+ name = 'command-a-03-2025',
base_provider = 'CohereForAI',
best_provider = HuggingSpace
)
@@ -471,15 +578,22 @@ qwen_2_5 = Model(
best_provider = HuggingSpace
)
+qwen_2_5_7b = Model(
+ name = 'qwen-2.5-7b',
+ base_provider = 'Qwen',
+ best_provider = Blackbox
+)
+
qwen_2_5_72b = Model(
name = 'qwen-2.5-72b',
base_provider = 'Qwen',
- best_provider = Jmuz
+ best_provider = Blackbox
)
+
qwen_2_5_coder_32b = Model(
name = 'qwen-2.5-coder-32b',
base_provider = 'Qwen',
- best_provider = IterListProvider([PollinationsAI, Jmuz, HuggingChat])
+ best_provider = IterListProvider([Blackbox, PollinationsAI, LambdaChat, HuggingChat])
)
qwen_2_5_1m = Model(
name = 'qwen-2.5-1m',
@@ -493,12 +607,92 @@ qwen_2_5_max = Model(
best_provider = HuggingSpace
)
+qwen_2_5_vl_3b = Model(
+ name = 'qwen-2.5-vl-3b',
+ base_provider = 'Qwen',
+ best_provider = Blackbox
+)
+
+qwen_2_5_vl_7b = Model(
+ name = 'qwen-2.5-vl-7b',
+ base_provider = 'Qwen',
+ best_provider = Blackbox
+)
+
+qwen_2_5_vl_32b = Model(
+ name = 'qwen-2.5-vl-32b',
+ base_provider = 'Qwen',
+ best_provider = Blackbox
+)
+
+qwen_2_5_vl_72b = Model(
+ name = 'qwen-2.5-vl-72b',
+ base_provider = 'Qwen',
+ best_provider = Blackbox
+)
+
+# qwen-3
+qwen_3_235b = Model(
+ name = 'qwen-3-235b',
+ base_provider = 'Qwen',
+ best_provider = IterListProvider([DeepInfraChat, HuggingSpace, Liaobots])
+)
+
+qwen_3_32b = Model(
+ name = 'qwen-3-32b',
+ base_provider = 'Qwen',
+ best_provider = IterListProvider([DeepInfraChat, HuggingSpace])
+)
+
+qwen_3_30b = Model(
+ name = 'qwen-3-30b',
+ base_provider = 'Qwen',
+ best_provider = IterListProvider([DeepInfraChat, HuggingSpace])
+)
+
+qwen_3_14b = Model(
+ name = 'qwen-3-14b',
+ base_provider = 'Qwen',
+ best_provider = IterListProvider([DeepInfraChat, HuggingSpace])
+)
+
+qwen_3_4b = Model(
+ name = 'qwen-3-4b',
+ base_provider = 'Qwen',
+ best_provider = HuggingSpace
+)
+
+qwen_3_1_7b = Model(
+ name = 'qwen-3-1.7b',
+ base_provider = 'Qwen',
+ best_provider = HuggingSpace
+)
+
+qwen_3_0_6b = Model(
+ name = 'qwen-3-0.6b',
+ base_provider = 'Qwen',
+ best_provider = HuggingSpace
+)
+
### qwq/qvq ###
qwq_32b = Model(
name = 'qwq-32b',
base_provider = 'Qwen',
- best_provider = IterListProvider([Blackbox, Jmuz, HuggingChat])
+ best_provider = IterListProvider([Blackbox, DeepInfraChat, PollinationsAI, HuggingChat])
)
+
+qwq_32b_preview = Model(
+ name = 'qwq-32b-preview',
+ base_provider = 'Qwen',
+ best_provider = Blackbox
+)
+
+qwq_32b_arliai = Model(
+ name = 'qwq-32b-arliai',
+ base_provider = 'Qwen',
+ best_provider = Blackbox
+)
+
qvq_72b = VisionModel(
name = 'qvq-72b',
base_provider = 'Qwen',
@@ -513,41 +707,82 @@ pi = Model(
)
### DeepSeek ###
-deepseek_chat = Model(
- name = 'deepseek-chat',
- base_provider = 'DeepSeek',
- best_provider = IterListProvider([Blackbox, Jmuz])
-)
-
+# deepseek-v3
deepseek_v3 = Model(
name = 'deepseek-v3',
base_provider = 'DeepSeek',
- best_provider = IterListProvider([Blackbox, DeepInfraChat, PollinationsAI, OIVSCode, TypeGPT, Liaobots])
+ best_provider = IterListProvider([DeepInfraChat, PollinationsAI, TypeGPT, Liaobots])
)
+# deepseek-r1
deepseek_r1 = Model(
name = 'deepseek-r1',
base_provider = 'DeepSeek',
- best_provider = IterListProvider([Blackbox, DeepInfraChat, Glider, LambdaChat, PollinationsAI, TypeGPT, Liaobots, Jmuz, HuggingChat, HuggingFace])
+ best_provider = IterListProvider([Blackbox, DeepInfraChat, LambdaChat, PollinationsAI, TypeGPT, HuggingChat, HuggingFace])
)
+deepseek_r1_zero = Model(
+ name = 'deepseek-r1-zero',
+ base_provider = 'DeepSeek',
+ best_provider = Blackbox
+)
+
+deepseek_r1_turbo = Model(
+ name = 'deepseek-r1-turbo',
+ base_provider = 'DeepSeek',
+ best_provider = DeepInfraChat
+)
+
+deepseek_r1_distill_llama_70b = Model(
+ name = 'deepseek-r1-distill-llama-70b',
+ base_provider = 'DeepSeek',
+ best_provider = IterListProvider([Blackbox, DeepInfraChat, PollinationsAI])
+)
+
+deepseek_r1_distill_qwen_14b = Model(
+ name = 'deepseek-r1-distill-qwen-14b',
+ base_provider = 'DeepSeek',
+ best_provider = Blackbox
+)
+
+deepseek_r1_distill_qwen_32b = Model(
+ name = 'deepseek-r1-distill-qwen-32b',
+ base_provider = 'DeepSeek',
+ best_provider = IterListProvider([Blackbox, DeepInfraChat, PollinationsAI])
+)
+
+# deepseek-v2
+deepseek_prover_v2_671b = Model(
+ name = 'deepseek-prover-v2-671b',
+ base_provider = 'DeepSeek',
+ best_provider = DeepInfraChat
+)
+
+# deepseek-v3-0324
+deepseek_v3_0324 = Model(
+ name = 'deepseek-v3-0324',
+ base_provider = 'DeepSeek',
+ best_provider = IterListProvider([DeepInfraChat, PollinationsAI])
+)
+
+# janus
janus_pro_7b = VisionModel(
name = DeepseekAI_JanusPro7b.default_model,
base_provider = 'DeepSeek',
- best_provider = IterListProvider([DeepseekAI_JanusPro7b])
+ best_provider = DeepseekAI_JanusPro7b
)
### x.ai ###
grok_3 = Model(
name = 'grok-3',
base_provider = 'x.ai',
- best_provider = Grok
+ best_provider = IterListProvider([Grok, Liaobots])
)
grok_3_r1 = Model(
name = 'grok-3-r1',
base_provider = 'x.ai',
- best_provider = Grok
+ best_provider = IterListProvider([Grok, Liaobots])
)
### Perplexity AI ###
@@ -582,17 +817,22 @@ r1_1776 = Model(
)
### Nvidia ###
+nemotron_49b = Model(
+ name = 'nemotron-49b',
+ base_provider = 'Nvidia',
+ best_provider = Blackbox
+)
+
nemotron_70b = Model(
name = 'nemotron-70b',
base_provider = 'Nvidia',
best_provider = IterListProvider([LambdaChat, HuggingChat, HuggingFace])
)
-### Databricks ###
-dbrx_instruct = Model(
- name = 'dbrx-instruct',
- base_provider = 'Databricks',
- best_provider = DeepInfraChat
+nemotron_253b = Model(
+ name = 'nemotron-253b',
+ base_provider = 'Nvidia',
+ best_provider = Blackbox
)
### THUDM ###
@@ -609,13 +849,6 @@ mini_max = Model(
best_provider = HailuoAI
)
-### 01-ai ###
-yi_34b = Model(
- name = "yi-34b",
- base_provider = "01-ai",
- best_provider = DeepInfraChat
-)
-
### Cognitive Computations ###
dolphin_2_6 = Model(
name = "dolphin-2.6",
@@ -629,6 +862,18 @@ dolphin_2_9 = Model(
best_provider = DeepInfraChat
)
+dolphin_3_0_24b = Model(
+ name = "dolphin-3.0-24b",
+ base_provider = "Cognitive Computations",
+ best_provider = Blackbox
+)
+
+dolphin_3_0_r1_24b = Model(
+ name = "dolphin-3.0-r1-24b",
+ base_provider = "Cognitive Computations",
+ best_provider = Blackbox
+)
+
### DeepInfra ###
airoboros_70b = Model(
name = "airoboros-70b",
@@ -643,54 +888,11 @@ lzlv_70b = Model(
best_provider = DeepInfraChat
)
-### OpenBMB ###
-minicpm_2_5 = Model(
- name = "minicpm-2.5",
- base_provider = "OpenBMB",
- best_provider = DeepInfraChat
-)
-
### Ai2 ###
-olmo_1_7b = Model(
- name = "olmo-1-7b",
+molmo_7b = Model(
+ name = "molmo-7b",
base_provider = "Ai2",
- best_provider = AllenAI
-)
-
-olmo_2_13b = Model(
- name = "olmo-2-13b",
- base_provider = "Ai2",
- best_provider = AllenAI
-)
-
-olmo_2_32b = Model(
- name = "olmo-2-32b",
- base_provider = "Ai2",
- best_provider = AllenAI
-)
-
-olmo_4_synthetic = VisionModel(
- name = "olmo-4-synthetic",
- base_provider = "Ai2",
- best_provider = AllenAI
-)
-
-tulu_3_1_8b = Model(
- name = "tulu-3-1-8b",
- base_provider = "Ai2",
- best_provider = AllenAI
-)
-
-tulu_3_70b = Model(
- name = "tulu-3-70b",
- base_provider = "Ai2",
- best_provider = AllenAI
-)
-
-tulu_3_405b = Model(
- name = "tulu-3-405b",
- base_provider = "Ai2",
- best_provider = AllenAI
+ best_provider = Blackbox
)
### Liquid AI ###
@@ -700,6 +902,33 @@ lfm_40b = Model(
best_provider = LambdaChat
)
+### Agentica ###
+deepcoder_14b = Model(
+ name = "deepcoder-14b",
+ base_provider = "Agentica",
+ best_provider = Blackbox
+)
+
+### Moonshot AI ###
+kimi_vl_a3b_thinking = Model(
+ name = "kimi-vl-a3b-thinking",
+ base_provider = "Moonshot AI",
+ best_provider = Blackbox
+)
+
+moonlight_16b = Model(
+ name = "moonlight-16b",
+ base_provider = "Moonshot AI",
+ best_provider = Blackbox
+)
+
+### Featherless Serverless LLM ###
+qwerky_72b = Model(
+ name = 'qwerky-72b',
+ base_provider = 'Featherless Serverless LLM',
+ best_provider = Blackbox
+)
+
### Uncensored AI ###
evil = Model(
name = 'evil',
@@ -729,7 +958,7 @@ sd_3_5 = ImageModel(
flux = ImageModel(
name = 'flux',
base_provider = 'Black Forest Labs',
- best_provider = IterListProvider([Blackbox, PollinationsImage, Websim, HuggingSpace, ARTA])
+ best_provider = IterListProvider([PollinationsImage, Websim, HuggingSpace, ARTA])
)
flux_pro = ImageModel(
@@ -777,12 +1006,15 @@ class ModelUtils:
### Text ###
############
- ### OpenAI ###
- # gpt-3.5
- gpt_3_5_turbo.name: gpt_3_5_turbo,
-
+ ### OpenAI ###
# gpt-4
gpt_4.name: gpt_4,
+ gpt_4_turbo.name: gpt_4_turbo,
+
+ # gpt-4.1
+ gpt_4_1.name: gpt_4_1,
+ gpt_4_1_nano.name: gpt_4_1_nano,
+ gpt_4_1_mini.name: gpt_4_1_mini,
# gpt-4o
gpt_4o.name: gpt_4o,
@@ -795,6 +1027,9 @@ class ModelUtils:
# o3
o3_mini.name: o3_mini,
+
+ # o4
+ o4_mini.name: o4_mini,
### Meta ###
meta.name: meta,
@@ -804,12 +1039,9 @@ class ModelUtils:
# llama-3
llama_3_8b.name: llama_3_8b,
- llama_3_70b.name: llama_3_70b,
# llama-3.1
llama_3_1_8b.name: llama_3_1_8b,
- llama_3_1_70b.name: llama_3_1_70b,
- llama_3_1_405b.name: llama_3_1_405b,
# llama-3.2
llama_3_2_1b.name: llama_3_2_1b,
@@ -822,35 +1054,60 @@ class ModelUtils:
# llama-4
llama_4_scout.name: llama_4_scout,
+ llama_4_scout_17b.name: llama_4_scout_17b,
+ llama_4_maverick.name: llama_4_maverick,
+ llama_4_maverick_17b.name: llama_4_maverick_17b,
### Mistral ###
- mixtral_8x7b.name: mixtral_8x7b,
+ mistral_7b.name: mistral_7b,
mixtral_8x22b.name: mixtral_8x22b,
mistral_nemo.name: mistral_nemo,
- mixtral_small_24b.name: mixtral_small_24b,
+ mistral_small_24b.name: mistral_small_24b,
+ mistral_small_3_1_24b.name: mistral_small_3_1_24b,
### NousResearch ###
hermes_3.name: hermes_3,
+ hermes_3_405b.name: hermes_3_405b,
+ deephermes_3_8b.name: deephermes_3_8b,
### Microsoft ###
# phi
phi_3_5_mini.name: phi_3_5_mini,
phi_4.name: phi_4,
+ phi_4_multimodal.name: phi_4_multimodal,
+ phi_4_reasoning_plus.name: phi_4_reasoning_plus,
# wizardlm
wizardlm_2_7b.name: wizardlm_2_7b,
wizardlm_2_8x22b.name: wizardlm_2_8x22b,
### Google ###
- ### Gemini
+ ### gemini
"gemini": gemini,
gemini.name: gemini,
- gemini_exp.name: gemini_exp,
+
+ # gemini-1.0
+ gemini_1_0_pro.name: gemini_1_0_pro,
+
+ # gemini-1.5
gemini_1_5_pro.name: gemini_1_5_pro,
gemini_1_5_flash.name: gemini_1_5_flash,
+
+ # gemini-2.0
gemini_2_0_flash.name: gemini_2_0_flash,
gemini_2_0_flash_thinking.name: gemini_2_0_flash_thinking,
gemini_2_0_flash_thinking_with_apps.name: gemini_2_0_flash_thinking_with_apps,
+
+ # gemini-2.5
+ gemini_2_5_flash.name: gemini_2_5_flash,
+ gemini_2_5_pro.name: gemini_2_5_pro,
+
+ # gemma
+ gemma_2_9b.name: gemma_2_9b,
+ gemma_3_12b.name: gemma_3_12b,
+ gemma_3_1b.name: gemma_3_1b,
+ gemma_3_27b.name: gemma_3_27b,
+ gemma_3_4b.name: gemma_3_4b,
### Anthropic ###
# claude 3
@@ -864,19 +1121,19 @@ class ModelUtils:
### Reka AI ###
reka_core.name: reka_core,
+ reka_flash.name: reka_flash,
### Blackbox AI ###
blackboxai.name: blackboxai,
- blackboxai_pro.name: blackboxai_pro,
### CohereForAI ###
command_r.name: command_r,
+ command_r_plus_08_2024.name: command_r_plus_08_2024,
+ command_r_08_2024.name: command_r_08_2024,
command_r_plus.name: command_r_plus,
- command_r7b.name: command_r7b,
- command_a.name: command_a,
-
- ### GigaChat ###
- gigachat.name: gigachat,
+ command_r7b_12_2024.name: command_r7b_12_2024,
+ command_r7b_arabic_02_2025.name: command_r7b_arabic_02_2025,
+ command_a_03_2025.name: command_a_03_2025,
### Qwen ###
# qwen-1.5
@@ -888,13 +1145,29 @@ class ModelUtils:
# qwen-2.5
qwen_2_5.name: qwen_2_5,
+ qwen_2_5_7b.name: qwen_2_5_7b,
qwen_2_5_72b.name: qwen_2_5_72b,
qwen_2_5_coder_32b.name: qwen_2_5_coder_32b,
qwen_2_5_1m.name: qwen_2_5_1m,
qwen_2_5_max.name: qwen_2_5_max,
+ qwen_2_5_vl_3b.name: qwen_2_5_vl_3b,
+ qwen_2_5_vl_7b.name: qwen_2_5_vl_7b,
+ qwen_2_5_vl_32b.name: qwen_2_5_vl_32b,
+ qwen_2_5_vl_72b.name: qwen_2_5_vl_72b,
+
+            # qwen-3
+ qwen_3_235b.name: qwen_3_235b,
+ qwen_3_32b.name: qwen_3_32b,
+ qwen_3_30b.name: qwen_3_30b,
+ qwen_3_14b.name: qwen_3_14b,
+ qwen_3_4b.name: qwen_3_4b,
+ qwen_3_1_7b.name: qwen_3_1_7b,
+ qwen_3_0_6b.name: qwen_3_0_6b,
# qwq/qvq
qwq_32b.name: qwq_32b,
+ qwq_32b_preview.name: qwq_32b_preview,
+ qwq_32b_arliai.name: qwq_32b_arliai,
qvq_72b.name: qvq_72b,
### Inflection ###
@@ -910,16 +1183,28 @@ class ModelUtils:
sonar_reasoning_pro.name: sonar_reasoning_pro,
r1_1776.name: r1_1776,
- ### DeepSeek ###
- deepseek_chat.name: deepseek_chat,
+ ### DeepSeek ###
+ # deepseek-v3
deepseek_v3.name: deepseek_v3,
+
+ # deepseek-r1
deepseek_r1.name: deepseek_r1,
+ deepseek_r1_zero.name: deepseek_r1_zero,
+ deepseek_r1_turbo.name: deepseek_r1_turbo,
+ deepseek_r1_distill_llama_70b.name: deepseek_r1_distill_llama_70b,
+ deepseek_r1_distill_qwen_14b.name: deepseek_r1_distill_qwen_14b,
+ deepseek_r1_distill_qwen_32b.name: deepseek_r1_distill_qwen_32b,
+
+ # deepseek-v2
+ deepseek_prover_v2_671b.name: deepseek_prover_v2_671b,
+
+ # deepseek-v3-0324
+ deepseek_v3_0324.name: deepseek_v3_0324,
### Nvidia ###
+ nemotron_49b.name: nemotron_49b,
nemotron_70b.name: nemotron_70b,
-
- ### Databricks ###
- dbrx_instruct.name: dbrx_instruct,
+ nemotron_253b.name: nemotron_253b,
### THUDM ###
glm_4.name: glm_4,
@@ -927,33 +1212,30 @@ class ModelUtils:
## MiniMax ###
mini_max.name: mini_max,
- ## 01-ai ###
- yi_34b.name: yi_34b,
-
### Cognitive Computations ###
dolphin_2_6.name: dolphin_2_6,
dolphin_2_9.name: dolphin_2_9,
+ dolphin_3_0_24b.name: dolphin_3_0_24b,
+ dolphin_3_0_r1_24b.name: dolphin_3_0_r1_24b,
### DeepInfra ###
airoboros_70b.name: airoboros_70b,
### Lizpreciatior ###
lzlv_70b.name: lzlv_70b,
-
- ### OpenBMB ###
- minicpm_2_5.name: minicpm_2_5,
-
+
### Ai2 ###
- olmo_1_7b.name: olmo_1_7b,
- olmo_2_13b.name: olmo_2_13b,
- olmo_2_32b.name: olmo_2_32b,
- olmo_4_synthetic.name: olmo_4_synthetic,
- tulu_3_1_8b.name: tulu_3_1_8b,
- tulu_3_70b.name: tulu_3_70b,
- tulu_3_405b.name: tulu_3_405b,
+ molmo_7b.name: molmo_7b,
### Liquid AI ###
lfm_40b.name: lfm_40b,
+
+        ### Agentica ###
+        deepcoder_14b.name: deepcoder_14b,
+
+        ### Moonshot AI ###
+        kimi_vl_a3b_thinking.name: kimi_vl_a3b_thinking,
+        moonlight_16b.name: moonlight_16b,
+
+ ### Featherless Serverless LLM ###
+ qwerky_72b.name: qwerky_72b,
### Uncensored AI ###
evil.name: evil,
@@ -987,7 +1269,7 @@ demo_models = {
janus_pro_7b.name: [janus_pro_7b, [HuggingSpace]],
command_r.name: [command_r, [HuggingSpace]],
command_r_plus.name: [command_r_plus, [HuggingSpace]],
- command_r7b.name: [command_r7b, [HuggingSpace]],
+ command_r7b_12_2024.name: [command_r7b_12_2024, [HuggingSpace]],
qwen_2_5_coder_32b.name: [qwen_2_5_coder_32b, [HuggingFace]],
qwq_32b.name: [qwq_32b, [HuggingFace]],
llama_3_3_70b.name: [llama_3_3_70b, [HuggingFace]],
diff --git a/g4f/providers/any_provider.py b/g4f/providers/any_provider.py
index aff94a93..9da28ea0 100644
--- a/g4f/providers/any_provider.py
+++ b/g4f/providers/any_provider.py
@@ -10,7 +10,7 @@ from ..Provider.hf_space import HuggingSpace
from .. import Provider
from .. import models
from ..Provider import Cloudflare, Gemini, Grok, DeepSeekAPI, PerplexityLabs, LambdaChat, PollinationsAI, FreeRouter
-from ..Provider import Microsoft_Phi_4, DeepInfraChat, Blackbox, EdgeTTS, gTTS, MarkItDown, HarProvider
+from ..Provider import Microsoft_Phi_4_Multimodal, DeepInfraChat, Blackbox, EdgeTTS, gTTS, MarkItDown, HarProvider
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
class AnyProvider(AsyncGeneratorProvider, ProviderModelMixin):
@@ -100,7 +100,7 @@ class AnyProvider(AsyncGeneratorProvider, ProviderModelMixin):
cls.image_models.extend([clean_name(model) for model in provider.image_models])
cls.vision_models.extend([clean_name(model) for model in provider.vision_models])
cls.video_models.extend([clean_name(model) for model in provider.video_models])
- for provider in [Microsoft_Phi_4, PollinationsAI]:
+ for provider in [Microsoft_Phi_4_Multimodal, PollinationsAI]:
if provider.working and getattr(provider, "parent", provider.__name__) not in ignored:
cls.audio_models.update(provider.audio_models)
cls.models_count.update({model: all_models.count(model) for model in all_models if all_models.count(model) > cls.models_count.get(model, 0)})
@@ -137,7 +137,7 @@ class AnyProvider(AsyncGeneratorProvider, ProviderModelMixin):
if "audio" in kwargs or "audio" in kwargs.get("modalities", []):
providers = [PollinationsAI, EdgeTTS, gTTS]
elif has_audio:
- providers = [PollinationsAI, Microsoft_Phi_4, MarkItDown]
+ providers = [PollinationsAI, Microsoft_Phi_4_Multimodal, MarkItDown]
elif has_image:
providers = models.default_vision.best_provider.providers
else:
@@ -194,4 +194,4 @@ class AnyProvider(AsyncGeneratorProvider, ProviderModelMixin):
setattr(Provider, "AnyProvider", AnyProvider)
Provider.__map__["AnyProvider"] = AnyProvider
-Provider.__providers__.append(AnyProvider)
\ No newline at end of file
+Provider.__providers__.append(AnyProvider)