Merge pull request #2989 from hlohaus/kq

Update providers and models for better compatibility
H Lohaus 2025-05-15 02:52:14 +02:00 committed by GitHub
commit d2a967c887
34 changed files with 2990 additions and 1647 deletions
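The most visible change across these files is a refreshed set of model defaults and catalogs (for example, the review tool's DEFAULT_MODEL moves from "gpt-4o" to "claude-3.7-sonnet"). As a quick orientation, here is a minimal sketch of exercising the new default through g4f's OpenAI-style client; the model name comes from this diff, while the prompt and surrounding code are illustrative only:

from g4f.client import Client

client = Client()
response = client.chat.completions.create(
    model="claude-3.7-sonnet",  # new default introduced by this PR
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)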

View file

@@ -31,7 +31,7 @@ from g4f import debug
 debug.logging = True

 # Constants
-DEFAULT_MODEL = "gpt-4o"
+DEFAULT_MODEL = "claude-3.7-sonnet"
 FALLBACK_MODELS = []
 MAX_DIFF_SIZE = None  # Set to None to disable truncation, or a number for character limit
 MAX_RETRIES = 3
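The constants above are knobs for the surrounding script, whose body is outside this hunk. A hypothetical sketch of how such constants typically interact; the helper names are invented for illustration and are not part of the commit:

# Hypothetical helpers; the actual script logic is not shown in this hunk.
def candidate_models() -> list:
    return [DEFAULT_MODEL] + FALLBACK_MODELS

def truncate_diff(diff: str) -> str:
    # MAX_DIFF_SIZE = None disables truncation entirely
    if MAX_DIFF_SIZE is None:
        return diff
    return diff[:MAX_DIFF_SIZE]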

View file

@@ -151,10 +151,12 @@ class ARTA(AsyncGeneratorProvider, ProviderModelMixin):
         # Step 1: Get Authentication Token
         auth_data = await cls.read_and_refresh_token(proxy)
+        auth_token = auth_data.get("idToken")

         async with ClientSession() as session:
             # Step 2: Generate Images
-            image_payload = {
+            # Create a form data structure as the API might expect form data instead of JSON
+            form_data = {
                 "prompt": prompt,
                 "negative_prompt": negative_prompt,
                 "style": model,
@@ -166,10 +168,12 @@ class ARTA(AsyncGeneratorProvider, ProviderModelMixin):
             }

             headers = {
-                "Authorization": auth_data.get("idToken"),
+                "Authorization": auth_token,
+                # No Content-Type header for multipart/form-data, aiohttp sets it automatically
             }

-            async with session.post(cls.image_generation_url, data=image_payload, headers=headers, proxy=proxy) as image_response:
+            # Try with form data instead of JSON
+            async with session.post(cls.image_generation_url, data=form_data, headers=headers, proxy=proxy) as image_response:
                 await raise_error(f"Failed to initiate image generation", image_response)
                 image_data = await image_response.json()
                 record_id = image_data.get("record_id")
@@ -208,4 +212,4 @@ async def raise_error(message: str, response: ClientResponse):
         return
     error_text = await response.text()
     content_type = response.headers.get('Content-Type', 'unknown')
     raise ResponseError(f"{message}. Content-Type: {content_type}, Response: {error_text}")
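One note on the form-data switch above: passing a plain dict to aiohttp's data= argument sends application/x-www-form-urlencoded, while true multipart/form-data requires an aiohttp.FormData object (aiohttp then sets the Content-Type and boundary automatically, as the added comment suggests). A minimal sketch of both variants, using a placeholder URL rather than the real ARTA endpoint:

import aiohttp

async def post_urlencoded(url: str, fields: dict) -> dict:
    async with aiohttp.ClientSession() as session:
        # Plain dict -> application/x-www-form-urlencoded
        async with session.post(url, data=fields) as resp:
            return await resp.json()

async def post_multipart(url: str, fields: dict) -> dict:
    form = aiohttp.FormData()
    for key, value in fields.items():
        form.add_field(key, str(value))
    async with aiohttp.ClientSession() as session:
        # FormData -> multipart/form-data; Content-Type and boundary set automatically
        async with session.post(url, data=form) as resp:
            return await resp.json()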

View file

@@ -6,7 +6,6 @@ import re
 import json
 import random
 import string
-import base64
 from pathlib import Path
 from typing import Optional
 from datetime import datetime, timedelta
@@ -14,13 +13,11 @@ from datetime import datetime, timedelta
 from ..typing import AsyncResult, Messages, MediaListType
 from ..requests.raise_for_status import raise_for_status
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .openai.har_file import get_har_files
 from ..image import to_data_uri
-from ..cookies import get_cookies_dir
-from .helper import format_image_prompt, render_messages
-from ..providers.response import JsonConversation, ImageResponse
+from .helper import render_messages
+from ..providers.response import JsonConversation
 from ..tools.media import merge_media
-from ..errors import RateLimitError, NoValidHarFileError
+from ..errors import RateLimitError
 from .. import debug

 class Conversation(JsonConversation):
@@ -43,83 +40,66 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
     default_model = "blackboxai"
     default_vision_model = default_model
-    default_image_model = 'flux'
-
-    # Free models (available without subscription)
-    fallback_models = [
-        default_model,
-        "gpt-4o-mini",
-        "DeepSeek-V3",
-        "DeepSeek-R1",
-        "Meta-Llama-3.3-70B-Instruct-Turbo",
-        "Mistral-Small-24B-Instruct-2501",
-        "DeepSeek-LLM-Chat-(67B)",
-        "Qwen-QwQ-32B-Preview",
-        # Image models
-        "flux",
-        # Trending agent modes
-        'Python Agent',
-        'HTML Agent',
-        'Builder Agent',
-        'Java Agent',
-        'JavaScript Agent',
-        'React Agent',
-        'Android Agent',
-        'Flutter Agent',
-        'Next.js Agent',
-        'AngularJS Agent',
-        'Swift Agent',
-        'MongoDB Agent',
-        'PyTorch Agent',
-        'Xcode Agent',
-        'Azure Agent',
-        'Bitbucket Agent',
-        'DigitalOcean Agent',
-        'Docker Agent',
-        'Electron Agent',
-        'Erlang Agent',
-        'FastAPI Agent',
-        'Firebase Agent',
-        'Flask Agent',
-        'Git Agent',
-        'Gitlab Agent',
-        'Go Agent',
-        'Godot Agent',
-        'Google Cloud Agent',
-        'Heroku Agent'
+
+    # OpenRouter Free
+    openrouter_models = [
+        "Deepcoder 14B Preview",
+        "DeepHermes 3 Llama 3 8B Preview",
+        "DeepSeek R1 Zero",
+        "Dolphin3.0 Mistral 24B",
+        "Dolphin3.0 R1 Mistral 24B",
+        "Flash 3",  # FIX (<reasoning> ◁</reasoning>)
+        "Gemini 2.0 Flash Experimental",
+        "Gemma 2 9B",
+        "Gemma 3 12B",
+        "Gemma 3 1B",
+        "Gemma 3 27B",
+        "Gemma 3 4B",
+        "Kimi VL A3B Thinking",  # FIX (◁think▷ ◁/think▷)
+        "Llama 3.1 8B Instruct",
+        "Llama 3.1 Nemotron Ultra 253B v1",
+        "Llama 3.2 11B Vision Instruct",
+        "Llama 3.2 1B Instruct",
+        "Llama 3.2 3B Instruct",
+        "Llama 3.3 70B Instruct",
+        "Llama 3.3 Nemotron Super 49B v1",
+        "Llama 4 Maverick",
+        "Llama 4 Scout",
+        "Mistral 7B Instruct",
+        "Mistral Nemo",
+        "Mistral Small 3",
+        "Mistral Small 3.1 24B",
+        "Molmo 7B D",
+        "Moonlight 16B A3B Instruct",
+        "Qwen2.5 72B Instruct",
+        "Qwen2.5 7B Instruct",
+        "Qwen2.5 Coder 32B Instruct",
+        "Qwen2.5 VL 32B Instruct",
+        "Qwen2.5 VL 3B Instruct",
+        "Qwen2.5 VL 72B Instruct",
+        "Qwen2.5-VL 7B Instruct",
+        "Qwerky 72B",
+        "QwQ 32B",
+        "QwQ 32B Preview",
+        "QwQ 32B RpR v1",
+        "R1",
+        "R1 Distill Llama 70B",
+        "R1 Distill Qwen 14B",
+        "R1 Distill Qwen 32B",
     ]
-    # Premium models (require subscription)
-    premium_models = [
-        "GPT-4o",
-        "o1",
-        "o3-mini",
-        "Claude-sonnet-3.7",
-        "Claude-sonnet-3.5",
-        "Gemini-Flash-2.0",
-        "DBRX-Instruct",
-        "blackboxai-pro",
-        "Gemini-PRO"
-    ]
-    # Models available in the demo account
-    demo_models = [
+
+    models = [
         default_model,
-        "blackboxai-pro",
-        "gpt-4o-mini",
-        "GPT-4o",
-        "o1",
         "o3-mini",
+        "gpt-4.1-nano",
         "Claude-sonnet-3.7",
         "Claude-sonnet-3.5",
-        "DeepSeek-V3",
         "DeepSeek-R1",
-        "DeepSeek-LLM-Chat-(67B)",
-        "Meta-Llama-3.3-70B-Instruct-Turbo",
         "Mistral-Small-24B-Instruct-2501",
-        "Qwen-QwQ-32B-Preview",
-        # Image models
-        "flux",
+        # OpenRouter Free
+        *openrouter_models,
         # Trending agent modes
         'Python Agent',
         'HTML Agent',
@@ -152,35 +132,66 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         'Heroku Agent'
     ]

-    image_models = [default_image_model]
-    vision_models = [default_vision_model, 'GPT-4o', 'o1', 'o3-mini', 'Gemini-PRO', 'Gemini Agent', 'llama-3.1-8b Agent', 'llama-3.1-70b Agent', 'llama-3.1-405 Agent', 'Gemini-Flash-2.0', 'DeepSeek-V3']
+    vision_models = [default_vision_model, 'o3-mini']

-    userSelectedModel = ['GPT-4o', 'o1', 'o3-mini', 'Gemini-PRO', 'Claude-sonnet-3.7', 'Claude-sonnet-3.5', 'DeepSeek-V3', 'DeepSeek-R1', 'Meta-Llama-3.3-70B-Instruct-Turbo', 'Mistral-Small-24B-Instruct-2501', 'DeepSeek-LLM-Chat-(67B)', 'DBRX-Instruct', 'Qwen-QwQ-32B-Preview', 'Nous-Hermes-2-Mixtral-8x7B-DPO', 'Gemini-Flash-2.0']
+    userSelectedModel = ['o3-mini','Claude-sonnet-3.7', 'Claude-sonnet-3.5', 'DeepSeek-R1', 'Mistral-Small-24B-Instruct-2501'] + openrouter_models

     # Agent mode configurations
     agentMode = {
-        'GPT-4o': {'mode': True, 'id': "GPT-4o", 'name': "GPT-4o"},
-        'Gemini-PRO': {'mode': True, 'id': "Gemini-PRO", 'name': "Gemini-PRO"},
+        # OpenRouter Free
+        'Deepcoder 14B Preview': {'mode': True, 'id': "agentica-org/deepcoder-14b-preview:free", 'name': "Deepcoder 14B Preview"},
+        'DeepHermes 3 Llama 3 8B Preview': {'mode': True, 'id': "nousresearch/deephermes-3-llama-3-8b-preview:free", 'name': "DeepHermes 3 Llama 3 8B Preview"},
+        'DeepSeek R1 Zero': {'mode': True, 'id': "deepseek/deepseek-r1-zero:free", 'name': "DeepSeek R1 Zero"},
+        'Dolphin3.0 Mistral 24B': {'mode': True, 'id': "cognitivecomputations/dolphin3.0-mistral-24b:free", 'name': "Dolphin3.0 Mistral 24B"},
+        'Dolphin3.0 R1 Mistral 24B': {'mode': True, 'id': "cognitivecomputations/dolphin3.0-r1-mistral-24b:free", 'name': "Dolphin3.0 R1 Mistral 24B"},
+        'Flash 3': {'mode': True, 'id': "rekaai/reka-flash-3:free", 'name': "Flash 3"},
+        'Gemini 2.0 Flash Experimental': {'mode': True, 'id': "google/gemini-2.0-flash-exp:free", 'name': "Gemini 2.0 Flash Experimental"},
+        'Gemma 2 9B': {'mode': True, 'id': "google/gemma-2-9b-it:free", 'name': "Gemma 2 9B"},
+        'Gemma 3 12B': {'mode': True, 'id': "google/gemma-3-12b-it:free", 'name': "Gemma 3 12B"},
+        'Gemma 3 1B': {'mode': True, 'id': "google/gemma-3-1b-it:free", 'name': "Gemma 3 1B"},
+        'Gemma 3 27B': {'mode': True, 'id': "google/gemma-3-27b-it:free", 'name': "Gemma 3 27B"},
+        'Gemma 3 4B': {'mode': True, 'id': "google/gemma-3-4b-it:free", 'name': "Gemma 3 4B"},
+        'Kimi VL A3B Thinking': {'mode': True, 'id': "moonshotai/kimi-vl-a3b-thinking:free", 'name': "Kimi VL A3B Thinking"},
+        'Llama 3.1 8B Instruct': {'mode': True, 'id': "meta-llama/llama-3.1-8b-instruct:free", 'name': "Llama 3.1 8B Instruct"},
+        'Llama 3.1 Nemotron Ultra 253B v1': {'mode': True, 'id': "nvidia/llama-3.1-nemotron-ultra-253b-v1:free", 'name': "Llama 3.1 Nemotron Ultra 253B v1"},
+        'Llama 3.2 11B Vision Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-11b-vision-instruct:free", 'name': "Llama 3.2 11B Vision Instruct"},
+        'Llama 3.2 1B Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-1b-instruct:free", 'name': "Llama 3.2 1B Instruct"},
+        'Llama 3.2 3B Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-3b-instruct:free", 'name': "Llama 3.2 3B Instruct"},
+        'Llama 3.3 70B Instruct': {'mode': True, 'id': "meta-llama/llama-3.3-70b-instruct:free", 'name': "Llama 3.3 70B Instruct"},
+        'Llama 3.3 Nemotron Super 49B v1': {'mode': True, 'id': "nvidia/llama-3.3-nemotron-super-49b-v1:free", 'name': "Llama 3.3 Nemotron Super 49B v1"},
+        'Llama 4 Maverick': {'mode': True, 'id': "meta-llama/llama-4-maverick:free", 'name': "Llama 4 Maverick"},
+        'Llama 4 Scout': {'mode': True, 'id': "meta-llama/llama-4-scout:free", 'name': "Llama 4 Scout"},
+        'Mistral 7B Instruct': {'mode': True, 'id': "mistralai/mistral-7b-instruct:free", 'name': "Mistral 7B Instruct"},
+        'Mistral Nemo': {'mode': True, 'id': "mistralai/mistral-nemo:free", 'name': "Mistral Nemo"},
+        'Mistral Small 3': {'mode': True, 'id': "mistralai/mistral-small-24b-instruct-2501:free", 'name': "Mistral Small 3"},
+        'Mistral Small 3.1 24B': {'mode': True, 'id': "mistralai/mistral-small-3.1-24b-instruct:free", 'name': "Mistral Small 3.1 24B"},
+        'Molmo 7B D': {'mode': True, 'id': "allenai/molmo-7b-d:free", 'name': "Molmo 7B D"},
+        'Moonlight 16B A3B Instruct': {'mode': True, 'id': "moonshotai/moonlight-16b-a3b-instruct:free", 'name': "Moonlight 16B A3B Instruct"},
+        'Qwen2.5 72B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-72b-instruct:free", 'name': "Qwen2.5 72B Instruct"},
+        'Qwen2.5 7B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-7b-instruct:free", 'name': "Qwen2.5 7B Instruct"},
+        'Qwen2.5 Coder 32B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-coder-32b-instruct:free", 'name': "Qwen2.5 Coder 32B Instruct"},
+        'Qwen2.5 VL 32B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-32b-instruct:free", 'name': "Qwen2.5 VL 32B Instruct"},
+        'Qwen2.5 VL 3B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-3b-instruct:free", 'name': "Qwen2.5 VL 3B Instruct"},
+        'Qwen2.5 VL 72B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-72b-instruct:free", 'name': "Qwen2.5 VL 72B Instruct"},
+        'Qwen2.5-VL 7B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-vl-7b-instruct:free", 'name': "Qwen2.5-VL 7B Instruct"},
+        'Qwerky 72B': {'mode': True, 'id': "featherless/qwerky-72b:free", 'name': "Qwerky 72B"},
+        'QwQ 32B': {'mode': True, 'id': "qwen/qwq-32b:free", 'name': "QwQ 32B"},
+        'QwQ 32B Preview': {'mode': True, 'id': "qwen/qwq-32b-preview:free", 'name': "QwQ 32B Preview"},
+        'QwQ 32B RpR v1': {'mode': True, 'id': "arliai/qwq-32b-arliai-rpr-v1:free", 'name': "QwQ 32B RpR v1"},
+        'R1': {'mode': True, 'id': "deepseek/deepseek-r1:free", 'name': "R1"},
+        'R1 Distill Llama 70B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-llama-70b:free", 'name': "R1 Distill Llama 70B"},
+        'R1 Distill Qwen 14B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-14b:free", 'name': "R1 Distill Qwen 14B"},
+        'R1 Distill Qwen 32B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-32b:free", 'name': "R1 Distill Qwen 32B"},
+        # Default
         'Claude-sonnet-3.7': {'mode': True, 'id': "Claude-sonnet-3.7", 'name': "Claude-sonnet-3.7"},
         'Claude-sonnet-3.5': {'mode': True, 'id': "Claude-sonnet-3.5", 'name': "Claude-sonnet-3.5"},
-        'DeepSeek-V3': {'mode': True, 'id': "deepseek-chat", 'name': "DeepSeek-V3"},
         'DeepSeek-R1': {'mode': True, 'id': "deepseek-reasoner", 'name': "DeepSeek-R1"},
-        'Meta-Llama-3.3-70B-Instruct-Turbo': {'mode': True, 'id': "meta-llama/Llama-3.3-70B-Instruct-Turbo", 'name': "Meta-Llama-3.3-70B-Instruct-Turbo"},
-        'Gemini-Flash-2.0': {'mode': True, 'id': "Gemini/Gemini-Flash-2.0", 'name': "Gemini-Flash-2.0"},
         'Mistral-Small-24B-Instruct-2501': {'mode': True, 'id': "mistralai/Mistral-Small-24B-Instruct-2501", 'name': "Mistral-Small-24B-Instruct-2501"},
-        'DeepSeek-LLM-Chat-(67B)': {'mode': True, 'id': "deepseek-ai/deepseek-llm-67b-chat", 'name': "DeepSeek-LLM-Chat-(67B)"},
-        'DBRX-Instruct': {'mode': True, 'id': "databricks/dbrx-instruct", 'name': "DBRX-Instruct"},
-        'Qwen-QwQ-32B-Preview': {'mode': True, 'id': "Qwen/QwQ-32B-Preview", 'name': "Qwen-QwQ-32B-Preview"},
-        'Nous-Hermes-2-Mixtral-8x7B-DPO': {'mode': True, 'id': "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", 'name': "Nous-Hermes-2-Mixtral-8x7B-DPO"},
     }

     # Trending agent modes
     trendingAgentMode = {
-        'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"},
-        "Gemini Agent": {'mode': True, 'id': 'gemini'},
-        "llama-3.1-405 Agent": {'mode': True, 'id': "llama-3.1-405"},
-        'llama-3.1-70b Agent': {'mode': True, 'id': "llama-3.1-70b"},
-        'llama-3.1-8b Agent': {'mode': True, 'id': "llama-3.1-8b"},
         'Python Agent': {'mode': True, 'id': "python"},
         'HTML Agent': {'mode': True, 'id': "html"},
         'Builder Agent': {'mode': True, 'id': "builder"},
@@ -214,180 +225,78 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
     # Complete list of all models (for authorized users)
     _all_models = list(dict.fromkeys([
-        *fallback_models,  # Include all free models
-        *premium_models,  # Include all premium models
-        *image_models,
+        *models,  # Include all free models
         *list(agentMode.keys()),
         *list(trendingAgentMode.keys())
     ]))

-    # Initialize models with fallback_models
-    models = fallback_models
-
     model_aliases = {
-        "gpt-4o": "GPT-4o",
+        "gpt-4": default_model,
+        "gpt-4o": default_model,
+        "gpt-4o-mini": default_model,
         "claude-3.7-sonnet": "Claude-sonnet-3.7",
         "claude-3.5-sonnet": "Claude-sonnet-3.5",
-        "deepseek-v3": "DeepSeek-V3",
         "deepseek-r1": "DeepSeek-R1",
-        "deepseek-chat": "DeepSeek-LLM-Chat-(67B)",
-        "llama-3.3-70b": "Meta-Llama-3.3-70B-Instruct-Turbo",
-        "mixtral-small-24b": "Mistral-Small-24B-Instruct-2501",
-        "qwq-32b": "Qwen-QwQ-32B-Preview",
+        #
+        "deepcoder-14b": "Deepcoder 14B Preview",
+        "deephermes-3-8b": "DeepHermes 3 Llama 3 8B Preview",
+        "deepseek-r1-zero": "DeepSeek R1 Zero",
+        "deepseek-r1": "DeepSeek R1 Zero",
+        "dolphin-3.0-24b": "Dolphin3.0 Mistral 24B",
+        "dolphin-3.0-r1-24b": "Dolphin3.0 R1 Mistral 24B",
+        "reka-flash": "Flash 3",
+        "gemini-2.0-flash": "Gemini 2.0 Flash Experimental",
+        "gemma-2-9b": "Gemma 2 9B",
+        "gemma-3-12b": "Gemma 3 12B",
+        "gemma-3-1b": "Gemma 3 1B",
+        "gemma-3-27b": "Gemma 3 27B",
+        "gemma-3-4b": "Gemma 3 4B",
+        "kimi-vl-a3b-thinking": "Kimi VL A3B Thinking",
+        "llama-3.1-8b": "Llama 3.1 8B Instruct",
+        "nemotron-253b": "Llama 3.1 Nemotron Ultra 253B v1",
+        "llama-3.2-11b": "Llama 3.2 11B Vision Instruct",
+        "llama-3.2-1b": "Llama 3.2 1B Instruct",
+        "llama-3.2-3b": "Llama 3.2 3B Instruct",
+        "llama-3.3-70b": "Llama 3.3 70B Instruct",
+        "nemotron-49b": "Llama 3.3 Nemotron Super 49B v1",
+        "llama-4-maverick": "Llama 4 Maverick",
+        "llama-4-scout": "Llama 4 Scout",
+        "mistral-7b": "Mistral 7B Instruct",
+        "mistral-nemo": "Mistral Nemo",
+        "mistral-small-24b": "Mistral Small 3",
+        "mistral-small-24b": "Mistral-Small-24B-Instruct-2501",
+        "mistral-small-3.1-24b": "Mistral Small 3.1 24B",
+        "molmo-7b": "Molmo 7B D",
+        "moonlight-16b": "Moonlight 16B A3B Instruct",
+        "qwen-2.5-72b": "Qwen2.5 72B Instruct",
+        "qwen-2.5-7b": "Qwen2.5 7B Instruct",
+        "qwen-2.5-coder-32b": "Qwen2.5 Coder 32B Instruct",
+        "qwen-2.5-vl-32b": "Qwen2.5 VL 32B Instruct",
+        "qwen-2.5-vl-3b": "Qwen2.5 VL 3B Instruct",
+        "qwen-2.5-vl-72b": "Qwen2.5 VL 72B Instruct",
+        "qwen-2.5-vl-7b": "Qwen2.5-VL 7B Instruct",
+        "qwerky-72b": "Qwerky 72B",
+        "qwq-32b": "QwQ 32B",
+        "qwq-32b-preview": "QwQ 32B Preview",
+        "qwq-32b": "QwQ 32B Preview",
+        "qwq-32b-arliai": "QwQ 32B RpR v1",
+        "qwq-32b": "QwQ 32B RpR v1",
+        "deepseek-r1": "R1",
+        "deepseek-r1-distill-llama-70b": "R1 Distill Llama 70B",
+        "deepseek-r1": "R1 Distill Llama 70B",
+        "deepseek-r1-distill-qwen-14b": "R1 Distill Qwen 14B",
+        "deepseek-r1": "R1 Distill Qwen 14B",
+        "deepseek-r1-distill-qwen-32b": "R1 Distill Qwen 32B",
+        "deepseek-r1": "R1 Distill Qwen 32B",
     }

     @classmethod
-    async def get_models_async(cls) -> list:
+    def generate_session(cls, email: str, id_length: int = 21, days_ahead: int = 365) -> dict:
         """
-        Asynchronous version of get_models that checks subscription status.
-        Returns a list of available models based on subscription status.
-        Premium users get the full list of models.
-        Free users get fallback_models.
-        Demo accounts get demo_models.
-        """
-        # Check if there are valid session data in HAR files
-        session_data = cls._find_session_in_har_files()
-        if not session_data:
-            # For demo accounts - return demo models
-            debug.log(f"Blackbox: Returning demo model list with {len(cls.demo_models)} models")
-            return cls.demo_models
-
-        # Check if this is a demo session
-        demo_session = cls.generate_session()
-        is_demo = (session_data['user'].get('email') == demo_session['user'].get('email'))
-        if is_demo:
-            # For demo accounts - return demo models
-            debug.log(f"Blackbox: Returning demo model list with {len(cls.demo_models)} models")
-            return cls.demo_models
-
-        # For non-demo accounts, check subscription status
-        if 'user' in session_data and 'email' in session_data['user']:
-            subscription = await cls.check_subscription(session_data['user']['email'])
-            if subscription['status'] == "PREMIUM":
-                debug.log(f"Blackbox: Returning premium model list with {len(cls._all_models)} models")
-                return cls._all_models
-
-        # For free accounts - return free models
-        debug.log(f"Blackbox: Returning free model list with {len(cls.fallback_models)} models")
-        return cls.fallback_models
-
-    @classmethod
-    def get_models(cls) -> list:
-        """
-        Returns a list of available models based on authorization status.
-        Authorized users get the full list of models.
-        Free users get fallback_models.
-        Demo accounts get demo_models.
-
-        Note: This is a synchronous method that can't check subscription status,
-        so it falls back to the basic premium access check.
-        For more accurate results, use get_models_async when possible.
-        """
-        # Check if there are valid session data in HAR files
-        session_data = cls._find_session_in_har_files()
-        if not session_data:
-            # For demo accounts - return demo models
-            debug.log(f"Blackbox: Returning demo model list with {len(cls.demo_models)} models")
-            return cls.demo_models
-
-        # Check if this is a demo session
-        demo_session = cls.generate_session()
-        is_demo = (session_data['user'].get('email') == demo_session['user'].get('email'))
-        if is_demo:
-            # For demo accounts - return demo models
-            debug.log(f"Blackbox: Returning demo model list with {len(cls.demo_models)} models")
-            return cls.demo_models
-
-        # For non-demo accounts, check premium access
-        has_premium_access = cls._check_premium_access()
-        if has_premium_access:
-            # For premium users - all models
-            debug.log(f"Blackbox: Returning premium model list with {len(cls._all_models)} models")
-            return cls._all_models
-
-        # For free accounts - return free models
-        debug.log(f"Blackbox: Returning free model list with {len(cls.fallback_models)} models")
-        return cls.fallback_models
-
-    @classmethod
-    async def check_subscription(cls, email: str) -> dict:
-        """
-        Check subscription status for a given email using the Blackbox API.
-
-        Args:
-            email: The email to check subscription for
-
-        Returns:
-            dict: Subscription status information with keys:
-                - status: "PREMIUM" or "FREE"
-                - customerId: Customer ID if available
-                - isTrialSubscription: Whether this is a trial subscription
-        """
-        if not email:
-            return {"status": "FREE", "customerId": None, "isTrialSubscription": False}
-
-        headers = {
-            'accept': '*/*',
-            'accept-language': 'en',
-            'content-type': 'application/json',
-            'origin': 'https://www.blackbox.ai',
-            'referer': 'https://www.blackbox.ai/?ref=login-success',
-            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36'
-        }
-
-        try:
-            async with ClientSession(headers=headers) as session:
-                async with session.post(
-                    'https://www.blackbox.ai/api/check-subscription',
-                    json={"email": email}
-                ) as response:
-                    if response.status != 200:
-                        debug.log(f"Blackbox: Subscription check failed with status {response.status}")
-                        return {"status": "FREE", "customerId": None, "isTrialSubscription": False}
-
-                    result = await response.json()
-                    status = "PREMIUM" if result.get("hasActiveSubscription", False) else "FREE"
-                    return {
-                        "status": status,
-                        "customerId": result.get("customerId"),
-                        "isTrialSubscription": result.get("isTrialSubscription", False)
-                    }
-        except Exception as e:
-            debug.log(f"Blackbox: Error checking subscription: {e}")
-            return {"status": "FREE", "customerId": None, "isTrialSubscription": False}
-
-    @classmethod
-    def _check_premium_access(cls) -> bool:
-        """
-        Checks for an authorized session in HAR files.
-        Returns True if a valid session is found that differs from the demo.
-        """
-        try:
-            session_data = cls._find_session_in_har_files()
-            if not session_data:
-                return False
-
-            # Check if this is not a demo session
-            demo_session = cls.generate_session()
-            if (session_data['user'].get('email') != demo_session['user'].get('email')):
-                return True
-            return False
-        except Exception as e:
-            debug.log(f"Blackbox: Error checking premium access: {e}")
-            return False
-
-    @classmethod
-    def generate_session(cls, id_length: int = 21, days_ahead: int = 365) -> dict:
-        """
-        Generate a dynamic session with proper ID and expiry format.
+        Generate a dynamic session with proper ID and expiry format using a specific email.

         Args:
+            email: The email to use for this session
             id_length: Length of the numeric ID (default: 21)
             days_ahead: Number of days ahead for expiry (default: 365)
@@ -401,10 +310,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         future_date = datetime.now() + timedelta(days=days_ahead)
         expiry = future_date.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'

-        # Decode the encoded email
-        encoded_email = "Z2lzZWxlQGJsYWNrYm94LmFp"  # Base64 encoded email
-        email = base64.b64decode(encoded_email).decode('utf-8')
-
         # Generate random image ID for the new URL format
         chars = string.ascii_letters + string.digits + "-"
         random_img_id = ''.join(random.choice(chars) for _ in range(48))
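The expiry expression above trims strftime's microsecond field to milliseconds and appends a literal 'Z'. A standalone check of just that expression, with an illustrative output value:

from datetime import datetime, timedelta

future_date = datetime.now() + timedelta(days=365)
# '%f' yields microseconds; [:-3] keeps milliseconds, then 'Z' marks a UTC-style timestamp
expiry = future_date.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
print(expiry)  # e.g. '2026-05-15T02:52:14.123Z'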
@@ -417,68 +322,14 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                 "image": image_url,
                 "id": numeric_id
             },
-            "expires": expiry
+            "expires": expiry,
+            "isNewUser": False
         }

-    @classmethod
-    def _find_session_in_har_files(cls) -> Optional[dict]:
-        """
-        Search for valid session data in HAR files.
-
-        Returns:
-            Optional[dict]: Session data if found, None otherwise
-        """
-        try:
-            for file in get_har_files():
-                try:
-                    with open(file, 'rb') as f:
-                        har_data = json.load(f)
-
-                    for entry in har_data['log']['entries']:
-                        # Only look at blackbox API responses
-                        if 'blackbox.ai/api' in entry['request']['url']:
-                            # Look for a response that has the right structure
-                            if 'response' in entry and 'content' in entry['response']:
-                                content = entry['response']['content']
-                                # Look for both regular and Google auth session formats
-                                if ('text' in content and
-                                    isinstance(content['text'], str) and
-                                    '"user"' in content['text'] and
-                                    '"email"' in content['text'] and
-                                    '"expires"' in content['text']):
-                                    try:
-                                        # Remove any HTML or other non-JSON content
-                                        text = content['text'].strip()
-                                        if text.startswith('{') and text.endswith('}'):
-                                            # Replace escaped quotes
-                                            text = text.replace('\\"', '"')
-                                            har_session = json.loads(text)
-
-                                            # Check if this is a valid session object
-                                            if (isinstance(har_session, dict) and
-                                                'user' in har_session and
-                                                'email' in har_session['user'] and
-                                                'expires' in har_session):
-                                                debug.log(f"Blackbox: Found session in HAR file: {file}")
-                                                return har_session
-                                    except json.JSONDecodeError as e:
-                                        # Only print error for entries that truly look like session data
-                                        if ('"user"' in content['text'] and
-                                            '"email"' in content['text']):
-                                            debug.log(f"Blackbox: Error parsing likely session data: {e}")
-                except Exception as e:
-                    debug.log(f"Blackbox: Error reading HAR file {file}: {e}")
-            return None
-        except NoValidHarFileError:
-            pass
-        except Exception as e:
-            debug.log(f"Blackbox: Error searching HAR files: {e}")
-        return None
-
     @classmethod
     async def fetch_validated(cls, url: str = "https://www.blackbox.ai", force_refresh: bool = False) -> Optional[str]:
-        cache_file = Path(get_cookies_dir()) / 'blackbox.json'
+        cache_path = Path(os.path.expanduser("~")) / ".g4f" / "cache"
+        cache_file = cache_path / 'blackbox.json'

         if not force_refresh and cache_file.exists():
             try:
@@ -517,7 +368,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                         if is_valid_context(context):
                             validated_value = match.group(1)
-                            cache_file.parent.mkdir(exist_ok=True)
+                            cache_file.parent.mkdir(exist_ok=True, parents=True)
                             try:
                                 with open(cache_file, 'w') as f:
                                     json.dump({'validated_value': validated_value}, f)
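The added parents=True matters because Path.mkdir(exist_ok=True) alone still raises FileNotFoundError when an intermediate directory (here ~/.g4f) does not exist yet. A minimal reproduction of the difference, with the path taken from this diff:

from pathlib import Path

cache_file = Path.home() / ".g4f" / "cache" / "blackbox.json"
# Without parents=True this raises FileNotFoundError if ~/.g4f is missing;
# with parents=True the whole directory chain is created idempotently.
cache_file.parent.mkdir(exist_ok=True, parents=True)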
@@ -592,41 +443,14 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
             "title": ""
         }

-        # Get session data - try HAR files first, fall back to generated session
-        session_data = cls._find_session_in_har_files() or cls.generate_session()
-
-        # Log which session type is being used
-        demo_session = cls.generate_session()
-        is_demo = (session_data['user'].get('email') == demo_session['user'].get('email'))
-
-        if is_demo:
-            debug.log("Blackbox: Using generated demo session")
-            # For demo account, set default values without checking subscription
-            subscription_status = {"status": "FREE", "customerId": None, "isTrialSubscription": False}
-            # Check if the requested model is in demo_models
-            is_premium = model in cls.demo_models
-            if not is_premium:
-                debug.log(f"Blackbox: Model {model} not available in demo account, falling back to default model")
-                model = cls.default_model
-                is_premium = True
-        else:
-            debug.log(f"Blackbox: Using session from HAR file (email: {session_data['user'].get('email', 'unknown')})")
-            # Only check subscription for non-demo accounts
-            subscription_status = {"status": "FREE", "customerId": None, "isTrialSubscription": False}
-            if session_data.get('user', {}).get('email'):
-                subscription_status = await cls.check_subscription(session_data['user']['email'])
-                debug.log(f"Blackbox: Subscription status for {session_data['user']['email']}: {subscription_status['status']}")
-
-            # Determine if user has premium access based on subscription status
-            if subscription_status['status'] == "PREMIUM":
-                is_premium = True
-            else:
-                # For free accounts, check if the requested model is in fallback_models
-                is_premium = model in cls.fallback_models
-                if not is_premium:
-                    debug.log(f"Blackbox: Model {model} not available in free account, falling back to default model")
-                    model = cls.default_model
-                    is_premium = True
+        # Generate a new email for each request instead of using the one stored in conversation
+        chars = string.ascii_lowercase + string.digits
+        random_team = ''.join(random.choice(chars) for _ in range(8))
+        request_email = f"{random_team}@blackbox.ai"
+
+        # Generate a session with the new email
+        session_data = cls.generate_session(request_email)
+        debug.log(f"Blackbox: Using generated session with email {request_email}")

         data = {
             "messages": current_messages,
@@ -651,26 +475,28 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
             "mobileClient": False,
             "userSelectedModel": model if model in cls.userSelectedModel else None,
             "validated": conversation.validated_value,
-            "imageGenerationMode": model == cls.default_image_model,
+            "imageGenerationMode": False,
             "webSearchModePrompt": False,
             "deepSearchMode": False,
+            "designerMode": False,
             "domains": None,
             "vscodeClient": False,
             "codeInterpreterMode": False,
             "customProfile": {
+                "additionalInfo": "",
+                "enableNewChats": False,
                 "name": "",
                 "occupation": "",
-                "traits": [],
-                "additionalInfo": "",
-                "enableNewChats": False
+                "traits": []
             },
             "session": session_data,
-            "isPremium": is_premium,
+            "isPremium": True,
             "subscriptionCache": {
-                "status": subscription_status['status'],
-                "customerId": subscription_status['customerId'],
-                "isTrialSubscription": subscription_status['isTrialSubscription'],
-                "lastChecked": int(datetime.now().timestamp() * 1000)
+                "expiryTimestamp": None,
+                "isTrialSubscription": False,
+                "lastChecked": int(datetime.now().timestamp() * 1000),
+                "status": "FREE",
+                "customerId": None
             },
             "beastMode": False,
             "reasoningMode": False,
@@ -689,24 +515,11 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                     if "You have reached your request limit for the hour" in chunk_text:
                         raise RateLimitError(chunk_text)
                     full_response.append(chunk_text)
-                    # Only yield chunks for non-image models
-                    if model != cls.default_image_model:
-                        yield chunk_text
+                    yield chunk_text

                 full_response_text = ''.join(full_response)

-                # For image models, check for image markdown
-                if model == cls.default_image_model:
-                    image_url_match = re.search(r'!\[.*?\]\((.*?)\)', full_response_text)
-                    if image_url_match:
-                        image_url = image_url_match.group(1)
-                        yield ImageResponse(urls=[image_url], alt=format_image_prompt(messages, prompt))
-                        return
-
-                # Handle conversation history once, in one place
+                # Handle conversation history
                 if return_conversation:
                     conversation.message_history.append({"role": "assistant", "content": full_response_text})
                     yield conversation
-                # For image models that didn't produce an image, fall back to text response
-                elif model == cls.default_image_model:
-                    yield full_response_text
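A caveat worth noting about the new model_aliases table: several keys ("deepseek-r1", "qwq-32b", "mistral-small-24b") appear more than once, and in a Python dict literal the last duplicate silently wins. A minimal illustration using entries from this diff:

aliases = {
    "deepseek-r1": "DeepSeek-R1",
    "deepseek-r1": "R1",
    "deepseek-r1": "R1 Distill Qwen 32B",
}
print(aliases["deepseek-r1"])  # -> 'R1 Distill Qwen 32B'; earlier mappings are lost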

View file

@@ -35,9 +35,8 @@ class Chatai(AsyncGeneratorProvider, ProviderModelMixin):
     supports_message_history = True

     default_model = 'gpt-4o-mini-2024-07-18'
-    models = ['gpt-4o-mini-2024-07-18']
+    #
     model_aliases = {"gpt-4o-mini":default_model}
+    models = list(model_aliases.keys())

     # --- ProviderModelMixin Methods ---
     @classmethod
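The Chatai change derives models from the alias table, so the provider now advertises the friendly alias instead of the dated API name. A minimal illustration of the resulting values:

default_model = 'gpt-4o-mini-2024-07-18'
model_aliases = {"gpt-4o-mini": default_model}
models = list(model_aliases.keys())
print(models)  # -> ['gpt-4o-mini']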

View file

@@ -1,316 +1,94 @@
 from __future__ import annotations

-import time
-from aiohttp import ClientSession, ClientTimeout
 import json
-import asyncio
-import random
 import base64
+import time
+import random
 import hashlib
-from yarl import URL
+import asyncio
+from datetime import datetime
+from aiohttp import ClientSession

-from ..typing import AsyncResult, Messages, Cookies
-from ..requests.raise_for_status import raise_for_status
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import format_prompt, get_last_user_message
 from ..providers.response import FinishReason, JsonConversation
-from ..errors import ModelNotSupportedError, ResponseStatusError, RateLimitError, TimeoutError, ConversationLimitError
-
-try:
-    from bs4 import BeautifulSoup
-    has_bs4 = True
-except ImportError:
-    has_bs4 = False
-
-class DuckDuckGoSearchException(Exception):
-    """Base exception class for duckduckgo_search."""
-
-class DuckDuckGoChallengeError(ResponseStatusError):
-    """Raised when DuckDuckGo presents a challenge that needs to be solved."""

 class Conversation(JsonConversation):
-    vqd: str = None
-    vqd_hash_1: str = None
     message_history: Messages = []
-    cookies: dict = {}
-    fe_version: str = None

     def __init__(self, model: str):
         self.model = model
+        self.message_history = []

 class DDG(AsyncGeneratorProvider, ProviderModelMixin):
     label = "DuckDuckGo AI Chat"
-    url = "https://duckduckgo.com/aichat"
+    url = "https://duckduckgo.com"
     api_endpoint = "https://duckduckgo.com/duckchat/v1/chat"
     status_url = "https://duckduckgo.com/duckchat/v1/status"

     working = True
+    needs_auth = False
     supports_stream = True
     supports_system_message = True
     supports_message_history = True

     default_model = "gpt-4o-mini"
-    # Model mapping from user-friendly names to API model names
-    _chat_models = {
+    model_aliases = {
         "gpt-4": default_model,
-        "gpt-4o-mini": default_model,
+        "gpt-4o": default_model,
         "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
         "claude-3-haiku": "claude-3-haiku-20240307",
-        "o3-mini": "o3-mini",
-        "mixtral-small-24b": "mistralai/Mistral-Small-24B-Instruct-2501",
+        "mistral-small-24b": "mistralai/Mistral-Small-24B-Instruct-2501",
     }
-    # Available models (user-friendly names)
-    models = list(_chat_models.keys())
-
-    last_request_time = 0
-    max_retries = 3
-    base_delay = 2
-
-    # Class variable to store the x-fe-version across instances
-    _chat_xfe = ""
-
-    @staticmethod
-    def sha256_base64(text: str) -> str:
-        """Return the base64 encoding of the SHA256 digest of the text."""
-        sha256_hash = hashlib.sha256(text.encode("utf-8")).digest()
-        return base64.b64encode(sha256_hash).decode()
+    models = [default_model, "o3-mini"] + list(model_aliases.keys())

     @staticmethod
-    def parse_dom_fingerprint(js_text: str) -> str:
-        if not has_bs4:
-            # Fallback if BeautifulSoup is not available
-            return "1000"
-        try:
-            html_snippet = js_text.split("e.innerHTML = '")[1].split("';")[0]
-            offset_value = js_text.split("return String(")[1].split(" ")[0]
-            soup = BeautifulSoup(html_snippet, "html.parser")
-            corrected_inner_html = soup.body.decode_contents()
-            inner_html_length = len(corrected_inner_html)
-            fingerprint = int(offset_value) + inner_html_length
-            return str(fingerprint)
-        except Exception:
-            # Return a fallback value if parsing fails
-            return "1000"
+    def generate_user_agent() -> str:
+        return f"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.{random.randint(1000,9999)}.0 Safari/537.36"

     @staticmethod
-    def parse_server_hashes(js_text: str) -> list:
-        try:
-            return js_text.split('server_hashes: ["', maxsplit=1)[1].split('"]', maxsplit=1)[0].split('","')
-        except Exception:
-            # Return a fallback value if parsing fails
-            return ["1", "2"]
-
-    @classmethod
-    def build_x_vqd_hash_1(cls, vqd_hash_1: str, headers: dict) -> str:
-        """Build the x-vqd-hash-1 header value."""
-        try:
-            # If we received a valid base64 string, try to decode it
-            if vqd_hash_1 and len(vqd_hash_1) > 20:
-                try:
-                    # Try to decode and parse as JSON first
-                    decoded_json = json.loads(base64.b64decode(vqd_hash_1).decode())
-                    # If it's already a complete structure with meta, return it as is
-                    if isinstance(decoded_json, dict) and "meta" in decoded_json:
-                        return vqd_hash_1
-                    # Otherwise, extract what we can from it
-                    if isinstance(decoded_json, dict) and "server_hashes" in decoded_json:
-                        server_hashes = decoded_json.get("server_hashes", ["1", "2"])
-                    else:
-                        # Fall back to parsing from string
-                        decoded = base64.b64decode(vqd_hash_1).decode()
-                        server_hashes = cls.parse_server_hashes(decoded)
-                except (json.JSONDecodeError, UnicodeDecodeError):
-                    # If it's not valid JSON, try to parse it as a string
-                    decoded = base64.b64decode(vqd_hash_1).decode()
-                    server_hashes = cls.parse_server_hashes(decoded)
-            else:
-                # Default server hashes if we can't extract them
-                server_hashes = ["1", "2"]
-
-            # Generate fingerprints
-            dom_fingerprint = "1000"  # Default value
-            ua_fingerprint = headers.get("User-Agent", "") + headers.get("sec-ch-ua", "")
-            ua_hash = cls.sha256_base64(ua_fingerprint)
-            dom_hash = cls.sha256_base64(dom_fingerprint)
-
-            # Create a challenge ID (random hex string)
-            challenge_id = ''.join(random.choice('0123456789abcdef') for _ in range(40)) + 'h8jbt'
-
-            # Build the complete structure including meta
-            final_result = {
-                "server_hashes": server_hashes,
-                "client_hashes": [ua_hash, dom_hash],
-                "signals": {},
-                "meta": {
-                    "v": "1",
-                    "challenge_id": challenge_id,
-                    "origin": "https://duckduckgo.com",
-                    "stack": "Error\nat ke (https://duckduckgo.com/dist/wpm.chat.js:1:29526)\nat async dispatchServiceInitialVQD (https://duckduckgo.com/dist/wpm.chat.js:1:45076)"
-                }
-            }
-            base64_final_result = base64.b64encode(json.dumps(final_result).encode()).decode()
-            return base64_final_result
-        except Exception as e:
-            # If anything fails, return an empty string
-            return ""
-
-    @classmethod
-    def validate_model(cls, model: str) -> str:
-        """Validates and returns the correct model name for the API"""
-        if not model:
-            return cls.default_model
-        # Check aliases first
-        if model in cls.model_aliases:
-            model = cls.model_aliases[model]
-        # Check if it's a valid model name
-        if model not in cls.models:
-            raise ModelNotSupportedError(f"Model {model} not supported. Available models: {cls.models}")
-        return model
-
-    @classmethod
-    async def sleep(cls, multiplier=1.0):
-        """Implements rate limiting between requests"""
-        now = time.time()
-        if cls.last_request_time > 0:
-            delay = max(0.0, 1.5 - (now - cls.last_request_time)) * multiplier
-            if delay > 0:
-                await asyncio.sleep(delay)
-        cls.last_request_time = time.time()
-
-    @classmethod
-    async def get_default_cookies(cls, session: ClientSession) -> dict:
-        """Obtains default cookies needed for API requests"""
-        try:
-            await cls.sleep()
-            # Make initial request to get cookies
-            async with session.get(cls.url) as response:
-                # Set the required cookies
-                cookies = {}
-                cookies_dict = {'dcs': '1', 'dcm': '3'}
-
-                # Add any cookies from the response
-                for cookie in response.cookies.values():
-                    cookies[cookie.key] = cookie.value
-
-                # Ensure our required cookies are set
-                for name, value in cookies_dict.items():
-                    cookies[name] = value
-                    url_obj = URL(cls.url)
-                    session.cookie_jar.update_cookies({name: value}, url_obj)
-
-                # Make a second request to the status endpoint to get any additional cookies
-                headers = {
-                    "accept": "text/event-stream",
-                    "accept-language": "en",
-                    "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
-                    "origin": "https://duckduckgo.com",
-                    "referer": "https://duckduckgo.com/",
-                }
-                await cls.sleep()
-                async with session.get(cls.status_url, headers=headers) as status_response:
-                    # Add any cookies from the status response
-                    for cookie in status_response.cookies.values():
-                        cookies[cookie.key] = cookie.value
-                        url_obj = URL(cls.url)
-                        session.cookie_jar.update_cookies({cookie.key: cookie.value}, url_obj)
-
-                return cookies
-        except Exception as e:
-            # Return at least the required cookies on error
-            cookies = {'dcs': '1', 'dcm': '3'}
-            url_obj = URL(cls.url)
-            for name, value in cookies.items():
-                session.cookie_jar.update_cookies({name: value}, url_obj)
-            return cookies
-
-    @classmethod
-    async def fetch_fe_version(cls, session: ClientSession) -> str:
-        """Fetches the fe-version from the initial page load."""
-        if cls._chat_xfe:
-            return cls._chat_xfe
-
-        try:
-            url = "https://duckduckgo.com/?q=DuckDuckGo+AI+Chat&ia=chat&duckai=1"
-            await cls.sleep()
-            async with session.get(url) as response:
-                await raise_for_status(response)
-                content = await response.text()
-
-                # Extract x-fe-version components
-                try:
-                    # Try to extract the version components
-                    xfe1 = content.split('__DDG_BE_VERSION__="', 1)[1].split('"', 1)[0]
-                    xfe2 = content.split('__DDG_FE_CHAT_HASH__="', 1)[1].split('"', 1)[0]
-
-                    # Format it like "serp_YYYYMMDD_HHMMSS_ET-hash"
-                    from datetime import datetime
-                    current_date = datetime.now().strftime("%Y%m%d_%H%M%S")
-                    cls._chat_xfe = f"serp_{current_date}_ET-{xfe2}"
-                    return cls._chat_xfe
-                except Exception:
-                    # Fallback to a default format if extraction fails
-                    from datetime import datetime
-                    current_date = datetime.now().strftime("%Y%m%d_%H%M%S")
-                    cls._chat_xfe = f"serp_{current_date}_ET-78c2e87e3d286691cc21"
-                    return cls._chat_xfe
-        except Exception:
-            # Fallback to a default format if request fails
-            from datetime import datetime
-            current_date = datetime.now().strftime("%Y%m%d_%H%M%S")
-            cls._chat_xfe = f"serp_{current_date}_ET-78c2e87e3d286691cc21"
-            return cls._chat_xfe
-
-    @classmethod
-    async def fetch_vqd_and_hash(cls, session: ClientSession, retry_count: int = 0) -> tuple[str, str]:
-        """Fetches the required VQD token and hash for the chat session with retries."""
-        headers = {
-            "accept": "text/event-stream",
-            "accept-language": "en",
-            "cache-control": "no-cache",
-            "pragma": "no-cache",
-            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
-            "origin": "https://duckduckgo.com",
-            "referer": "https://duckduckgo.com/",
-            "x-vqd-accept": "1",
-        }
-
-        # Make sure we have cookies first
-        if len(session.cookie_jar) == 0:
-            await cls.get_default_cookies(session)
-
-        try:
-            await cls.sleep(multiplier=1.0 + retry_count * 0.5)
-            async with session.get(cls.status_url, headers=headers) as response:
-                await raise_for_status(response)
-
-                vqd = response.headers.get("x-vqd-4", "")
-                vqd_hash_1 = response.headers.get("x-vqd-hash-1", "")
-
-                if vqd:
-                    # Return the fetched vqd and vqd_hash_1
-                    return vqd, vqd_hash_1
-
-                response_text = await response.text()
-                raise RuntimeError(f"Failed to fetch VQD token and hash: {response.status} {response_text}")
-        except Exception as e:
-            if retry_count < cls.max_retries:
-                wait_time = cls.base_delay * (2 ** retry_count) * (1 + random.random())
-                await asyncio.sleep(wait_time)
-                return await cls.fetch_vqd_and_hash(session, retry_count + 1)
-            else:
-                raise RuntimeError(f"Failed to fetch VQD token and hash after {cls.max_retries} attempts: {str(e)}")
+    def generate_fe_signals() -> str:
+        current_time = int(time.time() * 1000)
+        signals_data = {
+            "start": current_time - 35000,
+            "events": [
+                {"name": "onboarding_impression_1", "delta": 383},
+                {"name": "onboarding_impression_2", "delta": 6004},
+                {"name": "onboarding_finish", "delta": 9690},
+                {"name": "startNewChat", "delta": 10082},
+                {"name": "initSwitchModel", "delta": 16586}
+            ],
+            "end": 35163
+        }
+        return base64.b64encode(json.dumps(signals_data).encode()).decode()
+
+    @staticmethod
+    def generate_fe_version(page_content: str = "") -> str:
+        try:
+            fe_hash = page_content.split('__DDG_FE_CHAT_HASH__="', 1)[1].split('"', 1)[0]
+            return f"serp_20250510_052906_ET-{fe_hash}"
+        except Exception:
+            return "serp_20250510_052906_ET-ed4f51dc2e106020bc4b"
+
+    @staticmethod
+    def generate_x_vqd_hash_1(vqd: str, fe_version: str) -> str:
+        # Placeholder logic; in reality DuckDuckGo uses dynamic JS challenge
+        concat = f"{vqd}#{fe_version}"
+        hash_digest = hashlib.sha256(concat.encode()).digest()
+        b64 = base64.b64encode(hash_digest).decode()
+        return base64.b64encode(json.dumps({
+            "server_hashes": [],
+            "client_hashes": [b64],
+            "signals": {},
+            "meta": {
+                "v": "1",
+                "challenge_id": hashlib.md5(concat.encode()).hexdigest(),
+                "origin": "https://duckduckgo.com",
+                "stack": "Generated in Python"
+            }
+        }).encode()).decode()

     @classmethod
     async def create_async_generator(
@ -318,229 +96,112 @@ class DDG(AsyncGeneratorProvider, ProviderModelMixin):
model: str, model: str,
messages: Messages, messages: Messages,
proxy: str = None, proxy: str = None,
timeout: int = 60,
cookies: Cookies = None,
conversation: Conversation = None, conversation: Conversation = None,
return_conversation: bool = True, return_conversation: bool = True,
retry_count: int = 0,
**kwargs **kwargs
) -> AsyncResult: ) -> AsyncResult:
model = cls.validate_model(model) model = cls.get_model(model)
retry_count = 0
while retry_count <= cls.max_retries: if conversation is None:
conversation = Conversation(model)
conversation.message_history = messages.copy()
else:
last_message = next((m for m in reversed(messages) if m["role"] == "user"), None)
if last_message and last_message not in conversation.message_history:
conversation.message_history.append(last_message)
base_headers = {
"accept-language": "en-US,en;q=0.9",
"dnt": "1",
"origin": "https://duckduckgo.com",
"referer": "https://duckduckgo.com/",
"sec-ch-ua": '"Chromium";v="135", "Not-A.Brand";v="8"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": cls.generate_user_agent(),
}
cookies = {'dcs': '1', 'dcm': '3'}
formatted_prompt = format_prompt(conversation.message_history) if len(conversation.message_history) > 1 else get_last_user_message(messages)
data = {"model": model, "messages": [{"role": "user", "content": formatted_prompt}], "canUseTools": False}
async with ClientSession(cookies=cookies) as session:
try: try:
session_timeout = ClientTimeout(total=timeout) # Step 1: Initial page load
async with ClientSession(timeout=session_timeout, cookies=cookies) as session: async with session.get(f"{cls.url}/?q=DuckDuckGo+AI+Chat&ia=chat&duckai=1",
# Step 1: Ensure we have the fe_version headers={**base_headers, "accept": "text/html"}, proxy=proxy) as r:
if not cls._chat_xfe: r.raise_for_status()
cls._chat_xfe = await cls.fetch_fe_version(session) page = await r.text()
fe_version = cls.generate_fe_version(page)
# Step 2: Initialize or update conversation
if conversation is None:
# Get initial cookies if not provided
if not cookies:
await cls.get_default_cookies(session)
# Create a new conversation
conversation = Conversation(model)
conversation.fe_version = cls._chat_xfe
# Step 3: Get VQD tokens
vqd, vqd_hash_1 = await cls.fetch_vqd_and_hash(session)
conversation.vqd = vqd
conversation.vqd_hash_1 = vqd_hash_1
conversation.message_history = [{"role": "user", "content": format_prompt(messages)}]
else:
# Update existing conversation with new message
last_message = get_last_user_message(messages.copy())
conversation.message_history.append({"role": "user", "content": last_message})
# Step 4: Prepare headers with proper x-vqd-hash-1
headers = {
"accept": "text/event-stream",
"accept-language": "en",
"cache-control": "no-cache",
"content-type": "application/json",
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
"origin": "https://duckduckgo.com",
"referer": "https://duckduckgo.com/",
"pragma": "no-cache",
"priority": "u=1, i",
"sec-ch-ua": '"Not:A-Brand";v="24", "Chromium";v="134"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"x-fe-version": conversation.fe_version or cls._chat_xfe,
"x-vqd-4": conversation.vqd,
}
# For the first request, send an empty x-vqd-hash-1 header
# This matches the behavior in the duckduckgo_search module
headers["x-vqd-hash-1"] = ""
# Step 5: Prepare the request data # Step 2: Get VQD
# Convert the user-friendly model name to the API model name status_headers = {**base_headers, "accept": "*/*", "cache-control": "no-store", "x-vqd-accept": "1"}
api_model = cls._chat_models.get(model, model) async with session.get(cls.status_url, headers=status_headers, proxy=proxy) as r:
r.raise_for_status()
data = { vqd = r.headers.get("x-vqd-4", "") or f"4-{random.randint(10**29, 10**30 - 1)}"
"model": api_model,
"messages": conversation.message_history,
}
# Step 6: Send the request x_vqd_hash_1 = cls.generate_x_vqd_hash_1(vqd, fe_version)
await cls.sleep(multiplier=1.0 + retry_count * 0.5)
async with session.post(cls.api_endpoint, json=data, headers=headers, proxy=proxy) as response: # Step 3: Actual chat request
# Handle 429 and 418 errors specifically chat_headers = {
if response.status == 429: **base_headers,
response_text = await response.text() "accept": "text/event-stream",
"content-type": "application/json",
if retry_count < cls.max_retries: "x-fe-signals": cls.generate_fe_signals(),
retry_count += 1 "x-fe-version": fe_version,
wait_time = cls.base_delay * (2 ** retry_count) * (1 + random.random()) "x-vqd-4": vqd,
await asyncio.sleep(wait_time) "x-vqd-hash-1": x_vqd_hash_1,
}
# Get fresh tokens and cookies
cookies = await cls.get_default_cookies(session) async with session.post(cls.api_endpoint, json=data, headers=chat_headers, proxy=proxy) as response:
continue if response.status != 200:
else: error_text = await response.text()
raise RateLimitError(f"Rate limited after {cls.max_retries} retries") if "ERR_BN_LIMIT" in error_text:
elif response.status == 418: yield "Blocked by DuckDuckGo: Bot limit exceeded (ERR_BN_LIMIT)."
# Check if it's a challenge error return
if "ERR_INVALID_VQD" in error_text and retry_count < 3:
await asyncio.sleep(random.uniform(2.5, 5.5))
async for chunk in cls.create_async_generator(
model, messages, proxy, conversation, return_conversation, retry_count + 1, **kwargs
):
yield chunk
return
yield f"Error: HTTP {response.status} - {error_text}"
return
full_message = ""
async for line in response.content:
line_text = line.decode("utf-8").strip()
if line_text.startswith("data:"):
payload = line_text[5:].strip()
if payload == "[DONE]":
if full_message:
conversation.message_history.append({"role": "assistant", "content": full_message})
if return_conversation:
yield conversation
yield FinishReason("stop")
break
try: try:
response_text = await response.text() msg = json.loads(payload)
try: if msg.get("action") == "error":
response_json = json.loads(response_text) yield f"Error: {msg.get('type', 'unknown')}"
break
# Extract challenge data if available if "message" in msg:
challenge_data = None content = msg["message"]
if response_json.get("type") == "ERR_CHALLENGE" and "cd" in response_json: yield content
challenge_data = response_json["cd"] full_message += content
except json.JSONDecodeError:
if retry_count < cls.max_retries: continue
retry_count += 1
wait_time = cls.base_delay * (2 ** retry_count) * (1 + random.random())
await asyncio.sleep(wait_time)
# Reset tokens and try again with fresh session
conversation = None
cls._chat_xfe = ""
# Get fresh cookies
cookies = await cls.get_default_cookies(session)
# If we have challenge data, try to use it
if challenge_data and isinstance(challenge_data, dict):
# Extract any useful information from challenge data
# This could be used to build a better response in the future
pass
continue
else:
raise DuckDuckGoChallengeError(f"Challenge error after {cls.max_retries} retries")
except json.JSONDecodeError:
# If we can't parse the JSON, assume it's a challenge error anyway
if retry_count < cls.max_retries:
retry_count += 1
wait_time = cls.base_delay * (2 ** retry_count) * (1 + random.random())
await asyncio.sleep(wait_time)
# Reset tokens and try again with fresh session
conversation = None
cls._chat_xfe = ""
cookies = await cls.get_default_cookies(session)
continue
else:
raise DuckDuckGoChallengeError(f"Challenge error after {cls.max_retries} retries")
except Exception as e:
# If any other error occurs during handling, still try to recover
if retry_count < cls.max_retries:
retry_count += 1
wait_time = cls.base_delay * (2 ** retry_count) * (1 + random.random())
await asyncio.sleep(wait_time)
# Reset tokens and try again with fresh session
conversation = None
cls._chat_xfe = ""
cookies = await cls.get_default_cookies(session)
continue
else:
raise DuckDuckGoChallengeError(f"Challenge error after {cls.max_retries} retries: {str(e)}")
# For other status codes, use the standard error handler
await raise_for_status(response)
reason = None
full_message = ""
# Step 7: Process the streaming response
async for line in response.content:
line = line.decode("utf-8").strip()
if line.startswith("data:"):
try:
message = json.loads(line[5:].strip())
except json.JSONDecodeError:
continue
if "action" in message and message["action"] == "error":
error_type = message.get("type", "")
if message.get("status") == 429:
if error_type == "ERR_CONVERSATION_LIMIT":
raise ConversationLimitError(error_type)
raise RateLimitError(error_type)
elif message.get("status") == 418 and error_type == "ERR_CHALLENGE":
# Handle challenge error by refreshing tokens and retrying
if retry_count < cls.max_retries:
# Don't raise here, let the outer exception handler retry
raise DuckDuckGoChallengeError(f"Challenge detected: {error_type}")
raise DuckDuckGoSearchException(error_type)
if "message" in message:
if message["message"]:
yield message["message"]
full_message += message["message"]
reason = "length"
else:
reason = "stop"
# Step 8: Update conversation with response information
# Always update the VQD tokens from the response headers
conversation.vqd = response.headers.get("x-vqd-4", conversation.vqd)
conversation.vqd_hash_1 = response.headers.get("x-vqd-hash-1", conversation.vqd_hash_1)
# Update cookies
conversation.cookies = {
n: c.value
for n, c in session.cookie_jar.filter_cookies(URL(cls.url)).items()
}
# If requested, return the updated conversation
if return_conversation:
conversation.message_history.append({"role": "assistant", "content": full_message})
yield conversation
if reason is not None:
yield FinishReason(reason)
# If we got here, the request was successful
break
except (RateLimitError, ResponseStatusError, DuckDuckGoChallengeError) as e:
if ("429" in str(e) or isinstance(e, DuckDuckGoChallengeError)) and retry_count < cls.max_retries:
retry_count += 1
wait_time = cls.base_delay * (2 ** retry_count) * (1 + random.random())
await asyncio.sleep(wait_time)
# For challenge errors, refresh tokens and cookies
if isinstance(e, DuckDuckGoChallengeError):
# Reset conversation to force new token acquisition
conversation = None
# Clear class cache to force refresh
cls._chat_xfe = ""
else:
raise
except asyncio.TimeoutError as e:
raise TimeoutError(f"Request timed out: {str(e)}")
except Exception as e: except Exception as e:
raise if retry_count < 3:
await asyncio.sleep(random.uniform(2.5, 5.5))
async for chunk in cls.create_async_generator(
model, messages, proxy, conversation, return_conversation, retry_count + 1, **kwargs
):
yield chunk
else:
yield f"Error: {str(e)}"

View file

@@ -8,12 +8,24 @@ class DeepInfraChat(OpenaiTemplate):
working = True working = True
default_model = 'deepseek-ai/DeepSeek-V3' default_model = 'deepseek-ai/DeepSeek-V3'
default_vision_model = 'openbmb/MiniCPM-Llama3-V-2_5' default_vision_model = 'microsoft/Phi-4-multimodal-instruct'
vision_models = [default_vision_model, 'meta-llama/Llama-3.2-90B-Vision-Instruct'] vision_models = [default_vision_model, 'meta-llama/Llama-3.2-90B-Vision-Instruct']
models = [ models = [
'deepseek-ai/DeepSeek-Prover-V2-671B',
'Qwen/Qwen3-235B-A22B',
'Qwen/Qwen3-30B-A3B',
'Qwen/Qwen3-32B',
'Qwen/Qwen3-14B',
'meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8',
'meta-llama/Llama-4-Scout-17B-16E-Instruct',
'microsoft/phi-4-reasoning-plus',
'meta-llama/Llama-Guard-4-12B',
'Qwen/QwQ-32B',
'deepseek-ai/DeepSeek-V3-0324',
'google/gemma-3-27b-it',
'google/gemma-3-12b-it',
'meta-llama/Meta-Llama-3.1-8B-Instruct', 'meta-llama/Meta-Llama-3.1-8B-Instruct',
'meta-llama/Llama-3.3-70B-Instruct-Turbo', 'meta-llama/Llama-3.3-70B-Instruct-Turbo',
'meta-llama/Llama-3.3-70B-Instruct',
default_model, default_model,
'mistralai/Mistral-Small-24B-Instruct-2501', 'mistralai/Mistral-Small-24B-Instruct-2501',
'deepseek-ai/DeepSeek-R1', 'deepseek-ai/DeepSeek-R1',
@@ -23,37 +35,48 @@ class DeepInfraChat(OpenaiTemplate):
'microsoft/phi-4', 'microsoft/phi-4',
'microsoft/WizardLM-2-8x22B', 'microsoft/WizardLM-2-8x22B',
'Qwen/Qwen2.5-72B-Instruct', 'Qwen/Qwen2.5-72B-Instruct',
'01-ai/Yi-34B-Chat',
'Qwen/Qwen2-72B-Instruct', 'Qwen/Qwen2-72B-Instruct',
'cognitivecomputations/dolphin-2.6-mixtral-8x7b', 'cognitivecomputations/dolphin-2.6-mixtral-8x7b',
'cognitivecomputations/dolphin-2.9.1-llama-3-70b', 'cognitivecomputations/dolphin-2.9.1-llama-3-70b',
'databricks/dbrx-instruct',
'deepinfra/airoboros-70b', 'deepinfra/airoboros-70b',
'lizpreciatior/lzlv_70b_fp16_hf', 'lizpreciatior/lzlv_70b_fp16_hf',
'microsoft/WizardLM-2-7B', 'microsoft/WizardLM-2-7B',
'mistralai/Mixtral-8x22B-Instruct-v0.1', 'mistralai/Mixtral-8x22B-Instruct-v0.1',
] + vision_models ] + vision_models
model_aliases = { model_aliases = {
"deepseek-prover-v2-671b": "deepseek-ai/DeepSeek-Prover-V2-671B",
"qwen-3-235b": "Qwen/Qwen3-235B-A22B",
"qwen-3-30b": "Qwen/Qwen3-30B-A3B",
"qwen-3-32b": "Qwen/Qwen3-32B",
"qwen-3-14b": "Qwen/Qwen3-14B",
"llama-4-maverick": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
"llama-4-maverick-17b": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
"llama-4-scout": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
"llama-4-scout-17b": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
"phi-4-reasoning-plus": "microsoft/phi-4-reasoning-plus",
#"": "meta-llama/Llama-Guard-4-12B",
"qwq-32b": "Qwen/QwQ-32B",
"deepseek-v3": "deepseek-ai/DeepSeek-V3-0324",
"deepseek-v3-0324": "deepseek-ai/DeepSeek-V3-0324",
"gemma-3-27b": "google/gemma-3-27b-it",
"gemma-3-12b": "google/gemma-3-12b-it",
"phi-4-multimodal": "microsoft/Phi-4-multimodal-instruct",
"llama-3.1-8b": "meta-llama/Meta-Llama-3.1-8B-Instruct", "llama-3.1-8b": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"llama-3.2-90b": "meta-llama/Llama-3.2-90B-Vision-Instruct", "llama-3.2-90b": "meta-llama/Llama-3.2-90B-Vision-Instruct",
"llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
"llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct", "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct",
"deepseek-v3": default_model, "deepseek-v3": default_model,
"mixtral-small-24b": "mistralai/Mistral-Small-24B-Instruct-2501", "mixtral-small-24b": "mistralai/Mistral-Small-24B-Instruct-2501",
"deepseek-r1": "deepseek-ai/DeepSeek-R1-Turbo", "deepseek-r1-turbo": "deepseek-ai/DeepSeek-R1-Turbo",
"deepseek-r1": "deepseek-ai/DeepSeek-R1", "deepseek-r1": "deepseek-ai/DeepSeek-R1",
"deepseek-r1-distill-llama": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B", "deepseek-r1-distill-llama-70b": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
"deepseek-r1-distill-qwen": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", "deepseek-r1-distill-qwen-32b": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
"phi-4": "microsoft/phi-4", "phi-4": "microsoft/phi-4",
"wizardlm-2-8x22b": "microsoft/WizardLM-2-8x22B", "wizardlm-2-8x22b": "microsoft/WizardLM-2-8x22B",
"yi-34b": "01-ai/Yi-34B-Chat",
"qwen-2-72b": "Qwen/Qwen2-72B-Instruct", "qwen-2-72b": "Qwen/Qwen2-72B-Instruct",
"dolphin-2.6": "cognitivecomputations/dolphin-2.6-mixtral-8x7b", "dolphin-2.6": "cognitivecomputations/dolphin-2.6-mixtral-8x7b",
"dolphin-2.9": "cognitivecomputations/dolphin-2.9.1-llama-3-70b", "dolphin-2.9": "cognitivecomputations/dolphin-2.9.1-llama-3-70b",
"dbrx-instruct": "databricks/dbrx-instruct",
"airoboros-70b": "deepinfra/airoboros-70b", "airoboros-70b": "deepinfra/airoboros-70b",
"lzlv-70b": "lizpreciatior/lzlv_70b_fp16_hf", "lzlv-70b": "lizpreciatior/lzlv_70b_fp16_hf",
"wizardlm-2-7b": "microsoft/WizardLM-2-7B", "wizardlm-2-7b": "microsoft/WizardLM-2-7B",
"mixtral-8x22b": "mistralai/Mixtral-8x22B-Instruct-v0.1", "mixtral-8x22b": "mistralai/Mixtral-8x22B-Instruct-v0.1"
"minicpm-2.5": "openbmb/MiniCPM-Llama3-V-2_5",
} }
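
Note: the expanded alias table maps the short names used across g4f onto DeepInfra's fully qualified model ids, so both spellings reach the same backend. A usage sketch, assuming the repository's synchronous client API:

from g4f.client import Client
from g4f.Provider import DeepInfraChat

# "qwen-3-32b" resolves through model_aliases to "Qwen/Qwen3-32B".
client = Client(provider=DeepInfraChat)
response = client.chat.completions.create(
    model="qwen-3-32b",
    messages=[{"role": "user", "content": "Say hello."}],
)
print(response.choices[0].message.content)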

View file

@@ -12,7 +12,7 @@ from .helper import format_prompt
class Dynaspark(AsyncGeneratorProvider, ProviderModelMixin): class Dynaspark(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://dynaspark.onrender.com" url = "https://dynaspark.onrender.com"
login_url = None login_url = None
api_endpoint = "https://dynaspark.onrender.com/generate_response" api_endpoint = "https://dynaspark.onrender.com/dsai_fuck_u_spammer"
working = True working = True
needs_auth = False needs_auth = False

View file

@@ -1,47 +0,0 @@
from __future__ import annotations
import json
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..requests.raise_for_status import raise_for_status
from .helper import format_prompt, get_system_prompt
class Goabror(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://goabror.uz"
api_endpoint = "https://goabror.uz/api/gpt.php"
working = True
default_model = 'gpt-4'
models = [default_model]
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
'accept-language': 'en-US,en;q=0.9',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36'
}
async with ClientSession(headers=headers) as session:
params = {
"user": format_prompt(messages, include_system=False),
"system": get_system_prompt(messages),
}
async with session.get(f"{cls.api_endpoint}", params=params, proxy=proxy) as response:
await raise_for_status(response)
text_response = await response.text()
try:
json_response = json.loads(text_response)
if "data" in json_response:
yield json_response["data"]
else:
yield text_response
except json.JSONDecodeError:
yield text_response

View file

@@ -1,77 +0,0 @@
from __future__ import annotations
from ..typing import AsyncResult, Messages
from .template import OpenaiTemplate
class Jmuz(OpenaiTemplate):
url = "https://discord.gg/Ew6JzjA2NR"
api_base = "https://jmuz.me/gpt/api/v2"
api_key = "prod"
working = True
supports_system_message = False
default_model = "gpt-4o"
model_aliases = {
"qwq-32b": "qwq-32b-preview",
"gemini-1.5-flash": "gemini-flash",
"gemini-1.5-pro": "gemini-pro",
"gemini-2.0-flash-thinking": "gemini-thinking",
"deepseek-chat": "deepseek-v3",
}
@classmethod
def get_models(cls, **kwargs):
if not cls.models:
cls.models = super().get_models(api_key=cls.api_key, api_base=cls.api_base)
return cls.models
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
stream: bool = True,
api_key: str = None, # Remove api_key from kwargs
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
headers = {
"Authorization": f"Bearer {cls.api_key}",
"Content-Type": "application/json",
"accept": "*/*",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
}
started = False
buffer = ""
async for chunk in super().create_async_generator(
model=model,
messages=messages,
api_base=cls.api_base,
api_key=cls.api_key,
stream=cls.supports_stream,
headers=headers,
**kwargs
):
if isinstance(chunk, str):
buffer += chunk
if "Join for free".startswith(buffer) or buffer.startswith("Join for free"):
if buffer.endswith("\n"):
buffer = ""
continue
if "https://discord.gg/".startswith(buffer) or "https://discord.gg/" in buffer:
if "..." in buffer:
buffer = ""
continue
if "o1-preview".startswith(buffer) or buffer.startswith("o1-preview"):
if "\n" in buffer:
buffer = ""
continue
if not started:
buffer = buffer.lstrip()
if buffer:
started = True
yield buffer
buffer = ""
else:
yield chunk
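
Note: the removed provider's main trick was ad suppression — it buffered streamed chunks until it could tell whether they were the start of an injected promotional line ("Join for free", a Discord invite) and only then yielded or dropped them. A reduced sketch of that buffering filter (banned_prefixes is a stand-in for the hard-coded checks above):

def filter_stream(chunks, banned_prefixes=("Join for free", "https://discord.gg/")):
    # Hold text back while it could still be the start of a banned fragment;
    # discard completed banned lines, pass everything else through.
    buffer = ""
    for chunk in chunks:
        buffer += chunk
        if any(p.startswith(buffer) or buffer.startswith(p) for p in banned_prefixes):
            if buffer.endswith("\n"):
                buffer = ""  # a full banned line arrived; drop it
            continue         # still ambiguous; keep buffering
        yield buffer
        buffer = ""

print("".join(filter_stream(["Jo", "in for free\n", "Hello", " world"])))  # -> Hello world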

View file

@@ -1,28 +1,190 @@
from __future__ import annotations from __future__ import annotations
from .hf.HuggingChat import HuggingChat import json
import re
import uuid
from aiohttp import ClientSession, FormData
class LambdaChat(HuggingChat): from ..typing import AsyncResult, Messages
from ..requests import raise_for_status
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt, get_last_user_message
from ..providers.response import JsonConversation, TitleGeneration, Reasoning, FinishReason
class LambdaChat(AsyncGeneratorProvider, ProviderModelMixin):
label = "Lambda Chat" label = "Lambda Chat"
domain = "lambda.chat" url = "https://lambda.chat"
url = f"https://{domain}" conversation_url = f"{url}/conversation"
working = True working = True
use_nodriver = False
needs_auth = False
default_model = "deepseek-llama3.3-70b" default_model = "deepseek-llama3.3-70b"
reasoning_model = "deepseek-r1" reasoning_model = "deepseek-r1"
image_models = [] models = [
fallback_models = [
default_model, default_model,
reasoning_model, reasoning_model,
"hermes-3-llama-3.1-405b-fp8", "hermes-3-llama-3.1-405b-fp8",
"hermes3-405b-fp8-128k",
"llama3.1-nemotron-70b-instruct", "llama3.1-nemotron-70b-instruct",
"lfm-40b", "lfm-40b",
"llama3.3-70b-instruct-fp8" "llama3.3-70b-instruct-fp8",
"qwen25-coder-32b-instruct"
] ]
model_aliases = { model_aliases = {
"deepseek-v3": default_model,
"hermes-3": "hermes-3-llama-3.1-405b-fp8", "hermes-3": "hermes-3-llama-3.1-405b-fp8",
"hermes-3-405b": "hermes3-405b-fp8-128k",
"nemotron-70b": "llama3.1-nemotron-70b-instruct", "nemotron-70b": "llama3.1-nemotron-70b-instruct",
"llama-3.3-70b": "llama3.3-70b-instruct-fp8" "qwen-2.5-coder-32b": "qwen25-coder-32b-instruct"
} }
@classmethod
async def create_async_generator(
cls, model: str, messages: Messages,
api_key: str = None,
proxy: str = None,
cookies: dict = None,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
headers = {
"Origin": cls.url,
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36",
"Accept": "*/*",
"Accept-Language": "en-US,en;q=0.9",
"Referer": cls.url,
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"Priority": "u=1, i",
"Pragma": "no-cache",
"Cache-Control": "no-cache"
}
# Initialize cookies if not provided
if cookies is None:
cookies = {
"hf-chat": str(uuid.uuid4()) # Generate a session ID
}
async with ClientSession(headers=headers, cookies=cookies) as session:
# Step 1: Create a new conversation
data = {"model": model}
async with session.post(cls.conversation_url, json=data, proxy=proxy) as response:
await raise_for_status(response)
conversation_response = await response.json()
conversation_id = conversation_response["conversationId"]
# Update cookies with any new ones from the response
for cookie_name, cookie in response.cookies.items():
cookies[cookie_name] = cookie.value
# Step 2: Get data for this conversation to extract message ID
async with session.get(
f"{cls.conversation_url}/{conversation_id}/__data.json?x-sveltekit-invalidated=11",
proxy=proxy
) as response:
await raise_for_status(response)
response_text = await response.text()
# Update cookies again
for cookie_name, cookie in response.cookies.items():
cookies[cookie_name] = cookie.value
# Parse the JSON response to find the message ID
message_id = None
try:
# Try to parse each line as JSON
for line in response_text.splitlines():
if not line.strip():
continue
try:
data_json = json.loads(line)
if "type" in data_json and data_json["type"] == "data" and "nodes" in data_json:
for node in data_json["nodes"]:
if "type" in node and node["type"] == "data" and "data" in node:
# Look for system message ID
for item in node["data"]:
if isinstance(item, dict) and "id" in item and "from" in item and item.get("from") == "system":
message_id = item["id"]
break
# If we found the ID, break out of the loop
if message_id:
break
except json.JSONDecodeError:
continue
# If we still don't have a message ID, try to find any UUID in the response
if not message_id:
uuid_pattern = r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
uuids = re.findall(uuid_pattern, response_text)
if uuids:
message_id = uuids[0]
if not message_id:
raise ValueError("Could not find message ID in response")
except (IndexError, KeyError, ValueError) as e:
raise RuntimeError(f"Failed to parse conversation data: {str(e)}")
# Step 3: Send the user message
user_message = get_last_user_message(messages)
# Prepare form data exactly as in the curl example
form_data = FormData()
form_data.add_field(
"data",
json.dumps({
"inputs": user_message,
"id": message_id,
"is_retry": False,
"is_continue": False,
"web_search": False,
"tools": []
}),
content_type="application/json"
)
async with session.post(
f"{cls.conversation_url}/{conversation_id}",
data=form_data,
proxy=proxy
) as response:
await raise_for_status(response)
async for chunk in response.content:
if not chunk:
continue
chunk_str = chunk.decode('utf-8', errors='ignore')
try:
data = json.loads(chunk_str)
except json.JSONDecodeError:
continue
# Handling different types of responses
if data.get("type") == "stream" and "token" in data:
# Remove null characters from the token
token = data["token"].replace("\u0000", "")
if token:
yield token
elif data.get("type") == "title":
yield TitleGeneration(data.get("title", ""))
elif data.get("type") == "reasoning":
subtype = data.get("subtype")
token = data.get("token", "").replace("\u0000", "")
status = data.get("status", "")
if subtype == "stream" and token:
yield Reasoning(token=token)
elif subtype == "status" and status:
yield Reasoning(status=status)
elif data.get("type") == "finalAnswer":
yield FinishReason("stop")
break
elif data.get("type") == "status" and data.get("status") == "keepAlive":
# Just a keepalive, ignore
continue
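
Note: the rewritten provider emits typed events rather than bare strings — stream tokens, a generated conversation title, reasoning traces, and a finish marker. A consumption sketch, assuming the response classes exported from g4f.providers.response:

import asyncio
from g4f.Provider import LambdaChat
from g4f.providers.response import Reasoning, TitleGeneration, FinishReason

async def main():
    async for chunk in LambdaChat.create_async_generator(
        model="deepseek-r1",
        messages=[{"role": "user", "content": "Why is the sky blue?"}],
    ):
        if isinstance(chunk, FinishReason):
            break
        elif isinstance(chunk, (Reasoning, TitleGeneration)):
            print(f"\n[{type(chunk).__name__}] {chunk}")
        else:
            print(chunk, end="")

asyncio.run(main())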

View file

@@ -8,189 +8,271 @@ from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_connector from .helper import get_connector
from ..requests import raise_for_status from ..requests import raise_for_status
from ..errors import RateLimitError
models = { models = {
"claude-3-5-sonnet-20241022": { "claude-3-5-sonnet-20241022": {
"id": "claude-3-5-sonnet-20241022", "id": "claude-3-5-sonnet-20241022",
"name": "Claude-3.5-Sonnet-V2", "name": "claude-3-5-sonnet-20241022",
"model": "Claude", "model": "claude-3-5-sonnet-20241022",
"provider": "Anthropic", "provider": "Anthropic",
"maxLength": 800000, "maxLength": 0,
"tokenLimit": 200000, "tokenLimit": 0,
"context": "200K", "context": 0,
"success_rate": 100,
"tps": 25.366666666666667,
}, },
"claude-3-5-sonnet-20241022-t": { "claude-3-5-sonnet-20241022-t": {
"id": "claude-3-5-sonnet-20241022-t", "id": "claude-3-5-sonnet-20241022-t",
"name": "Claude-3.5-Sonnet-V2-T", "name": "claude-3-5-sonnet-20241022-t",
"model": "Claude", "model": "claude-3-5-sonnet-20241022-t",
"provider": "Anthropic", "provider": "Anthropic",
"maxLength": 800000, "maxLength": 0,
"tokenLimit": 200000, "tokenLimit": 0,
"context": "200K", "context": 0,
"success_rate": 100,
"tps": 39.820754716981135,
}, },
"claude-3-7-sonnet-20250219": { "claude-3-7-sonnet-20250219": {
"id": "claude-3-7-sonnet-20250219", "id": "claude-3-7-sonnet-20250219",
"name": "Claude-3.7-Sonnet", "name": "claude-3-7-sonnet-20250219",
"model": "Claude", "model": "claude-3-7-sonnet-20250219",
"provider": "Anthropic", "provider": "Anthropic",
"maxLength": 800000, "maxLength": 0,
"tokenLimit": 200000, "tokenLimit": 0,
"context": "200K", "context": 0,
"success_rate": 100,
"tps": 47.02970297029703,
}, },
"claude-3-7-sonnet-20250219-t": { "claude-3-7-sonnet-20250219-t": {
"id": "claude-3-7-sonnet-20250219-t", "id": "claude-3-7-sonnet-20250219-t",
"name": "Claude-3.7-Sonnet-T", "name": "claude-3-7-sonnet-20250219-t",
"model": "Claude", "model": "claude-3-7-sonnet-20250219-t",
"provider": "Anthropic", "provider": "Anthropic",
"maxLength": 800000, "maxLength": 0,
"tokenLimit": 200000, "tokenLimit": 0,
"context": "200K", "context": 0,
}, "success_rate": 100,
"claude-3-7-sonnet-20250219-thinking": { "tps": 39.04289693593315,
"id": "claude-3-7-sonnet-20250219-thinking",
"name": "Claude-3.7-Sonnet-Thinking",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
},
"claude-3-opus-20240229": {
"id": "claude-3-opus-20240229",
"name": "Claude-3-Opus",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
},
"claude-3-sonnet-20240229": {
"id": "claude-3-sonnet-20240229",
"name": "Claude-3-Sonnet",
"model": "Claude",
"provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
},
"deepseek-r1": {
"id": "deepseek-r1",
"name": "DeepSeek-R1",
"model": "DeepSeek-R1",
"provider": "DeepSeek",
"maxLength": 400000,
"tokenLimit": 100000,
"context": "128K",
},
"deepseek-r1-distill-llama-70b": {
"id": "deepseek-r1-distill-llama-70b",
"name": "DeepSeek-R1-70B",
"model": "DeepSeek-R1-70B",
"provider": "DeepSeek",
"maxLength": 400000,
"tokenLimit": 100000,
"context": "128K",
}, },
"deepseek-v3": { "deepseek-v3": {
"id": "deepseek-v3", "id": "deepseek-v3",
"name": "DeepSeek-V3", "name": "deepseek-v3",
"model": "DeepSeek-V3", "model": "deepseek-v3",
"provider": "DeepSeek", "provider": "DeepSeek",
"maxLength": 400000, "maxLength": 0,
"tokenLimit": 100000, "tokenLimit": 0,
"context": "128K", "context": 0,
"success_rate": 100,
"tps": 40.484657419083646,
},
"gemini-1.0-pro-latest-123": {
"id": "gemini-1.0-pro-latest-123",
"name": "gemini-1.0-pro-latest-123",
"model": "gemini-1.0-pro-latest-123",
"provider": "Google",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 100,
"tps": 10,
}, },
"gemini-2.0-flash": { "gemini-2.0-flash": {
"id": "gemini-2.0-flash", "id": "gemini-2.0-flash",
"name": "Gemini-2.0-Flash", "name": "gemini-2.0-flash",
"model": "Gemini", "model": "gemini-2.0-flash",
"provider": "Google", "provider": "Google",
"maxLength": 4000000, "maxLength": 0,
"tokenLimit": 1000000, "tokenLimit": 0,
"context": "1024K", "context": 0,
"success_rate": 100,
"tps": 216.44162436548223,
},
"gemini-2.0-flash-exp": {
"id": "gemini-2.0-flash-exp",
"name": "gemini-2.0-flash-exp",
"model": "gemini-2.0-flash-exp",
"provider": "Google",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 0,
"tps": 0,
}, },
"gemini-2.0-flash-thinking-exp": { "gemini-2.0-flash-thinking-exp": {
"id": "gemini-2.0-flash-thinking-exp", "id": "gemini-2.0-flash-thinking-exp",
"name": "Gemini-2.0-Flash-Thinking-Exp", "name": "gemini-2.0-flash-thinking-exp",
"model": "Gemini", "model": "gemini-2.0-flash-thinking-exp",
"provider": "Google", "provider": "Google",
"maxLength": 4000000, "maxLength": 0,
"tokenLimit": 1000000, "tokenLimit": 0,
"context": "1024K", "context": 0,
"success_rate": 0,
"tps": 0,
}, },
"gemini-2.0-pro-exp": { "gemini-2.5-flash-preview-04-17": {
"id": "gemini-2.0-pro-exp", "id": "gemini-2.5-flash-preview-04-17",
"name": "Gemini-2.0-Pro-Exp", "name": "gemini-2.5-flash-preview-04-17",
"model": "Gemini", "model": "gemini-2.5-flash-preview-04-17",
"provider": "Google", "provider": "Google",
"maxLength": 4000000, "maxLength": 0,
"tokenLimit": 1000000, "tokenLimit": 0,
"context": "1024K", "context": 0,
"success_rate": 100,
"tps": 189.84010840108402,
}, },
"gpt-4o-2024-08-06": { "gemini-2.5-pro-official": {
"id": "gpt-4o-2024-08-06", "id": "gemini-2.5-pro-official",
"name": "GPT-4o", "name": "gemini-2.5-pro-official",
"model": "ChatGPT", "model": "gemini-2.5-pro-official",
"provider": "Google",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 100,
"tps": 91.00613496932516,
},
"gemini-2.5-pro-preview-03-25": {
"id": "gemini-2.5-pro-preview-03-25",
"name": "gemini-2.5-pro-preview-03-25",
"model": "gemini-2.5-pro-preview-03-25",
"provider": "Google",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 99.05660377358491,
"tps": 45.050511247443765,
},
"gemini-2.5-pro-preview-05-06": {
"id": "gemini-2.5-pro-preview-05-06",
"name": "gemini-2.5-pro-preview-05-06",
"model": "gemini-2.5-pro-preview-05-06",
"provider": "Google",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 100,
"tps": 99.29617834394904,
},
"gpt-4-turbo-2024-04-09": {
"id": "gpt-4-turbo-2024-04-09",
"name": "gpt-4-turbo-2024-04-09",
"model": "gpt-4-turbo-2024-04-09",
"provider": "OpenAI", "provider": "OpenAI",
"maxLength": 260000, "maxLength": 0,
"tokenLimit": 126000, "tokenLimit": 0,
"context": "128K", "context": 0,
"success_rate": 100,
"tps": 1,
},
"gpt-4.1": {
"id": "gpt-4.1",
"name": "gpt-4.1",
"model": "gpt-4.1",
"provider": "OpenAI",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 42.857142857142854,
"tps": 19.58032786885246,
},
"gpt-4.1-mini": {
"id": "gpt-4.1-mini",
"name": "gpt-4.1-mini",
"model": "gpt-4.1-mini",
"provider": "OpenAI",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 68.75,
"tps": 12.677576601671309,
},
"gpt-4.1-mini-2025-04-14": {
"id": "gpt-4.1-mini-2025-04-14",
"name": "gpt-4.1-mini-2025-04-14",
"model": "gpt-4.1-mini-2025-04-14",
"provider": "OpenAI",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 94.23076923076923,
"tps": 8.297687861271676,
},
"gpt-4o-2024-11-20": {
"id": "gpt-4o-2024-11-20",
"name": "gpt-4o-2024-11-20",
"model": "gpt-4o-2024-11-20",
"provider": "OpenAI",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 100,
"tps": 73.3955223880597,
}, },
"gpt-4o-mini-2024-07-18": { "gpt-4o-mini-2024-07-18": {
"id": "gpt-4o-mini-2024-07-18", "id": "gpt-4o-mini-2024-07-18",
"name": "GPT-4o-Mini", "name": "gpt-4o-mini-2024-07-18",
"model": "ChatGPT", "model": "gpt-4o-mini-2024-07-18",
"provider": "OpenAI", "provider": "OpenAI",
"maxLength": 260000, "maxLength": 0,
"tokenLimit": 126000, "tokenLimit": 0,
"context": "128K", "context": 0,
}, "success_rate": 100,
"gpt-4o-mini-free": { "tps": 26.874455100261553,
"id": "gpt-4o-mini-free",
"name": "GPT-4o-Mini-Free",
"model": "ChatGPT",
"provider": "OpenAI",
"maxLength": 31200,
"tokenLimit": 7800,
"context": "8K",
}, },
"grok-3": { "grok-3": {
"id": "grok-3", "id": "grok-3",
"name": "Grok-3", "name": "grok-3",
"model": "Grok", "model": "grok-3",
"provider": "x.ai", "provider": "xAI",
"maxLength": 800000, "maxLength": 0,
"tokenLimit": 200000, "tokenLimit": 0,
"context": "200K", "context": 0,
"success_rate": 100,
"tps": 51.110652663165794,
}, },
"grok-3-r1": { "grok-3-reason": {
"id": "grok-3-r1", "id": "grok-3-reason",
"name": "Grok-3-Thinking", "name": "grok-3-reason",
"model": "Grok", "model": "grok-3-reason",
"provider": "x.ai", "provider": "xAI",
"maxLength": 800000, "maxLength": 0,
"tokenLimit": 200000, "tokenLimit": 0,
"context": "200K", "context": 0,
"success_rate": 100,
"tps": 62.81976744186046,
}, },
"o3-mini": { "o3-mini-2025-01-31": {
"id": "o3-mini", "id": "o3-mini-2025-01-31",
"name": "o3-mini", "name": "o3-mini-2025-01-31",
"model": "o3", "model": "o3-mini-2025-01-31",
"provider": "OpenAI", "provider": "Unknown",
"maxLength": 400000, "maxLength": 0,
"tokenLimit": 100000, "tokenLimit": 0,
"context": "128K", "context": 0,
"success_rate": 100,
"tps": 125.31410256410257,
},
"qwen3-235b-a22b": {
"id": "qwen3-235b-a22b",
"name": "qwen3-235b-a22b",
"model": "qwen3-235b-a22b",
"provider": "Alibaba",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 100,
"tps": 25.846153846153847,
}, },
} }
class Liaobots(AsyncGeneratorProvider, ProviderModelMixin): class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://liaobots.site" url = "https://liaobots.work"
working = True working = True
supports_message_history = True supports_message_history = True
supports_system_message = True supports_system_message = True
default_model = "gpt-4o-2024-08-06" default_model = "grok-3"
models = list(models.keys()) models = list(models.keys())
model_aliases = { model_aliases = {
# Anthropic # Anthropic
@@ -198,25 +280,33 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
"claude-3.5-sonnet": "claude-3-5-sonnet-20241022-t", "claude-3.5-sonnet": "claude-3-5-sonnet-20241022-t",
"claude-3.7-sonnet": "claude-3-7-sonnet-20250219", "claude-3.7-sonnet": "claude-3-7-sonnet-20250219",
"claude-3.7-sonnet": "claude-3-7-sonnet-20250219-t", "claude-3.7-sonnet": "claude-3-7-sonnet-20250219-t",
"claude-3.7-sonnet-thinking": "claude-3-7-sonnet-20250219-thinking",
"claude-3-opus": "claude-3-opus-20240229",
"claude-3-sonnet": "claude-3-sonnet-20240229",
# DeepSeek # DeepSeek
"deepseek-r1": "deepseek-r1-distill-llama-70b", #"deepseek-v3": "deepseek-v3",
# Google # Google
"gemini-1.0-pro": "gemini-1.0-pro-latest-123",
"gemini-2.0-flash": "gemini-2.0-flash-exp",
"gemini-2.0-flash-thinking": "gemini-2.0-flash-thinking-exp", "gemini-2.0-flash-thinking": "gemini-2.0-flash-thinking-exp",
"gemini-2.0-pro": "gemini-2.0-pro-exp", "gemini-2.5-flash": "gemini-2.5-flash-preview-04-17",
"gemini-2.5-pro": "gemini-2.5-pro-official",
"gemini-2.5-pro": "gemini-2.5-pro-preview-03-25",
"gemini-2.5-pro": "gemini-2.5-pro-preview-05-06",
# OpenAI # OpenAI
"gpt-4": default_model, "gpt-4-turbo": "gpt-4-turbo-2024-04-09",
"gpt-4o": default_model, "gpt-4.1-mini": "gpt-4.1-mini-2025-04-14",
"gpt-4": "gpt-4o-2024-11-20",
"gpt-4o": "gpt-4o-2024-11-20",
"gpt-4o-mini": "gpt-4o-mini-2024-07-18", "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
"gpt-4o-mini": "gpt-4o-mini-free",
# xAI
"grok-3-reason": "grok-3-reason",
"o3-mini": "o3-mini-2025-01-31",
"qwen-3-235b": "qwen3-235b-a22b",
} }
_auth_code = "" _auth_code = None
_cookie_jar = None _cookie_jar = None
@classmethod @classmethod
@@ -238,92 +328,213 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
model = cls.get_model(model) model = cls.get_model(model)
headers = { headers = {
"referer": "https://liaobots.work/", "accept": "*/*",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/json",
"dnt": "1",
"origin": "https://liaobots.work", "origin": "https://liaobots.work",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36", "priority": "u=1, i",
"referer": "https://liaobots.work/en",
"sec-ch-ua": "\"Chromium\";v=\"135\", \"Not-A.Brand\";v=\"8\"",
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": "\"Linux\"",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36"
} }
async with ClientSession( async with ClientSession(
headers=headers, headers=headers,
cookie_jar=cls._cookie_jar, cookie_jar=cls._cookie_jar,
connector=get_connector(connector, proxy, True) connector=get_connector(connector, proxy, True)
) as session: ) as session:
# First, get a valid auth code
await cls.get_auth_code(session)
# Create conversation ID
conversation_id = str(uuid.uuid4())
# Prepare request data
data = { data = {
"conversationId": str(uuid.uuid4()), "conversationId": conversation_id,
"model": models[model], "models": [{
"modelId": model,
"provider": models[model]["provider"]
}],
"search": "false",
"messages": messages, "messages": messages,
"key": "", "key": "",
"prompt": kwargs.get("system_message", "You are a helpful assistant."), "prompt": kwargs.get("system_message", "你是 {{model}},一个由 {{provider}} 训练的大型语言模型,请仔细遵循用户的指示。")
} }
if not cls._auth_code:
async with session.post( # Try to make the chat request
"https://liaobots.work/recaptcha/api/login",
data={"token": "abcdefghijklmnopqrst"},
verify_ssl=False
) as response:
await raise_for_status(response)
try: try:
# Make the chat request with the current auth code
async with session.post( async with session.post(
"https://liaobots.work/api/user", f"{cls.url}/api/chat",
json={"authcode": cls._auth_code},
verify_ssl=False
) as response:
await raise_for_status(response)
cls._auth_code = (await response.json(content_type=None))["authCode"]
if not cls._auth_code:
raise RuntimeError("Empty auth code")
cls._cookie_jar = session.cookie_jar
async with session.post(
"https://liaobots.work/api/chat",
json=data, json=data,
headers={"x-auth-code": cls._auth_code}, headers={"x-auth-code": cls._auth_code},
verify_ssl=False ssl=False
) as response: ) as response:
await raise_for_status(response) # Check if we got a streaming response
async for line in response.content: content_type = response.headers.get("Content-Type", "")
if line.startswith(b"data: "): if "text/event-stream" in content_type:
yield json.loads(line[6:]).get("content") async for line in response.content:
except: if line.startswith(b"data: "):
async with session.post( try:
"https://liaobots.work/api/user", response_data = json.loads(line[6:])
json={"authcode": "jGDRFOqHcZKAo"},
verify_ssl=False # Check for error response
) as response: if response_data.get("error") is True:
await raise_for_status(response) # Raise RateLimitError for payment required or other errors
cls._auth_code = (await response.json(content_type=None))["authCode"] if "402" in str(response_data.get("res_status", "")):
if not cls._auth_code: raise RateLimitError("This model requires payment or credits")
raise RuntimeError("Empty auth code") else:
cls._cookie_jar = session.cookie_jar error_msg = response_data.get('message', 'Unknown error')
async with session.post( raise RateLimitError(f"Error: {error_msg}")
"https://liaobots.work/api/chat",
json=data, # Process normal response
headers={"x-auth-code": cls._auth_code}, if response_data.get("role") == "assistant" and "content" in response_data:
verify_ssl=False content = response_data.get("content")
) as response: yield content
await raise_for_status(response) except json.JSONDecodeError:
async for line in response.content: continue
if line.startswith(b"data: "): else:
yield json.loads(line[6:]).get("content") # Not a streaming response, might be an error or HTML
response_text = await response.text()
# If we got HTML, we need to bypass CAPTCHA
if response_text.startswith("<!DOCTYPE html>"):
await cls.bypass_captcha(session)
# Get a fresh auth code
await cls.get_auth_code(session)
# Try the request again
async with session.post(
f"{cls.url}/api/chat",
json=data,
headers={"x-auth-code": cls._auth_code},
ssl=False
) as response2:
# Check if we got a streaming response
content_type = response2.headers.get("Content-Type", "")
if "text/event-stream" in content_type:
async for line in response2.content:
if line.startswith(b"data: "):
try:
response_data = json.loads(line[6:])
# Check for error response
if response_data.get("error") is True:
# Raise RateLimitError for payment required or other errors
if "402" in str(response_data.get("res_status", "")):
raise RateLimitError("This model requires payment or credits")
else:
error_msg = response_data.get('message', 'Unknown error')
raise RateLimitError(f"Error: {error_msg}")
# Process normal response
if response_data.get("role") == "assistant" and "content" in response_data:
content = response_data.get("content")
yield content
except json.JSONDecodeError:
continue
else:
raise RateLimitError("Failed to get streaming response")
else:
raise RateLimitError("Failed to connect to the service")
except Exception as e:
# If it's already a RateLimitError, re-raise it
if isinstance(e, RateLimitError):
raise
# Otherwise, wrap it in a RateLimitError
raise RateLimitError(f"Error processing request: {str(e)}")
@classmethod @classmethod
async def initialize_auth_code(cls, session: ClientSession) -> None: async def bypass_captcha(cls, session: ClientSession) -> None:
""" """
Initialize the auth code by making the necessary login requests. Bypass the CAPTCHA verification by directly making the recaptcha API request.
""" """
async with session.post( try:
"https://liaobots.work/api/user", # First, try the direct recaptcha API request
json={"authcode": "pTIQr4FTnVRfr"}, async with session.post(
verify_ssl=False f"{cls.url}/recaptcha/api/login",
) as response: json={"token": "abcdefghijklmnopqrst"},
await raise_for_status(response) ssl=False
cls._auth_code = (await response.json(content_type=None))["authCode"] ) as response:
if not cls._auth_code: if response.status == 200:
raise RuntimeError("Empty auth code") try:
cls._cookie_jar = session.cookie_jar response_text = await response.text()
# Try to parse as JSON
try:
response_data = json.loads(response_text)
# Check if we got a successful response
if response_data.get("code") == 200:
cls._cookie_jar = session.cookie_jar
except json.JSONDecodeError:
pass
except Exception:
pass
except Exception:
pass
@classmethod @classmethod
async def ensure_auth_code(cls, session: ClientSession) -> None: async def get_auth_code(cls, session: ClientSession) -> None:
""" """
Ensure the auth code is initialized, and if not, perform the initialization. Get a valid auth code by sending a request with an empty authcode.
""" """
if not cls._auth_code: try:
await cls.initialize_auth_code(session) # Send request with empty authcode to get a new one
auth_request_data = {
"authcode": "",
"recommendUrl": "https://liaobots.work/zh"
}
async with session.post(
f"{cls.url}/api/user",
json=auth_request_data,
ssl=False
) as response:
if response.status == 200:
response_text = await response.text()
try:
response_data = json.loads(response_text)
if "authCode" in response_data:
cls._auth_code = response_data["authCode"]
cls._cookie_jar = session.cookie_jar
return
except json.JSONDecodeError:
# If we got HTML, it might be the CAPTCHA page
if response_text.startswith("<!DOCTYPE html>"):
await cls.bypass_captcha(session)
# Try again after bypassing CAPTCHA
async with session.post(
f"{cls.url}/api/user",
json=auth_request_data,
ssl=False
) as response2:
if response2.status == 200:
response_text2 = await response2.text()
try:
response_data2 = json.loads(response_text2)
if "authCode" in response_data2:
cls._auth_code = response_data2["authCode"]
cls._cookie_jar = session.cookie_jar
return
except json.JSONDecodeError:
pass
except Exception:
pass
# If we're here, we couldn't get a valid auth code
# Set a default one as a fallback
cls._auth_code = "DvS3A5GTE9f0D" # Fallback to one of the provided auth codes

View file

@@ -1,24 +0,0 @@
from __future__ import annotations
from .template import OpenaiTemplate
class OIVSCode(OpenaiTemplate):
label = "OI VSCode Server"
url = "https://oi-vscode-server.onrender.com"
api_base = "https://oi-vscode-server-2.onrender.com/v1"
working = True
needs_auth = False
supports_stream = True
supports_system_message = True
supports_message_history = True
default_model = "gpt-4o-mini-2024-07-18"
default_vision_model = default_model
vision_models = [default_model, "gpt-4o-mini"]
models = vision_models + ["deepseek-ai/DeepSeek-V3"]
model_aliases = {
"gpt-4o-mini": "gpt-4o-mini-2024-07-18",
"deepseek-v3": "deepseek-ai/DeepSeek-V3"
}

View file

@@ -81,15 +81,31 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
await ws.send_str("3") await ws.send_str("3")
continue continue
try: try:
if last_message == 0 and model == cls.default_model: if not message.startswith("42"):
yield "<think>" continue
data = json.loads(message[2:])[1]
yield data["output"][last_message:] parsed_data = json.loads(message[2:])
last_message = len(data["output"]) message_type = parsed_data[0]
if data["final"]: data = parsed_data[1]
if data["citations"]:
yield Sources(data["citations"]) # Handle error responses
yield FinishReason("stop") if message_type.endswith("_query_progress") and data.get("status") == "failed":
break error_message = data.get("text", "Unknown API error")
raise ResponseError(f"API Error: {error_message}")
# Handle normal responses
if "output" in data:
if last_message == 0 and model == cls.default_model:
yield "<think>"
yield data["output"][last_message:]
last_message = len(data["output"])
if data["final"]:
if data["citations"]:
yield Sources(data["citations"])
yield FinishReason("stop")
break
except ResponseError as e:
# Re-raise ResponseError directly
raise e
except Exception as e: except Exception as e:
raise ResponseError(f"Message: {message}") from e raise ResponseError(f"Error processing message: {message}") from e

View file

@@ -51,38 +51,70 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
image_models = [default_image_model] image_models = [default_image_model]
audio_models = {default_audio_model: []} audio_models = {default_audio_model: []}
extra_image_models = ["flux-pro", "flux-dev", "flux-schnell", "midjourney", "dall-e-3", "turbo"] extra_image_models = ["flux-pro", "flux-dev", "flux-schnell", "midjourney", "dall-e-3", "turbo"]
vision_models = [default_vision_model, "gpt-4o-mini", "openai", "openai-large", "searchgpt"] vision_models = [default_vision_model, "gpt-4o-mini", "openai", "openai-large", "openai-reasoning", "searchgpt"]
_models_loaded = False _models_loaded = False
# https://github.com/pollinations/pollinations/blob/master/text.pollinations.ai/generateTextPortkey.js#L15 # https://github.com/pollinations/pollinations/blob/master/text.pollinations.ai/generateTextPortkey.js#L15
model_aliases = { model_aliases = {
### Text Models ### ### Text Models ###
"gpt-4o-mini": "openai", "gpt-4o-mini": "openai",
"gpt-4.1-nano": "openai-fast",
"gpt-4": "openai-large", "gpt-4": "openai-large",
"gpt-4o": "openai-large", "gpt-4o": "openai-large",
"gpt-4.1": "openai", "gpt-4.1": "openai-large",
"gpt-4.1-nano": "openai",
"gpt-4.1-mini": "openai-large",
"gpt-4.1-xlarge": "openai-xlarge",
"o4-mini": "openai-reasoning", "o4-mini": "openai-reasoning",
"gpt-4.1-mini": "openai",
"command-r-plus-08-2024": "command-r",
"gemini-2.5-flash": "gemini",
"gemini-2.0-flash-thinking": "gemini-thinking",
"qwen-2.5-coder-32b": "qwen-coder", "qwen-2.5-coder-32b": "qwen-coder",
"llama-3.3-70b": "llama", "llama-3.3-70b": "llama",
"llama-4-scout": "llamascout", "llama-4-scout": "llamascout",
"mistral-nemo": "mistral", "llama-4-scout-17b": "llamascout",
"llama-3.1-8b": "llamalight", "mistral-small-3.1-24b": "mistral",
"llama-3.3-70b": "llama-scaleway",
"phi-4": "phi",
"deepseek-r1": "deepseek-reasoning-large", "deepseek-r1": "deepseek-reasoning-large",
"deepseek-r1-distill-llama-70b": "deepseek-reasoning-large", "deepseek-r1-distill-llama-70b": "deepseek-reasoning-large",
"deepseek-r1-distill-llama-70b": "deepseek-r1-llama",
#"mistral-small-3.1-24b": "unity", # Personas
#"mirexa": "mirexa", # Personas
#"midijourney": "midijourney", # Personas
#"rtist": "rtist", # Personas
#"searchgpt": "searchgpt",
#"evil": "evil", # Personas
"deepseek-r1-distill-qwen-32b": "deepseek-reasoning", "deepseek-r1-distill-qwen-32b": "deepseek-reasoning",
"phi-4": "phi",
#"pixtral-12b": "pixtral",
#"hormoz-8b": "hormoz",
"qwq-32b": "qwen-qwq",
#"hypnosis-tracy-7b": "hypnosis-tracy", # Personas
#"mistral-?": "sur", # Personas
"deepseek-v3": "deepseek", "deepseek-v3": "deepseek",
"llama-3.2-11b": "llama-vision", "deepseek-v3-0324": "deepseek",
#"bidara": "bidara", # Personas
### Audio Models ###
"gpt-4o-audio": "openai-audio", "gpt-4o-audio": "openai-audio",
"gpt-4o-audio-preview": "openai-audio",
### Image Models ### ### Image Models ###
"sdxl-turbo": "turbo", "sdxl-turbo": "turbo",
} }
@classmethod
def get_model(cls, model: str) -> str:
"""Get the internal model name from the user-provided model name."""
if not model:
return cls.default_model
# Check if the model exists directly in our model lists
if model in cls.text_models or model in cls.image_models or model in cls.audio_models:
return model
# Check if there's an alias for this model
if model in cls.model_aliases:
return cls.model_aliases[model]
# If no match is found, raise an error
raise ModelNotFoundError(f"Model {model} not found")
@classmethod @classmethod
def get_models(cls, **kwargs): def get_models(cls, **kwargs):
if not cls._models_loaded: if not cls._models_loaded:
@@ -160,6 +192,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
stream: bool = True, stream: bool = True,
proxy: str = None, proxy: str = None,
cache: bool = False, cache: bool = False,
referrer: str = "https://gpt4free.github.io/",
# Image generation parameters # Image generation parameters
prompt: str = None, prompt: str = None,
aspect_ratio: str = "1:1", aspect_ratio: str = "1:1",
@@ -210,7 +243,8 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
private=private, private=private,
enhance=enhance, enhance=enhance,
safe=safe, safe=safe,
n=n n=n,
referrer=referrer
): ):
yield chunk yield chunk
else: else:
@@ -238,6 +272,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
cache=cache, cache=cache,
stream=stream, stream=stream,
extra_parameters=extra_parameters, extra_parameters=extra_parameters,
referrer=referrer,
**kwargs **kwargs
): ):
yield result yield result
@@ -257,7 +292,8 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
private: bool, private: bool,
enhance: bool, enhance: bool,
safe: bool, safe: bool,
n: int n: int,
referrer: str
) -> AsyncResult: ) -> AsyncResult:
params = use_aspect_ratio({ params = use_aspect_ratio({
"width": width, "width": width,
@@ -269,7 +305,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
"safe": str(safe).lower() "safe": str(safe).lower()
}, aspect_ratio) }, aspect_ratio)
query = "&".join(f"{k}={quote_plus(str(v))}" for k, v in params.items() if v is not None) query = "&".join(f"{k}={quote_plus(str(v))}" for k, v in params.items() if v is not None)
prompt = quote_plus(prompt)[:2048-256-len(query)] prompt = quote_plus(prompt)[:2048-len(cls.image_api_endpoint)-len(query)-8]
url = f"{cls.image_api_endpoint}prompt/{prompt}?{query}" url = f"{cls.image_api_endpoint}prompt/{prompt}?{query}"
def get_image_url(i: int, seed: Optional[int] = None): def get_image_url(i: int, seed: Optional[int] = None):
if i == 1: if i == 1:
@@ -280,7 +316,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
return f"{url}&seed={seed}" if seed else url return f"{url}&seed={seed}" if seed else url
async with ClientSession(headers=DEFAULT_HEADERS, connector=get_connector(proxy=proxy)) as session: async with ClientSession(headers=DEFAULT_HEADERS, connector=get_connector(proxy=proxy)) as session:
async def get_image(i: int, seed: Optional[int] = None): async def get_image(i: int, seed: Optional[int] = None):
async with session.get(get_image_url(i, seed), allow_redirects=False) as response: async with session.get(get_image_url(i, seed), allow_redirects=False, headers={"referer": referrer}) as response:
try: try:
await raise_for_status(response) await raise_for_status(response)
except Exception as e: except Exception as e:
@@ -307,13 +343,11 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
cache: bool, cache: bool,
stream: bool, stream: bool,
extra_parameters: list[str], extra_parameters: list[str],
referrer: str,
**kwargs **kwargs
) -> AsyncResult: ) -> AsyncResult:
if not cache and seed is None: if not cache and seed is None:
seed = random.randint(0, 2**32) seed = random.randint(0, 2**32)
json_mode = False
if response_format and response_format.get("type") == "json_object":
json_mode = True
async with ClientSession(headers=DEFAULT_HEADERS, connector=get_connector(proxy=proxy)) as session: async with ClientSession(headers=DEFAULT_HEADERS, connector=get_connector(proxy=proxy)) as session:
if model in cls.audio_models: if model in cls.audio_models:
@ -331,13 +365,13 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
"presence_penalty": presence_penalty, "presence_penalty": presence_penalty,
"top_p": top_p, "top_p": top_p,
"frequency_penalty": frequency_penalty, "frequency_penalty": frequency_penalty,
"jsonMode": json_mode, "response_format": response_format,
"stream": stream, "stream": stream,
"seed": seed, "seed": seed,
"cache": cache, "cache": cache,
**extra_parameters **extra_parameters
}) })
async with session.post(url, json=data) as response: async with session.post(url, json=data, headers={"referer": referrer}) as response:
await raise_for_status(response) await raise_for_status(response)
if response.headers["content-type"].startswith("text/plain"): if response.headers["content-type"].startswith("text/plain"):
yield await response.text() yield await response.text()
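
Note: the new truncation formula keeps the whole GET URL inside a 2048-character budget — the endpoint length, the query string, and 8 characters of slack (the "prompt/" path segment plus the "?") are subtracted before the URL-encoded prompt is clipped. A worked example; the endpoint value is an assumption for illustration:

from urllib.parse import quote_plus

image_api_endpoint = "https://image.pollinations.ai/"  # assumed for illustration
query = "width=1024&height=1024&model=flux&nologo=true"
budget = 2048 - len(image_api_endpoint) - len(query) - 8  # 8 = len("prompt/") + len("?")
prompt = quote_plus("a photo of a red fox in the snow")[:budget]
url = f"{image_api_endpoint}prompt/{prompt}?{query}"
assert len(url) <= 2048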

View file

@@ -3,6 +3,7 @@ from __future__ import annotations
import requests import requests
from .template import OpenaiTemplate from .template import OpenaiTemplate
from .. import debug
class TypeGPT(OpenaiTemplate): class TypeGPT(OpenaiTemplate):
label = "TypeGpt" label = "TypeGpt"
@@ -36,6 +37,10 @@ class TypeGPT(OpenaiTemplate):
@classmethod @classmethod
def get_models(cls, **kwargs): def get_models(cls, **kwargs):
if not cls.models: if not cls.models:
cls.models = requests.get(f"{cls.url}/api/config").json()["customModels"].split(",") try:
cls.models = [model.split("@")[0].strip("+") for model in cls.models if not model.startswith("-") and model not in cls.image_models] cls.models = requests.get(f"{cls.url}/api/config").json()["customModels"].split(",")
cls.models = [model.split("@")[0].strip("+") for model in cls.models if not model.startswith("-") and model not in cls.image_models]
except Exception as e:
cls.models = cls.fallback_models
debug.log(f"Error fetching models: {e}")
return cls.models return cls.models
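
Note: the guarded fetch makes the provider usable when the remote config endpoint is down — the model list is still fetched and filtered once, but any failure now logs and falls back to a static list instead of raising. The same shape in isolation (the fallback list is a placeholder):

import requests

FALLBACK_MODELS = ["gpt-4o-mini"]  # placeholder fallback

def fetch_models(base_url: str) -> list[str]:
    # Fetch the comma-separated custom model list once; on any error, fall back.
    try:
        raw = requests.get(f"{base_url}/api/config", timeout=10).json()["customModels"].split(",")
        return [m.split("@")[0].strip("+") for m in raw if not m.startswith("-")]
    except Exception:
        return FALLBACK_MODELS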

View file

@@ -34,50 +34,34 @@ try:
except ImportError as e: except ImportError as e:
debug.error("Audio providers not loaded:", e) debug.error("Audio providers not loaded:", e)
try: from .ARTA import ARTA
from .AllenAI import AllenAI from .Blackbox import Blackbox
from .ARTA import ARTA from .Chatai import Chatai
from .Blackbox import Blackbox from .ChatGLM import ChatGLM
from .Chatai import Chatai from .ChatGpt import ChatGpt
from .ChatGLM import ChatGLM from .Cloudflare import Cloudflare
from .ChatGpt import ChatGpt from .Copilot import Copilot
from .ChatGptEs import ChatGptEs from .DDG import DDG
from .Cloudflare import Cloudflare from .DeepInfraChat import DeepInfraChat
from .Copilot import Copilot from .DuckDuckGo import DuckDuckGo
from .DDG import DDG from .Dynaspark import Dynaspark
from .DeepInfraChat import DeepInfraChat from .Free2GPT import Free2GPT
from .DuckDuckGo import DuckDuckGo from .FreeGpt import FreeGpt
from .Dynaspark import Dynaspark from .GizAI import GizAI
except ImportError as e: from .ImageLabs import ImageLabs
debug.error("Providers not loaded (A-D):", e) from .LambdaChat import LambdaChat
try: from .Liaobots import Liaobots
from .Free2GPT import Free2GPT from .LMArenaProvider import LMArenaProvider
from .FreeGpt import FreeGpt from .PerplexityLabs import PerplexityLabs
from .FreeRouter import FreeRouter from .Pi import Pi
from .GizAI import GizAI from .Pizzagpt import Pizzagpt
from .Glider import Glider from .PollinationsAI import PollinationsAI
from .Goabror import Goabror from .PollinationsImage import PollinationsImage
from .ImageLabs import ImageLabs from .TeachAnything import TeachAnything
from .Jmuz import Jmuz from .TypeGPT import TypeGPT
from .LambdaChat import LambdaChat from .You import You
from .Liaobots import Liaobots from .Websim import Websim
from .LMArenaProvider import LMArenaProvider from .Yqcloud import Yqcloud
from .OIVSCode import OIVSCode
except ImportError as e:
debug.error("Providers not loaded (F-L):", e)
try:
from .PerplexityLabs import PerplexityLabs
from .Pi import Pi
from .Pizzagpt import Pizzagpt
from .PollinationsAI import PollinationsAI
from .PollinationsImage import PollinationsImage
from .TeachAnything import TeachAnything
from .TypeGPT import TypeGPT
from .You import You
from .Websim import Websim
from .Yqcloud import Yqcloud
except ImportError as e:
debug.error("Providers not loaded (M-Z):", e)
import sys import sys

View file

@@ -1,75 +0,0 @@
from __future__ import annotations
from aiohttp import ClientSession
import json
from ...typing import AsyncResult, Messages
from ...providers.response import ImageResponse
from ...errors import ResponseError
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_image_prompt
from .raise_for_status import raise_for_status
class BlackForestLabs_Flux1Schnell(AsyncGeneratorProvider, ProviderModelMixin):
label = "BlackForestLabs Flux-1-Schnell"
url = "https://black-forest-labs-flux-1-schnell.hf.space"
api_endpoint = "https://black-forest-labs-flux-1-schnell.hf.space/call/infer"
working = True
default_model = "black-forest-labs-flux-1-schnell"
default_image_model = default_model
model_aliases = {"flux-schnell": default_image_model, "flux": default_image_model}
image_models = list(model_aliases.keys())
models = image_models
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
prompt: str = None,
width: int = 768,
height: int = 768,
num_inference_steps: int = 2,
seed: int = 0,
randomize_seed: bool = True,
**kwargs
) -> AsyncResult:
width = max(32, width - (width % 8))
height = max(32, height - (height % 8))
prompt = format_image_prompt(messages, prompt)
payload = {
"data": [
prompt,
seed,
randomize_seed,
width,
height,
num_inference_steps
]
}
async with ClientSession() as session:
async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
await raise_for_status(response)
response_data = await response.json()
event_id = response_data['event_id']
while True:
async with session.get(f"{cls.api_endpoint}/{event_id}", proxy=proxy) as status_response:
await raise_for_status(status_response)
while not status_response.content.at_eof():
event = await status_response.content.readuntil(b'\n\n')
if event.startswith(b'event:'):
event_parts = event.split(b'\ndata: ')
if len(event_parts) < 2:
continue
event_type = event_parts[0].split(b': ')[1]
data = event_parts[1]
if event_type == b'error':
raise ResponseError(f"Error generating image: {data.decode(errors='ignore')}")
elif event_type == b'complete':
json_data = json.loads(data)
image_url = json_data[0]['url']
yield ImageResponse(images=[image_url], alt=prompt)
return
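
Note: the deleted provider drove a Gradio Space through its two-step queue API — a POST to /call/infer returns an event_id, then a GET on /call/infer/{event_id} streams server-sent frames shaped like b"event: complete\ndata: [...]\n\n". A sketch of splitting those frames, matching the parsing above:

def parse_sse_frame(frame: bytes):
    # Split b"event: <type>\ndata: <payload>\n\n" into (type, payload).
    if not frame.startswith(b"event:"):
        return None, None
    parts = frame.split(b"\ndata: ", 1)
    if len(parts) < 2:
        return None, None
    return parts[0].split(b": ", 1)[1].strip(), parts[1].strip()

event_type, data = parse_sse_frame(b'event: complete\ndata: [{"url": "..."}]\n\n')
assert event_type == b"complete"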

View file

@@ -17,14 +17,15 @@ class CohereForAI_C4AI_Command(AsyncGeneratorProvider, ProviderModelMixin):
working = True working = True
default_model = "command-a-03-2025" default_model = "command-a-03-2025"
model_aliases = { models = [
"command-a": default_model, default_model,
"command-r-plus": "command-r-plus-08-2024", "command-r-plus-08-2024",
"command-r": "command-r-08-2024", "command-r-08-2024",
"command-r": "command-r", "command-r-plus",
"command-r7b": "command-r7b-12-2024", "command-r",
} "command-r7b-12-2024",
models = list(model_aliases.keys()) "command-r7b-arabic-02-2025",
]
@classmethod @classmethod
def get_model(cls, model: str, **kwargs) -> str: def get_model(cls, model: str, **kwargs) -> str:

View file

@@ -15,7 +15,7 @@ from ... import debug
from .DeepseekAI_JanusPro7b import get_zerogpu_token from .DeepseekAI_JanusPro7b import get_zerogpu_token
from .raise_for_status import raise_for_status from .raise_for_status import raise_for_status
class Microsoft_Phi_4(AsyncGeneratorProvider, ProviderModelMixin): class Microsoft_Phi_4_Multimodal(AsyncGeneratorProvider, ProviderModelMixin):
label = "Microsoft Phi-4" label = "Microsoft Phi-4"
space = "microsoft/phi-4-multimodal" space = "microsoft/phi-4-multimodal"
url = f"https://huggingface.co/spaces/{space}" url = f"https://huggingface.co/spaces/{space}"
@@ -29,9 +29,9 @@ class Microsoft_Phi_4(AsyncGeneratorProvider, ProviderModelMixin):
default_model = "phi-4-multimodal" default_model = "phi-4-multimodal"
default_vision_model = default_model default_vision_model = default_model
model_aliases = {"phi-4": default_vision_model} vision_models = [default_vision_model]
vision_models = list(model_aliases.keys())
models = vision_models models = vision_models
model_aliases = {"phi-4": default_vision_model}
@classmethod @classmethod
def run(cls, method: str, session: StreamSession, prompt: str, conversation: JsonConversation, media: list = None): def run(cls, method: str, session: StreamSession, prompt: str, conversation: JsonConversation, media: list = None):

View file

@@ -31,7 +31,15 @@ class Qwen_Qwen_3(AsyncGeneratorProvider, ProviderModelMixin):
"qwen3-1.7b", "qwen3-1.7b",
"qwen3-0.6b", "qwen3-0.6b",
} }
model_aliases = {model: model for model in models} model_aliases = {
"qwen-3-235b": default_model,
"qwen-3-32b": "qwen3-32b",
"qwen-3-30b": "qwen3-30b-a3b",
"qwen-3-14b": "qwen3-14b",
"qwen-3-4b": "qwen3-4b",
"qwen-3-1.7b": "qwen3-1.7b",
"qwen-3-0.6b": "qwen3-0.6b",
}
@classmethod @classmethod
async def create_async_generator( async def create_async_generator(

View file

@@ -19,7 +19,10 @@ class Voodoohop_Flux1Schnell(AsyncGeneratorProvider, ProviderModelMixin):
default_model = "voodoohop-flux-1-schnell" default_model = "voodoohop-flux-1-schnell"
default_image_model = default_model default_image_model = default_model
model_aliases = {"flux-schnell": default_model, "flux": default_model} model_aliases = {
"flux-schnell": default_image_model,
"flux": default_image_model
}
image_models = list(model_aliases.keys()) image_models = list(model_aliases.keys())
models = image_models models = image_models

View file

@@ -7,10 +7,9 @@ from ...errors import ResponseError
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .BlackForestLabs_Flux1Dev import BlackForestLabs_Flux1Dev from .BlackForestLabs_Flux1Dev import BlackForestLabs_Flux1Dev
from .BlackForestLabs_Flux1Schnell import BlackForestLabs_Flux1Schnell
from .CohereForAI_C4AI_Command import CohereForAI_C4AI_Command from .CohereForAI_C4AI_Command import CohereForAI_C4AI_Command
from .DeepseekAI_JanusPro7b import DeepseekAI_JanusPro7b from .DeepseekAI_JanusPro7b import DeepseekAI_JanusPro7b
from .Microsoft_Phi_4 import Microsoft_Phi_4 from .Microsoft_Phi_4_Multimodal import Microsoft_Phi_4_Multimodal
from .Qwen_QVQ_72B import Qwen_QVQ_72B from .Qwen_QVQ_72B import Qwen_QVQ_72B
from .Qwen_Qwen_2_5 import Qwen_Qwen_2_5 from .Qwen_Qwen_2_5 import Qwen_Qwen_2_5
from .Qwen_Qwen_2_5M import Qwen_Qwen_2_5M from .Qwen_Qwen_2_5M import Qwen_Qwen_2_5M
@@ -30,10 +29,9 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
default_vision_model = Qwen_QVQ_72B.default_model default_vision_model = Qwen_QVQ_72B.default_model
providers = [ providers = [
BlackForestLabs_Flux1Dev, BlackForestLabs_Flux1Dev,
BlackForestLabs_Flux1Schnell,
CohereForAI_C4AI_Command, CohereForAI_C4AI_Command,
DeepseekAI_JanusPro7b, DeepseekAI_JanusPro7b,
Microsoft_Phi_4, Microsoft_Phi_4_Multimodal,
Qwen_QVQ_72B, Qwen_QVQ_72B,
Qwen_Qwen_2_5, Qwen_Qwen_2_5,
Qwen_Qwen_2_5M, Qwen_Qwen_2_5M,

File diff suppressed because it is too large

View file

@@ -63,6 +63,7 @@ GGOGLE_SID_COOKIE = "__Secure-1PSID"
models = { models = {
"gemini-2.5-pro-exp": {"x-goog-ext-525001261-jspb": '[1,null,null,null,"2525e3954d185b3c"]'}, "gemini-2.5-pro-exp": {"x-goog-ext-525001261-jspb": '[1,null,null,null,"2525e3954d185b3c"]'},
"gemini-2.5-flash": {"x-goog-ext-525001261-jspb": '[1,null,null,null,"35609594dbe934d8"]'},
"gemini-2.0-flash-thinking-exp": {"x-goog-ext-525001261-jspb": '[1,null,null,null,"7ca48d02d802f20a"]'}, "gemini-2.0-flash-thinking-exp": {"x-goog-ext-525001261-jspb": '[1,null,null,null,"7ca48d02d802f20a"]'},
"gemini-deep-research": {"x-goog-ext-525001261-jspb": '[1,null,null,null,"cd472a54d2abba7e"]'}, "gemini-deep-research": {"x-goog-ext-525001261-jspb": '[1,null,null,null,"cd472a54d2abba7e"]'},
"gemini-2.0-flash": {"x-goog-ext-525001261-jspb": '[null,null,null,null,"f299729663a2343f"]'}, "gemini-2.0-flash": {"x-goog-ext-525001261-jspb": '[null,null,null,null,"f299729663a2343f"]'},
@@ -87,7 +88,10 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
models = [ models = [
default_model, *models.keys() default_model, *models.keys()
] ]
model_aliases = {"gemini-2.0": ""} model_aliases = {
"gemini-2.0": "",
"gemini-2.5-pro": "gemini-2.5-pro-exp"
}
synthesize_content_type = "audio/vnd.wav" synthesize_content_type = "audio/vnd.wav"
@@ -102,14 +106,11 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
@classmethod @classmethod
async def nodriver_login(cls, proxy: str = None) -> AsyncIterator[str]: async def nodriver_login(cls, proxy: str = None) -> AsyncIterator[str]:
if not has_nodriver: if not has_nodriver:
if debug.logging: debug.log("Skip nodriver login in Gemini provider")
print("Skip nodriver login in Gemini provider")
return return
browser, stop_browser = await get_nodriver(proxy=proxy, user_data_dir="gemini") browser, stop_browser = await get_nodriver(proxy=proxy, user_data_dir="gemini")
try: try:
login_url = os.environ.get("G4F_LOGIN_URL") yield RequestLogin(cls.label, os.environ.get("G4F_LOGIN_URL", ""))
if login_url:
yield RequestLogin(cls.label, login_url)
page = await browser.get(f"{cls.url}/app") page = await browser.get(f"{cls.url}/app")
await page.select("div.ql-editor.textarea", 240) await page.select("div.ql-editor.textarea", 240)
cookies = {} cookies = {}
@ -159,6 +160,8 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
audio: dict = None, audio: dict = None,
**kwargs **kwargs
) -> AsyncResult: ) -> AsyncResult:
if model in cls.model_aliases:
model = cls.model_aliases[model]
if audio is not None or model == "gemini-audio": if audio is not None or model == "gemini-audio":
prompt = format_image_prompt(messages, prompt) prompt = format_image_prompt(messages, prompt)
filename = get_filename(["gemini"], prompt, ".ogx", prompt) filename = get_filename(["gemini"], prompt, ".ogx", prompt)
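
Note: create_async_generator now resolves aliases itself, so requesting "gemini-2.5-pro" targets the experimental model. A small sketch mirroring the added lines (import path assumed):

    from g4f.Provider import Gemini

    model = "gemini-2.5-pro"
    if model in Gemini.model_aliases:
        model = Gemini.model_aliases[model]
    assert model == "gemini-2.5-pro-exp"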

View file

@@ -64,9 +64,8 @@ class GigaChat(AsyncGeneratorProvider, ProviderModelMixin):
     supports_system_message = True
     supports_stream = True
     needs_auth = True
-    default_model = "GigaChat:latest"
-    models = [default_model, "GigaChat-Plus", "GigaChat-Pro"]
-    model_aliases = {"gigachat": default_model}
+    default_model = "GigaChat"
+    models = ["GigaChat-2", "GigaChat-2-Pro", "GigaChat-2-Max", default_model, "GigaChat-Pro", "GigaChat-Max"]
 
     @classmethod
     async def create_async_generator(
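
Note: the default model id drops the ":latest" suffix, the GigaChat-2 generation is listed first, and the lowercase "gigachat" alias is removed. A quick check, assuming the provider is importable from g4f.Provider as usual:

    from g4f.Provider import GigaChat

    assert GigaChat.default_model == "GigaChat"
    assert GigaChat.models[0] == "GigaChat-2"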

View file

@@ -1,5 +1,6 @@
 from .Anthropic import Anthropic
 from .BingCreateImages import BingCreateImages
+from .BlackboxPro import BlackboxPro
 from .CablyAI import CablyAI
 from .Cerebras import Cerebras
 from .CopilotAccount import CopilotAccount

View file

@@ -2,13 +2,13 @@ from __future__ import annotations
 
 import json
 from uuid import uuid4
 from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages, MediaListType
-from ..image import to_bytes, is_accepted_format, to_data_uri
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..requests.raise_for_status import raise_for_status
-from ..providers.response import FinishReason, JsonConversation
-from .helper import format_prompt, get_last_user_message, format_image_prompt
-from ..tools.media import merge_media
+from ...typing import AsyncResult, Messages, MediaListType
+from ...image import to_bytes, is_accepted_format, to_data_uri
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...requests.raise_for_status import raise_for_status
+from ...providers.response import FinishReason, JsonConversation
+from ..helper import format_prompt, get_last_user_message, format_image_prompt
+from ...tools.media import merge_media
 
 class Conversation(JsonConversation):
@@ -29,7 +29,7 @@ class AllenAI(AsyncGeneratorProvider, ProviderModelMixin):
     login_url = None
     api_endpoint = "https://olmo-api.allen.ai/v4/message/stream"
-    working = True
+    working = False
     needs_auth = False
     use_nodriver = False
     supports_stream = True
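
Note: the ".." to "..." import rewrites reflect the module moving one package deeper, into the not_working subpackage, where it is also disabled. A hedged check of the new location (the re-export is added in the not_working/__init__.py hunk below):

    from g4f.Provider.not_working import AllenAI

    assert AllenAI.working is False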

View file

@@ -10,16 +10,16 @@ try:
 except ImportError:
     has_curl_cffi = False
 
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-from ..errors import MissingRequirementsError
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+from ...errors import MissingRequirementsError
 
 class ChatGptEs(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://chatgpt.es"
     api_endpoint = "https://chatgpt.es/wp-admin/admin-ajax.php"
-    working = True
+    working = False
     supports_stream = True
     supports_system_message = False
     supports_message_history = False

View file

@@ -1,9 +1,9 @@
 from __future__ import annotations
 
-from .template import OpenaiTemplate
+from ..template import OpenaiTemplate
 
 class FreeRouter(OpenaiTemplate):
     label = "CablyAI FreeRouter"
     url = "https://freerouter.cablyai.com"
     api_base = "https://freerouter.cablyai.com/v1"
-    working = True
+    working = False

View file

@@ -1,12 +1,12 @@
 from __future__ import annotations
 
-from .template import OpenaiTemplate
+from ..template import OpenaiTemplate
 
 class Glider(OpenaiTemplate):
     label = "Glider"
     url = "https://glider.so"
     api_endpoint = "https://glider.so/api/chat"
-    working = True
+    working = False
 
     default_model = 'chat-llama-3-1-70b'
     models = [
@@ -21,4 +21,4 @@ class Glider(OpenaiTemplate):
         "llama-3.1-8b": "chat-llama-3-1-8b",
         "llama-3.2-3b": "chat-llama-3-2-3b",
         "deepseek-r1": "deepseek-ai/DeepSeek-R1",
     }

View file

@@ -17,7 +17,7 @@ class RubiksAI(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://rubiks.ai"
     api_endpoint = "https://rubiks.ai/search/api/"
-    working = True
+    working = False
     supports_stream = True
     supports_system_message = True
     supports_message_history = True

View file

@@ -5,15 +5,19 @@ from .AiChats import AiChats
 from .Airforce import Airforce
 from .AutonomousAI import AutonomousAI
 from .AIUncensored import AIUncensored
+from .AllenAI import AllenAI
 from .AmigoChat import AmigoChat
 from .Aura import Aura
 from .Chatgpt4o import Chatgpt4o
 from .Chatgpt4Online import Chatgpt4Online
+from .ChatGptEs import ChatGptEs
 from .ChatgptFree import ChatgptFree
 from .ChatGptt import ChatGptt
 from .DarkAI import DarkAI
 from .FlowGpt import FlowGpt
 from .FreeNetfly import FreeNetfly
+from .FreeRouter import FreeRouter
+from .Glider import Glider
 from .GPROChat import GPROChat
 from .Koala import Koala
 from .MagickPen import MagickPen
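
Note: moving a provider here only changes its import path; it is the working = False flag that keeps it out of selection. One hedged way to list what remains active, using the __providers__ registry that any_provider.py appends to below (membership of disabled classes in that list is an assumption of this sketch):

    import g4f.Provider

    active = [p.__name__ for p in g4f.Provider.__providers__ if getattr(p, "working", False)]
    print("Glider" in active)  # expected: False after this change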

File diff suppressed because it is too large.

View file

@@ -10,7 +10,7 @@ from ..Provider.hf_space import HuggingSpace
 from .. import Provider
 from .. import models
 from ..Provider import Cloudflare, Gemini, Grok, DeepSeekAPI, PerplexityLabs, LambdaChat, PollinationsAI, FreeRouter
-from ..Provider import Microsoft_Phi_4, DeepInfraChat, Blackbox, EdgeTTS, gTTS, MarkItDown, HarProvider
+from ..Provider import Microsoft_Phi_4_Multimodal, DeepInfraChat, Blackbox, EdgeTTS, gTTS, MarkItDown, HarProvider
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 
 class AnyProvider(AsyncGeneratorProvider, ProviderModelMixin):
@@ -100,7 +100,7 @@ class AnyProvider(AsyncGeneratorProvider, ProviderModelMixin):
                 cls.image_models.extend([clean_name(model) for model in provider.image_models])
                 cls.vision_models.extend([clean_name(model) for model in provider.vision_models])
                 cls.video_models.extend([clean_name(model) for model in provider.video_models])
-        for provider in [Microsoft_Phi_4, PollinationsAI]:
+        for provider in [Microsoft_Phi_4_Multimodal, PollinationsAI]:
             if provider.working and getattr(provider, "parent", provider.__name__) not in ignored:
                 cls.audio_models.update(provider.audio_models)
         cls.models_count.update({model: all_models.count(model) for model in all_models if all_models.count(model) > cls.models_count.get(model, 0)})
@@ -137,7 +137,7 @@ class AnyProvider(AsyncGeneratorProvider, ProviderModelMixin):
         if "audio" in kwargs or "audio" in kwargs.get("modalities", []):
             providers = [PollinationsAI, EdgeTTS, gTTS]
         elif has_audio:
-            providers = [PollinationsAI, Microsoft_Phi_4, MarkItDown]
+            providers = [PollinationsAI, Microsoft_Phi_4_Multimodal, MarkItDown]
         elif has_image:
             providers = models.default_vision.best_provider.providers
         else:
@@ -194,4 +194,4 @@ class AnyProvider(AsyncGeneratorProvider, ProviderModelMixin):
 
 setattr(Provider, "AnyProvider", AnyProvider)
 Provider.__map__["AnyProvider"] = AnyProvider
 Provider.__providers__.append(AnyProvider)
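
Note: the audio branch above prefers an explicit "audio" kwarg or modality over detected audio attachments. A standalone restatement of that selection order, with provider classes reduced to plain names for illustration:

    # Pure sketch of the branch shown in the diff; the real code passes
    # provider classes, not strings, and computes has_audio/has_image itself.
    def pick_providers(kwargs: dict, has_audio: bool, has_image: bool) -> list[str]:
        if "audio" in kwargs or "audio" in kwargs.get("modalities", []):
            return ["PollinationsAI", "EdgeTTS", "gTTS"]
        if has_audio:
            return ["PollinationsAI", "Microsoft_Phi_4_Multimodal", "MarkItDown"]
        if has_image:
            return ["models.default_vision.best_provider.providers"]
        return ["<default provider list>"]

    print(pick_providers({}, has_audio=True, has_image=False))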