Mirror of https://github.com/xtekky/gpt4free.git, synced 2025-12-06 02:30:41 -08:00
Merge pull request #2989 from hlohaus/kq: Update providers and models for better compatibility
Commit d2a967c887
34 changed files with 2990 additions and 1647 deletions
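The hunks below change the provider and model definitions that a g4f client resolves at request time. A minimal usage sketch, assuming the g4f client API as of this commit; the provider and model names are taken from the hunks below, and the call shape is an assumption for illustration, not part of the diff:

from g4f.client import Client
from g4f.Provider import Blackbox

# Assumed client usage: pick a provider touched by this PR and one of its alias names.
client = Client(provider=Blackbox)
response = client.chat.completions.create(
    model="claude-3.7-sonnet",
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)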
@@ -31,7 +31,7 @@ from g4f import debug
debug.logging = True
# Constants
DEFAULT_MODEL = "gpt-4o"
DEFAULT_MODEL = "claude-3.7-sonnet"
FALLBACK_MODELS = []
MAX_DIFF_SIZE = None # Set to None to disable truncation, or a number for character limit
MAX_RETRIES = 3
@@ -151,10 +151,12 @@ class ARTA(AsyncGeneratorProvider, ProviderModelMixin):
# Step 1: Get Authentication Token
auth_data = await cls.read_and_refresh_token(proxy)
auth_token = auth_data.get("idToken")
async with ClientSession() as session:
# Step 2: Generate Images
image_payload = {
# Create a form data structure as the API might expect form data instead of JSON
form_data = {
"prompt": prompt,
"negative_prompt": negative_prompt,
"style": model,

@@ -166,10 +168,12 @@ class ARTA(AsyncGeneratorProvider, ProviderModelMixin):
}
headers = {
"Authorization": auth_data.get("idToken"),
"Authorization": auth_token,
# No Content-Type header for multipart/form-data, aiohttp sets it automatically
}
async with session.post(cls.image_generation_url, data=image_payload, headers=headers, proxy=proxy) as image_response:
# Try with form data instead of JSON
async with session.post(cls.image_generation_url, data=form_data, headers=headers, proxy=proxy) as image_response:
await raise_error(f"Failed to initiate image generation", image_response)
image_data = await image_response.json()
record_id = image_data.get("record_id")
@@ -6,7 +6,6 @@ import re
import json
import random
import string
import base64
from pathlib import Path
from typing import Optional
from datetime import datetime, timedelta

@@ -14,13 +13,11 @@ from datetime import datetime, timedelta
from ..typing import AsyncResult, Messages, MediaListType
from ..requests.raise_for_status import raise_for_status
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .openai.har_file import get_har_files
from ..image import to_data_uri
from ..cookies import get_cookies_dir
from .helper import format_image_prompt, render_messages
from ..providers.response import JsonConversation, ImageResponse
from .helper import render_messages
from ..providers.response import JsonConversation
from ..tools.media import merge_media
from ..errors import RateLimitError, NoValidHarFileError
from ..errors import RateLimitError
from .. import debug
class Conversation(JsonConversation):
@@ -43,20 +40,66 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
default_model = "blackboxai"
default_vision_model = default_model
default_image_model = 'flux'
# Free models (available without subscription)
fallback_models = [
# OpenRouter Free
openrouter_models = [
"Deepcoder 14B Preview",
"DeepHermes 3 Llama 3 8B Preview",
"DeepSeek R1 Zero",
"Dolphin3.0 Mistral 24B",
"Dolphin3.0 R1 Mistral 24B",
"Flash 3", # FIX (<reasoning> ◁</reasoning>)
"Gemini 2.0 Flash Experimental",
"Gemma 2 9B",
"Gemma 3 12B",
"Gemma 3 1B",
"Gemma 3 27B",
"Gemma 3 4B",
"Kimi VL A3B Thinking", # FIX (◁think▷ ◁/think▷)
"Llama 3.1 8B Instruct",
"Llama 3.1 Nemotron Ultra 253B v1",
"Llama 3.2 11B Vision Instruct",
"Llama 3.2 1B Instruct",
"Llama 3.2 3B Instruct",
"Llama 3.3 70B Instruct",
"Llama 3.3 Nemotron Super 49B v1",
"Llama 4 Maverick",
"Llama 4 Scout",
"Mistral 7B Instruct",
"Mistral Nemo",
"Mistral Small 3",
"Mistral Small 3.1 24B",
"Molmo 7B D",
"Moonlight 16B A3B Instruct",
"Qwen2.5 72B Instruct",
"Qwen2.5 7B Instruct",
"Qwen2.5 Coder 32B Instruct",
"Qwen2.5 VL 32B Instruct",
"Qwen2.5 VL 3B Instruct",
"Qwen2.5 VL 72B Instruct",
"Qwen2.5-VL 7B Instruct",
"Qwerky 72B",
"QwQ 32B",
"QwQ 32B Preview",
"QwQ 32B RpR v1",
"R1",
"R1 Distill Llama 70B",
"R1 Distill Qwen 14B",
"R1 Distill Qwen 32B",
]
models = [
default_model,
"gpt-4o-mini",
"DeepSeek-V3",
"o3-mini",
"gpt-4.1-nano",
"Claude-sonnet-3.7",
"Claude-sonnet-3.5",
"DeepSeek-R1",
"Meta-Llama-3.3-70B-Instruct-Turbo",
"Mistral-Small-24B-Instruct-2501",
"DeepSeek-LLM-Chat-(67B)",
"Qwen-QwQ-32B-Preview",
# Image models
"flux",
# OpenRouter Free
*openrouter_models,
# Trending agent modes
'Python Agent',
'HTML Agent',
@@ -89,98 +132,66 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
'Heroku Agent'
]
# Premium models (require subscription)
premium_models = [
"GPT-4o",
"o1",
"o3-mini",
"Claude-sonnet-3.7",
"Claude-sonnet-3.5",
"Gemini-Flash-2.0",
"DBRX-Instruct",
"blackboxai-pro",
"Gemini-PRO"
]
vision_models = [default_vision_model, 'o3-mini']
# Models available in the demo account
demo_models = [
default_model,
"blackboxai-pro",
"gpt-4o-mini",
"GPT-4o",
"o1",
"o3-mini",
"Claude-sonnet-3.7",
"Claude-sonnet-3.5",
"DeepSeek-V3",
"DeepSeek-R1",
"DeepSeek-LLM-Chat-(67B)",
"Meta-Llama-3.3-70B-Instruct-Turbo",
"Mistral-Small-24B-Instruct-2501",
"Qwen-QwQ-32B-Preview",
# Image models
"flux",
# Trending agent modes
'Python Agent',
'HTML Agent',
'Builder Agent',
'Java Agent',
'JavaScript Agent',
'React Agent',
'Android Agent',
'Flutter Agent',
'Next.js Agent',
'AngularJS Agent',
'Swift Agent',
'MongoDB Agent',
'PyTorch Agent',
'Xcode Agent',
'Azure Agent',
'Bitbucket Agent',
'DigitalOcean Agent',
'Docker Agent',
'Electron Agent',
'Erlang Agent',
'FastAPI Agent',
'Firebase Agent',
'Flask Agent',
'Git Agent',
'Gitlab Agent',
'Go Agent',
'Godot Agent',
'Google Cloud Agent',
'Heroku Agent'
]
image_models = [default_image_model]
vision_models = [default_vision_model, 'GPT-4o', 'o1', 'o3-mini', 'Gemini-PRO', 'Gemini Agent', 'llama-3.1-8b Agent', 'llama-3.1-70b Agent', 'llama-3.1-405 Agent', 'Gemini-Flash-2.0', 'DeepSeek-V3']
userSelectedModel = ['GPT-4o', 'o1', 'o3-mini', 'Gemini-PRO', 'Claude-sonnet-3.7', 'Claude-sonnet-3.5', 'DeepSeek-V3', 'DeepSeek-R1', 'Meta-Llama-3.3-70B-Instruct-Turbo', 'Mistral-Small-24B-Instruct-2501', 'DeepSeek-LLM-Chat-(67B)', 'DBRX-Instruct', 'Qwen-QwQ-32B-Preview', 'Nous-Hermes-2-Mixtral-8x7B-DPO', 'Gemini-Flash-2.0']
userSelectedModel = ['o3-mini','Claude-sonnet-3.7', 'Claude-sonnet-3.5', 'DeepSeek-R1', 'Mistral-Small-24B-Instruct-2501'] + openrouter_models
# Agent mode configurations
agentMode = {
'GPT-4o': {'mode': True, 'id': "GPT-4o", 'name': "GPT-4o"},
'Gemini-PRO': {'mode': True, 'id': "Gemini-PRO", 'name': "Gemini-PRO"},
# OpenRouter Free
'Deepcoder 14B Preview': {'mode': True, 'id': "agentica-org/deepcoder-14b-preview:free", 'name': "Deepcoder 14B Preview"},
'DeepHermes 3 Llama 3 8B Preview': {'mode': True, 'id': "nousresearch/deephermes-3-llama-3-8b-preview:free", 'name': "DeepHermes 3 Llama 3 8B Preview"},
'DeepSeek R1 Zero': {'mode': True, 'id': "deepseek/deepseek-r1-zero:free", 'name': "DeepSeek R1 Zero"},
'Dolphin3.0 Mistral 24B': {'mode': True, 'id': "cognitivecomputations/dolphin3.0-mistral-24b:free", 'name': "Dolphin3.0 Mistral 24B"},
'Dolphin3.0 R1 Mistral 24B': {'mode': True, 'id': "cognitivecomputations/dolphin3.0-r1-mistral-24b:free", 'name': "Dolphin3.0 R1 Mistral 24B"},
'Flash 3': {'mode': True, 'id': "rekaai/reka-flash-3:free", 'name': "Flash 3"},
'Gemini 2.0 Flash Experimental': {'mode': True, 'id': "google/gemini-2.0-flash-exp:free", 'name': "Gemini 2.0 Flash Experimental"},
'Gemma 2 9B': {'mode': True, 'id': "google/gemma-2-9b-it:free", 'name': "Gemma 2 9B"},
'Gemma 3 12B': {'mode': True, 'id': "google/gemma-3-12b-it:free", 'name': "Gemma 3 12B"},
'Gemma 3 1B': {'mode': True, 'id': "google/gemma-3-1b-it:free", 'name': "Gemma 3 1B"},
'Gemma 3 27B': {'mode': True, 'id': "google/gemma-3-27b-it:free", 'name': "Gemma 3 27B"},
'Gemma 3 4B': {'mode': True, 'id': "google/gemma-3-4b-it:free", 'name': "Gemma 3 4B"},
'Kimi VL A3B Thinking': {'mode': True, 'id': "moonshotai/kimi-vl-a3b-thinking:free", 'name': "Kimi VL A3B Thinking"},
'Llama 3.1 8B Instruct': {'mode': True, 'id': "meta-llama/llama-3.1-8b-instruct:free", 'name': "Llama 3.1 8B Instruct"},
'Llama 3.1 Nemotron Ultra 253B v1': {'mode': True, 'id': "nvidia/llama-3.1-nemotron-ultra-253b-v1:free", 'name': "Llama 3.1 Nemotron Ultra 253B v1"},
'Llama 3.2 11B Vision Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-11b-vision-instruct:free", 'name': "Llama 3.2 11B Vision Instruct"},
'Llama 3.2 1B Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-1b-instruct:free", 'name': "Llama 3.2 1B Instruct"},
'Llama 3.2 3B Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-3b-instruct:free", 'name': "Llama 3.2 3B Instruct"},
'Llama 3.3 70B Instruct': {'mode': True, 'id': "meta-llama/llama-3.3-70b-instruct:free", 'name': "Llama 3.3 70B Instruct"},
'Llama 3.3 Nemotron Super 49B v1': {'mode': True, 'id': "nvidia/llama-3.3-nemotron-super-49b-v1:free", 'name': "Llama 3.3 Nemotron Super 49B v1"},
'Llama 4 Maverick': {'mode': True, 'id': "meta-llama/llama-4-maverick:free", 'name': "Llama 4 Maverick"},
'Llama 4 Scout': {'mode': True, 'id': "meta-llama/llama-4-scout:free", 'name': "Llama 4 Scout"},
'Mistral 7B Instruct': {'mode': True, 'id': "mistralai/mistral-7b-instruct:free", 'name': "Mistral 7B Instruct"},
'Mistral Nemo': {'mode': True, 'id': "mistralai/mistral-nemo:free", 'name': "Mistral Nemo"},
'Mistral Small 3': {'mode': True, 'id': "mistralai/mistral-small-24b-instruct-2501:free", 'name': "Mistral Small 3"},
'Mistral Small 3.1 24B': {'mode': True, 'id': "mistralai/mistral-small-3.1-24b-instruct:free", 'name': "Mistral Small 3.1 24B"},
'Molmo 7B D': {'mode': True, 'id': "allenai/molmo-7b-d:free", 'name': "Molmo 7B D"},
'Moonlight 16B A3B Instruct': {'mode': True, 'id': "moonshotai/moonlight-16b-a3b-instruct:free", 'name': "Moonlight 16B A3B Instruct"},
'Qwen2.5 72B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-72b-instruct:free", 'name': "Qwen2.5 72B Instruct"},
'Qwen2.5 7B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-7b-instruct:free", 'name': "Qwen2.5 7B Instruct"},
'Qwen2.5 Coder 32B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-coder-32b-instruct:free", 'name': "Qwen2.5 Coder 32B Instruct"},
'Qwen2.5 VL 32B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-32b-instruct:free", 'name': "Qwen2.5 VL 32B Instruct"},
'Qwen2.5 VL 3B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-3b-instruct:free", 'name': "Qwen2.5 VL 3B Instruct"},
'Qwen2.5 VL 72B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-72b-instruct:free", 'name': "Qwen2.5 VL 72B Instruct"},
'Qwen2.5-VL 7B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-vl-7b-instruct:free", 'name': "Qwen2.5-VL 7B Instruct"},
'Qwerky 72B': {'mode': True, 'id': "featherless/qwerky-72b:free", 'name': "Qwerky 72B"},
'QwQ 32B': {'mode': True, 'id': "qwen/qwq-32b:free", 'name': "QwQ 32B"},
'QwQ 32B Preview': {'mode': True, 'id': "qwen/qwq-32b-preview:free", 'name': "QwQ 32B Preview"},
'QwQ 32B RpR v1': {'mode': True, 'id': "arliai/qwq-32b-arliai-rpr-v1:free", 'name': "QwQ 32B RpR v1"},
'R1': {'mode': True, 'id': "deepseek/deepseek-r1:free", 'name': "R1"},
'R1 Distill Llama 70B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-llama-70b:free", 'name': "R1 Distill Llama 70B"},
'R1 Distill Qwen 14B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-14b:free", 'name': "R1 Distill Qwen 14B"},
'R1 Distill Qwen 32B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-32b:free", 'name': "R1 Distill Qwen 32B"},
# Default
'Claude-sonnet-3.7': {'mode': True, 'id': "Claude-sonnet-3.7", 'name': "Claude-sonnet-3.7"},
'Claude-sonnet-3.5': {'mode': True, 'id': "Claude-sonnet-3.5", 'name': "Claude-sonnet-3.5"},
'DeepSeek-V3': {'mode': True, 'id': "deepseek-chat", 'name': "DeepSeek-V3"},
'DeepSeek-R1': {'mode': True, 'id': "deepseek-reasoner", 'name': "DeepSeek-R1"},
'Meta-Llama-3.3-70B-Instruct-Turbo': {'mode': True, 'id': "meta-llama/Llama-3.3-70B-Instruct-Turbo", 'name': "Meta-Llama-3.3-70B-Instruct-Turbo"},
'Gemini-Flash-2.0': {'mode': True, 'id': "Gemini/Gemini-Flash-2.0", 'name': "Gemini-Flash-2.0"},
'Mistral-Small-24B-Instruct-2501': {'mode': True, 'id': "mistralai/Mistral-Small-24B-Instruct-2501", 'name': "Mistral-Small-24B-Instruct-2501"},
'DeepSeek-LLM-Chat-(67B)': {'mode': True, 'id': "deepseek-ai/deepseek-llm-67b-chat", 'name': "DeepSeek-LLM-Chat-(67B)"},
'DBRX-Instruct': {'mode': True, 'id': "databricks/dbrx-instruct", 'name': "DBRX-Instruct"},
'Qwen-QwQ-32B-Preview': {'mode': True, 'id': "Qwen/QwQ-32B-Preview", 'name': "Qwen-QwQ-32B-Preview"},
'Nous-Hermes-2-Mixtral-8x7B-DPO': {'mode': True, 'id': "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", 'name': "Nous-Hermes-2-Mixtral-8x7B-DPO"},
}
# Trending agent modes
trendingAgentMode = {
'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"},
"Gemini Agent": {'mode': True, 'id': 'gemini'},
"llama-3.1-405 Agent": {'mode': True, 'id': "llama-3.1-405"},
'llama-3.1-70b Agent': {'mode': True, 'id': "llama-3.1-70b"},
'llama-3.1-8b Agent': {'mode': True, 'id': "llama-3.1-8b"},
'Python Agent': {'mode': True, 'id': "python"},
'HTML Agent': {'mode': True, 'id': "html"},
'Builder Agent': {'mode': True, 'id': "builder"},
@@ -214,180 +225,78 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
# Complete list of all models (for authorized users)
_all_models = list(dict.fromkeys([
*fallback_models, # Include all free models
*premium_models, # Include all premium models
*image_models,
*models, # Include all free models
*list(agentMode.keys()),
*list(trendingAgentMode.keys())
]))
# Initialize models with fallback_models
models = fallback_models
model_aliases = {
"gpt-4o": "GPT-4o",
"gpt-4": default_model,
"gpt-4o": default_model,
"gpt-4o-mini": default_model,
"claude-3.7-sonnet": "Claude-sonnet-3.7",
"claude-3.5-sonnet": "Claude-sonnet-3.5",
"deepseek-v3": "DeepSeek-V3",
"deepseek-r1": "DeepSeek-R1",
"deepseek-chat": "DeepSeek-LLM-Chat-(67B)",
"llama-3.3-70b": "Meta-Llama-3.3-70B-Instruct-Turbo",
"mixtral-small-24b": "Mistral-Small-24B-Instruct-2501",
"qwq-32b": "Qwen-QwQ-32B-Preview",
#
"deepcoder-14b": "Deepcoder 14B Preview",
"deephermes-3-8b": "DeepHermes 3 Llama 3 8B Preview",
"deepseek-r1-zero": "DeepSeek R1 Zero",
"deepseek-r1": "DeepSeek R1 Zero",
"dolphin-3.0-24b": "Dolphin3.0 Mistral 24B",
"dolphin-3.0-r1-24b": "Dolphin3.0 R1 Mistral 24B",
"reka-flash": "Flash 3",
"gemini-2.0-flash": "Gemini 2.0 Flash Experimental",
"gemma-2-9b": "Gemma 2 9B",
"gemma-3-12b": "Gemma 3 12B",
"gemma-3-1b": "Gemma 3 1B",
"gemma-3-27b": "Gemma 3 27B",
"gemma-3-4b": "Gemma 3 4B",
"kimi-vl-a3b-thinking": "Kimi VL A3B Thinking",
"llama-3.1-8b": "Llama 3.1 8B Instruct",
"nemotron-253b": "Llama 3.1 Nemotron Ultra 253B v1",
"llama-3.2-11b": "Llama 3.2 11B Vision Instruct",
"llama-3.2-1b": "Llama 3.2 1B Instruct",
"llama-3.2-3b": "Llama 3.2 3B Instruct",
"llama-3.3-70b": "Llama 3.3 70B Instruct",
"nemotron-49b": "Llama 3.3 Nemotron Super 49B v1",
"llama-4-maverick": "Llama 4 Maverick",
"llama-4-scout": "Llama 4 Scout",
"mistral-7b": "Mistral 7B Instruct",
"mistral-nemo": "Mistral Nemo",
"mistral-small-24b": "Mistral Small 3",
"mistral-small-24b": "Mistral-Small-24B-Instruct-2501",
"mistral-small-3.1-24b": "Mistral Small 3.1 24B",
"molmo-7b": "Molmo 7B D",
"moonlight-16b": "Moonlight 16B A3B Instruct",
"qwen-2.5-72b": "Qwen2.5 72B Instruct",
"qwen-2.5-7b": "Qwen2.5 7B Instruct",
"qwen-2.5-coder-32b": "Qwen2.5 Coder 32B Instruct",
"qwen-2.5-vl-32b": "Qwen2.5 VL 32B Instruct",
"qwen-2.5-vl-3b": "Qwen2.5 VL 3B Instruct",
"qwen-2.5-vl-72b": "Qwen2.5 VL 72B Instruct",
"qwen-2.5-vl-7b": "Qwen2.5-VL 7B Instruct",
"qwerky-72b": "Qwerky 72B",
"qwq-32b": "QwQ 32B",
"qwq-32b-preview": "QwQ 32B Preview",
"qwq-32b": "QwQ 32B Preview",
"qwq-32b-arliai": "QwQ 32B RpR v1",
"qwq-32b": "QwQ 32B RpR v1",
"deepseek-r1": "R1",
"deepseek-r1-distill-llama-70b": "R1 Distill Llama 70B",
"deepseek-r1": "R1 Distill Llama 70B",
"deepseek-r1-distill-qwen-14b": "R1 Distill Qwen 14B",
"deepseek-r1": "R1 Distill Qwen 14B",
"deepseek-r1-distill-qwen-32b": "R1 Distill Qwen 32B",
"deepseek-r1": "R1 Distill Qwen 32B",
}
@classmethod
async def get_models_async(cls) -> list:
def generate_session(cls, email: str, id_length: int = 21, days_ahead: int = 365) -> dict:
"""
Asynchronous version of get_models that checks subscription status.
Returns a list of available models based on subscription status.
Premium users get the full list of models.
Free users get fallback_models.
Demo accounts get demo_models.
"""
# Check if there are valid session data in HAR files
session_data = cls._find_session_in_har_files()
if not session_data:
# For demo accounts - return demo models
debug.log(f"Blackbox: Returning demo model list with {len(cls.demo_models)} models")
return cls.demo_models
# Check if this is a demo session
demo_session = cls.generate_session()
is_demo = (session_data['user'].get('email') == demo_session['user'].get('email'))
if is_demo:
# For demo accounts - return demo models
debug.log(f"Blackbox: Returning demo model list with {len(cls.demo_models)} models")
return cls.demo_models
# For non-demo accounts, check subscription status
if 'user' in session_data and 'email' in session_data['user']:
subscription = await cls.check_subscription(session_data['user']['email'])
if subscription['status'] == "PREMIUM":
debug.log(f"Blackbox: Returning premium model list with {len(cls._all_models)} models")
return cls._all_models
# For free accounts - return free models
debug.log(f"Blackbox: Returning free model list with {len(cls.fallback_models)} models")
return cls.fallback_models
@classmethod
def get_models(cls) -> list:
"""
Returns a list of available models based on authorization status.
Authorized users get the full list of models.
Free users get fallback_models.
Demo accounts get demo_models.
Note: This is a synchronous method that can't check subscription status,
so it falls back to the basic premium access check.
For more accurate results, use get_models_async when possible.
"""
# Check if there are valid session data in HAR files
session_data = cls._find_session_in_har_files()
if not session_data:
# For demo accounts - return demo models
debug.log(f"Blackbox: Returning demo model list with {len(cls.demo_models)} models")
return cls.demo_models
# Check if this is a demo session
demo_session = cls.generate_session()
is_demo = (session_data['user'].get('email') == demo_session['user'].get('email'))
if is_demo:
# For demo accounts - return demo models
debug.log(f"Blackbox: Returning demo model list with {len(cls.demo_models)} models")
return cls.demo_models
# For non-demo accounts, check premium access
has_premium_access = cls._check_premium_access()
if has_premium_access:
# For premium users - all models
debug.log(f"Blackbox: Returning premium model list with {len(cls._all_models)} models")
return cls._all_models
# For free accounts - return free models
debug.log(f"Blackbox: Returning free model list with {len(cls.fallback_models)} models")
return cls.fallback_models
@classmethod
async def check_subscription(cls, email: str) -> dict:
"""
Check subscription status for a given email using the Blackbox API.
Args:
email: The email to check subscription for
Returns:
dict: Subscription status information with keys:
- status: "PREMIUM" or "FREE"
- customerId: Customer ID if available
- isTrialSubscription: Whether this is a trial subscription
"""
if not email:
return {"status": "FREE", "customerId": None, "isTrialSubscription": False}
headers = {
'accept': '*/*',
'accept-language': 'en',
'content-type': 'application/json',
'origin': 'https://www.blackbox.ai',
'referer': 'https://www.blackbox.ai/?ref=login-success',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36'
}
try:
async with ClientSession(headers=headers) as session:
async with session.post(
'https://www.blackbox.ai/api/check-subscription',
json={"email": email}
) as response:
if response.status != 200:
debug.log(f"Blackbox: Subscription check failed with status {response.status}")
return {"status": "FREE", "customerId": None, "isTrialSubscription": False}
result = await response.json()
status = "PREMIUM" if result.get("hasActiveSubscription", False) else "FREE"
return {
"status": status,
"customerId": result.get("customerId"),
"isTrialSubscription": result.get("isTrialSubscription", False)
}
except Exception as e:
debug.log(f"Blackbox: Error checking subscription: {e}")
return {"status": "FREE", "customerId": None, "isTrialSubscription": False}
@classmethod
def _check_premium_access(cls) -> bool:
"""
Checks for an authorized session in HAR files.
Returns True if a valid session is found that differs from the demo.
"""
try:
session_data = cls._find_session_in_har_files()
if not session_data:
return False
# Check if this is not a demo session
demo_session = cls.generate_session()
if (session_data['user'].get('email') != demo_session['user'].get('email')):
return True
return False
except Exception as e:
debug.log(f"Blackbox: Error checking premium access: {e}")
return False
@classmethod
def generate_session(cls, id_length: int = 21, days_ahead: int = 365) -> dict:
"""
Generate a dynamic session with proper ID and expiry format.
Generate a dynamic session with proper ID and expiry format using a specific email.
Args:
email: The email to use for this session
id_length: Length of the numeric ID (default: 21)
days_ahead: Number of days ahead for expiry (default: 365)
@@ -401,10 +310,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
future_date = datetime.now() + timedelta(days=days_ahead)
expiry = future_date.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
# Decode the encoded email
encoded_email = "Z2lzZWxlQGJsYWNrYm94LmFp" # Base64 encoded email
email = base64.b64decode(encoded_email).decode('utf-8')
# Generate random image ID for the new URL format
chars = string.ascii_letters + string.digits + "-"
random_img_id = ''.join(random.choice(chars) for _ in range(48))
@@ -417,68 +322,14 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
"image": image_url,
"id": numeric_id
},
"expires": expiry
"expires": expiry,
"isNewUser": False
}
@classmethod
def _find_session_in_har_files(cls) -> Optional[dict]:
"""
Search for valid session data in HAR files.
Returns:
Optional[dict]: Session data if found, None otherwise
"""
try:
for file in get_har_files():
try:
with open(file, 'rb') as f:
har_data = json.load(f)
for entry in har_data['log']['entries']:
# Only look at blackbox API responses
if 'blackbox.ai/api' in entry['request']['url']:
# Look for a response that has the right structure
if 'response' in entry and 'content' in entry['response']:
content = entry['response']['content']
# Look for both regular and Google auth session formats
if ('text' in content and
isinstance(content['text'], str) and
'"user"' in content['text'] and
'"email"' in content['text'] and
'"expires"' in content['text']):
try:
# Remove any HTML or other non-JSON content
text = content['text'].strip()
if text.startswith('{') and text.endswith('}'):
# Replace escaped quotes
text = text.replace('\\"', '"')
har_session = json.loads(text)
# Check if this is a valid session object
if (isinstance(har_session, dict) and
'user' in har_session and
'email' in har_session['user'] and
'expires' in har_session):
debug.log(f"Blackbox: Found session in HAR file: {file}")
return har_session
except json.JSONDecodeError as e:
# Only print error for entries that truly look like session data
if ('"user"' in content['text'] and
'"email"' in content['text']):
debug.log(f"Blackbox: Error parsing likely session data: {e}")
except Exception as e:
debug.log(f"Blackbox: Error reading HAR file {file}: {e}")
return None
except NoValidHarFileError:
pass
except Exception as e:
debug.log(f"Blackbox: Error searching HAR files: {e}")
return None
@classmethod
async def fetch_validated(cls, url: str = "https://www.blackbox.ai", force_refresh: bool = False) -> Optional[str]:
cache_file = Path(get_cookies_dir()) / 'blackbox.json'
cache_path = Path(os.path.expanduser("~")) / ".g4f" / "cache"
cache_file = cache_path / 'blackbox.json'
if not force_refresh and cache_file.exists():
try:

@@ -517,7 +368,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
if is_valid_context(context):
validated_value = match.group(1)
cache_file.parent.mkdir(exist_ok=True)
cache_file.parent.mkdir(exist_ok=True, parents=True)
try:
with open(cache_file, 'w') as f:
json.dump({'validated_value': validated_value}, f)
@@ -592,41 +443,14 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
"title": ""
}
# Get session data - try HAR files first, fall back to generated session
session_data = cls._find_session_in_har_files() or cls.generate_session()
# Generate a new email for each request instead of using the one stored in conversation
chars = string.ascii_lowercase + string.digits
random_team = ''.join(random.choice(chars) for _ in range(8))
request_email = f"{random_team}@blackbox.ai"
# Log which session type is being used
demo_session = cls.generate_session()
is_demo = (session_data['user'].get('email') == demo_session['user'].get('email'))
if is_demo:
debug.log("Blackbox: Using generated demo session")
# For demo account, set default values without checking subscription
subscription_status = {"status": "FREE", "customerId": None, "isTrialSubscription": False}
# Check if the requested model is in demo_models
is_premium = model in cls.demo_models
if not is_premium:
debug.log(f"Blackbox: Model {model} not available in demo account, falling back to default model")
model = cls.default_model
is_premium = True
else:
debug.log(f"Blackbox: Using session from HAR file (email: {session_data['user'].get('email', 'unknown')})")
# Only check subscription for non-demo accounts
subscription_status = {"status": "FREE", "customerId": None, "isTrialSubscription": False}
if session_data.get('user', {}).get('email'):
subscription_status = await cls.check_subscription(session_data['user']['email'])
debug.log(f"Blackbox: Subscription status for {session_data['user']['email']}: {subscription_status['status']}")
# Determine if user has premium access based on subscription status
if subscription_status['status'] == "PREMIUM":
is_premium = True
else:
# For free accounts, check if the requested model is in fallback_models
is_premium = model in cls.fallback_models
if not is_premium:
debug.log(f"Blackbox: Model {model} not available in free account, falling back to default model")
model = cls.default_model
is_premium = True
# Generate a session with the new email
session_data = cls.generate_session(request_email)
debug.log(f"Blackbox: Using generated session with email {request_email}")
data = {
"messages": current_messages,
@@ -651,26 +475,28 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
"mobileClient": False,
"userSelectedModel": model if model in cls.userSelectedModel else None,
"validated": conversation.validated_value,
"imageGenerationMode": model == cls.default_image_model,
"imageGenerationMode": False,
"webSearchModePrompt": False,
"deepSearchMode": False,
"designerMode": False,
"domains": None,
"vscodeClient": False,
"codeInterpreterMode": False,
"customProfile": {
"additionalInfo": "",
"enableNewChats": False,
"name": "",
"occupation": "",
"traits": [],
"additionalInfo": "",
"enableNewChats": False
"traits": []
},
"session": session_data,
"isPremium": is_premium,
"isPremium": True,
"subscriptionCache": {
"status": subscription_status['status'],
"customerId": subscription_status['customerId'],
"isTrialSubscription": subscription_status['isTrialSubscription'],
"lastChecked": int(datetime.now().timestamp() * 1000)
"expiryTimestamp": None,
"isTrialSubscription": False,
"lastChecked": int(datetime.now().timestamp() * 1000),
"status": "FREE",
"customerId": None
},
"beastMode": False,
"reasoningMode": False,
@@ -689,24 +515,11 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
if "You have reached your request limit for the hour" in chunk_text:
raise RateLimitError(chunk_text)
full_response.append(chunk_text)
# Only yield chunks for non-image models
if model != cls.default_image_model:
yield chunk_text
yield chunk_text
full_response_text = ''.join(full_response)
# For image models, check for image markdown
if model == cls.default_image_model:
image_url_match = re.search(r'!\[.*?\]\((.*?)\)', full_response_text)
if image_url_match:
image_url = image_url_match.group(1)
yield ImageResponse(urls=[image_url], alt=format_image_prompt(messages, prompt))
return
# Handle conversation history once, in one place
# Handle conversation history
if return_conversation:
conversation.message_history.append({"role": "assistant", "content": full_response_text})
yield conversation
# For image models that didn't produce an image, fall back to text response
elif model == cls.default_image_model:
yield full_response_text
@@ -35,9 +35,8 @@ class Chatai(AsyncGeneratorProvider, ProviderModelMixin):
supports_message_history = True
default_model = 'gpt-4o-mini-2024-07-18'
models = ['gpt-4o-mini-2024-07-18'] #
model_aliases = {"gpt-4o-mini":default_model}
models = list(model_aliases.keys())
# --- ProviderModelMixin Methods ---
@classmethod
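Several of the touched providers (Blackbox, Chatai, DDG) now derive their models list from a model_aliases map, as in the Chatai hunk above. A minimal sketch of how such an alias map can be resolved before a request is sent; resolve_model is an illustrative helper written for this note, not a function from the repository:

# Hypothetical helper: map a user-facing alias (e.g. "gpt-4o-mini") to the provider's internal name.
def resolve_model(requested: str, default_model: str, model_aliases: dict) -> str:
    if not requested:
        return default_model
    return model_aliases.get(requested, requested)

# Example with the Chatai values shown above:
default_model = 'gpt-4o-mini-2024-07-18'
model_aliases = {"gpt-4o-mini": default_model}
print(resolve_model("gpt-4o-mini", default_model, model_aliases))  # gpt-4o-mini-2024-07-18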
@@ -1,316 +1,94 @@
from __future__ import annotations
import time
from aiohttp import ClientSession, ClientTimeout
import json
import asyncio
import random
import base64
import time
import random
import hashlib
from yarl import URL
import asyncio
from datetime import datetime
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages, Cookies
from ..requests.raise_for_status import raise_for_status
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt, get_last_user_message
from ..providers.response import FinishReason, JsonConversation
from ..errors import ModelNotSupportedError, ResponseStatusError, RateLimitError, TimeoutError, ConversationLimitError
try:
from bs4 import BeautifulSoup
has_bs4 = True
except ImportError:
has_bs4 = False
class DuckDuckGoSearchException(Exception):
"""Base exception class for duckduckgo_search."""
class DuckDuckGoChallengeError(ResponseStatusError):
"""Raised when DuckDuckGo presents a challenge that needs to be solved."""
class Conversation(JsonConversation):
vqd: str = None
vqd_hash_1: str = None
message_history: Messages = []
cookies: dict = {}
fe_version: str = None
def __init__(self, model: str):
self.model = model
self.message_history = []
class DDG(AsyncGeneratorProvider, ProviderModelMixin):
label = "DuckDuckGo AI Chat"
url = "https://duckduckgo.com/aichat"
url = "https://duckduckgo.com"
api_endpoint = "https://duckduckgo.com/duckchat/v1/chat"
status_url = "https://duckduckgo.com/duckchat/v1/status"
working = True
needs_auth = False
supports_stream = True
supports_system_message = True
supports_message_history = True
default_model = "gpt-4o-mini"
# Model mapping from user-friendly names to API model names
_chat_models = {
model_aliases = {
"gpt-4": default_model,
"gpt-4o-mini": default_model,
"gpt-4o": default_model,
"llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
"claude-3-haiku": "claude-3-haiku-20240307",
"o3-mini": "o3-mini",
"mixtral-small-24b": "mistralai/Mistral-Small-24B-Instruct-2501",
"mistral-small-24b": "mistralai/Mistral-Small-24B-Instruct-2501",
}
# Available models (user-friendly names)
models = list(_chat_models.keys())
last_request_time = 0
max_retries = 3
base_delay = 2
# Class variable to store the x-fe-version across instances
_chat_xfe = ""
models = [default_model, "o3-mini"] + list(model_aliases.keys())
@staticmethod
def sha256_base64(text: str) -> str:
"""Return the base64 encoding of the SHA256 digest of the text."""
sha256_hash = hashlib.sha256(text.encode("utf-8")).digest()
return base64.b64encode(sha256_hash).decode()
def generate_user_agent() -> str:
return f"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.{random.randint(1000,9999)}.0 Safari/537.36"
@staticmethod
def parse_dom_fingerprint(js_text: str) -> str:
if not has_bs4:
# Fallback if BeautifulSoup is not available
return "1000"
try:
html_snippet = js_text.split("e.innerHTML = '")[1].split("';")[0]
offset_value = js_text.split("return String(")[1].split(" ")[0]
soup = BeautifulSoup(html_snippet, "html.parser")
corrected_inner_html = soup.body.decode_contents()
inner_html_length = len(corrected_inner_html)
fingerprint = int(offset_value) + inner_html_length
return str(fingerprint)
except Exception:
# Return a fallback value if parsing fails
return "1000"
@staticmethod
def parse_server_hashes(js_text: str) -> list:
try:
return js_text.split('server_hashes: ["', maxsplit=1)[1].split('"]', maxsplit=1)[0].split('","')
except Exception:
# Return a fallback value if parsing fails
return ["1", "2"]
@classmethod
def build_x_vqd_hash_1(cls, vqd_hash_1: str, headers: dict) -> str:
"""Build the x-vqd-hash-1 header value."""
try:
# If we received a valid base64 string, try to decode it
if vqd_hash_1 and len(vqd_hash_1) > 20:
try:
# Try to decode and parse as JSON first
decoded_json = json.loads(base64.b64decode(vqd_hash_1).decode())
# If it's already a complete structure with meta, return it as is
if isinstance(decoded_json, dict) and "meta" in decoded_json:
return vqd_hash_1
# Otherwise, extract what we can from it
if isinstance(decoded_json, dict) and "server_hashes" in decoded_json:
server_hashes = decoded_json.get("server_hashes", ["1", "2"])
else:
# Fall back to parsing from string
decoded = base64.b64decode(vqd_hash_1).decode()
server_hashes = cls.parse_server_hashes(decoded)
except (json.JSONDecodeError, UnicodeDecodeError):
# If it's not valid JSON, try to parse it as a string
decoded = base64.b64decode(vqd_hash_1).decode()
server_hashes = cls.parse_server_hashes(decoded)
else:
# Default server hashes if we can't extract them
server_hashes = ["1", "2"]
# Generate fingerprints
dom_fingerprint = "1000" # Default value
ua_fingerprint = headers.get("User-Agent", "") + headers.get("sec-ch-ua", "")
ua_hash = cls.sha256_base64(ua_fingerprint)
dom_hash = cls.sha256_base64(dom_fingerprint)
# Create a challenge ID (random hex string)
challenge_id = ''.join(random.choice('0123456789abcdef') for _ in range(40)) + 'h8jbt'
# Build the complete structure including meta
final_result = {
"server_hashes": server_hashes,
"client_hashes": [ua_hash, dom_hash],
"signals": {},
"meta": {
"v": "1",
"challenge_id": challenge_id,
"origin": "https://duckduckgo.com",
"stack": "Error\nat ke (https://duckduckgo.com/dist/wpm.chat.js:1:29526)\nat async dispatchServiceInitialVQD (https://duckduckgo.com/dist/wpm.chat.js:1:45076)"
}
}
base64_final_result = base64.b64encode(json.dumps(final_result).encode()).decode()
return base64_final_result
except Exception as e:
# If anything fails, return an empty string
return ""
@classmethod
def validate_model(cls, model: str) -> str:
"""Validates and returns the correct model name for the API"""
if not model:
return cls.default_model
# Check aliases first
if model in cls.model_aliases:
model = cls.model_aliases[model]
# Check if it's a valid model name
if model not in cls.models:
raise ModelNotSupportedError(f"Model {model} not supported. Available models: {cls.models}")
return model
@classmethod
async def sleep(cls, multiplier=1.0):
"""Implements rate limiting between requests"""
now = time.time()
if cls.last_request_time > 0:
delay = max(0.0, 1.5 - (now - cls.last_request_time)) * multiplier
if delay > 0:
await asyncio.sleep(delay)
cls.last_request_time = time.time()
@classmethod
async def get_default_cookies(cls, session: ClientSession) -> dict:
"""Obtains default cookies needed for API requests"""
try:
await cls.sleep()
# Make initial request to get cookies
async with session.get(cls.url) as response:
# Set the required cookies
cookies = {}
cookies_dict = {'dcs': '1', 'dcm': '3'}
# Add any cookies from the response
for cookie in response.cookies.values():
cookies[cookie.key] = cookie.value
# Ensure our required cookies are set
for name, value in cookies_dict.items():
cookies[name] = value
url_obj = URL(cls.url)
session.cookie_jar.update_cookies({name: value}, url_obj)
# Make a second request to the status endpoint to get any additional cookies
headers = {
"accept": "text/event-stream",
"accept-language": "en",
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
"origin": "https://duckduckgo.com",
"referer": "https://duckduckgo.com/",
}
await cls.sleep()
async with session.get(cls.status_url, headers=headers) as status_response:
# Add any cookies from the status response
for cookie in status_response.cookies.values():
cookies[cookie.key] = cookie.value
url_obj = URL(cls.url)
session.cookie_jar.update_cookies({cookie.key: cookie.value}, url_obj)
return cookies
except Exception as e:
# Return at least the required cookies on error
cookies = {'dcs': '1', 'dcm': '3'}
url_obj = URL(cls.url)
for name, value in cookies.items():
session.cookie_jar.update_cookies({name: value}, url_obj)
return cookies
@classmethod
async def fetch_fe_version(cls, session: ClientSession) -> str:
"""Fetches the fe-version from the initial page load."""
if cls._chat_xfe:
return cls._chat_xfe
try:
url = "https://duckduckgo.com/?q=DuckDuckGo+AI+Chat&ia=chat&duckai=1"
await cls.sleep()
async with session.get(url) as response:
await raise_for_status(response)
content = await response.text()
# Extract x-fe-version components
try:
# Try to extract the version components
xfe1 = content.split('__DDG_BE_VERSION__="', 1)[1].split('"', 1)[0]
xfe2 = content.split('__DDG_FE_CHAT_HASH__="', 1)[1].split('"', 1)[0]
# Format it like "serp_YYYYMMDD_HHMMSS_ET-hash"
from datetime import datetime
current_date = datetime.now().strftime("%Y%m%d_%H%M%S")
cls._chat_xfe = f"serp_{current_date}_ET-{xfe2}"
return cls._chat_xfe
except Exception:
# Fallback to a default format if extraction fails
from datetime import datetime
current_date = datetime.now().strftime("%Y%m%d_%H%M%S")
cls._chat_xfe = f"serp_{current_date}_ET-78c2e87e3d286691cc21"
return cls._chat_xfe
except Exception:
# Fallback to a default format if request fails
from datetime import datetime
current_date = datetime.now().strftime("%Y%m%d_%H%M%S")
cls._chat_xfe = f"serp_{current_date}_ET-78c2e87e3d286691cc21"
return cls._chat_xfe
@classmethod
async def fetch_vqd_and_hash(cls, session: ClientSession, retry_count: int = 0) -> tuple[str, str]:
"""Fetches the required VQD token and hash for the chat session with retries."""
headers = {
"accept": "text/event-stream",
"accept-language": "en",
"cache-control": "no-cache",
"pragma": "no-cache",
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
"origin": "https://duckduckgo.com",
"referer": "https://duckduckgo.com/",
"x-vqd-accept": "1",
def generate_fe_signals() -> str:
current_time = int(time.time() * 1000)
signals_data = {
"start": current_time - 35000,
"events": [
{"name": "onboarding_impression_1", "delta": 383},
{"name": "onboarding_impression_2", "delta": 6004},
{"name": "onboarding_finish", "delta": 9690},
{"name": "startNewChat", "delta": 10082},
{"name": "initSwitchModel", "delta": 16586}
],
"end": 35163
}
return base64.b64encode(json.dumps(signals_data).encode()).decode()
# Make sure we have cookies first
if len(session.cookie_jar) == 0:
await cls.get_default_cookies(session)
@staticmethod
def generate_fe_version(page_content: str = "") -> str:
try:
await cls.sleep(multiplier=1.0 + retry_count * 0.5)
async with session.get(cls.status_url, headers=headers) as response:
await raise_for_status(response)
fe_hash = page_content.split('__DDG_FE_CHAT_HASH__="', 1)[1].split('"', 1)[0]
return f"serp_20250510_052906_ET-{fe_hash}"
except Exception:
return "serp_20250510_052906_ET-ed4f51dc2e106020bc4b"
vqd = response.headers.get("x-vqd-4", "")
vqd_hash_1 = response.headers.get("x-vqd-hash-1", "")
if vqd:
# Return the fetched vqd and vqd_hash_1
return vqd, vqd_hash_1
response_text = await response.text()
raise RuntimeError(f"Failed to fetch VQD token and hash: {response.status} {response_text}")
except Exception as e:
if retry_count < cls.max_retries:
wait_time = cls.base_delay * (2 ** retry_count) * (1 + random.random())
await asyncio.sleep(wait_time)
return await cls.fetch_vqd_and_hash(session, retry_count + 1)
else:
raise RuntimeError(f"Failed to fetch VQD token and hash after {cls.max_retries} attempts: {str(e)}")
@staticmethod
def generate_x_vqd_hash_1(vqd: str, fe_version: str) -> str:
# Placeholder logic; in reality DuckDuckGo uses dynamic JS challenge
concat = f"{vqd}#{fe_version}"
hash_digest = hashlib.sha256(concat.encode()).digest()
b64 = base64.b64encode(hash_digest).decode()
return base64.b64encode(json.dumps({
"server_hashes": [],
"client_hashes": [b64],
"signals": {},
"meta": {
"v": "1",
"challenge_id": hashlib.md5(concat.encode()).hexdigest(),
"origin": "https://duckduckgo.com",
"stack": "Generated in Python"
}
}).encode()).decode()
@classmethod
async def create_async_generator(
@@ -318,229 +96,112 @@ class DDG(AsyncGeneratorProvider, ProviderModelMixin):
model: str,
messages: Messages,
proxy: str = None,
timeout: int = 60,
cookies: Cookies = None,
conversation: Conversation = None,
return_conversation: bool = True,
retry_count: int = 0,
**kwargs
) -> AsyncResult:
model = cls.validate_model(model)
retry_count = 0
model = cls.get_model(model)
while retry_count <= cls.max_retries:
if conversation is None:
conversation = Conversation(model)
conversation.message_history = messages.copy()
else:
last_message = next((m for m in reversed(messages) if m["role"] == "user"), None)
if last_message and last_message not in conversation.message_history:
conversation.message_history.append(last_message)
base_headers = {
"accept-language": "en-US,en;q=0.9",
"dnt": "1",
"origin": "https://duckduckgo.com",
"referer": "https://duckduckgo.com/",
"sec-ch-ua": '"Chromium";v="135", "Not-A.Brand";v="8"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": cls.generate_user_agent(),
}
cookies = {'dcs': '1', 'dcm': '3'}
formatted_prompt = format_prompt(conversation.message_history) if len(conversation.message_history) > 1 else get_last_user_message(messages)
data = {"model": model, "messages": [{"role": "user", "content": formatted_prompt}], "canUseTools": False}
async with ClientSession(cookies=cookies) as session:
try:
session_timeout = ClientTimeout(total=timeout)
async with ClientSession(timeout=session_timeout, cookies=cookies) as session:
# Step 1: Ensure we have the fe_version
if not cls._chat_xfe:
cls._chat_xfe = await cls.fetch_fe_version(session)
# Step 1: Initial page load
async with session.get(f"{cls.url}/?q=DuckDuckGo+AI+Chat&ia=chat&duckai=1",
headers={**base_headers, "accept": "text/html"}, proxy=proxy) as r:
r.raise_for_status()
page = await r.text()
fe_version = cls.generate_fe_version(page)
# Step 2: Initialize or update conversation
if conversation is None:
# Get initial cookies if not provided
if not cookies:
await cls.get_default_cookies(session)
# Step 2: Get VQD
status_headers = {**base_headers, "accept": "*/*", "cache-control": "no-store", "x-vqd-accept": "1"}
async with session.get(cls.status_url, headers=status_headers, proxy=proxy) as r:
r.raise_for_status()
vqd = r.headers.get("x-vqd-4", "") or f"4-{random.randint(10**29, 10**30 - 1)}"
# Create a new conversation
conversation = Conversation(model)
conversation.fe_version = cls._chat_xfe
x_vqd_hash_1 = cls.generate_x_vqd_hash_1(vqd, fe_version)
# Step 3: Get VQD tokens
vqd, vqd_hash_1 = await cls.fetch_vqd_and_hash(session)
conversation.vqd = vqd
conversation.vqd_hash_1 = vqd_hash_1
conversation.message_history = [{"role": "user", "content": format_prompt(messages)}]
else:
# Update existing conversation with new message
last_message = get_last_user_message(messages.copy())
conversation.message_history.append({"role": "user", "content": last_message})
# Step 3: Actual chat request
chat_headers = {
**base_headers,
"accept": "text/event-stream",
"content-type": "application/json",
"x-fe-signals": cls.generate_fe_signals(),
"x-fe-version": fe_version,
"x-vqd-4": vqd,
"x-vqd-hash-1": x_vqd_hash_1,
}
# Step 4: Prepare headers with proper x-vqd-hash-1
headers = {
"accept": "text/event-stream",
"accept-language": "en",
"cache-control": "no-cache",
"content-type": "application/json",
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
"origin": "https://duckduckgo.com",
"referer": "https://duckduckgo.com/",
"pragma": "no-cache",
"priority": "u=1, i",
"sec-ch-ua": '"Not:A-Brand";v="24", "Chromium";v="134"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"x-fe-version": conversation.fe_version or cls._chat_xfe,
"x-vqd-4": conversation.vqd,
}
async with session.post(cls.api_endpoint, json=data, headers=chat_headers, proxy=proxy) as response:
if response.status != 200:
error_text = await response.text()
if "ERR_BN_LIMIT" in error_text:
yield "Blocked by DuckDuckGo: Bot limit exceeded (ERR_BN_LIMIT)."
return
if "ERR_INVALID_VQD" in error_text and retry_count < 3:
await asyncio.sleep(random.uniform(2.5, 5.5))
async for chunk in cls.create_async_generator(
model, messages, proxy, conversation, return_conversation, retry_count + 1, **kwargs
):
yield chunk
return
yield f"Error: HTTP {response.status} - {error_text}"
return
# For the first request, send an empty x-vqd-hash-1 header
# This matches the behavior in the duckduckgo_search module
headers["x-vqd-hash-1"] = ""
# Step 5: Prepare the request data
# Convert the user-friendly model name to the API model name
api_model = cls._chat_models.get(model, model)
data = {
"model": api_model,
"messages": conversation.message_history,
}
# Step 6: Send the request
await cls.sleep(multiplier=1.0 + retry_count * 0.5)
async with session.post(cls.api_endpoint, json=data, headers=headers, proxy=proxy) as response:
# Handle 429 and 418 errors specifically
if response.status == 429:
response_text = await response.text()
if retry_count < cls.max_retries:
retry_count += 1
wait_time = cls.base_delay * (2 ** retry_count) * (1 + random.random())
await asyncio.sleep(wait_time)
# Get fresh tokens and cookies
cookies = await cls.get_default_cookies(session)
continue
else:
raise RateLimitError(f"Rate limited after {cls.max_retries} retries")
elif response.status == 418:
# Check if it's a challenge error
full_message = ""
async for line in response.content:
line_text = line.decode("utf-8").strip()
if line_text.startswith("data:"):
payload = line_text[5:].strip()
if payload == "[DONE]":
if full_message:
conversation.message_history.append({"role": "assistant", "content": full_message})
if return_conversation:
yield conversation
yield FinishReason("stop")
break
try:
response_text = await response.text()
try:
response_json = json.loads(response_text)
# Extract challenge data if available
challenge_data = None
if response_json.get("type") == "ERR_CHALLENGE" and "cd" in response_json:
challenge_data = response_json["cd"]
if retry_count < cls.max_retries:
retry_count += 1
wait_time = cls.base_delay * (2 ** retry_count) * (1 + random.random())
await asyncio.sleep(wait_time)
# Reset tokens and try again with fresh session
conversation = None
cls._chat_xfe = ""
# Get fresh cookies
cookies = await cls.get_default_cookies(session)
# If we have challenge data, try to use it
if challenge_data and isinstance(challenge_data, dict):
# Extract any useful information from challenge data
# This could be used to build a better response in the future
pass
continue
else:
raise DuckDuckGoChallengeError(f"Challenge error after {cls.max_retries} retries")
except json.JSONDecodeError:
# If we can't parse the JSON, assume it's a challenge error anyway
if retry_count < cls.max_retries:
retry_count += 1
wait_time = cls.base_delay * (2 ** retry_count) * (1 + random.random())
await asyncio.sleep(wait_time)
# Reset tokens and try again with fresh session
conversation = None
cls._chat_xfe = ""
cookies = await cls.get_default_cookies(session)
continue
else:
raise DuckDuckGoChallengeError(f"Challenge error after {cls.max_retries} retries")
except Exception as e:
# If any other error occurs during handling, still try to recover
if retry_count < cls.max_retries:
retry_count += 1
wait_time = cls.base_delay * (2 ** retry_count) * (1 + random.random())
await asyncio.sleep(wait_time)
# Reset tokens and try again with fresh session
conversation = None
cls._chat_xfe = ""
cookies = await cls.get_default_cookies(session)
continue
else:
raise DuckDuckGoChallengeError(f"Challenge error after {cls.max_retries} retries: {str(e)}")
# For other status codes, use the standard error handler
await raise_for_status(response)
reason = None
full_message = ""
# Step 7: Process the streaming response
async for line in response.content:
line = line.decode("utf-8").strip()
if line.startswith("data:"):
try:
message = json.loads(line[5:].strip())
except json.JSONDecodeError:
continue
if "action" in message and message["action"] == "error":
error_type = message.get("type", "")
if message.get("status") == 429:
if error_type == "ERR_CONVERSATION_LIMIT":
raise ConversationLimitError(error_type)
raise RateLimitError(error_type)
elif message.get("status") == 418 and error_type == "ERR_CHALLENGE":
# Handle challenge error by refreshing tokens and retrying
if retry_count < cls.max_retries:
# Don't raise here, let the outer exception handler retry
raise DuckDuckGoChallengeError(f"Challenge detected: {error_type}")
raise DuckDuckGoSearchException(error_type)
if "message" in message:
if message["message"]:
yield message["message"]
full_message += message["message"]
reason = "length"
else:
reason = "stop"
# Step 8: Update conversation with response information
# Always update the VQD tokens from the response headers
conversation.vqd = response.headers.get("x-vqd-4", conversation.vqd)
conversation.vqd_hash_1 = response.headers.get("x-vqd-hash-1", conversation.vqd_hash_1)
# Update cookies
conversation.cookies = {
n: c.value
for n, c in session.cookie_jar.filter_cookies(URL(cls.url)).items()
}
# If requested, return the updated conversation
if return_conversation:
conversation.message_history.append({"role": "assistant", "content": full_message})
yield conversation
if reason is not None:
yield FinishReason(reason)
# If we got here, the request was successful
break
except (RateLimitError, ResponseStatusError, DuckDuckGoChallengeError) as e:
if ("429" in str(e) or isinstance(e, DuckDuckGoChallengeError)) and retry_count < cls.max_retries:
retry_count += 1
wait_time = cls.base_delay * (2 ** retry_count) * (1 + random.random())
|
||||
await asyncio.sleep(wait_time)
|
||||
|
||||
# For challenge errors, refresh tokens and cookies
|
||||
if isinstance(e, DuckDuckGoChallengeError):
|
||||
# Reset conversation to force new token acquisition
|
||||
conversation = None
|
||||
# Clear class cache to force refresh
|
||||
cls._chat_xfe = ""
|
||||
else:
|
||||
raise
|
||||
except asyncio.TimeoutError as e:
|
||||
raise TimeoutError(f"Request timed out: {str(e)}")
|
||||
msg = json.loads(payload)
|
||||
if msg.get("action") == "error":
|
||||
yield f"Error: {msg.get('type', 'unknown')}"
|
||||
break
|
||||
if "message" in msg:
|
||||
content = msg["message"]
|
||||
yield content
|
||||
full_message += content
|
||||
except json.JSONDecodeError:
|
||||
continue
|
||||
except Exception as e:
|
||||
raise
|
||||
if retry_count < 3:
|
||||
await asyncio.sleep(random.uniform(2.5, 5.5))
|
||||
async for chunk in cls.create_async_generator(
|
||||
model, messages, proxy, conversation, return_conversation, retry_count + 1, **kwargs
|
||||
):
|
||||
yield chunk
|
||||
else:
|
||||
yield f"Error: {str(e)}"
|
||||
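Note: every retry branch above uses the same exponential backoff with jitter. A minimal standalone sketch of that pattern, with illustrative constants standing in for the provider's own base_delay and max_retries class attributes (not taken from this diff):

import asyncio
import random

BASE_DELAY = 2.0   # assumed stand-in for the provider's base_delay
MAX_RETRIES = 3    # assumed stand-in for the provider's max_retries

async def backoff_sleep(retry_count: int) -> None:
    # 2^retry_count growth, scaled by a random factor in [1, 2) so that
    # concurrent clients do not retry in lockstep.
    await asyncio.sleep(BASE_DELAY * (2 ** retry_count) * (1 + random.random()))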
|
|
|
|||
|
|
@ -8,12 +8,24 @@ class DeepInfraChat(OpenaiTemplate):
|
|||
working = True
|
||||
|
||||
default_model = 'deepseek-ai/DeepSeek-V3'
|
||||
default_vision_model = 'openbmb/MiniCPM-Llama3-V-2_5'
|
||||
default_vision_model = 'microsoft/Phi-4-multimodal-instruct'
|
||||
vision_models = [default_vision_model, 'meta-llama/Llama-3.2-90B-Vision-Instruct']
|
||||
models = [
|
||||
'deepseek-ai/DeepSeek-Prover-V2-671B',
|
||||
'Qwen/Qwen3-235B-A22B',
|
||||
'Qwen/Qwen3-30B-A3B',
|
||||
'Qwen/Qwen3-32B',
|
||||
'Qwen/Qwen3-14B',
|
||||
'meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8',
|
||||
'meta-llama/Llama-4-Scout-17B-16E-Instruct',
|
||||
'microsoft/phi-4-reasoning-plus',
|
||||
'microsoft/meta-llama/Llama-Guard-4-12B',
|
||||
'Qwen/QwQ-32B',
|
||||
'deepseek-ai/DeepSeek-V3-0324',
|
||||
'google/gemma-3-27b-it',
|
||||
'google/gemma-3-12b-it',
|
||||
'meta-llama/Meta-Llama-3.1-8B-Instruct',
|
||||
'meta-llama/Llama-3.3-70B-Instruct-Turbo',
|
||||
'meta-llama/Llama-3.3-70B-Instruct',
|
||||
default_model,
|
||||
'mistralai/Mistral-Small-24B-Instruct-2501',
|
||||
'deepseek-ai/DeepSeek-R1',
|
||||
|
|
@ -23,37 +35,48 @@ class DeepInfraChat(OpenaiTemplate):
|
|||
'microsoft/phi-4',
|
||||
'microsoft/WizardLM-2-8x22B',
|
||||
'Qwen/Qwen2.5-72B-Instruct',
|
||||
'01-ai/Yi-34B-Chat',
|
||||
'Qwen/Qwen2-72B-Instruct',
|
||||
'cognitivecomputations/dolphin-2.6-mixtral-8x7b',
|
||||
'cognitivecomputations/dolphin-2.9.1-llama-3-70b',
|
||||
'databricks/dbrx-instruct',
|
||||
'deepinfra/airoboros-70b',
|
||||
'lizpreciatior/lzlv_70b_fp16_hf',
|
||||
'microsoft/WizardLM-2-7B',
|
||||
'mistralai/Mixtral-8x22B-Instruct-v0.1',
|
||||
] + vision_models
|
||||
model_aliases = {
|
||||
"deepseek-prover-v2-671b": "deepseek-ai/DeepSeek-Prover-V2-671B",
|
||||
"qwen-3-235b": "Qwen/Qwen3-235B-A22B",
|
||||
"qwen-3-30b": "Qwen/Qwen3-30B-A3B",
|
||||
"qwen-3-32b": "Qwen/Qwen3-32B",
|
||||
"qwen-3-14b": "Qwen/Qwen3-14B",
|
||||
"llama-4-maverick": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
|
||||
"llama-4-maverick-17b": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
|
||||
"llama-4-scout": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
|
||||
"llama-4-scout-17b": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
|
||||
"phi-4-reasoning-plus": "microsoft/phi-4-reasoning-plus",
|
||||
#"": "meta-llama/Llama-Guard-4-12B",
|
||||
"qwq-32b": "Qwen/QwQ-32B",
|
||||
"deepseek-v3": "deepseek-ai/DeepSeek-V3-0324",
|
||||
"deepseek-v3-0324": "deepseek-ai/DeepSeek-V3-0324",
|
||||
"gemma-3-27b": "google/gemma-3-27b-it",
|
||||
"gemma-3-12b": "google/gemma-3-12b-it",
|
||||
"phi-4-multimodal": "microsoft/Phi-4-multimodal-instruct",
|
||||
"llama-3.1-8b": "meta-llama/Meta-Llama-3.1-8B-Instruct",
|
||||
"llama-3.2-90b": "meta-llama/Llama-3.2-90B-Vision-Instruct",
|
||||
"llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
|
||||
"llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct",
|
||||
"deepseek-v3": default_model,
|
||||
"mixtral-small-24b": "mistralai/Mistral-Small-24B-Instruct-2501",
|
||||
"deepseek-r1": "deepseek-ai/DeepSeek-R1-Turbo",
|
||||
"deepseek-r1-turbo": "deepseek-ai/DeepSeek-R1-Turbo",
|
||||
"deepseek-r1": "deepseek-ai/DeepSeek-R1",
|
||||
"deepseek-r1-distill-llama": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
|
||||
"deepseek-r1-distill-qwen": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
|
||||
"deepseek-r1-distill-llama-70b": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
|
||||
"deepseek-r1-distill-qwen-32b": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
|
||||
"phi-4": "microsoft/phi-4",
|
||||
"wizardlm-2-8x22b": "microsoft/WizardLM-2-8x22B",
|
||||
"yi-34b": "01-ai/Yi-34B-Chat",
|
||||
"qwen-2-72b": "Qwen/Qwen2-72B-Instruct",
|
||||
"dolphin-2.6": "cognitivecomputations/dolphin-2.6-mixtral-8x7b",
|
||||
"dolphin-2.9": "cognitivecomputations/dolphin-2.9.1-llama-3-70b",
|
||||
"dbrx-instruct": "databricks/dbrx-instruct",
|
||||
"airoboros-70b": "deepinfra/airoboros-70b",
|
||||
"lzlv-70b": "lizpreciatior/lzlv_70b_fp16_hf",
|
||||
"wizardlm-2-7b": "microsoft/WizardLM-2-7B",
|
||||
"mixtral-8x22b": "mistralai/Mixtral-8x22B-Instruct-v0.1",
|
||||
"minicpm-2.5": "openbmb/MiniCPM-Llama3-V-2_5",
|
||||
"mixtral-8x22b": "mistralai/Mixtral-8x22B-Instruct-v0.1"
|
||||
}
|
||||
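Usage sketch only, assuming the project's OpenAI-style client interface (nothing below is added by this diff); one of the new aliases then resolves through the table above:

from g4f.client import Client
from g4f.Provider import DeepInfraChat

client = Client(provider=DeepInfraChat)
# "qwen-3-235b" is mapped to "Qwen/Qwen3-235B-A22B" by the alias table above.
response = client.chat.completions.create(
    model="qwen-3-235b",
    messages=[{"role": "user", "content": "Say hello"}],
)
print(response.choices[0].message.content)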
|
|
|
|||
|
|
@ -12,7 +12,7 @@ from .helper import format_prompt
|
|||
class Dynaspark(AsyncGeneratorProvider, ProviderModelMixin):
|
||||
url = "https://dynaspark.onrender.com"
|
||||
login_url = None
|
||||
api_endpoint = "https://dynaspark.onrender.com/generate_response"
|
||||
api_endpoint = "https://dynaspark.onrender.com/dsai_fuck_u_spammer"
|
||||
|
||||
working = True
|
||||
needs_auth = False
|
||||
|
|
|
|||
|
|
@ -1,47 +0,0 @@
|
|||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from aiohttp import ClientSession
|
||||
|
||||
from ..typing import AsyncResult, Messages
|
||||
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
|
||||
from ..requests.raise_for_status import raise_for_status
|
||||
from .helper import format_prompt, get_system_prompt
|
||||
|
||||
class Goabror(AsyncGeneratorProvider, ProviderModelMixin):
|
||||
url = "https://goabror.uz"
|
||||
api_endpoint = "https://goabror.uz/api/gpt.php"
|
||||
working = True
|
||||
|
||||
default_model = 'gpt-4'
|
||||
models = [default_model]
|
||||
|
||||
@classmethod
|
||||
async def create_async_generator(
|
||||
cls,
|
||||
model: str,
|
||||
messages: Messages,
|
||||
proxy: str = None,
|
||||
**kwargs
|
||||
) -> AsyncResult:
|
||||
headers = {
|
||||
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
|
||||
'accept-language': 'en-US,en;q=0.9',
|
||||
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36'
|
||||
}
|
||||
async with ClientSession(headers=headers) as session:
|
||||
params = {
|
||||
"user": format_prompt(messages, include_system=False),
|
||||
"system": get_system_prompt(messages),
|
||||
}
|
||||
async with session.get(f"{cls.api_endpoint}", params=params, proxy=proxy) as response:
|
||||
await raise_for_status(response)
|
||||
text_response = await response.text()
|
||||
try:
|
||||
json_response = json.loads(text_response)
|
||||
if "data" in json_response:
|
||||
yield json_response["data"]
|
||||
else:
|
||||
yield text_response
|
||||
except json.JSONDecodeError:
|
||||
yield text_response
|
||||
|
|
@ -1,77 +0,0 @@
|
|||
from __future__ import annotations
|
||||
|
||||
from ..typing import AsyncResult, Messages
|
||||
from .template import OpenaiTemplate
|
||||
|
||||
class Jmuz(OpenaiTemplate):
|
||||
url = "https://discord.gg/Ew6JzjA2NR"
|
||||
api_base = "https://jmuz.me/gpt/api/v2"
|
||||
api_key = "prod"
|
||||
working = True
|
||||
supports_system_message = False
|
||||
|
||||
default_model = "gpt-4o"
|
||||
model_aliases = {
|
||||
"qwq-32b": "qwq-32b-preview",
|
||||
"gemini-1.5-flash": "gemini-flash",
|
||||
"gemini-1.5-pro": "gemini-pro",
|
||||
"gemini-2.0-flash-thinking": "gemini-thinking",
|
||||
"deepseek-chat": "deepseek-v3",
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def get_models(cls, **kwargs):
|
||||
if not cls.models:
|
||||
cls.models = super().get_models(api_key=cls.api_key, api_base=cls.api_base)
|
||||
return cls.models
|
||||
|
||||
@classmethod
|
||||
async def create_async_generator(
|
||||
cls,
|
||||
model: str,
|
||||
messages: Messages,
|
||||
stream: bool = True,
|
||||
api_key: str = None, # Remove api_key from kwargs
|
||||
**kwargs
|
||||
) -> AsyncResult:
|
||||
model = cls.get_model(model)
|
||||
headers = {
|
||||
"Authorization": f"Bearer {cls.api_key}",
|
||||
"Content-Type": "application/json",
|
||||
"accept": "*/*",
|
||||
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
|
||||
}
|
||||
|
||||
started = False
|
||||
buffer = ""
|
||||
async for chunk in super().create_async_generator(
|
||||
model=model,
|
||||
messages=messages,
|
||||
api_base=cls.api_base,
|
||||
api_key=cls.api_key,
|
||||
stream=cls.supports_stream,
|
||||
headers=headers,
|
||||
**kwargs
|
||||
):
|
||||
if isinstance(chunk, str):
|
||||
buffer += chunk
|
||||
if "Join for free".startswith(buffer) or buffer.startswith("Join for free"):
|
||||
if buffer.endswith("\n"):
|
||||
buffer = ""
|
||||
continue
|
||||
if "https://discord.gg/".startswith(buffer) or "https://discord.gg/" in buffer:
|
||||
if "..." in buffer:
|
||||
buffer = ""
|
||||
continue
|
||||
if "o1-preview".startswith(buffer) or buffer.startswith("o1-preview"):
|
||||
if "\n" in buffer:
|
||||
buffer = ""
|
||||
continue
|
||||
if not started:
|
||||
buffer = buffer.lstrip()
|
||||
if buffer:
|
||||
started = True
|
||||
yield buffer
|
||||
buffer = ""
|
||||
else:
|
||||
yield chunk
|
||||
|
|
@ -1,28 +1,190 @@
|
|||
from __future__ import annotations
|
||||
|
||||
from .hf.HuggingChat import HuggingChat
|
||||
import json
|
||||
import re
|
||||
import uuid
|
||||
from aiohttp import ClientSession, FormData
|
||||
|
||||
class LambdaChat(HuggingChat):
|
||||
from ..typing import AsyncResult, Messages
|
||||
from ..requests import raise_for_status
|
||||
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
|
||||
from .helper import format_prompt, get_last_user_message
|
||||
from ..providers.response import JsonConversation, TitleGeneration, Reasoning, FinishReason
|
||||
|
||||
class LambdaChat(AsyncGeneratorProvider, ProviderModelMixin):
|
||||
label = "Lambda Chat"
|
||||
domain = "lambda.chat"
|
||||
url = f"https://{domain}"
|
||||
url = "https://lambda.chat"
|
||||
conversation_url = f"{url}/conversation"
|
||||
|
||||
working = True
|
||||
use_nodriver = False
|
||||
needs_auth = False
|
||||
|
||||
default_model = "deepseek-llama3.3-70b"
|
||||
reasoning_model = "deepseek-r1"
|
||||
image_models = []
|
||||
fallback_models = [
|
||||
models = [
|
||||
default_model,
|
||||
reasoning_model,
|
||||
"hermes-3-llama-3.1-405b-fp8",
|
||||
"hermes3-405b-fp8-128k",
|
||||
"llama3.1-nemotron-70b-instruct",
|
||||
"lfm-40b",
|
||||
"llama3.3-70b-instruct-fp8"
|
||||
"llama3.3-70b-instruct-fp8",
|
||||
"qwen25-coder-32b-instruct"
|
||||
]
|
||||
model_aliases = {
|
||||
"deepseek-v3": default_model,
|
||||
"hermes-3": "hermes-3-llama-3.1-405b-fp8",
|
||||
"hermes-3-405b": "hermes3-405b-fp8-128k",
|
||||
"nemotron-70b": "llama3.1-nemotron-70b-instruct",
|
||||
"llama-3.3-70b": "llama3.3-70b-instruct-fp8"
|
||||
"qwen-2.5-coder-32b": "qwen25-coder-32b-instruct"
|
||||
}
|
||||
|
||||
@classmethod
|
||||
async def create_async_generator(
|
||||
cls, model: str, messages: Messages,
|
||||
api_key: str = None,
|
||||
proxy: str = None,
|
||||
cookies: dict = None,
|
||||
**kwargs
|
||||
) -> AsyncResult:
|
||||
model = cls.get_model(model)
|
||||
headers = {
|
||||
"Origin": cls.url,
|
||||
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36",
|
||||
"Accept": "*/*",
|
||||
"Accept-Language": "en-US,en;q=0.9",
|
||||
"Referer": cls.url,
|
||||
"Sec-Fetch-Dest": "empty",
|
||||
"Sec-Fetch-Mode": "cors",
|
||||
"Sec-Fetch-Site": "same-origin",
|
||||
"Priority": "u=1, i",
|
||||
"Pragma": "no-cache",
|
||||
"Cache-Control": "no-cache"
|
||||
}
|
||||
|
||||
# Initialize cookies if not provided
|
||||
if cookies is None:
|
||||
cookies = {
|
||||
"hf-chat": str(uuid.uuid4()) # Generate a session ID
|
||||
}
|
||||
|
||||
async with ClientSession(headers=headers, cookies=cookies) as session:
|
||||
# Step 1: Create a new conversation
|
||||
data = {"model": model}
|
||||
async with session.post(cls.conversation_url, json=data, proxy=proxy) as response:
|
||||
await raise_for_status(response)
|
||||
conversation_response = await response.json()
|
||||
conversation_id = conversation_response["conversationId"]
|
||||
|
||||
# Update cookies with any new ones from the response
|
||||
for cookie_name, cookie in response.cookies.items():
|
||||
cookies[cookie_name] = cookie.value
|
||||
|
||||
# Step 2: Get data for this conversation to extract message ID
|
||||
async with session.get(
|
||||
f"{cls.conversation_url}/{conversation_id}/__data.json?x-sveltekit-invalidated=11",
|
||||
proxy=proxy
|
||||
) as response:
|
||||
await raise_for_status(response)
|
||||
response_text = await response.text()
|
||||
|
||||
# Update cookies again
|
||||
for cookie_name, cookie in response.cookies.items():
|
||||
cookies[cookie_name] = cookie.value
|
||||
|
||||
# Parse the JSON response to find the message ID
|
||||
message_id = None
|
||||
try:
|
||||
# Try to parse each line as JSON
|
||||
for line in response_text.splitlines():
|
||||
if not line.strip():
|
||||
continue
|
||||
|
||||
try:
|
||||
data_json = json.loads(line)
|
||||
if "type" in data_json and data_json["type"] == "data" and "nodes" in data_json:
|
||||
for node in data_json["nodes"]:
|
||||
if "type" in node and node["type"] == "data" and "data" in node:
|
||||
# Look for system message ID
|
||||
for item in node["data"]:
|
||||
if isinstance(item, dict) and "id" in item and "from" in item and item.get("from") == "system":
|
||||
message_id = item["id"]
|
||||
break
|
||||
|
||||
# If we found the ID, break out of the loop
|
||||
if message_id:
|
||||
break
|
||||
except json.JSONDecodeError:
|
||||
continue
|
||||
|
||||
# If we still don't have a message ID, try to find any UUID in the response
|
||||
if not message_id:
|
||||
uuid_pattern = r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
|
||||
uuids = re.findall(uuid_pattern, response_text)
|
||||
if uuids:
|
||||
message_id = uuids[0]
|
||||
|
||||
if not message_id:
|
||||
raise ValueError("Could not find message ID in response")
|
||||
|
||||
except (IndexError, KeyError, ValueError) as e:
|
||||
raise RuntimeError(f"Failed to parse conversation data: {str(e)}")
|
||||
|
||||
# Step 3: Send the user message
|
||||
user_message = get_last_user_message(messages)
|
||||
|
||||
# Prepare form data exactly as in the curl example
|
||||
form_data = FormData()
|
||||
form_data.add_field(
|
||||
"data",
|
||||
json.dumps({
|
||||
"inputs": user_message,
|
||||
"id": message_id,
|
||||
"is_retry": False,
|
||||
"is_continue": False,
|
||||
"web_search": False,
|
||||
"tools": []
|
||||
}),
|
||||
content_type="application/json"
|
||||
)
|
||||
|
||||
async with session.post(
|
||||
f"{cls.conversation_url}/{conversation_id}",
|
||||
data=form_data,
|
||||
proxy=proxy
|
||||
) as response:
|
||||
await raise_for_status(response)
|
||||
|
||||
async for chunk in response.content:
|
||||
if not chunk:
|
||||
continue
|
||||
|
||||
chunk_str = chunk.decode('utf-8', errors='ignore')
|
||||
|
||||
try:
|
||||
data = json.loads(chunk_str)
|
||||
except json.JSONDecodeError:
|
||||
continue
|
||||
|
||||
# Handling different types of responses
|
||||
if data.get("type") == "stream" and "token" in data:
|
||||
# Remove null characters from the token
|
||||
token = data["token"].replace("\u0000", "")
|
||||
if token:
|
||||
yield token
|
||||
elif data.get("type") == "title":
|
||||
yield TitleGeneration(data.get("title", ""))
|
||||
elif data.get("type") == "reasoning":
|
||||
subtype = data.get("subtype")
|
||||
token = data.get("token", "").replace("\u0000", "")
|
||||
status = data.get("status", "")
|
||||
|
||||
if subtype == "stream" and token:
|
||||
yield Reasoning(token=token)
|
||||
elif subtype == "status" and status:
|
||||
yield Reasoning(status=status)
|
||||
elif data.get("type") == "finalAnswer":
|
||||
yield FinishReason("stop")
|
||||
break
|
||||
elif data.get("type") == "status" and data.get("status") == "keepAlive":
|
||||
# Just a keepalive, ignore
|
||||
continue
|
||||
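A minimal standalone sketch of the event dispatch used while streaming above (event names are taken from the code; the sample line is fabricated):

import json
from typing import Optional

def handle_event(line: str) -> Optional[str]:
    # Each streamed chunk is a JSON object whose "type" field selects the handler.
    data = json.loads(line)
    if data.get("type") == "stream":
        return data.get("token", "").replace("\u0000", "")
    if data.get("type") == "finalAnswer":
        return None  # end of the assistant answer
    return ""  # title, reasoning and keepAlive events carry no answer text

print(handle_event('{"type": "stream", "token": "Hello"}'))  # -> Hello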
|
|
|
|||
|
|
@ -8,189 +8,271 @@ from ..typing import AsyncResult, Messages
|
|||
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
|
||||
from .helper import get_connector
|
||||
from ..requests import raise_for_status
|
||||
from ..errors import RateLimitError
|
||||
|
||||
models = {
|
||||
"claude-3-5-sonnet-20241022": {
|
||||
"id": "claude-3-5-sonnet-20241022",
|
||||
"name": "Claude-3.5-Sonnet-V2",
|
||||
"model": "Claude",
|
||||
"name": "claude-3-5-sonnet-20241022",
|
||||
"model": "claude-3-5-sonnet-20241022",
|
||||
"provider": "Anthropic",
|
||||
"maxLength": 800000,
|
||||
"tokenLimit": 200000,
|
||||
"context": "200K",
|
||||
"maxLength": 0,
|
||||
"tokenLimit": 0,
|
||||
"context": 0,
|
||||
"success_rate": 100,
|
||||
"tps": 25.366666666666667,
|
||||
},
|
||||
"claude-3-5-sonnet-20241022-t": {
|
||||
"id": "claude-3-5-sonnet-20241022-t",
|
||||
"name": "Claude-3.5-Sonnet-V2-T",
|
||||
"model": "Claude",
|
||||
"name": "claude-3-5-sonnet-20241022-t",
|
||||
"model": "claude-3-5-sonnet-20241022-t",
|
||||
"provider": "Anthropic",
|
||||
"maxLength": 800000,
|
||||
"tokenLimit": 200000,
|
||||
"context": "200K",
|
||||
"maxLength": 0,
|
||||
"tokenLimit": 0,
|
||||
"context": 0,
|
||||
"success_rate": 100,
|
||||
"tps": 39.820754716981135,
|
||||
},
|
||||
"claude-3-7-sonnet-20250219": {
|
||||
"id": "claude-3-7-sonnet-20250219",
|
||||
"name": "Claude-3.7-Sonnet",
|
||||
"model": "Claude",
|
||||
"name": "claude-3-7-sonnet-20250219",
|
||||
"model": "claude-3-7-sonnet-20250219",
|
||||
"provider": "Anthropic",
|
||||
"maxLength": 800000,
|
||||
"tokenLimit": 200000,
|
||||
"context": "200K",
|
||||
"maxLength": 0,
|
||||
"tokenLimit": 0,
|
||||
"context": 0,
|
||||
"success_rate": 100,
|
||||
"tps": 47.02970297029703,
|
||||
},
|
||||
"claude-3-7-sonnet-20250219-t": {
|
||||
"id": "claude-3-7-sonnet-20250219-t",
|
||||
"name": "Claude-3.7-Sonnet-T",
|
||||
"model": "Claude",
|
||||
"name": "claude-3-7-sonnet-20250219-t",
|
||||
"model": "claude-3-7-sonnet-20250219-t",
|
||||
"provider": "Anthropic",
|
||||
"maxLength": 800000,
|
||||
"tokenLimit": 200000,
|
||||
"context": "200K",
|
||||
},
|
||||
"claude-3-7-sonnet-20250219-thinking": {
|
||||
"id": "claude-3-7-sonnet-20250219-thinking",
|
||||
"name": "Claude-3.7-Sonnet-Thinking",
|
||||
"model": "Claude",
|
||||
"provider": "Anthropic",
|
||||
"maxLength": 800000,
|
||||
"tokenLimit": 200000,
|
||||
"context": "200K",
|
||||
},
|
||||
"claude-3-opus-20240229": {
|
||||
"id": "claude-3-opus-20240229",
|
||||
"name": "Claude-3-Opus",
|
||||
"model": "Claude",
|
||||
"provider": "Anthropic",
|
||||
"maxLength": 800000,
|
||||
"tokenLimit": 200000,
|
||||
"context": "200K",
|
||||
},
|
||||
"claude-3-sonnet-20240229": {
|
||||
"id": "claude-3-sonnet-20240229",
|
||||
"name": "Claude-3-Sonnet",
|
||||
"model": "Claude",
|
||||
"provider": "Anthropic",
|
||||
"maxLength": 800000,
|
||||
"tokenLimit": 200000,
|
||||
"context": "200K",
|
||||
},
|
||||
"deepseek-r1": {
|
||||
"id": "deepseek-r1",
|
||||
"name": "DeepSeek-R1",
|
||||
"model": "DeepSeek-R1",
|
||||
"provider": "DeepSeek",
|
||||
"maxLength": 400000,
|
||||
"tokenLimit": 100000,
|
||||
"context": "128K",
|
||||
},
|
||||
"deepseek-r1-distill-llama-70b": {
|
||||
"id": "deepseek-r1-distill-llama-70b",
|
||||
"name": "DeepSeek-R1-70B",
|
||||
"model": "DeepSeek-R1-70B",
|
||||
"provider": "DeepSeek",
|
||||
"maxLength": 400000,
|
||||
"tokenLimit": 100000,
|
||||
"context": "128K",
|
||||
"maxLength": 0,
|
||||
"tokenLimit": 0,
|
||||
"context": 0,
|
||||
"success_rate": 100,
|
||||
"tps": 39.04289693593315,
|
||||
},
|
||||
"deepseek-v3": {
|
||||
"id": "deepseek-v3",
|
||||
"name": "DeepSeek-V3",
|
||||
"model": "DeepSeek-V3",
|
||||
"name": "deepseek-v3",
|
||||
"model": "deepseek-v3",
|
||||
"provider": "DeepSeek",
|
||||
"maxLength": 400000,
|
||||
"tokenLimit": 100000,
|
||||
"context": "128K",
|
||||
"maxLength": 0,
|
||||
"tokenLimit": 0,
|
||||
"context": 0,
|
||||
"success_rate": 100,
|
||||
"tps": 40.484657419083646,
|
||||
},
|
||||
"gemini-1.0-pro-latest-123": {
|
||||
"id": "gemini-1.0-pro-latest-123",
|
||||
"name": "gemini-1.0-pro-latest-123",
|
||||
"model": "gemini-1.0-pro-latest-123",
|
||||
"provider": "Google",
|
||||
"maxLength": 0,
|
||||
"tokenLimit": 0,
|
||||
"context": 0,
|
||||
"success_rate": 100,
|
||||
"tps": 10,
|
||||
},
|
||||
"gemini-2.0-flash": {
|
||||
"id": "gemini-2.0-flash",
|
||||
"name": "Gemini-2.0-Flash",
|
||||
"model": "Gemini",
|
||||
"name": "gemini-2.0-flash",
|
||||
"model": "gemini-2.0-flash",
|
||||
"provider": "Google",
|
||||
"maxLength": 4000000,
|
||||
"tokenLimit": 1000000,
|
||||
"context": "1024K",
|
||||
"maxLength": 0,
|
||||
"tokenLimit": 0,
|
||||
"context": 0,
|
||||
"success_rate": 100,
|
||||
"tps": 216.44162436548223,
|
||||
},
|
||||
"gemini-2.0-flash-exp": {
|
||||
"id": "gemini-2.0-flash-exp",
|
||||
"name": "gemini-2.0-flash-exp",
|
||||
"model": "gemini-2.0-flash-exp",
|
||||
"provider": "Google",
|
||||
"maxLength": 0,
|
||||
"tokenLimit": 0,
|
||||
"context": 0,
|
||||
"success_rate": 0,
|
||||
"tps": 0,
|
||||
},
|
||||
"gemini-2.0-flash-thinking-exp": {
|
||||
"id": "gemini-2.0-flash-thinking-exp",
|
||||
"name": "Gemini-2.0-Flash-Thinking-Exp",
|
||||
"model": "Gemini",
|
||||
"name": "gemini-2.0-flash-thinking-exp",
|
||||
"model": "gemini-2.0-flash-thinking-exp",
|
||||
"provider": "Google",
|
||||
"maxLength": 4000000,
|
||||
"tokenLimit": 1000000,
|
||||
"context": "1024K",
|
||||
"maxLength": 0,
|
||||
"tokenLimit": 0,
|
||||
"context": 0,
|
||||
"success_rate": 0,
|
||||
"tps": 0,
|
||||
},
|
||||
"gemini-2.0-pro-exp": {
|
||||
"id": "gemini-2.0-pro-exp",
|
||||
"name": "Gemini-2.0-Pro-Exp",
|
||||
"model": "Gemini",
|
||||
"gemini-2.5-flash-preview-04-17": {
|
||||
"id": "gemini-2.5-flash-preview-04-17",
|
||||
"name": "gemini-2.5-flash-preview-04-17",
|
||||
"model": "gemini-2.5-flash-preview-04-17",
|
||||
"provider": "Google",
|
||||
"maxLength": 4000000,
|
||||
"tokenLimit": 1000000,
|
||||
"context": "1024K",
|
||||
"maxLength": 0,
|
||||
"tokenLimit": 0,
|
||||
"context": 0,
|
||||
"success_rate": 100,
|
||||
"tps": 189.84010840108402,
|
||||
},
|
||||
"gpt-4o-2024-08-06": {
|
||||
"id": "gpt-4o-2024-08-06",
|
||||
"name": "GPT-4o",
|
||||
"model": "ChatGPT",
|
||||
"gemini-2.5-pro-official": {
|
||||
"id": "gemini-2.5-pro-official",
|
||||
"name": "gemini-2.5-pro-official",
|
||||
"model": "gemini-2.5-pro-official",
|
||||
"provider": "Google",
|
||||
"maxLength": 0,
|
||||
"tokenLimit": 0,
|
||||
"context": 0,
|
||||
"success_rate": 100,
|
||||
"tps": 91.00613496932516,
|
||||
},
|
||||
"gemini-2.5-pro-preview-03-25": {
|
||||
"id": "gemini-2.5-pro-preview-03-25",
|
||||
"name": "gemini-2.5-pro-preview-03-25",
|
||||
"model": "gemini-2.5-pro-preview-03-25",
|
||||
"provider": "Google",
|
||||
"maxLength": 0,
|
||||
"tokenLimit": 0,
|
||||
"context": 0,
|
||||
"success_rate": 99.05660377358491,
|
||||
"tps": 45.050511247443765,
|
||||
},
|
||||
"gemini-2.5-pro-preview-05-06": {
|
||||
"id": "gemini-2.5-pro-preview-05-06",
|
||||
"name": "gemini-2.5-pro-preview-05-06",
|
||||
"model": "gemini-2.5-pro-preview-05-06",
|
||||
"provider": "Google",
|
||||
"maxLength": 0,
|
||||
"tokenLimit": 0,
|
||||
"context": 0,
|
||||
"success_rate": 100,
|
||||
"tps": 99.29617834394904,
|
||||
},
|
||||
"gpt-4-turbo-2024-04-09": {
|
||||
"id": "gpt-4-turbo-2024-04-09",
|
||||
"name": "gpt-4-turbo-2024-04-09",
|
||||
"model": "gpt-4-turbo-2024-04-09",
|
||||
"provider": "OpenAI",
|
||||
"maxLength": 260000,
|
||||
"tokenLimit": 126000,
|
||||
"context": "128K",
|
||||
"maxLength": 0,
|
||||
"tokenLimit": 0,
|
||||
"context": 0,
|
||||
"success_rate": 100,
|
||||
"tps": 1,
|
||||
},
|
||||
"gpt-4.1": {
|
||||
"id": "gpt-4.1",
|
||||
"name": "gpt-4.1",
|
||||
"model": "gpt-4.1",
|
||||
"provider": "OpenAI",
|
||||
"maxLength": 0,
|
||||
"tokenLimit": 0,
|
||||
"context": 0,
|
||||
"success_rate": 42.857142857142854,
|
||||
"tps": 19.58032786885246,
|
||||
},
|
||||
"gpt-4.1-mini": {
|
||||
"id": "gpt-4.1-mini",
|
||||
"name": "gpt-4.1-mini",
|
||||
"model": "gpt-4.1-mini",
|
||||
"provider": "OpenAI",
|
||||
"maxLength": 0,
|
||||
"tokenLimit": 0,
|
||||
"context": 0,
|
||||
"success_rate": 68.75,
|
||||
"tps": 12.677576601671309,
|
||||
},
|
||||
"gpt-4.1-mini-2025-04-14": {
|
||||
"id": "gpt-4.1-mini-2025-04-14",
|
||||
"name": "gpt-4.1-mini-2025-04-14",
|
||||
"model": "gpt-4.1-mini-2025-04-14",
|
||||
"provider": "OpenAI",
|
||||
"maxLength": 0,
|
||||
"tokenLimit": 0,
|
||||
"context": 0,
|
||||
"success_rate": 94.23076923076923,
|
||||
"tps": 8.297687861271676,
|
||||
},
|
||||
"gpt-4o-2024-11-20": {
|
||||
"id": "gpt-4o-2024-11-20",
|
||||
"name": "gpt-4o-2024-11-20",
|
||||
"model": "gpt-4o-2024-11-20",
|
||||
"provider": "OpenAI",
|
||||
"maxLength": 0,
|
||||
"tokenLimit": 0,
|
||||
"context": 0,
|
||||
"success_rate": 100,
|
||||
"tps": 73.3955223880597,
|
||||
},
|
||||
"gpt-4o-mini-2024-07-18": {
|
||||
"id": "gpt-4o-mini-2024-07-18",
|
||||
"name": "GPT-4o-Mini",
|
||||
"model": "ChatGPT",
|
||||
"name": "gpt-4o-mini-2024-07-18",
|
||||
"model": "gpt-4o-mini-2024-07-18",
|
||||
"provider": "OpenAI",
|
||||
"maxLength": 260000,
|
||||
"tokenLimit": 126000,
|
||||
"context": "128K",
|
||||
},
|
||||
"gpt-4o-mini-free": {
|
||||
"id": "gpt-4o-mini-free",
|
||||
"name": "GPT-4o-Mini-Free",
|
||||
"model": "ChatGPT",
|
||||
"provider": "OpenAI",
|
||||
"maxLength": 31200,
|
||||
"tokenLimit": 7800,
|
||||
"context": "8K",
|
||||
"maxLength": 0,
|
||||
"tokenLimit": 0,
|
||||
"context": 0,
|
||||
"success_rate": 100,
|
||||
"tps": 26.874455100261553,
|
||||
},
|
||||
"grok-3": {
|
||||
"id": "grok-3",
|
||||
"name": "Grok-3",
|
||||
"model": "Grok",
|
||||
"provider": "x.ai",
|
||||
"maxLength": 800000,
|
||||
"tokenLimit": 200000,
|
||||
"context": "200K",
|
||||
"name": "grok-3",
|
||||
"model": "grok-3",
|
||||
"provider": "xAI",
|
||||
"maxLength": 0,
|
||||
"tokenLimit": 0,
|
||||
"context": 0,
|
||||
"success_rate": 100,
|
||||
"tps": 51.110652663165794,
|
||||
},
|
||||
"grok-3-r1": {
|
||||
"id": "grok-3-r1",
|
||||
"name": "Grok-3-Thinking",
|
||||
"model": "Grok",
|
||||
"provider": "x.ai",
|
||||
"maxLength": 800000,
|
||||
"tokenLimit": 200000,
|
||||
"context": "200K",
|
||||
"grok-3-reason": {
|
||||
"id": "grok-3-reason",
|
||||
"name": "grok-3-reason",
|
||||
"model": "grok-3-reason",
|
||||
"provider": "xAI",
|
||||
"maxLength": 0,
|
||||
"tokenLimit": 0,
|
||||
"context": 0,
|
||||
"success_rate": 100,
|
||||
"tps": 62.81976744186046,
|
||||
},
|
||||
"o3-mini": {
|
||||
"id": "o3-mini",
|
||||
"name": "o3-mini",
|
||||
"model": "o3",
|
||||
"provider": "OpenAI",
|
||||
"maxLength": 400000,
|
||||
"tokenLimit": 100000,
|
||||
"context": "128K",
|
||||
"o3-mini-2025-01-31": {
|
||||
"id": "o3-mini-2025-01-31",
|
||||
"name": "o3-mini-2025-01-31",
|
||||
"model": "o3-mini-2025-01-31",
|
||||
"provider": "Unknown",
|
||||
"maxLength": 0,
|
||||
"tokenLimit": 0,
|
||||
"context": 0,
|
||||
"success_rate": 100,
|
||||
"tps": 125.31410256410257,
|
||||
},
|
||||
"qwen3-235b-a22b": {
|
||||
"id": "qwen3-235b-a22b",
|
||||
"name": "qwen3-235b-a22b",
|
||||
"model": "qwen3-235b-a22b",
|
||||
"provider": "Alibaba",
|
||||
"maxLength": 0,
|
||||
"tokenLimit": 0,
|
||||
"context": 0,
|
||||
"success_rate": 100,
|
||||
"tps": 25.846153846153847,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
|
||||
url = "https://liaobots.site"
|
||||
url = "https://liaobots.work"
|
||||
working = True
|
||||
supports_message_history = True
|
||||
supports_system_message = True
|
||||
|
||||
default_model = "gpt-4o-2024-08-06"
|
||||
default_model = "grok-3"
|
||||
models = list(models.keys())
|
||||
model_aliases = {
|
||||
# Anthropic
|
||||
|
|
@ -198,25 +280,33 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
|
|||
"claude-3.5-sonnet": "claude-3-5-sonnet-20241022-t",
|
||||
"claude-3.7-sonnet": "claude-3-7-sonnet-20250219",
|
||||
"claude-3.7-sonnet": "claude-3-7-sonnet-20250219-t",
|
||||
"claude-3.7-sonnet-thinking": "claude-3-7-sonnet-20250219-thinking",
|
||||
"claude-3-opus": "claude-3-opus-20240229",
|
||||
"claude-3-sonnet": "claude-3-sonnet-20240229",
|
||||
|
||||
# DeepSeek
|
||||
"deepseek-r1": "deepseek-r1-distill-llama-70b",
|
||||
#"deepseek-v3": "deepseek-v3",
|
||||
|
||||
# Google
|
||||
"gemini-1.0-pro": "gemini-1.0-pro-latest-123",
|
||||
"gemini-2.0-flash": "gemini-2.0-flash-exp",
|
||||
"gemini-2.0-flash-thinking": "gemini-2.0-flash-thinking-exp",
|
||||
"gemini-2.0-pro": "gemini-2.0-pro-exp",
|
||||
"gemini-2.5-flash": "gemini-2.5-flash-preview-04-17",
|
||||
"gemini-2.5-pro": "gemini-2.5-pro-official",
|
||||
"gemini-2.5-pro": "gemini-2.5-pro-preview-03-25",
|
||||
"gemini-2.5-pro": "gemini-2.5-pro-preview-05-06",
|
||||
|
||||
# OpenAI
|
||||
"gpt-4": default_model,
|
||||
"gpt-4o": default_model,
|
||||
"gpt-4-turbo": "gpt-4-turbo-2024-04-09",
|
||||
"gpt-4.1-mini": "gpt-4.1-mini-2025-04-14",
|
||||
"gpt-4": "gpt-4o-2024-11-20",
|
||||
"gpt-4o": "gpt-4o-2024-11-20",
|
||||
"gpt-4o-mini": "gpt-4o-mini-2024-07-18",
|
||||
"gpt-4o-mini": "gpt-4o-mini-free",
|
||||
|
||||
# xAI
|
||||
"grok-3-reason": "grok-3-reason",
|
||||
"o3-mini": "o3-mini-2025-01-31",
|
||||
"qwen-3-235b": "qwen3-235b-a22b",
|
||||
}
|
||||
|
||||
_auth_code = ""
|
||||
_auth_code = None
|
||||
_cookie_jar = None
|
||||
|
||||
@classmethod
|
||||
|
|
@ -238,92 +328,213 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
|
|||
model = cls.get_model(model)
|
||||
|
||||
headers = {
|
||||
"referer": "https://liaobots.work/",
|
||||
"accept": "*/*",
|
||||
"accept-language": "en-US,en;q=0.9",
|
||||
"content-type": "application/json",
|
||||
"dnt": "1",
|
||||
"origin": "https://liaobots.work",
|
||||
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
|
||||
"priority": "u=1, i",
|
||||
"referer": "https://liaobots.work/en",
|
||||
"sec-ch-ua": "\"Chromium\";v=\"135\", \"Not-A.Brand\";v=\"8\"",
|
||||
"sec-ch-ua-mobile": "?0",
|
||||
"sec-ch-ua-platform": "\"Linux\"",
|
||||
"sec-fetch-dest": "empty",
|
||||
"sec-fetch-mode": "cors",
|
||||
"sec-fetch-site": "same-origin",
|
||||
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36"
|
||||
}
|
||||
|
||||
async with ClientSession(
|
||||
headers=headers,
|
||||
cookie_jar=cls._cookie_jar,
|
||||
connector=get_connector(connector, proxy, True)
|
||||
) as session:
|
||||
# First, get a valid auth code
|
||||
await cls.get_auth_code(session)
|
||||
|
||||
# Create conversation ID
|
||||
conversation_id = str(uuid.uuid4())
|
||||
|
||||
# Prepare request data
|
||||
data = {
|
||||
"conversationId": str(uuid.uuid4()),
|
||||
"model": models[model],
|
||||
"conversationId": conversation_id,
|
||||
"models": [{
|
||||
"modelId": model,
|
||||
"provider": models[model]["provider"]
|
||||
}],
|
||||
"search": "false",
|
||||
"messages": messages,
|
||||
"key": "",
|
||||
"prompt": kwargs.get("system_message", "You are a helpful assistant."),
|
||||
"prompt": kwargs.get("system_message", "你是 {{model}},一个由 {{provider}} 训练的大型语言模型,请仔细遵循用户的指示。")
|
||||
}
|
||||
if not cls._auth_code:
|
||||
async with session.post(
|
||||
"https://liaobots.work/recaptcha/api/login",
|
||||
data={"token": "abcdefghijklmnopqrst"},
|
||||
verify_ssl=False
|
||||
) as response:
|
||||
await raise_for_status(response)
|
||||
|
||||
# Try to make the chat request
|
||||
try:
|
||||
# Make the chat request with the current auth code
|
||||
async with session.post(
|
||||
"https://liaobots.work/api/user",
|
||||
json={"authcode": cls._auth_code},
|
||||
verify_ssl=False
|
||||
) as response:
|
||||
await raise_for_status(response)
|
||||
cls._auth_code = (await response.json(content_type=None))["authCode"]
|
||||
if not cls._auth_code:
|
||||
raise RuntimeError("Empty auth code")
|
||||
cls._cookie_jar = session.cookie_jar
|
||||
async with session.post(
|
||||
"https://liaobots.work/api/chat",
|
||||
f"{cls.url}/api/chat",
|
||||
json=data,
|
||||
headers={"x-auth-code": cls._auth_code},
|
||||
verify_ssl=False
|
||||
ssl=False
|
||||
) as response:
|
||||
await raise_for_status(response)
|
||||
async for line in response.content:
|
||||
if line.startswith(b"data: "):
|
||||
yield json.loads(line[6:]).get("content")
|
||||
except:
|
||||
async with session.post(
|
||||
"https://liaobots.work/api/user",
|
||||
json={"authcode": "jGDRFOqHcZKAo"},
|
||||
verify_ssl=False
|
||||
) as response:
|
||||
await raise_for_status(response)
|
||||
cls._auth_code = (await response.json(content_type=None))["authCode"]
|
||||
if not cls._auth_code:
|
||||
raise RuntimeError("Empty auth code")
|
||||
cls._cookie_jar = session.cookie_jar
|
||||
async with session.post(
|
||||
"https://liaobots.work/api/chat",
|
||||
json=data,
|
||||
headers={"x-auth-code": cls._auth_code},
|
||||
verify_ssl=False
|
||||
) as response:
|
||||
await raise_for_status(response)
|
||||
async for line in response.content:
|
||||
if line.startswith(b"data: "):
|
||||
yield json.loads(line[6:]).get("content")
|
||||
# Check if we got a streaming response
|
||||
content_type = response.headers.get("Content-Type", "")
|
||||
if "text/event-stream" in content_type:
|
||||
async for line in response.content:
|
||||
if line.startswith(b"data: "):
|
||||
try:
|
||||
response_data = json.loads(line[6:])
|
||||
|
||||
# Check for error response
|
||||
if response_data.get("error") is True:
|
||||
# Raise RateLimitError for payment required or other errors
|
||||
if "402" in str(response_data.get("res_status", "")):
|
||||
raise RateLimitError("This model requires payment or credits")
|
||||
else:
|
||||
error_msg = response_data.get('message', 'Unknown error')
|
||||
raise RateLimitError(f"Error: {error_msg}")
|
||||
|
||||
# Process normal response
|
||||
if response_data.get("role") == "assistant" and "content" in response_data:
|
||||
content = response_data.get("content")
|
||||
yield content
|
||||
except json.JSONDecodeError:
|
||||
continue
|
||||
else:
|
||||
# Not a streaming response, might be an error or HTML
|
||||
response_text = await response.text()
|
||||
|
||||
# If we got HTML, we need to bypass CAPTCHA
|
||||
if response_text.startswith("<!DOCTYPE html>"):
|
||||
await cls.bypass_captcha(session)
|
||||
|
||||
# Get a fresh auth code
|
||||
await cls.get_auth_code(session)
|
||||
|
||||
# Try the request again
|
||||
async with session.post(
|
||||
f"{cls.url}/api/chat",
|
||||
json=data,
|
||||
headers={"x-auth-code": cls._auth_code},
|
||||
ssl=False
|
||||
) as response2:
|
||||
# Check if we got a streaming response
|
||||
content_type = response2.headers.get("Content-Type", "")
|
||||
if "text/event-stream" in content_type:
|
||||
async for line in response2.content:
|
||||
if line.startswith(b"data: "):
|
||||
try:
|
||||
response_data = json.loads(line[6:])
|
||||
|
||||
# Check for error response
|
||||
if response_data.get("error") is True:
|
||||
# Raise RateLimitError for payment required or other errors
|
||||
if "402" in str(response_data.get("res_status", "")):
|
||||
raise RateLimitError("This model requires payment or credits")
|
||||
else:
|
||||
error_msg = response_data.get('message', 'Unknown error')
|
||||
raise RateLimitError(f"Error: {error_msg}")
|
||||
|
||||
# Process normal response
|
||||
if response_data.get("role") == "assistant" and "content" in response_data:
|
||||
content = response_data.get("content")
|
||||
yield content
|
||||
except json.JSONDecodeError:
|
||||
continue
|
||||
else:
|
||||
raise RateLimitError("Failed to get streaming response")
|
||||
else:
|
||||
raise RateLimitError("Failed to connect to the service")
|
||||
except Exception as e:
|
||||
# If it's already a RateLimitError, re-raise it
|
||||
if isinstance(e, RateLimitError):
|
||||
raise
|
||||
# Otherwise, wrap it in a RateLimitError
|
||||
raise RateLimitError(f"Error processing request: {str(e)}")
|
||||
|
||||
@classmethod
|
||||
async def initialize_auth_code(cls, session: ClientSession) -> None:
|
||||
async def bypass_captcha(cls, session: ClientSession) -> None:
|
||||
"""
|
||||
Initialize the auth code by making the necessary login requests.
|
||||
Bypass the CAPTCHA verification by directly making the recaptcha API request.
|
||||
"""
|
||||
async with session.post(
|
||||
"https://liaobots.work/api/user",
|
||||
json={"authcode": "pTIQr4FTnVRfr"},
|
||||
verify_ssl=False
|
||||
) as response:
|
||||
await raise_for_status(response)
|
||||
cls._auth_code = (await response.json(content_type=None))["authCode"]
|
||||
if not cls._auth_code:
|
||||
raise RuntimeError("Empty auth code")
|
||||
cls._cookie_jar = session.cookie_jar
|
||||
try:
|
||||
# First, try the direct recaptcha API request
|
||||
async with session.post(
|
||||
f"{cls.url}/recaptcha/api/login",
|
||||
json={"token": "abcdefghijklmnopqrst"},
|
||||
ssl=False
|
||||
) as response:
|
||||
if response.status == 200:
|
||||
try:
|
||||
response_text = await response.text()
|
||||
|
||||
# Try to parse as JSON
|
||||
try:
|
||||
response_data = json.loads(response_text)
|
||||
|
||||
# Check if we got a successful response
|
||||
if response_data.get("code") == 200:
|
||||
cls._cookie_jar = session.cookie_jar
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
except Exception:
|
||||
pass
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
async def ensure_auth_code(cls, session: ClientSession) -> None:
|
||||
async def get_auth_code(cls, session: ClientSession) -> None:
|
||||
"""
|
||||
Ensure the auth code is initialized, and if not, perform the initialization.
|
||||
Get a valid auth code by sending a request with an empty authcode.
|
||||
"""
|
||||
if not cls._auth_code:
|
||||
await cls.initialize_auth_code(session)
|
||||
try:
|
||||
# Send request with empty authcode to get a new one
|
||||
auth_request_data = {
|
||||
"authcode": "",
|
||||
"recommendUrl": "https://liaobots.work/zh"
|
||||
}
|
||||
|
||||
async with session.post(
|
||||
f"{cls.url}/api/user",
|
||||
json=auth_request_data,
|
||||
ssl=False
|
||||
) as response:
|
||||
if response.status == 200:
|
||||
response_text = await response.text()
|
||||
|
||||
try:
|
||||
response_data = json.loads(response_text)
|
||||
|
||||
if "authCode" in response_data:
|
||||
cls._auth_code = response_data["authCode"]
|
||||
cls._cookie_jar = session.cookie_jar
|
||||
return
|
||||
except json.JSONDecodeError:
|
||||
# If we got HTML, it might be the CAPTCHA page
|
||||
if response_text.startswith("<!DOCTYPE html>"):
|
||||
await cls.bypass_captcha(session)
|
||||
|
||||
# Try again after bypassing CAPTCHA
|
||||
async with session.post(
|
||||
f"{cls.url}/api/user",
|
||||
json=auth_request_data,
|
||||
ssl=False
|
||||
) as response2:
|
||||
if response2.status == 200:
|
||||
response_text2 = await response2.text()
|
||||
|
||||
try:
|
||||
response_data2 = json.loads(response_text2)
|
||||
|
||||
if "authCode" in response_data2:
|
||||
cls._auth_code = response_data2["authCode"]
|
||||
cls._cookie_jar = session.cookie_jar
|
||||
return
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# If we're here, we couldn't get a valid auth code
|
||||
# Set a default one as a fallback
|
||||
cls._auth_code = "DvS3A5GTE9f0D" # Fallback to one of the provided auth codes
|
||||
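For reference, a standalone sketch of the "data: " line handling used in the chat stream above (the sample byte string is fabricated):

import json
from typing import Optional

def parse_sse_line(line: bytes) -> Optional[str]:
    # Only "data: " lines carry payloads; assistant chunks expose a "content" field.
    if not line.startswith(b"data: "):
        return None
    payload = json.loads(line[6:])
    if payload.get("error") is True:
        raise RuntimeError(payload.get("message", "Unknown error"))
    if payload.get("role") == "assistant":
        return payload.get("content")
    return None

print(parse_sse_line(b'data: {"role": "assistant", "content": "Hi"}'))  # -> Hi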
|
|
|
|||
|
|
@ -1,24 +0,0 @@
|
|||
from __future__ import annotations
|
||||
|
||||
from .template import OpenaiTemplate
|
||||
|
||||
class OIVSCode(OpenaiTemplate):
|
||||
label = "OI VSCode Server"
|
||||
url = "https://oi-vscode-server.onrender.com"
|
||||
api_base = "https://oi-vscode-server-2.onrender.com/v1"
|
||||
|
||||
working = True
|
||||
needs_auth = False
|
||||
supports_stream = True
|
||||
supports_system_message = True
|
||||
supports_message_history = True
|
||||
|
||||
default_model = "gpt-4o-mini-2024-07-18"
|
||||
default_vision_model = default_model
|
||||
vision_models = [default_model, "gpt-4o-mini"]
|
||||
models = vision_models + ["deepseek-ai/DeepSeek-V3"]
|
||||
|
||||
model_aliases = {
|
||||
"gpt-4o-mini": "gpt-4o-mini-2024-07-18",
|
||||
"deepseek-v3": "deepseek-ai/DeepSeek-V3"
|
||||
}
|
||||
|
|
@ -81,15 +81,31 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
|
|||
await ws.send_str("3")
|
||||
continue
|
||||
try:
|
||||
if last_message == 0 and model == cls.default_model:
|
||||
yield "<think>"
|
||||
data = json.loads(message[2:])[1]
|
||||
yield data["output"][last_message:]
|
||||
last_message = len(data["output"])
|
||||
if data["final"]:
|
||||
if data["citations"]:
|
||||
yield Sources(data["citations"])
|
||||
yield FinishReason("stop")
|
||||
break
|
||||
if not message.startswith("42"):
|
||||
continue
|
||||
|
||||
parsed_data = json.loads(message[2:])
|
||||
message_type = parsed_data[0]
|
||||
data = parsed_data[1]
|
||||
|
||||
# Handle error responses
|
||||
if message_type.endswith("_query_progress") and data.get("status") == "failed":
|
||||
error_message = data.get("text", "Unknown API error")
|
||||
raise ResponseError(f"API Error: {error_message}")
|
||||
|
||||
# Handle normal responses
|
||||
if "output" in data:
|
||||
if last_message == 0 and model == cls.default_model:
|
||||
yield "<think>"
|
||||
yield data["output"][last_message:]
|
||||
last_message = len(data["output"])
|
||||
if data["final"]:
|
||||
if data["citations"]:
|
||||
yield Sources(data["citations"])
|
||||
yield FinishReason("stop")
|
||||
break
|
||||
except ResponseError as e:
|
||||
# Re-raise ResponseError directly
|
||||
raise e
|
||||
except Exception as e:
|
||||
raise ResponseError(f"Message: {message}") from e
|
||||
raise ResponseError(f"Error processing message: {message}") from e
|
||||
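The Labs websocket speaks the socket.io wire format: event frames start with "42" followed by a JSON array of [event_name, payload], which is what the parsing above unpacks. A standalone sketch (the sample frame is fabricated):

import json

def parse_frame(message: str):
    # Non-"42" frames (pings, acks) carry no event payload.
    if not message.startswith("42"):
        return None, None
    event_name, payload = json.loads(message[2:])
    return event_name, payload

print(parse_frame('42["labs_query_progress", {"status": "completed", "output": "done", "final": true}]'))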
|
|
|
|||
|
|
@ -51,38 +51,70 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
|
|||
image_models = [default_image_model]
|
||||
audio_models = {default_audio_model: []}
|
||||
extra_image_models = ["flux-pro", "flux-dev", "flux-schnell", "midjourney", "dall-e-3", "turbo"]
|
||||
vision_models = [default_vision_model, "gpt-4o-mini", "openai", "openai-large", "searchgpt"]
|
||||
vision_models = [default_vision_model, "gpt-4o-mini", "openai", "openai-large", "openai-reasoning", "searchgpt"]
|
||||
_models_loaded = False
|
||||
# https://github.com/pollinations/pollinations/blob/master/text.pollinations.ai/generateTextPortkey.js#L15
|
||||
model_aliases = {
|
||||
### Text Models ###
|
||||
"gpt-4o-mini": "openai",
|
||||
"gpt-4.1-nano": "openai-fast",
|
||||
"gpt-4": "openai-large",
|
||||
"gpt-4o": "openai-large",
|
||||
"gpt-4.1": "openai",
|
||||
"gpt-4.1-nano": "openai",
|
||||
"gpt-4.1-mini": "openai-large",
|
||||
"gpt-4.1-xlarge": "openai-xlarge",
|
||||
"gpt-4.1": "openai-large",
|
||||
"o4-mini": "openai-reasoning",
|
||||
"gpt-4.1-mini": "openai",
|
||||
"command-r-plus-08-2024": "command-r",
|
||||
"gemini-2.5-flash": "gemini",
|
||||
"gemini-2.0-flash-thinking": "gemini-thinking",
|
||||
"qwen-2.5-coder-32b": "qwen-coder",
|
||||
"llama-3.3-70b": "llama",
|
||||
"llama-4-scout": "llamascout",
|
||||
"mistral-nemo": "mistral",
|
||||
"llama-3.1-8b": "llamalight",
|
||||
"llama-3.3-70b": "llama-scaleway",
|
||||
"phi-4": "phi",
|
||||
"llama-4-scout-17b": "llamascout",
|
||||
"mistral-small-3.1-24b": "mistral",
|
||||
"deepseek-r1": "deepseek-reasoning-large",
|
||||
"deepseek-r1-distill-llama-70b": "deepseek-reasoning-large",
|
||||
"deepseek-r1-distill-llama-70b": "deepseek-r1-llama",
|
||||
#"mistral-small-3.1-24b": "unity", # Personas
|
||||
#"mirexa": "mirexa", # Personas
|
||||
#"midijourney": "midijourney", # Personas
|
||||
#"rtist": "rtist", # Personas
|
||||
#"searchgpt": "searchgpt",
|
||||
#"evil": "evil", # Personas
|
||||
"deepseek-r1-distill-qwen-32b": "deepseek-reasoning",
|
||||
"phi-4": "phi",
|
||||
#"pixtral-12b": "pixtral",
|
||||
#"hormoz-8b": "hormoz",
|
||||
"qwq-32b": "qwen-qwq",
|
||||
#"hypnosis-tracy-7b": "hypnosis-tracy", # Personas
|
||||
#"mistral-?": "sur", # Personas
|
||||
"deepseek-v3": "deepseek",
|
||||
"llama-3.2-11b": "llama-vision",
|
||||
"deepseek-v3-0324": "deepseek",
|
||||
#"bidara": "bidara", # Personas
|
||||
|
||||
### Audio Models ###
|
||||
"gpt-4o-audio": "openai-audio",
|
||||
"gpt-4o-audio-preview": "openai-audio",
|
||||
|
||||
### Image Models ###
|
||||
"sdxl-turbo": "turbo",
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def get_model(cls, model: str) -> str:
|
||||
"""Get the internal model name from the user-provided model name."""
|
||||
if not model:
|
||||
return cls.default_model
|
||||
|
||||
# Check if the model exists directly in our model lists
|
||||
if model in cls.text_models or model in cls.image_models or model in cls.audio_models:
|
||||
return model
|
||||
|
||||
# Check if there's an alias for this model
|
||||
if model in cls.model_aliases:
|
||||
return cls.model_aliases[model]
|
||||
|
||||
# If no match is found, raise an error
|
||||
raise ModelNotFoundError(f"Model {model} not found")
|
||||
|
||||
@classmethod
|
||||
def get_models(cls, **kwargs):
|
||||
if not cls._models_loaded:
|
||||
|
|
@ -160,6 +192,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
|
|||
stream: bool = True,
|
||||
proxy: str = None,
|
||||
cache: bool = False,
|
||||
referrer: str = "https://gpt4free.github.io/",
|
||||
# Image generation parameters
|
||||
prompt: str = None,
|
||||
aspect_ratio: str = "1:1",
|
||||
|
|
@ -210,7 +243,8 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
|
|||
private=private,
|
||||
enhance=enhance,
|
||||
safe=safe,
|
||||
n=n
|
||||
n=n,
|
||||
referrer=referrer
|
||||
):
|
||||
yield chunk
|
||||
else:
|
||||
|
|
@ -238,6 +272,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
|
|||
cache=cache,
|
||||
stream=stream,
|
||||
extra_parameters=extra_parameters,
|
||||
referrer=referrer,
|
||||
**kwargs
|
||||
):
|
||||
yield result
|
||||
|
|
@ -257,7 +292,8 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
|
|||
private: bool,
|
||||
enhance: bool,
|
||||
safe: bool,
|
||||
n: int
|
||||
n: int,
|
||||
referrer: str
|
||||
) -> AsyncResult:
|
||||
params = use_aspect_ratio({
|
||||
"width": width,
|
||||
|
|
@ -269,7 +305,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
|
|||
"safe": str(safe).lower()
|
||||
}, aspect_ratio)
|
||||
query = "&".join(f"{k}={quote_plus(str(v))}" for k, v in params.items() if v is not None)
|
||||
prompt = quote_plus(prompt)[:2048-256-len(query)]
|
||||
prompt = quote_plus(prompt)[:2048-len(cls.image_api_endpoint)-len(query)-8]
|
||||
url = f"{cls.image_api_endpoint}prompt/{prompt}?{query}"
|
||||
def get_image_url(i: int, seed: Optional[int] = None):
|
||||
if i == 1:
|
||||
|
|
@ -280,7 +316,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
|
|||
return f"{url}&seed={seed}" if seed else url
|
||||
async with ClientSession(headers=DEFAULT_HEADERS, connector=get_connector(proxy=proxy)) as session:
|
||||
async def get_image(i: int, seed: Optional[int] = None):
|
||||
async with session.get(get_image_url(i, seed), allow_redirects=False) as response:
|
||||
async with session.get(get_image_url(i, seed), allow_redirects=False, headers={"referer": referrer}) as response:
|
||||
try:
|
||||
await raise_for_status(response)
|
||||
except Exception as e:
|
||||
|
|
@ -307,13 +343,11 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
|
|||
cache: bool,
|
||||
stream: bool,
|
||||
extra_parameters: list[str],
|
||||
referrer: str,
|
||||
**kwargs
|
||||
) -> AsyncResult:
|
||||
if not cache and seed is None:
|
||||
seed = random.randint(0, 2**32)
|
||||
json_mode = False
|
||||
if response_format and response_format.get("type") == "json_object":
|
||||
json_mode = True
|
||||
|
||||
async with ClientSession(headers=DEFAULT_HEADERS, connector=get_connector(proxy=proxy)) as session:
|
||||
if model in cls.audio_models:
|
||||
|
|
@ -331,13 +365,13 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
|
|||
"presence_penalty": presence_penalty,
|
||||
"top_p": top_p,
|
||||
"frequency_penalty": frequency_penalty,
|
||||
"jsonMode": json_mode,
|
||||
"response_format": response_format,
|
||||
"stream": stream,
|
||||
"seed": seed,
|
||||
"cache": cache,
|
||||
**extra_parameters
|
||||
})
|
||||
async with session.post(url, json=data) as response:
|
||||
async with session.post(url, json=data, headers={"referer": referrer}) as response:
|
||||
await raise_for_status(response)
|
||||
if response.headers["content-type"].startswith("text/plain"):
|
||||
yield await response.text()
|
||||
|
|
|
|||
|
|
@ -3,6 +3,7 @@ from __future__ import annotations
|
|||
import requests
|
||||
|
||||
from .template import OpenaiTemplate
|
||||
from .. import debug
|
||||
|
||||
class TypeGPT(OpenaiTemplate):
|
||||
label = "TypeGpt"
|
||||
|
|
@ -36,6 +37,10 @@ class TypeGPT(OpenaiTemplate):
|
|||
@classmethod
|
||||
def get_models(cls, **kwargs):
|
||||
if not cls.models:
|
||||
cls.models = requests.get(f"{cls.url}/api/config").json()["customModels"].split(",")
|
||||
cls.models = [model.split("@")[0].strip("+") for model in cls.models if not model.startswith("-") and model not in cls.image_models]
|
||||
try:
|
||||
cls.models = requests.get(f"{cls.url}/api/config").json()["customModels"].split(",")
|
||||
cls.models = [model.split("@")[0].strip("+") for model in cls.models if not model.startswith("-") and model not in cls.image_models]
|
||||
except Exception as e:
|
||||
cls.models = cls.fallback_models
|
||||
debug.log(f"Error fetching models: {e}")
|
||||
return cls.models
|
||||
|
|
@@ -34,50 +34,34 @@ try:
except ImportError as e:
    debug.error("Audio providers not loaded:", e)

try:
    from .AllenAI import AllenAI
    from .ARTA import ARTA
    from .Blackbox import Blackbox
    from .Chatai import Chatai
    from .ChatGLM import ChatGLM
    from .ChatGpt import ChatGpt
    from .ChatGptEs import ChatGptEs
    from .Cloudflare import Cloudflare
    from .Copilot import Copilot
    from .DDG import DDG
    from .DeepInfraChat import DeepInfraChat
    from .DuckDuckGo import DuckDuckGo
    from .Dynaspark import Dynaspark
except ImportError as e:
    debug.error("Providers not loaded (A-D):", e)
try:
    from .Free2GPT import Free2GPT
    from .FreeGpt import FreeGpt
    from .FreeRouter import FreeRouter
    from .GizAI import GizAI
    from .Glider import Glider
    from .Goabror import Goabror
    from .ImageLabs import ImageLabs
    from .Jmuz import Jmuz
    from .LambdaChat import LambdaChat
    from .Liaobots import Liaobots
    from .LMArenaProvider import LMArenaProvider
    from .OIVSCode import OIVSCode
except ImportError as e:
    debug.error("Providers not loaded (F-L):", e)
try:
    from .PerplexityLabs import PerplexityLabs
    from .Pi import Pi
    from .Pizzagpt import Pizzagpt
    from .PollinationsAI import PollinationsAI
    from .PollinationsImage import PollinationsImage
    from .TeachAnything import TeachAnything
    from .TypeGPT import TypeGPT
    from .You import You
    from .Websim import Websim
    from .Yqcloud import Yqcloud
except ImportError as e:
    debug.error("Providers not loaded (M-Z):", e)
from .ARTA import ARTA
from .Blackbox import Blackbox
from .Chatai import Chatai
from .ChatGLM import ChatGLM
from .ChatGpt import ChatGpt
from .Cloudflare import Cloudflare
from .Copilot import Copilot
from .DDG import DDG
from .DeepInfraChat import DeepInfraChat
from .DuckDuckGo import DuckDuckGo
from .Dynaspark import Dynaspark
from .Free2GPT import Free2GPT
from .FreeGpt import FreeGpt
from .GizAI import GizAI
from .ImageLabs import ImageLabs
from .LambdaChat import LambdaChat
from .Liaobots import Liaobots
from .LMArenaProvider import LMArenaProvider
from .PerplexityLabs import PerplexityLabs
from .Pi import Pi
from .Pizzagpt import Pizzagpt
from .PollinationsAI import PollinationsAI
from .PollinationsImage import PollinationsImage
from .TeachAnything import TeachAnything
from .TypeGPT import TypeGPT
from .You import You
from .Websim import Websim
from .Yqcloud import Yqcloud

import sys

@@ -1,75 +0,0 @@
from __future__ import annotations

from aiohttp import ClientSession
import json

from ...typing import AsyncResult, Messages
from ...providers.response import ImageResponse
from ...errors import ResponseError
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_image_prompt
from .raise_for_status import raise_for_status

class BlackForestLabs_Flux1Schnell(AsyncGeneratorProvider, ProviderModelMixin):
    label = "BlackForestLabs Flux-1-Schnell"
    url = "https://black-forest-labs-flux-1-schnell.hf.space"
    api_endpoint = "https://black-forest-labs-flux-1-schnell.hf.space/call/infer"

    working = True

    default_model = "black-forest-labs-flux-1-schnell"
    default_image_model = default_model
    model_aliases = {"flux-schnell": default_image_model, "flux": default_image_model}
    image_models = list(model_aliases.keys())
    models = image_models

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        prompt: str = None,
        width: int = 768,
        height: int = 768,
        num_inference_steps: int = 2,
        seed: int = 0,
        randomize_seed: bool = True,
        **kwargs
    ) -> AsyncResult:
        width = max(32, width - (width % 8))
        height = max(32, height - (height % 8))
        prompt = format_image_prompt(messages, prompt)
        payload = {
            "data": [
                prompt,
                seed,
                randomize_seed,
                width,
                height,
                num_inference_steps
            ]
        }
        async with ClientSession() as session:
            async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
                await raise_for_status(response)
                response_data = await response.json()
                event_id = response_data['event_id']
                while True:
                    async with session.get(f"{cls.api_endpoint}/{event_id}", proxy=proxy) as status_response:
                        await raise_for_status(status_response)
                        while not status_response.content.at_eof():
                            event = await status_response.content.readuntil(b'\n\n')
                            if event.startswith(b'event:'):
                                event_parts = event.split(b'\ndata: ')
                                if len(event_parts) < 2:
                                    continue
                                event_type = event_parts[0].split(b': ')[1]
                                data = event_parts[1]
                                if event_type == b'error':
                                    raise ResponseError(f"Error generating image: {data.decode(errors='ignore')}")
                                elif event_type == b'complete':
                                    json_data = json.loads(data)
                                    image_url = json_data[0]['url']
                                    yield ImageResponse(images=[image_url], alt=prompt)
                                    return

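For context, the deleted provider polled a Gradio-style call API: a POST to /call/infer returns an event_id, and a follow-up GET streams "event: ..." / "data: ..." frames until an error or complete event arrives. A standalone sketch of parsing one such frame; the frame bytes below are a made-up example in that format, not captured traffic:

import json

# A made-up frame in the same "event:" / "data:" layout the removed provider parsed.
frame = b'event: complete\ndata: [{"url": "https://example.com/image.webp"}]\n\n'

def parse_frame(event: bytes):
    """Split one SSE-style frame into its event type and decoded JSON payload."""
    if not event.startswith(b"event:"):
        return None, None
    event_parts = event.split(b"\ndata: ")
    if len(event_parts) < 2:
        return None, None
    event_type = event_parts[0].split(b": ")[1]
    data = json.loads(event_parts[1])
    return event_type, data

event_type, data = parse_frame(frame)
assert event_type == b"complete"
print(data[0]["url"])  # -> https://example.com/image.webp
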
@@ -17,14 +17,15 @@ class CohereForAI_C4AI_Command(AsyncGeneratorProvider, ProviderModelMixin):
    working = True

    default_model = "command-a-03-2025"
    model_aliases = {
        "command-a": default_model,
        "command-r-plus": "command-r-plus-08-2024",
        "command-r": "command-r-08-2024",
        "command-r": "command-r",
        "command-r7b": "command-r7b-12-2024",
    }
    models = list(model_aliases.keys())
    models = [
        default_model,
        "command-r-plus-08-2024",
        "command-r-08-2024",
        "command-r-plus",
        "command-r",
        "command-r7b-12-2024",
        "command-r7b-arabic-02-2025",
    ]

    @classmethod
    def get_model(cls, model: str, **kwargs) -> str:

@@ -15,7 +15,7 @@ from ... import debug
from .DeepseekAI_JanusPro7b import get_zerogpu_token
from .raise_for_status import raise_for_status

class Microsoft_Phi_4(AsyncGeneratorProvider, ProviderModelMixin):
class Microsoft_Phi_4_Multimodal(AsyncGeneratorProvider, ProviderModelMixin):
    label = "Microsoft Phi-4"
    space = "microsoft/phi-4-multimodal"
    url = f"https://huggingface.co/spaces/{space}"

@@ -29,9 +29,9 @@ class Microsoft_Phi_4(AsyncGeneratorProvider, ProviderModelMixin):

    default_model = "phi-4-multimodal"
    default_vision_model = default_model
    model_aliases = {"phi-4": default_vision_model}
    vision_models = list(model_aliases.keys())
    vision_models = [default_vision_model]
    models = vision_models
    model_aliases = {"phi-4": default_vision_model}

    @classmethod
    def run(cls, method: str, session: StreamSession, prompt: str, conversation: JsonConversation, media: list = None):

@@ -31,7 +31,15 @@ class Qwen_Qwen_3(AsyncGeneratorProvider, ProviderModelMixin):
        "qwen3-1.7b",
        "qwen3-0.6b",
    }
    model_aliases = {model: model for model in models}
    model_aliases = {
        "qwen-3-235b": default_model,
        "qwen-3-32b": "qwen3-32b",
        "qwen-3-30b": "qwen3-30b-a3b",
        "qwen-3-14b": "qwen3-14b",
        "qwen-3-4b": "qwen3-4b",
        "qwen-3-1.7b": "qwen3-1.7b",
        "qwen-3-0.6b": "qwen3-0.6b",
    }

    @classmethod
    async def create_async_generator(

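The identity mapping (model: model) is replaced with explicit dashed aliases, so a request for e.g. "qwen-3-32b" can resolve to the space's internal "qwen3-32b" id. A minimal sketch of that lookup; the default_model value is assumed here, and in g4f the actual resolution happens in the provider's get_model machinery, which may apply further fallbacks:

# Illustrative only: mapping a dashed public name onto the internal model id.
default_model = "qwen3-235b-a22b"  # assumed value of default_model for this sketch
model_aliases = {
    "qwen-3-235b": default_model,
    "qwen-3-32b": "qwen3-32b",
    "qwen-3-0.6b": "qwen3-0.6b",
}

def resolve(model: str) -> str:
    # Fall back to the requested name when no alias is defined.
    return model_aliases.get(model, model)

assert resolve("qwen-3-32b") == "qwen3-32b"
assert resolve("qwen3-14b") == "qwen3-14b"  # unknown names pass through unchanged
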
@@ -19,7 +19,10 @@ class Voodoohop_Flux1Schnell(AsyncGeneratorProvider, ProviderModelMixin):

    default_model = "voodoohop-flux-1-schnell"
    default_image_model = default_model
    model_aliases = {"flux-schnell": default_model, "flux": default_model}
    model_aliases = {
        "flux-schnell": default_image_model,
        "flux": default_image_model
    }
    image_models = list(model_aliases.keys())
    models = image_models

@@ -7,10 +7,9 @@ from ...errors import ResponseError
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin

from .BlackForestLabs_Flux1Dev import BlackForestLabs_Flux1Dev
from .BlackForestLabs_Flux1Schnell import BlackForestLabs_Flux1Schnell
from .CohereForAI_C4AI_Command import CohereForAI_C4AI_Command
from .DeepseekAI_JanusPro7b import DeepseekAI_JanusPro7b
from .Microsoft_Phi_4 import Microsoft_Phi_4
from .Microsoft_Phi_4_Multimodal import Microsoft_Phi_4_Multimodal
from .Qwen_QVQ_72B import Qwen_QVQ_72B
from .Qwen_Qwen_2_5 import Qwen_Qwen_2_5
from .Qwen_Qwen_2_5M import Qwen_Qwen_2_5M

@@ -30,10 +29,9 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
    default_vision_model = Qwen_QVQ_72B.default_model
    providers = [
        BlackForestLabs_Flux1Dev,
        BlackForestLabs_Flux1Schnell,
        CohereForAI_C4AI_Command,
        DeepseekAI_JanusPro7b,
        Microsoft_Phi_4,
        Microsoft_Phi_4_Multimodal,
        Qwen_QVQ_72B,
        Qwen_Qwen_2_5,
        Qwen_Qwen_2_5M,

g4f/Provider/needs_auth/BlackboxPro.py (1355 lines): file diff suppressed because it is too large.

@@ -63,6 +63,7 @@ GGOGLE_SID_COOKIE = "__Secure-1PSID"

models = {
    "gemini-2.5-pro-exp": {"x-goog-ext-525001261-jspb": '[1,null,null,null,"2525e3954d185b3c"]'},
    "gemini-2.5-flash": {"x-goog-ext-525001261-jspb": '[1,null,null,null,"35609594dbe934d8"]'},
    "gemini-2.0-flash-thinking-exp": {"x-goog-ext-525001261-jspb": '[1,null,null,null,"7ca48d02d802f20a"]'},
    "gemini-deep-research": {"x-goog-ext-525001261-jspb": '[1,null,null,null,"cd472a54d2abba7e"]'},
    "gemini-2.0-flash": {"x-goog-ext-525001261-jspb": '[null,null,null,null,"f299729663a2343f"]'},

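Each entry in the Gemini models dict carries the x-goog-ext-525001261-jspb value that selects that model, and the new gemini-2.5-flash entry follows the same shape. A sketch of how such a per-model block could be merged into request headers; the base headers and the merge step are assumptions for illustration, not the provider's exact request code:

# Sketch: pick the per-model header block before issuing a request.
models = {
    "gemini-2.5-pro-exp": {"x-goog-ext-525001261-jspb": '[1,null,null,null,"2525e3954d185b3c"]'},
    "gemini-2.5-flash": {"x-goog-ext-525001261-jspb": '[1,null,null,null,"35609594dbe934d8"]'},
}

base_headers = {"content-type": "application/x-www-form-urlencoded;charset=UTF-8"}  # illustrative only
model = "gemini-2.5-flash"
headers = {**base_headers, **models.get(model, {})}
print(headers["x-goog-ext-525001261-jspb"])
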
@@ -87,7 +88,10 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
    models = [
        default_model, *models.keys()
    ]
    model_aliases = {"gemini-2.0": ""}
    model_aliases = {
        "gemini-2.0": "",
        "gemini-2.5-pro": "gemini-2.5-pro-exp"
    }

    synthesize_content_type = "audio/vnd.wav"

@@ -102,14 +106,11 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
    @classmethod
    async def nodriver_login(cls, proxy: str = None) -> AsyncIterator[str]:
        if not has_nodriver:
            if debug.logging:
                print("Skip nodriver login in Gemini provider")
            debug.log("Skip nodriver login in Gemini provider")
            return
        browser, stop_browser = await get_nodriver(proxy=proxy, user_data_dir="gemini")
        try:
            login_url = os.environ.get("G4F_LOGIN_URL")
            if login_url:
                yield RequestLogin(cls.label, login_url)
            yield RequestLogin(cls.label, os.environ.get("G4F_LOGIN_URL", ""))
            page = await browser.get(f"{cls.url}/app")
            await page.select("div.ql-editor.textarea", 240)
            cookies = {}

@@ -159,6 +160,8 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
        audio: dict = None,
        **kwargs
    ) -> AsyncResult:
        if model in cls.model_aliases:
            model = cls.model_aliases[model]
        if audio is not None or model == "gemini-audio":
            prompt = format_image_prompt(messages, prompt)
            filename = get_filename(["gemini"], prompt, ".ogx", prompt)

@@ -64,9 +64,8 @@ class GigaChat(AsyncGeneratorProvider, ProviderModelMixin):
    supports_system_message = True
    supports_stream = True
    needs_auth = True
    default_model = "GigaChat:latest"
    models = [default_model, "GigaChat-Plus", "GigaChat-Pro"]
    model_aliases = {"gigachat": default_model}
    default_model = "GigaChat"
    models = ["GigaChat-2", "GigaChat-2-Pro", "GigaChat-2-Max", default_model, "GigaChat-Pro", "GigaChat-Max"]

    @classmethod
    async def create_async_generator(

@@ -1,5 +1,6 @@
from .Anthropic import Anthropic
from .BingCreateImages import BingCreateImages
from .BlackboxPro import BlackboxPro
from .CablyAI import CablyAI
from .Cerebras import Cerebras
from .CopilotAccount import CopilotAccount

@@ -2,13 +2,13 @@ from __future__ import annotations
import json
from uuid import uuid4
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages, MediaListType
from ..image import to_bytes, is_accepted_format, to_data_uri
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..requests.raise_for_status import raise_for_status
from ..providers.response import FinishReason, JsonConversation
from .helper import format_prompt, get_last_user_message, format_image_prompt
from ..tools.media import merge_media
from ...typing import AsyncResult, Messages, MediaListType
from ...image import to_bytes, is_accepted_format, to_data_uri
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ...requests.raise_for_status import raise_for_status
from ...providers.response import FinishReason, JsonConversation
from ..helper import format_prompt, get_last_user_message, format_image_prompt
from ...tools.media import merge_media


class Conversation(JsonConversation):

@@ -29,7 +29,7 @@ class AllenAI(AsyncGeneratorProvider, ProviderModelMixin):
    login_url = None
    api_endpoint = "https://olmo-api.allen.ai/v4/message/stream"

    working = True
    working = False
    needs_auth = False
    use_nodriver = False
    supports_stream = True

@@ -10,16 +10,16 @@ try:
except ImportError:
    has_curl_cffi = False

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
from ..errors import MissingRequirementsError
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt
from ...errors import MissingRequirementsError

class ChatGptEs(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://chatgpt.es"
    api_endpoint = "https://chatgpt.es/wp-admin/admin-ajax.php"

    working = True
    working = False
    supports_stream = True
    supports_system_message = False
    supports_message_history = False

@@ -1,9 +1,9 @@
from __future__ import annotations

from .template import OpenaiTemplate
from ..template import OpenaiTemplate

class FreeRouter(OpenaiTemplate):
    label = "CablyAI FreeRouter"
    url = "https://freerouter.cablyai.com"
    api_base = "https://freerouter.cablyai.com/v1"
    working = True
    working = False

@@ -1,12 +1,12 @@
from __future__ import annotations

from .template import OpenaiTemplate
from ..template import OpenaiTemplate

class Glider(OpenaiTemplate):
    label = "Glider"
    url = "https://glider.so"
    api_endpoint = "https://glider.so/api/chat"
    working = True
    working = False

    default_model = 'chat-llama-3-1-70b'
    models = [

@@ -17,7 +17,7 @@ class RubiksAI(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://rubiks.ai"
    api_endpoint = "https://rubiks.ai/search/api/"

    working = True
    working = False
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

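AllenAI, ChatGptEs, FreeRouter, Glider, and RubiksAI are all flipped to working = False, the flag that automatic provider selection uses to skip dead backends. A standalone sketch of that kind of filtering, with stand-in classes rather than the real g4f registry:

# Stand-in provider classes; only the `working` flag matters for this sketch.
class AllenAI:
    working = False

class Copilot:
    working = True

class PollinationsAI:
    working = True

providers = [AllenAI, Copilot, PollinationsAI]

# Selection logic of this shape skips providers whose `working` flag is off.
usable = [p for p in providers if p.working]
print([p.__name__ for p in usable])  # -> ['Copilot', 'PollinationsAI']
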
@@ -5,15 +5,19 @@ from .AiChats import AiChats
from .Airforce import Airforce
from .AutonomousAI import AutonomousAI
from .AIUncensored import AIUncensored
from .AllenAI import AllenAI
from .AmigoChat import AmigoChat
from .Aura import Aura
from .Chatgpt4o import Chatgpt4o
from .Chatgpt4Online import Chatgpt4Online
from .ChatGptEs import ChatGptEs
from .ChatgptFree import ChatgptFree
from .ChatGptt import ChatGptt
from .DarkAI import DarkAI
from .FlowGpt import FlowGpt
from .FreeNetfly import FreeNetfly
from .FreeRouter import FreeRouter
from .Glider import Glider
from .GPROChat import GPROChat
from .Koala import Koala
from .MagickPen import MagickPen

g4f/models.py (664 lines): file diff suppressed because it is too large.

@@ -10,7 +10,7 @@ from ..Provider.hf_space import HuggingSpace
from .. import Provider
from .. import models
from ..Provider import Cloudflare, Gemini, Grok, DeepSeekAPI, PerplexityLabs, LambdaChat, PollinationsAI, FreeRouter
from ..Provider import Microsoft_Phi_4, DeepInfraChat, Blackbox, EdgeTTS, gTTS, MarkItDown, HarProvider
from ..Provider import Microsoft_Phi_4_Multimodal, DeepInfraChat, Blackbox, EdgeTTS, gTTS, MarkItDown, HarProvider
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin

class AnyProvider(AsyncGeneratorProvider, ProviderModelMixin):

@@ -100,7 +100,7 @@ class AnyProvider(AsyncGeneratorProvider, ProviderModelMixin):
            cls.image_models.extend([clean_name(model) for model in provider.image_models])
            cls.vision_models.extend([clean_name(model) for model in provider.vision_models])
            cls.video_models.extend([clean_name(model) for model in provider.video_models])
        for provider in [Microsoft_Phi_4, PollinationsAI]:
        for provider in [Microsoft_Phi_4_Multimodal, PollinationsAI]:
            if provider.working and getattr(provider, "parent", provider.__name__) not in ignored:
                cls.audio_models.update(provider.audio_models)
        cls.models_count.update({model: all_models.count(model) for model in all_models if all_models.count(model) > cls.models_count.get(model, 0)})

@@ -137,7 +137,7 @@ class AnyProvider(AsyncGeneratorProvider, ProviderModelMixin):
        if "audio" in kwargs or "audio" in kwargs.get("modalities", []):
            providers = [PollinationsAI, EdgeTTS, gTTS]
        elif has_audio:
            providers = [PollinationsAI, Microsoft_Phi_4, MarkItDown]
            providers = [PollinationsAI, Microsoft_Phi_4_Multimodal, MarkItDown]
        elif has_image:
            providers = models.default_vision.best_provider.providers
        else:

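The audio branch in AnyProvider now routes to the renamed Microsoft_Phi_4_Multimodal. A standalone sketch of the routing order shown above (an explicit audio output request wins over detected audio input, which wins over images); the class names are stand-ins, not the real providers:

class PollinationsAI: ...
class EdgeTTS: ...
class gTTS: ...
class Microsoft_Phi_4_Multimodal: ...
class MarkItDown: ...

def pick_providers(kwargs: dict, has_audio: bool, has_image: bool) -> list:
    # Mirrors the precedence in the diff: requested audio output, then audio input, then images.
    if "audio" in kwargs or "audio" in kwargs.get("modalities", []):
        return [PollinationsAI, EdgeTTS, gTTS]
    if has_audio:
        return [PollinationsAI, Microsoft_Phi_4_Multimodal, MarkItDown]
    if has_image:
        return []  # in the real code: models.default_vision.best_provider.providers
    return []

print([p.__name__ for p in pick_providers({}, has_audio=True, has_image=False)])
# -> ['PollinationsAI', 'Microsoft_Phi_4_Multimodal', 'MarkItDown']
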