Mirror of https://github.com/xtekky/gpt4free.git (synced 2025-12-06 02:30:41 -08:00)
refactor: update providers and models for better compatibility
- Changed default model in commit.py from "gpt-4o" to "claude-3.7-sonnet"
- Fixed ARTA provider by adding proper auth token handling and form data submission
- Updated Blackbox provider to use OpenRouter models instead of premium models
- Improved DDG provider with simplified authentication and better error handling
- Updated DeepInfraChat provider with new models and aliases
- Removed non-working providers: Goabror, Jmuz, OIVSCode, AllenAI, ChatGptEs, FreeRouter, Glider
- Moved non-working providers to the not_working directory
- Added BlackboxPro provider in needs_auth directory with premium model support
- Updated Liaobots provider with new models and improved authentication
- Renamed Microsoft_Phi_4 to Microsoft_Phi_4_Multimodal for clarity
- Updated LambdaChat provider with direct API implementation instead of HuggingChat
- Updated models.py with new model definitions and provider mappings
- Removed BlackForestLabs_Flux1Schnell from HuggingSpace providers
- Updated model aliases across multiple providers for better compatibility
- Fixed Dynaspark provider endpoint URL to prevent spam detection
This commit is contained in:
parent 9fdb60369e
commit c3d61ad9e3
34 changed files with 3018 additions and 1591 deletions
@@ -31,7 +31,7 @@ from g4f import debug
 debug.logging = True

 # Constants
-DEFAULT_MODEL = "gpt-4o"
+DEFAULT_MODEL = "claude-3.7-sonnet"
 FALLBACK_MODELS = []
 MAX_DIFF_SIZE = None # Set to None to disable truncation, or a number for character limit
 MAX_RETRIES = 3
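Note: with this change the commit-message helper defaults to Claude 3.7 Sonnet instead of GPT-4o. A minimal sketch of how such a constant is typically consumed through g4f's public client API (the prompt text here is illustrative, not from the source):

    from g4f.client import Client

    DEFAULT_MODEL = "claude-3.7-sonnet"

    client = Client()
    response = client.chat.completions.create(
        model=DEFAULT_MODEL,
        messages=[{"role": "user", "content": "Write a commit message for this diff"}],
    )
    print(response.choices[0].message.content)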
@@ -151,10 +151,12 @@ class ARTA(AsyncGeneratorProvider, ProviderModelMixin):

         # Step 1: Get Authentication Token
         auth_data = await cls.read_and_refresh_token(proxy)
+        auth_token = auth_data.get("idToken")

         async with ClientSession() as session:
             # Step 2: Generate Images
-            image_payload = {
+            # Create a form data structure as the API might expect form data instead of JSON
+            form_data = {
                 "prompt": prompt,
                 "negative_prompt": negative_prompt,
                 "style": model,
@@ -165,11 +167,16 @@ class ARTA(AsyncGeneratorProvider, ProviderModelMixin):
                 "seed": str(seed),
             }

+            # Debug: Print the payload being sent
+            print(f"Sending payload: {form_data}")
+
             headers = {
-                "Authorization": auth_data.get("idToken"),
+                "Authorization": auth_token,
+                # No Content-Type header for multipart/form-data, aiohttp sets it automatically
             }

-            async with session.post(cls.image_generation_url, data=image_payload, headers=headers, proxy=proxy) as image_response:
+            # Try with form data instead of JSON
+            async with session.post(cls.image_generation_url, data=form_data, headers=headers, proxy=proxy) as image_response:
                 await raise_error(f"Failed to initiate image generation", image_response)
                 image_data = await image_response.json()
                 record_id = image_data.get("record_id")
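Note: passing a plain dict to aiohttp's data= parameter sends the body form-encoded rather than as JSON, which is the fix this hunk applies. A self-contained sketch of the pattern (the URL and token are placeholders, not the real ARTA endpoint):

    import asyncio
    from aiohttp import ClientSession

    async def submit_form():
        form_data = {"prompt": "a red fox", "style": "Medieval"}
        headers = {"Authorization": "<id-token>"}  # hypothetical token value
        async with ClientSession() as session:
            # aiohttp encodes a dict passed via data= as application/x-www-form-urlencoded
            async with session.post("https://example.com/generate", data=form_data, headers=headers) as resp:
                return await resp.json()

    asyncio.run(submit_form())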
@@ -6,7 +6,6 @@ import re
 import json
 import random
 import string
-import base64
 from pathlib import Path
 from typing import Optional
 from datetime import datetime, timedelta
@@ -14,13 +13,11 @@ from datetime import datetime, timedelta
 from ..typing import AsyncResult, Messages, MediaListType
 from ..requests.raise_for_status import raise_for_status
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .openai.har_file import get_har_files
 from ..image import to_data_uri
-from ..cookies import get_cookies_dir
-from .helper import format_image_prompt, render_messages
-from ..providers.response import JsonConversation, ImageResponse
+from .helper import render_messages
+from ..providers.response import JsonConversation
 from ..tools.media import merge_media
-from ..errors import RateLimitError, NoValidHarFileError
+from ..errors import RateLimitError
 from .. import debug

 class Conversation(JsonConversation):
@@ -43,20 +40,66 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):

     default_model = "blackboxai"
     default_vision_model = default_model
-    default_image_model = 'flux'

-    # Free models (available without subscription)
-    fallback_models = [
+    # OpenRouter Free
+    openrouter_models = [
+        "Deepcoder 14B Preview",
+        "DeepHermes 3 Llama 3 8B Preview",
+        "DeepSeek R1 Zero",
+        "Dolphin3.0 Mistral 24B",
+        "Dolphin3.0 R1 Mistral 24B",
+        "Flash 3", # FIX (<reasoning> ◁</reasoning>)
+        "Gemini 2.0 Flash Experimental",
+        "Gemma 2 9B",
+        "Gemma 3 12B",
+        "Gemma 3 1B",
+        "Gemma 3 27B",
+        "Gemma 3 4B",
+        "Kimi VL A3B Thinking", # FIX (◁think▷ ◁/think▷)
+        "Llama 3.1 8B Instruct",
+        "Llama 3.1 Nemotron Ultra 253B v1",
+        "Llama 3.2 11B Vision Instruct",
+        "Llama 3.2 1B Instruct",
+        "Llama 3.2 3B Instruct",
+        "Llama 3.3 70B Instruct",
+        "Llama 3.3 Nemotron Super 49B v1",
+        "Llama 4 Maverick",
+        "Llama 4 Scout",
+        "Mistral 7B Instruct",
+        "Mistral Nemo",
+        "Mistral Small 3",
+        "Mistral Small 3.1 24B",
+        "Molmo 7B D",
+        "Moonlight 16B A3B Instruct",
+        "Qwen2.5 72B Instruct",
+        "Qwen2.5 7B Instruct",
+        "Qwen2.5 Coder 32B Instruct",
+        "Qwen2.5 VL 32B Instruct",
+        "Qwen2.5 VL 3B Instruct",
+        "Qwen2.5 VL 72B Instruct",
+        "Qwen2.5-VL 7B Instruct",
+        "Qwerky 72B",
+        "QwQ 32B",
+        "QwQ 32B Preview",
+        "QwQ 32B RpR v1",
+        "R1",
+        "R1 Distill Llama 70B",
+        "R1 Distill Qwen 14B",
+        "R1 Distill Qwen 32B",
+    ]
+
+    models = [
         default_model,
-        "gpt-4o-mini",
-        "DeepSeek-V3",
+        "o3-mini",
+        "gpt-4.1-nano",
+        "Claude-sonnet-3.7",
+        "Claude-sonnet-3.5",
         "DeepSeek-R1",
-        "Meta-Llama-3.3-70B-Instruct-Turbo",
         "Mistral-Small-24B-Instruct-2501",
-        "DeepSeek-LLM-Chat-(67B)",
-        "Qwen-QwQ-32B-Preview",
-        # Image models
-        "flux",
+        # OpenRouter Free
+        *openrouter_models,
         # Trending agent modes
         'Python Agent',
         'HTML Agent',
@@ -89,98 +132,66 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         'Heroku Agent'
     ]

-    # Premium models (require subscription)
-    premium_models = [
-        "GPT-4o",
-        "o1",
-        "o3-mini",
-        "Claude-sonnet-3.7",
-        "Claude-sonnet-3.5",
-        "Gemini-Flash-2.0",
-        "DBRX-Instruct",
-        "blackboxai-pro",
-        "Gemini-PRO"
-    ]
-
-    # Models available in the demo account
-    demo_models = [
-        default_model,
-        "blackboxai-pro",
-        "gpt-4o-mini",
-        "GPT-4o",
-        "o1",
-        "o3-mini",
-        "Claude-sonnet-3.7",
-        "Claude-sonnet-3.5",
-        "DeepSeek-V3",
-        "DeepSeek-R1",
-        "DeepSeek-LLM-Chat-(67B)",
-        "Meta-Llama-3.3-70B-Instruct-Turbo",
-        "Mistral-Small-24B-Instruct-2501",
-        "Qwen-QwQ-32B-Preview",
-        # Image models
-        "flux",
-        # Trending agent modes
-        'Python Agent',
-        'HTML Agent',
-        'Builder Agent',
-        'Java Agent',
-        'JavaScript Agent',
-        'React Agent',
-        'Android Agent',
-        'Flutter Agent',
-        'Next.js Agent',
-        'AngularJS Agent',
-        'Swift Agent',
-        'MongoDB Agent',
-        'PyTorch Agent',
-        'Xcode Agent',
-        'Azure Agent',
-        'Bitbucket Agent',
-        'DigitalOcean Agent',
-        'Docker Agent',
-        'Electron Agent',
-        'Erlang Agent',
-        'FastAPI Agent',
-        'Firebase Agent',
-        'Flask Agent',
-        'Git Agent',
-        'Gitlab Agent',
-        'Go Agent',
-        'Godot Agent',
-        'Google Cloud Agent',
-        'Heroku Agent'
-    ]
-
-    image_models = [default_image_model]
-    vision_models = [default_vision_model, 'GPT-4o', 'o1', 'o3-mini', 'Gemini-PRO', 'Gemini Agent', 'llama-3.1-8b Agent', 'llama-3.1-70b Agent', 'llama-3.1-405 Agent', 'Gemini-Flash-2.0', 'DeepSeek-V3']
-
-    userSelectedModel = ['GPT-4o', 'o1', 'o3-mini', 'Gemini-PRO', 'Claude-sonnet-3.7', 'Claude-sonnet-3.5', 'DeepSeek-V3', 'DeepSeek-R1', 'Meta-Llama-3.3-70B-Instruct-Turbo', 'Mistral-Small-24B-Instruct-2501', 'DeepSeek-LLM-Chat-(67B)', 'DBRX-Instruct', 'Qwen-QwQ-32B-Preview', 'Nous-Hermes-2-Mixtral-8x7B-DPO', 'Gemini-Flash-2.0']
+    vision_models = [default_vision_model, 'o3-mini']
+
+    userSelectedModel = ['o3-mini','Claude-sonnet-3.7', 'Claude-sonnet-3.5', 'DeepSeek-R1', 'Mistral-Small-24B-Instruct-2501'] + openrouter_models

     # Agent mode configurations
     agentMode = {
-        'GPT-4o': {'mode': True, 'id': "GPT-4o", 'name': "GPT-4o"},
-        'Gemini-PRO': {'mode': True, 'id': "Gemini-PRO", 'name': "Gemini-PRO"},
+        # OpenRouter Free
+        'Deepcoder 14B Preview': {'mode': True, 'id': "agentica-org/deepcoder-14b-preview:free", 'name': "Deepcoder 14B Preview"},
+        'DeepHermes 3 Llama 3 8B Preview': {'mode': True, 'id': "nousresearch/deephermes-3-llama-3-8b-preview:free", 'name': "DeepHermes 3 Llama 3 8B Preview"},
+        'DeepSeek R1 Zero': {'mode': True, 'id': "deepseek/deepseek-r1-zero:free", 'name': "DeepSeek R1 Zero"},
+        'Dolphin3.0 Mistral 24B': {'mode': True, 'id': "cognitivecomputations/dolphin3.0-mistral-24b:free", 'name': "Dolphin3.0 Mistral 24B"},
+        'Dolphin3.0 R1 Mistral 24B': {'mode': True, 'id': "cognitivecomputations/dolphin3.0-r1-mistral-24b:free", 'name': "Dolphin3.0 R1 Mistral 24B"},
+        'Flash 3': {'mode': True, 'id': "rekaai/reka-flash-3:free", 'name': "Flash 3"},
+        'Gemini 2.0 Flash Experimental': {'mode': True, 'id': "google/gemini-2.0-flash-exp:free", 'name': "Gemini 2.0 Flash Experimental"},
+        'Gemma 2 9B': {'mode': True, 'id': "google/gemma-2-9b-it:free", 'name': "Gemma 2 9B"},
+        'Gemma 3 12B': {'mode': True, 'id': "google/gemma-3-12b-it:free", 'name': "Gemma 3 12B"},
+        'Gemma 3 1B': {'mode': True, 'id': "google/gemma-3-1b-it:free", 'name': "Gemma 3 1B"},
+        'Gemma 3 27B': {'mode': True, 'id': "google/gemma-3-27b-it:free", 'name': "Gemma 3 27B"},
+        'Gemma 3 4B': {'mode': True, 'id': "google/gemma-3-4b-it:free", 'name': "Gemma 3 4B"},
+        'Kimi VL A3B Thinking': {'mode': True, 'id': "moonshotai/kimi-vl-a3b-thinking:free", 'name': "Kimi VL A3B Thinking"},
+        'Llama 3.1 8B Instruct': {'mode': True, 'id': "meta-llama/llama-3.1-8b-instruct:free", 'name': "Llama 3.1 8B Instruct"},
+        'Llama 3.1 Nemotron Ultra 253B v1': {'mode': True, 'id': "nvidia/llama-3.1-nemotron-ultra-253b-v1:free", 'name': "Llama 3.1 Nemotron Ultra 253B v1"},
+        'Llama 3.2 11B Vision Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-11b-vision-instruct:free", 'name': "Llama 3.2 11B Vision Instruct"},
+        'Llama 3.2 1B Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-1b-instruct:free", 'name': "Llama 3.2 1B Instruct"},
+        'Llama 3.2 3B Instruct': {'mode': True, 'id': "meta-llama/llama-3.2-3b-instruct:free", 'name': "Llama 3.2 3B Instruct"},
+        'Llama 3.3 70B Instruct': {'mode': True, 'id': "meta-llama/llama-3.3-70b-instruct:free", 'name': "Llama 3.3 70B Instruct"},
+        'Llama 3.3 Nemotron Super 49B v1': {'mode': True, 'id': "nvidia/llama-3.3-nemotron-super-49b-v1:free", 'name': "Llama 3.3 Nemotron Super 49B v1"},
+        'Llama 4 Maverick': {'mode': True, 'id': "meta-llama/llama-4-maverick:free", 'name': "Llama 4 Maverick"},
+        'Llama 4 Scout': {'mode': True, 'id': "meta-llama/llama-4-scout:free", 'name': "Llama 4 Scout"},
+        'Mistral 7B Instruct': {'mode': True, 'id': "mistralai/mistral-7b-instruct:free", 'name': "Mistral 7B Instruct"},
+        'Mistral Nemo': {'mode': True, 'id': "mistralai/mistral-nemo:free", 'name': "Mistral Nemo"},
+        'Mistral Small 3': {'mode': True, 'id': "mistralai/mistral-small-24b-instruct-2501:free", 'name': "Mistral Small 3"},
+        'Mistral Small 3.1 24B': {'mode': True, 'id': "mistralai/mistral-small-3.1-24b-instruct:free", 'name': "Mistral Small 3.1 24B"},
+        'Molmo 7B D': {'mode': True, 'id': "allenai/molmo-7b-d:free", 'name': "Molmo 7B D"},
+        'Moonlight 16B A3B Instruct': {'mode': True, 'id': "moonshotai/moonlight-16b-a3b-instruct:free", 'name': "Moonlight 16B A3B Instruct"},
+        'Qwen2.5 72B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-72b-instruct:free", 'name': "Qwen2.5 72B Instruct"},
+        'Qwen2.5 7B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-7b-instruct:free", 'name': "Qwen2.5 7B Instruct"},
+        'Qwen2.5 Coder 32B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-coder-32b-instruct:free", 'name': "Qwen2.5 Coder 32B Instruct"},
+        'Qwen2.5 VL 32B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-32b-instruct:free", 'name': "Qwen2.5 VL 32B Instruct"},
+        'Qwen2.5 VL 3B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-3b-instruct:free", 'name': "Qwen2.5 VL 3B Instruct"},
+        'Qwen2.5 VL 72B Instruct': {'mode': True, 'id': "qwen/qwen2.5-vl-72b-instruct:free", 'name': "Qwen2.5 VL 72B Instruct"},
+        'Qwen2.5-VL 7B Instruct': {'mode': True, 'id': "qwen/qwen-2.5-vl-7b-instruct:free", 'name': "Qwen2.5-VL 7B Instruct"},
+        'Qwerky 72B': {'mode': True, 'id': "featherless/qwerky-72b:free", 'name': "Qwerky 72B"},
+        'QwQ 32B': {'mode': True, 'id': "qwen/qwq-32b:free", 'name': "QwQ 32B"},
+        'QwQ 32B Preview': {'mode': True, 'id': "qwen/qwq-32b-preview:free", 'name': "QwQ 32B Preview"},
+        'QwQ 32B RpR v1': {'mode': True, 'id': "arliai/qwq-32b-arliai-rpr-v1:free", 'name': "QwQ 32B RpR v1"},
+        'R1': {'mode': True, 'id': "deepseek/deepseek-r1:free", 'name': "R1"},
+        'R1 Distill Llama 70B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-llama-70b:free", 'name': "R1 Distill Llama 70B"},
+        'R1 Distill Qwen 14B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-14b:free", 'name': "R1 Distill Qwen 14B"},
+        'R1 Distill Qwen 32B': {'mode': True, 'id': "deepseek/deepseek-r1-distill-qwen-32b:free", 'name': "R1 Distill Qwen 32B"},
+
+        # Default
         'Claude-sonnet-3.7': {'mode': True, 'id': "Claude-sonnet-3.7", 'name': "Claude-sonnet-3.7"},
         'Claude-sonnet-3.5': {'mode': True, 'id': "Claude-sonnet-3.5", 'name': "Claude-sonnet-3.5"},
-        'DeepSeek-V3': {'mode': True, 'id': "deepseek-chat", 'name': "DeepSeek-V3"},
         'DeepSeek-R1': {'mode': True, 'id': "deepseek-reasoner", 'name': "DeepSeek-R1"},
-        'Meta-Llama-3.3-70B-Instruct-Turbo': {'mode': True, 'id': "meta-llama/Llama-3.3-70B-Instruct-Turbo", 'name': "Meta-Llama-3.3-70B-Instruct-Turbo"},
-        'Gemini-Flash-2.0': {'mode': True, 'id': "Gemini/Gemini-Flash-2.0", 'name': "Gemini-Flash-2.0"},
         'Mistral-Small-24B-Instruct-2501': {'mode': True, 'id': "mistralai/Mistral-Small-24B-Instruct-2501", 'name': "Mistral-Small-24B-Instruct-2501"},
-        'DeepSeek-LLM-Chat-(67B)': {'mode': True, 'id': "deepseek-ai/deepseek-llm-67b-chat", 'name': "DeepSeek-LLM-Chat-(67B)"},
-        'DBRX-Instruct': {'mode': True, 'id': "databricks/dbrx-instruct", 'name': "DBRX-Instruct"},
-        'Qwen-QwQ-32B-Preview': {'mode': True, 'id': "Qwen/QwQ-32B-Preview", 'name': "Qwen-QwQ-32B-Preview"},
-        'Nous-Hermes-2-Mixtral-8x7B-DPO': {'mode': True, 'id': "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", 'name': "Nous-Hermes-2-Mixtral-8x7B-DPO"},
     }

     # Trending agent modes
     trendingAgentMode = {
-        'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"},
-        "Gemini Agent": {'mode': True, 'id': 'gemini'},
-        "llama-3.1-405 Agent": {'mode': True, 'id': "llama-3.1-405"},
-        'llama-3.1-70b Agent': {'mode': True, 'id': "llama-3.1-70b"},
-        'llama-3.1-8b Agent': {'mode': True, 'id': "llama-3.1-8b"},
         'Python Agent': {'mode': True, 'id': "python"},
         'HTML Agent': {'mode': True, 'id': "html"},
         'Builder Agent': {'mode': True, 'id': "builder"},
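Note: each agentMode entry maps a display name to an OpenRouter ":free" model id, and the id is what the provider sends to the backend. A small illustration using values from the table above:

    mode = Blackbox.agentMode["Llama 4 Scout"]
    print(mode["id"])  # meta-llama/llama-4-scout:free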
@@ -214,180 +225,78 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):

     # Complete list of all models (for authorized users)
     _all_models = list(dict.fromkeys([
-        *fallback_models, # Include all free models
-        *premium_models, # Include all premium models
-        *image_models,
+        *models, # Include all free models
         *list(agentMode.keys()),
         *list(trendingAgentMode.keys())
     ]))

-    # Initialize models with fallback_models
-    models = fallback_models
-
     model_aliases = {
-        "gpt-4o": "GPT-4o",
+        "gpt-4": default_model,
+        "gpt-4o": default_model,
+        "gpt-4o-mini": default_model,
         "claude-3.7-sonnet": "Claude-sonnet-3.7",
         "claude-3.5-sonnet": "Claude-sonnet-3.5",
-        "deepseek-v3": "DeepSeek-V3",
         "deepseek-r1": "DeepSeek-R1",
-        "deepseek-chat": "DeepSeek-LLM-Chat-(67B)",
-        "llama-3.3-70b": "Meta-Llama-3.3-70B-Instruct-Turbo",
-        "mixtral-small-24b": "Mistral-Small-24B-Instruct-2501",
-        "qwq-32b": "Qwen-QwQ-32B-Preview",
+        #
+        "deepcoder-14b": "Deepcoder 14B Preview",
+        "deephermes-3-8b": "DeepHermes 3 Llama 3 8B Preview",
+        "deepseek-r1-zero": "DeepSeek R1 Zero",
+        "deepseek-r1": "DeepSeek R1 Zero",
+        "dolphin-3.0-24b": "Dolphin3.0 Mistral 24B",
+        "dolphin-3.0-r1-24b": "Dolphin3.0 R1 Mistral 24B",
+        "reka-flash": "Flash 3",
+        "gemini-2.0-flash": "Gemini 2.0 Flash Experimental",
+        "gemma-2-9b": "Gemma 2 9B",
+        "gemma-3-12b": "Gemma 3 12B",
+        "gemma-3-1b": "Gemma 3 1B",
+        "gemma-3-27b": "Gemma 3 27B",
+        "gemma-3-4b": "Gemma 3 4B",
+        "kimi-vl-a3b-thinking": "Kimi VL A3B Thinking",
+        "llama-3.1-8b": "Llama 3.1 8B Instruct",
+        "nemotron-253b": "Llama 3.1 Nemotron Ultra 253B v1",
+        "llama-3.2-11b": "Llama 3.2 11B Vision Instruct",
+        "llama-3.2-1b": "Llama 3.2 1B Instruct",
+        "llama-3.2-3b": "Llama 3.2 3B Instruct",
+        "llama-3.3-70b": "Llama 3.3 70B Instruct",
+        "nemotron-49b": "Llama 3.3 Nemotron Super 49B v1",
+        "llama-4-maverick": "Llama 4 Maverick",
+        "llama-4-scout": "Llama 4 Scout",
+        "mistral-7b": "Mistral 7B Instruct",
+        "mistral-nemo": "Mistral Nemo",
+        "mistral-small-24b": "Mistral Small 3",
+        "mistral-small-24b": "Mistral-Small-24B-Instruct-2501",
+        "mistral-small-3.1-24b": "Mistral Small 3.1 24B",
+        "molmo-7b": "Molmo 7B D",
+        "moonlight-16b": "Moonlight 16B A3B Instruct",
+        "qwen-2.5-72b": "Qwen2.5 72B Instruct",
+        "qwen-2.5-7b": "Qwen2.5 7B Instruct",
+        "qwen-2.5-coder-32b": "Qwen2.5 Coder 32B Instruct",
+        "qwen-2.5-vl-32b": "Qwen2.5 VL 32B Instruct",
+        "qwen-2.5-vl-3b": "Qwen2.5 VL 3B Instruct",
+        "qwen-2.5-vl-72b": "Qwen2.5 VL 72B Instruct",
+        "qwen-2.5-vl-7b": "Qwen2.5-VL 7B Instruct",
+        "qwerky-72b": "Qwerky 72B",
+        "qwq-32b": "QwQ 32B",
+        "qwq-32b-preview": "QwQ 32B Preview",
+        "qwq-32b": "QwQ 32B Preview",
+        "qwq-32b-arliai": "QwQ 32B RpR v1",
+        "qwq-32b": "QwQ 32B RpR v1",
+        "deepseek-r1": "R1",
+        "deepseek-r1-distill-llama-70b": "R1 Distill Llama 70B",
+        "deepseek-r1": "R1 Distill Llama 70B",
+        "deepseek-r1-distill-qwen-14b": "R1 Distill Qwen 14B",
+        "deepseek-r1": "R1 Distill Qwen 14B",
+        "deepseek-r1-distill-qwen-32b": "R1 Distill Qwen 32B",
+        "deepseek-r1": "R1 Distill Qwen 32B",
     }

     @classmethod
-    async def get_models_async(cls) -> list:
+    def generate_session(cls, email: str, id_length: int = 21, days_ahead: int = 365) -> dict:
         """
-        Asynchronous version of get_models that checks subscription status.
-        Returns a list of available models based on subscription status.
-        Premium users get the full list of models.
-        Free users get fallback_models.
-        Demo accounts get demo_models.
-        """
-        # Check if there are valid session data in HAR files
-        session_data = cls._find_session_in_har_files()
-
-        if not session_data:
-            # For demo accounts - return demo models
-            debug.log(f"Blackbox: Returning demo model list with {len(cls.demo_models)} models")
-            return cls.demo_models
-
-        # Check if this is a demo session
-        demo_session = cls.generate_session()
-        is_demo = (session_data['user'].get('email') == demo_session['user'].get('email'))
-
-        if is_demo:
-            # For demo accounts - return demo models
-            debug.log(f"Blackbox: Returning demo model list with {len(cls.demo_models)} models")
-            return cls.demo_models
-
-        # For non-demo accounts, check subscription status
-        if 'user' in session_data and 'email' in session_data['user']:
-            subscription = await cls.check_subscription(session_data['user']['email'])
-            if subscription['status'] == "PREMIUM":
-                debug.log(f"Blackbox: Returning premium model list with {len(cls._all_models)} models")
-                return cls._all_models
-
-        # For free accounts - return free models
-        debug.log(f"Blackbox: Returning free model list with {len(cls.fallback_models)} models")
-        return cls.fallback_models
-
-    @classmethod
-    def get_models(cls) -> list:
-        """
-        Returns a list of available models based on authorization status.
-        Authorized users get the full list of models.
-        Free users get fallback_models.
-        Demo accounts get demo_models.
-
-        Note: This is a synchronous method that can't check subscription status,
-        so it falls back to the basic premium access check.
-        For more accurate results, use get_models_async when possible.
-        """
-        # Check if there are valid session data in HAR files
-        session_data = cls._find_session_in_har_files()
-
-        if not session_data:
-            # For demo accounts - return demo models
-            debug.log(f"Blackbox: Returning demo model list with {len(cls.demo_models)} models")
-            return cls.demo_models
-
-        # Check if this is a demo session
-        demo_session = cls.generate_session()
-        is_demo = (session_data['user'].get('email') == demo_session['user'].get('email'))
-
-        if is_demo:
-            # For demo accounts - return demo models
-            debug.log(f"Blackbox: Returning demo model list with {len(cls.demo_models)} models")
-            return cls.demo_models
-
-        # For non-demo accounts, check premium access
-        has_premium_access = cls._check_premium_access()
-
-        if has_premium_access:
-            # For premium users - all models
-            debug.log(f"Blackbox: Returning premium model list with {len(cls._all_models)} models")
-            return cls._all_models
-
-        # For free accounts - return free models
-        debug.log(f"Blackbox: Returning free model list with {len(cls.fallback_models)} models")
-        return cls.fallback_models
-
-    @classmethod
-    async def check_subscription(cls, email: str) -> dict:
-        """
-        Check subscription status for a given email using the Blackbox API.
-
-        Args:
-            email: The email to check subscription for
-
-        Returns:
-            dict: Subscription status information with keys:
-            - status: "PREMIUM" or "FREE"
-            - customerId: Customer ID if available
-            - isTrialSubscription: Whether this is a trial subscription
-        """
-        if not email:
-            return {"status": "FREE", "customerId": None, "isTrialSubscription": False}
-
-        headers = {
-            'accept': '*/*',
-            'accept-language': 'en',
-            'content-type': 'application/json',
-            'origin': 'https://www.blackbox.ai',
-            'referer': 'https://www.blackbox.ai/?ref=login-success',
-            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36'
-        }
-
-        try:
-            async with ClientSession(headers=headers) as session:
-                async with session.post(
-                    'https://www.blackbox.ai/api/check-subscription',
-                    json={"email": email}
-                ) as response:
-                    if response.status != 200:
-                        debug.log(f"Blackbox: Subscription check failed with status {response.status}")
-                        return {"status": "FREE", "customerId": None, "isTrialSubscription": False}
-
-                    result = await response.json()
-                    status = "PREMIUM" if result.get("hasActiveSubscription", False) else "FREE"
-
-                    return {
-                        "status": status,
-                        "customerId": result.get("customerId"),
-                        "isTrialSubscription": result.get("isTrialSubscription", False)
-                    }
-        except Exception as e:
-            debug.log(f"Blackbox: Error checking subscription: {e}")
-            return {"status": "FREE", "customerId": None, "isTrialSubscription": False}
-
-    @classmethod
-    def _check_premium_access(cls) -> bool:
-        """
-        Checks for an authorized session in HAR files.
-        Returns True if a valid session is found that differs from the demo.
-        """
-        try:
-            session_data = cls._find_session_in_har_files()
-            if not session_data:
-                return False
-
-            # Check if this is not a demo session
-            demo_session = cls.generate_session()
-            if (session_data['user'].get('email') != demo_session['user'].get('email')):
-                return True
-            return False
-        except Exception as e:
-            debug.log(f"Blackbox: Error checking premium access: {e}")
-            return False
-
-    @classmethod
-    def generate_session(cls, id_length: int = 21, days_ahead: int = 365) -> dict:
-        """
-        Generate a dynamic session with proper ID and expiry format.
+        Generate a dynamic session with proper ID and expiry format using a specific email.

         Args:
+            email: The email to use for this session
             id_length: Length of the numeric ID (default: 21)
             days_ahead: Number of days ahead for expiry (default: 365)
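Note: _all_models relies on the dict.fromkeys idiom to de-duplicate while preserving insertion order (dicts are ordered in Python 3.7+), since model names now appear both in models and as agentMode keys. A standalone illustration:

    names = ["blackboxai", "o3-mini", "blackboxai", "QwQ 32B"]
    unique = list(dict.fromkeys(names))
    print(unique)  # ['blackboxai', 'o3-mini', 'QwQ 32B']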
@@ -401,10 +310,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         future_date = datetime.now() + timedelta(days=days_ahead)
         expiry = future_date.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'

-        # Decode the encoded email
-        encoded_email = "Z2lzZWxlQGJsYWNrYm94LmFp" # Base64 encoded email
-        email = base64.b64decode(encoded_email).decode('utf-8')
-
         # Generate random image ID for the new URL format
        chars = string.ascii_letters + string.digits + "-"
        random_img_id = ''.join(random.choice(chars) for _ in range(48))
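Note: the expiry string trims strftime's microseconds down to milliseconds and appends a literal "Z", producing the ISO-8601 form JavaScript backends expect. For example:

    from datetime import datetime
    future_date = datetime(2026, 5, 1, 12, 30, 45, 123456)
    expiry = future_date.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
    print(expiry)  # 2026-05-01T12:30:45.123Z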
@@ -417,68 +322,14 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                 "image": image_url,
                 "id": numeric_id
             },
-            "expires": expiry
+            "expires": expiry,
+            "isNewUser": False
         }

-    @classmethod
-    def _find_session_in_har_files(cls) -> Optional[dict]:
-        """
-        Search for valid session data in HAR files.
-
-        Returns:
-            Optional[dict]: Session data if found, None otherwise
-        """
-        try:
-            for file in get_har_files():
-                try:
-                    with open(file, 'rb') as f:
-                        har_data = json.load(f)
-
-                    for entry in har_data['log']['entries']:
-                        # Only look at blackbox API responses
-                        if 'blackbox.ai/api' in entry['request']['url']:
-                            # Look for a response that has the right structure
-                            if 'response' in entry and 'content' in entry['response']:
-                                content = entry['response']['content']
-                                # Look for both regular and Google auth session formats
-                                if ('text' in content and
-                                    isinstance(content['text'], str) and
-                                    '"user"' in content['text'] and
-                                    '"email"' in content['text'] and
-                                    '"expires"' in content['text']):
-                                    try:
-                                        # Remove any HTML or other non-JSON content
-                                        text = content['text'].strip()
-                                        if text.startswith('{') and text.endswith('}'):
-                                            # Replace escaped quotes
-                                            text = text.replace('\\"', '"')
-                                            har_session = json.loads(text)
-
-                                            # Check if this is a valid session object
-                                            if (isinstance(har_session, dict) and
-                                                'user' in har_session and
-                                                'email' in har_session['user'] and
-                                                'expires' in har_session):
-
-                                                debug.log(f"Blackbox: Found session in HAR file: {file}")
-                                                return har_session
-                                    except json.JSONDecodeError as e:
-                                        # Only print error for entries that truly look like session data
-                                        if ('"user"' in content['text'] and
-                                            '"email"' in content['text']):
-                                            debug.log(f"Blackbox: Error parsing likely session data: {e}")
-                except Exception as e:
-                    debug.log(f"Blackbox: Error reading HAR file {file}: {e}")
-            return None
-        except NoValidHarFileError:
-            pass
-        except Exception as e:
-            debug.log(f"Blackbox: Error searching HAR files: {e}")
-        return None
-
     @classmethod
     async def fetch_validated(cls, url: str = "https://www.blackbox.ai", force_refresh: bool = False) -> Optional[str]:
-        cache_file = Path(get_cookies_dir()) / 'blackbox.json'
+        cache_path = Path(os.path.expanduser("~")) / ".g4f" / "cache"
+        cache_file = cache_path / 'blackbox.json'

         if not force_refresh and cache_file.exists():
             try:
@@ -517,7 +368,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                 if is_valid_context(context):
                     validated_value = match.group(1)

-                    cache_file.parent.mkdir(exist_ok=True)
+                    cache_file.parent.mkdir(exist_ok=True, parents=True)
                     try:
                         with open(cache_file, 'w') as f:
                             json.dump({'validated_value': validated_value}, f)
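Note: with the cache relocated to the nested ~/.g4f/cache directory, parents=True is needed so the intermediate directories are created too; exist_ok=True alone raises FileNotFoundError when ~/.g4f does not yet exist. A minimal sketch:

    from pathlib import Path
    cache_file = Path.home() / ".g4f" / "cache" / "blackbox.json"
    cache_file.parent.mkdir(exist_ok=True, parents=True)  # creates ~/.g4f and ~/.g4f/cache as needed
    cache_file.write_text('{"validated_value": "..."}')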
@@ -592,41 +443,14 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                 "title": ""
             }

-            # Get session data - try HAR files first, fall back to generated session
-            session_data = cls._find_session_in_har_files() or cls.generate_session()
-
-            # Log which session type is being used
-            demo_session = cls.generate_session()
-            is_demo = (session_data['user'].get('email') == demo_session['user'].get('email'))
-
-            if is_demo:
-                debug.log("Blackbox: Using generated demo session")
-                # For demo account, set default values without checking subscription
-                subscription_status = {"status": "FREE", "customerId": None, "isTrialSubscription": False}
-                # Check if the requested model is in demo_models
-                is_premium = model in cls.demo_models
-                if not is_premium:
-                    debug.log(f"Blackbox: Model {model} not available in demo account, falling back to default model")
-                    model = cls.default_model
-                    is_premium = True
-            else:
-                debug.log(f"Blackbox: Using session from HAR file (email: {session_data['user'].get('email', 'unknown')})")
-                # Only check subscription for non-demo accounts
-                subscription_status = {"status": "FREE", "customerId": None, "isTrialSubscription": False}
-                if session_data.get('user', {}).get('email'):
-                    subscription_status = await cls.check_subscription(session_data['user']['email'])
-                    debug.log(f"Blackbox: Subscription status for {session_data['user']['email']}: {subscription_status['status']}")
-
-                # Determine if user has premium access based on subscription status
-                if subscription_status['status'] == "PREMIUM":
-                    is_premium = True
-                else:
-                    # For free accounts, check if the requested model is in fallback_models
-                    is_premium = model in cls.fallback_models
-                    if not is_premium:
-                        debug.log(f"Blackbox: Model {model} not available in free account, falling back to default model")
-                        model = cls.default_model
-                        is_premium = True
+            # Generate a new email for each request instead of using the one stored in conversation
+            chars = string.ascii_lowercase + string.digits
+            random_team = ''.join(random.choice(chars) for _ in range(8))
+            request_email = f"{random_team}@blackbox.ai"
+
+            # Generate a session with the new email
+            session_data = cls.generate_session(request_email)
+            debug.log(f"Blackbox: Using generated session with email {request_email}")

             data = {
                 "messages": current_messages,
@@ -651,26 +475,28 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                 "mobileClient": False,
                 "userSelectedModel": model if model in cls.userSelectedModel else None,
                 "validated": conversation.validated_value,
-                "imageGenerationMode": model == cls.default_image_model,
+                "imageGenerationMode": False,
                 "webSearchModePrompt": False,
                 "deepSearchMode": False,
+                "designerMode": False,
                 "domains": None,
                 "vscodeClient": False,
                 "codeInterpreterMode": False,
                 "customProfile": {
+                    "additionalInfo": "",
+                    "enableNewChats": False,
                     "name": "",
                     "occupation": "",
-                    "traits": [],
-                    "additionalInfo": "",
-                    "enableNewChats": False
+                    "traits": []
                 },
                 "session": session_data,
-                "isPremium": is_premium,
+                "isPremium": True,
                 "subscriptionCache": {
-                    "status": subscription_status['status'],
-                    "customerId": subscription_status['customerId'],
-                    "isTrialSubscription": subscription_status['isTrialSubscription'],
-                    "lastChecked": int(datetime.now().timestamp() * 1000)
+                    "expiryTimestamp": None,
+                    "isTrialSubscription": False,
+                    "lastChecked": int(datetime.now().timestamp() * 1000),
+                    "status": "FREE",
+                    "customerId": None
                 },
                 "beastMode": False,
                 "reasoningMode": False,
@@ -689,24 +515,11 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                         if "You have reached your request limit for the hour" in chunk_text:
                             raise RateLimitError(chunk_text)
                         full_response.append(chunk_text)
-                        # Only yield chunks for non-image models
-                        if model != cls.default_image_model:
-                            yield chunk_text
+                        yield chunk_text

                 full_response_text = ''.join(full_response)

-                # For image models, check for image markdown
-                if model == cls.default_image_model:
-                    image_url_match = re.search(r'!\[.*?\]\((.*?)\)', full_response_text)
-                    if image_url_match:
-                        image_url = image_url_match.group(1)
-                        yield ImageResponse(urls=[image_url], alt=format_image_prompt(messages, prompt))
-                        return
-
-                # Handle conversation history once, in one place
+                # Handle conversation history
                 if return_conversation:
                     conversation.message_history.append({"role": "assistant", "content": full_response_text})
                     yield conversation
-                # For image models that didn't produce an image, fall back to text response
-                elif model == cls.default_image_model:
-                    yield full_response_text
@@ -35,9 +35,8 @@ class Chatai(AsyncGeneratorProvider, ProviderModelMixin):
     supports_message_history = True

     default_model = 'gpt-4o-mini-2024-07-18'
-    models = ['gpt-4o-mini-2024-07-18'] #
-
     model_aliases = {"gpt-4o-mini":default_model}
+    models = list(model_aliases.keys())

     # --- ProviderModelMixin Methods ---
     @classmethod
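Note: deriving models from the alias keys keeps the advertised model list and the alias table in sync automatically. A minimal sketch of the pattern:

    default_model = 'gpt-4o-mini-2024-07-18'
    model_aliases = {"gpt-4o-mini": default_model}
    models = list(model_aliases.keys())  # ['gpt-4o-mini'] — one entry per public alias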
@ -1,316 +1,77 @@
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
import time
|
|
||||||
from aiohttp import ClientSession, ClientTimeout
|
|
||||||
import json
|
import json
|
||||||
import asyncio
|
|
||||||
import random
|
|
||||||
import base64
|
import base64
|
||||||
import hashlib
|
import time
|
||||||
from yarl import URL
|
import random
|
||||||
|
import asyncio
|
||||||
|
from datetime import datetime
|
||||||
|
from aiohttp import ClientSession
|
||||||
|
|
||||||
from ..typing import AsyncResult, Messages, Cookies
|
from ..typing import AsyncResult, Messages
|
||||||
from ..requests.raise_for_status import raise_for_status
|
|
||||||
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
|
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
|
||||||
from .helper import format_prompt, get_last_user_message
|
from .helper import format_prompt, get_last_user_message
|
||||||
from ..providers.response import FinishReason, JsonConversation
|
from ..providers.response import FinishReason, JsonConversation
|
||||||
from ..errors import ModelNotSupportedError, ResponseStatusError, RateLimitError, TimeoutError, ConversationLimitError
|
|
||||||
|
|
||||||
try:
|
|
||||||
from bs4 import BeautifulSoup
|
|
||||||
has_bs4 = True
|
|
||||||
except ImportError:
|
|
||||||
has_bs4 = False
|
|
||||||
|
|
||||||
|
|
||||||
class DuckDuckGoSearchException(Exception):
|
|
||||||
"""Base exception class for duckduckgo_search."""
|
|
||||||
|
|
||||||
class DuckDuckGoChallengeError(ResponseStatusError):
|
|
||||||
"""Raised when DuckDuckGo presents a challenge that needs to be solved."""
|
|
||||||
|
|
||||||
class Conversation(JsonConversation):
|
class Conversation(JsonConversation):
|
||||||
vqd: str = None
|
"""Conversation class for DDG provider.
|
||||||
vqd_hash_1: str = None
|
|
||||||
|
Note: DDG doesn't actually support conversation history through its API,
|
||||||
|
so we simulate it by including the history in the user message.
|
||||||
|
"""
|
||||||
message_history: Messages = []
|
message_history: Messages = []
|
||||||
cookies: dict = {}
|
|
||||||
fe_version: str = None
|
|
||||||
|
|
||||||
def __init__(self, model: str):
|
def __init__(self, model: str):
|
||||||
self.model = model
|
self.model = model
|
||||||
|
self.message_history = []
|
||||||
|
|
||||||
|
|
||||||
class DDG(AsyncGeneratorProvider, ProviderModelMixin):
|
class DDG(AsyncGeneratorProvider, ProviderModelMixin):
|
||||||
label = "DuckDuckGo AI Chat"
|
label = "DuckDuckGo AI Chat"
|
||||||
url = "https://duckduckgo.com/aichat"
|
url = "https://duckduckgo.com"
|
||||||
api_endpoint = "https://duckduckgo.com/duckchat/v1/chat"
|
api_endpoint = "https://duckduckgo.com/duckchat/v1/chat"
|
||||||
status_url = "https://duckduckgo.com/duckchat/v1/status"
|
status_url = "https://duckduckgo.com/duckchat/v1/status"
|
||||||
|
|
||||||
working = True
|
working = True
|
||||||
|
needs_auth = False
|
||||||
supports_stream = True
|
supports_stream = True
|
||||||
supports_system_message = True
|
supports_system_message = True
|
||||||
supports_message_history = True
|
supports_message_history = True
|
||||||
|
|
||||||
default_model = "gpt-4o-mini"
|
default_model = "gpt-4o-mini"
|
||||||
|
model_aliases = {
|
||||||
# Model mapping from user-friendly names to API model names
|
|
||||||
_chat_models = {
|
|
||||||
"gpt-4": default_model,
|
"gpt-4": default_model,
|
||||||
"gpt-4o-mini": default_model,
|
"gpt-4o": default_model,
|
||||||
"llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
|
"llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
|
||||||
"claude-3-haiku": "claude-3-haiku-20240307",
|
"claude-3-haiku": "claude-3-haiku-20240307",
|
||||||
"o3-mini": "o3-mini",
|
"mistral-small-24b": "mistralai/Mistral-Small-24B-Instruct-2501",
|
||||||
"mixtral-small-24b": "mistralai/Mistral-Small-24B-Instruct-2501",
|
|
||||||
}
|
}
|
||||||
|
models = [default_model, "o3-mini"] + list(model_aliases.keys())
|
||||||
# Available models (user-friendly names)
|
|
||||||
models = list(_chat_models.keys())
|
|
||||||
|
|
||||||
last_request_time = 0
|
|
||||||
max_retries = 3
|
|
||||||
base_delay = 2
|
|
||||||
|
|
||||||
# Class variable to store the x-fe-version across instances
|
|
||||||
_chat_xfe = ""
|
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def sha256_base64(text: str) -> str:
|
def generate_fe_signals():
|
||||||
"""Return the base64 encoding of the SHA256 digest of the text."""
|
"""Generate a fake x-fe-signals header value"""
|
||||||
sha256_hash = hashlib.sha256(text.encode("utf-8")).digest()
|
current_time = int(time.time() * 1000)
|
||||||
return base64.b64encode(sha256_hash).decode()
|
|
||||||
|
signals_data = {
|
||||||
|
"start": current_time - 35000,
|
||||||
|
"events": [
|
||||||
|
{"name": "onboarding_impression_1", "delta": 383},
|
||||||
|
{"name": "onboarding_impression_2", "delta": 6004},
|
||||||
|
{"name": "onboarding_finish", "delta": 9690},
|
||||||
|
{"name": "startNewChat", "delta": 10082},
|
||||||
|
{"name": "initSwitchModel", "delta": 16586}
|
||||||
|
],
|
||||||
|
"end": 35163
|
||||||
|
}
|
||||||
|
|
||||||
|
signals_json = json.dumps(signals_data)
|
||||||
|
return base64.b64encode(signals_json.encode()).decode()
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def parse_dom_fingerprint(js_text: str) -> str:
|
def generate_fe_version():
|
||||||
if not has_bs4:
|
"""Generate a fake x-fe-version header value"""
|
||||||
# Fallback if BeautifulSoup is not available
|
return "serp_20250510_052906_ET-ed4f51dc2e106020bc4b"
|
||||||
return "1000"
|
|
||||||
|
|
||||||
try:
|
|
||||||
html_snippet = js_text.split("e.innerHTML = '")[1].split("';")[0]
|
|
||||||
offset_value = js_text.split("return String(")[1].split(" ")[0]
|
|
||||||
soup = BeautifulSoup(html_snippet, "html.parser")
|
|
||||||
corrected_inner_html = soup.body.decode_contents()
|
|
||||||
inner_html_length = len(corrected_inner_html)
|
|
||||||
fingerprint = int(offset_value) + inner_html_length
|
|
||||||
return str(fingerprint)
|
|
||||||
except Exception:
|
|
||||||
# Return a fallback value if parsing fails
|
|
||||||
return "1000"
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def parse_server_hashes(js_text: str) -> list:
|
|
||||||
try:
|
|
||||||
return js_text.split('server_hashes: ["', maxsplit=1)[1].split('"]', maxsplit=1)[0].split('","')
|
|
||||||
except Exception:
|
|
||||||
# Return a fallback value if parsing fails
|
|
||||||
return ["1", "2"]
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def build_x_vqd_hash_1(cls, vqd_hash_1: str, headers: dict) -> str:
|
|
||||||
"""Build the x-vqd-hash-1 header value."""
|
|
||||||
try:
|
|
||||||
# If we received a valid base64 string, try to decode it
|
|
||||||
if vqd_hash_1 and len(vqd_hash_1) > 20:
|
|
||||||
try:
|
|
||||||
# Try to decode and parse as JSON first
|
|
||||||
decoded_json = json.loads(base64.b64decode(vqd_hash_1).decode())
|
|
||||||
# If it's already a complete structure with meta, return it as is
|
|
||||||
if isinstance(decoded_json, dict) and "meta" in decoded_json:
|
|
||||||
return vqd_hash_1
|
|
||||||
|
|
||||||
# Otherwise, extract what we can from it
|
|
||||||
if isinstance(decoded_json, dict) and "server_hashes" in decoded_json:
|
|
||||||
server_hashes = decoded_json.get("server_hashes", ["1", "2"])
|
|
||||||
else:
|
|
||||||
# Fall back to parsing from string
|
|
||||||
decoded = base64.b64decode(vqd_hash_1).decode()
|
|
||||||
server_hashes = cls.parse_server_hashes(decoded)
|
|
||||||
except (json.JSONDecodeError, UnicodeDecodeError):
|
|
||||||
# If it's not valid JSON, try to parse it as a string
|
|
||||||
decoded = base64.b64decode(vqd_hash_1).decode()
|
|
||||||
server_hashes = cls.parse_server_hashes(decoded)
|
|
||||||
else:
|
|
||||||
# Default server hashes if we can't extract them
|
|
||||||
server_hashes = ["1", "2"]
|
|
||||||
|
|
||||||
# Generate fingerprints
|
|
||||||
dom_fingerprint = "1000" # Default value
|
|
||||||
ua_fingerprint = headers.get("User-Agent", "") + headers.get("sec-ch-ua", "")
|
|
||||||
ua_hash = cls.sha256_base64(ua_fingerprint)
|
|
||||||
dom_hash = cls.sha256_base64(dom_fingerprint)
|
|
||||||
|
|
||||||
# Create a challenge ID (random hex string)
|
|
||||||
challenge_id = ''.join(random.choice('0123456789abcdef') for _ in range(40)) + 'h8jbt'
|
|
||||||
|
|
||||||
# Build the complete structure including meta
|
|
||||||
final_result = {
|
|
||||||
"server_hashes": server_hashes,
|
|
||||||
"client_hashes": [ua_hash, dom_hash],
|
|
||||||
"signals": {},
|
|
||||||
"meta": {
|
|
||||||
"v": "1",
|
|
||||||
"challenge_id": challenge_id,
|
|
||||||
"origin": "https://duckduckgo.com",
|
|
||||||
"stack": "Error\nat ke (https://duckduckgo.com/dist/wpm.chat.js:1:29526)\nat async dispatchServiceInitialVQD (https://duckduckgo.com/dist/wpm.chat.js:1:45076)"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
base64_final_result = base64.b64encode(json.dumps(final_result).encode()).decode()
|
|
||||||
return base64_final_result
|
|
||||||
except Exception as e:
|
|
||||||
# If anything fails, return an empty string
|
|
||||||
return ""
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def validate_model(cls, model: str) -> str:
|
|
||||||
"""Validates and returns the correct model name for the API"""
|
|
||||||
if not model:
|
|
||||||
return cls.default_model
|
|
||||||
|
|
||||||
# Check aliases first
|
|
||||||
if model in cls.model_aliases:
|
|
||||||
model = cls.model_aliases[model]
|
|
||||||
|
|
||||||
# Check if it's a valid model name
|
|
||||||
if model not in cls.models:
|
|
||||||
raise ModelNotSupportedError(f"Model {model} not supported. Available models: {cls.models}")
|
|
||||||
|
|
||||||
return model
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
async def sleep(cls, multiplier=1.0):
|
|
||||||
"""Implements rate limiting between requests"""
|
|
||||||
now = time.time()
|
|
||||||
if cls.last_request_time > 0:
|
|
||||||
delay = max(0.0, 1.5 - (now - cls.last_request_time)) * multiplier
|
|
||||||
if delay > 0:
|
|
||||||
await asyncio.sleep(delay)
|
|
||||||
cls.last_request_time = time.time()
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
async def get_default_cookies(cls, session: ClientSession) -> dict:
|
|
||||||
"""Obtains default cookies needed for API requests"""
|
|
||||||
try:
|
|
||||||
await cls.sleep()
|
|
||||||
# Make initial request to get cookies
|
|
||||||
async with session.get(cls.url) as response:
|
|
||||||
# Set the required cookies
|
|
||||||
cookies = {}
|
|
||||||
cookies_dict = {'dcs': '1', 'dcm': '3'}
|
|
||||||
|
|
||||||
# Add any cookies from the response
|
|
||||||
for cookie in response.cookies.values():
|
|
||||||
cookies[cookie.key] = cookie.value
|
|
||||||
|
|
||||||
# Ensure our required cookies are set
|
|
||||||
for name, value in cookies_dict.items():
|
|
||||||
cookies[name] = value
|
|
||||||
url_obj = URL(cls.url)
|
|
||||||
session.cookie_jar.update_cookies({name: value}, url_obj)
|
|
||||||
|
|
||||||
# Make a second request to the status endpoint to get any additional cookies
|
|
||||||
headers = {
|
|
||||||
"accept": "text/event-stream",
|
|
||||||
"accept-language": "en",
|
|
||||||
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
|
|
||||||
"origin": "https://duckduckgo.com",
|
|
||||||
"referer": "https://duckduckgo.com/",
|
|
||||||
}
|
|
||||||
|
|
||||||
await cls.sleep()
|
|
||||||
                async with session.get(cls.status_url, headers=headers) as status_response:
                    # Add any cookies from the status response
                    for cookie in status_response.cookies.values():
                        cookies[cookie.key] = cookie.value
                        url_obj = URL(cls.url)
                        session.cookie_jar.update_cookies({cookie.key: cookie.value}, url_obj)

                return cookies
        except Exception as e:
            # Return at least the required cookies on error
            cookies = {'dcs': '1', 'dcm': '3'}
            url_obj = URL(cls.url)
            for name, value in cookies.items():
                session.cookie_jar.update_cookies({name: value}, url_obj)
            return cookies

    @classmethod
    async def fetch_fe_version(cls, session: ClientSession) -> str:
        """Fetches the fe-version from the initial page load."""
        if cls._chat_xfe:
            return cls._chat_xfe

        try:
            url = "https://duckduckgo.com/?q=DuckDuckGo+AI+Chat&ia=chat&duckai=1"
            await cls.sleep()
            async with session.get(url) as response:
                await raise_for_status(response)
                content = await response.text()

                # Extract x-fe-version components
                try:
                    # Try to extract the version components
                    xfe1 = content.split('__DDG_BE_VERSION__="', 1)[1].split('"', 1)[0]
                    xfe2 = content.split('__DDG_FE_CHAT_HASH__="', 1)[1].split('"', 1)[0]

                    # Format it like "serp_YYYYMMDD_HHMMSS_ET-hash"
                    from datetime import datetime
                    current_date = datetime.now().strftime("%Y%m%d_%H%M%S")
                    cls._chat_xfe = f"serp_{current_date}_ET-{xfe2}"

                    return cls._chat_xfe
                except Exception:
                    # Fallback to a default format if extraction fails
                    from datetime import datetime
                    current_date = datetime.now().strftime("%Y%m%d_%H%M%S")
                    cls._chat_xfe = f"serp_{current_date}_ET-78c2e87e3d286691cc21"
                    return cls._chat_xfe
        except Exception:
            # Fallback to a default format if request fails
            from datetime import datetime
            current_date = datetime.now().strftime("%Y%m%d_%H%M%S")
            cls._chat_xfe = f"serp_{current_date}_ET-78c2e87e3d286691cc21"
            return cls._chat_xfe

    @classmethod
    async def fetch_vqd_and_hash(cls, session: ClientSession, retry_count: int = 0) -> tuple[str, str]:
        """Fetches the required VQD token and hash for the chat session with retries."""
        headers = {
            "accept": "text/event-stream",
            "accept-language": "en",
            "cache-control": "no-cache",
            "pragma": "no-cache",
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
            "origin": "https://duckduckgo.com",
            "referer": "https://duckduckgo.com/",
            "x-vqd-accept": "1",
        }

        # Make sure we have cookies first
        if len(session.cookie_jar) == 0:
            await cls.get_default_cookies(session)

        try:
            await cls.sleep(multiplier=1.0 + retry_count * 0.5)
            async with session.get(cls.status_url, headers=headers) as response:
                await raise_for_status(response)

                vqd = response.headers.get("x-vqd-4", "")
                vqd_hash_1 = response.headers.get("x-vqd-hash-1", "")

                if vqd:
                    # Return the fetched vqd and vqd_hash_1
                    return vqd, vqd_hash_1

                response_text = await response.text()
                raise RuntimeError(f"Failed to fetch VQD token and hash: {response.status} {response_text}")

        except Exception as e:
            if retry_count < cls.max_retries:
                wait_time = cls.base_delay * (2 ** retry_count) * (1 + random.random())
                await asyncio.sleep(wait_time)
                return await cls.fetch_vqd_and_hash(session, retry_count + 1)
            else:
                raise RuntimeError(f"Failed to fetch VQD token and hash after {cls.max_retries} attempts: {str(e)}")
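The retry path above backs off exponentially with random jitter (`base_delay * 2**retry_count * (1 + random.random())`), which avoids retry storms against DuckDuckGo. A minimal standalone sketch of the same pattern; the helper name and defaults are illustrative, not taken from the provider:

import asyncio
import random

async def retry_with_jitter(op, max_retries: int = 3, base_delay: float = 2.0):
    """Retry an async callable with jittered exponential backoff.

    The wait grows as base_delay * 2**attempt, scaled by a random factor
    in [1, 2) so that concurrent clients do not retry in lockstep.
    """
    for attempt in range(max_retries + 1):
        try:
            return await op()
        except Exception:
            if attempt == max_retries:
                raise
            await asyncio.sleep(base_delay * (2 ** attempt) * (1 + random.random()))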
@ -318,229 +79,221 @@ class DDG(AsyncGeneratorProvider, ProviderModelMixin):
    @classmethod
    async def create_async_generator(
        model: str,
        messages: Messages,
        proxy: str = None,
        timeout: int = 60,
        cookies: Cookies = None,
        conversation: Conversation = None,
        return_conversation: bool = True,
        retry_count: int = 0,
        **kwargs
    ) -> AsyncResult:
        model = cls.validate_model(model)
        model = cls.get_model(model)
        retry_count = 0

        while retry_count <= cls.max_retries:
        # Initialize conversation if not provided
            try:
                session_timeout = ClientTimeout(total=timeout)
                async with ClientSession(timeout=session_timeout, cookies=cookies) as session:
                    # Step 1: Ensure we have the fe_version
                    if not cls._chat_xfe:
                        cls._chat_xfe = await cls.fetch_fe_version(session)

                    # Step 2: Initialize or update conversation
        if conversation is None:
                        # Get initial cookies if not provided
                        if not cookies:
                            await cls.get_default_cookies(session)

                        # Create a new conversation
            conversation = Conversation(model)
                        conversation.fe_version = cls._chat_xfe
            # Initialize message history from the provided messages
            conversation.message_history = messages.copy()
                        # Step 3: Get VQD tokens
                        vqd, vqd_hash_1 = await cls.fetch_vqd_and_hash(session)
                        conversation.vqd = vqd
                        conversation.vqd_hash_1 = vqd_hash_1
                        conversation.message_history = [{"role": "user", "content": format_prompt(messages)}]
        else:
                        # Update existing conversation with new message
            # Update message history with the last user message
                        last_message = get_last_user_message(messages.copy())
            last_message = None
                        conversation.message_history.append({"role": "user", "content": last_message})
            for msg in reversed(messages):
                if msg["role"] == "user":
                    last_message = msg
                    break

                    # Step 4: Prepare headers with proper x-vqd-hash-1
            if last_message and last_message not in conversation.message_history:
                    headers = {
                conversation.message_history.append(last_message)
                        "accept": "text/event-stream",
                        "accept-language": "en",
        # Base headers for all requests
                        "cache-control": "no-cache",
        base_headers = {
                        "content-type": "application/json",
            "accept-language": "en-US,en;q=0.9",
                        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
            "dnt": "1",
            "origin": "https://duckduckgo.com",
            "referer": "https://duckduckgo.com/",
                        "pragma": "no-cache",
            "sec-ch-ua": '"Chromium";v="135", "Not-A.Brand";v="8"',
                        "priority": "u=1, i",
                        "sec-ch-ua": '"Not:A-Brand";v="24", "Chromium";v="134"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Linux"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
                        "x-fe-version": conversation.fe_version or cls._chat_xfe,
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36",
                        "x-vqd-4": conversation.vqd,
        }

                    # For the first request, send an empty x-vqd-hash-1 header
        cookies = {'dcs': '1', 'dcm': '3'}
                    # This matches the behavior in the duckduckgo_search module
                    headers["x-vqd-hash-1"] = ""

                    # Step 5: Prepare the request data
        # Format the conversation history as a single prompt using format_prompt
                    # Convert the user-friendly model name to the API model name
        if len(conversation.message_history) > 1:
                    api_model = cls._chat_models.get(model, model)
            # If we have conversation history, format it as a single prompt
            formatted_prompt = format_prompt(conversation.message_history)
                    data = {
                        "model": api_model,
                        "messages": conversation.message_history,
                    }

                    # Step 6: Send the request
                    await cls.sleep(multiplier=1.0 + retry_count * 0.5)
                    async with session.post(cls.api_endpoint, json=data, headers=headers, proxy=proxy) as response:
                        # Handle 429 and 418 errors specifically
                        if response.status == 429:
                            response_text = await response.text()

                            if retry_count < cls.max_retries:
                                retry_count += 1
                                wait_time = cls.base_delay * (2 ** retry_count) * (1 + random.random())
                                await asyncio.sleep(wait_time)

                                # Get fresh tokens and cookies
                                cookies = await cls.get_default_cookies(session)
                                continue
        else:
                                raise RateLimitError(f"Rate limited after {cls.max_retries} retries")
            # If we don't have conversation history, just use the last user message
                        elif response.status == 418:
            formatted_prompt = get_last_user_message(messages)
                            # Check if it's a challenge error

        # Prepare the request data
        data = {
            "model": model,
            "messages": [{"role": "user", "content": formatted_prompt}],
            "canUseTools": False
        }

        # Create a new session for each request
        async with ClientSession(cookies=cookies) as session:
            # Step 1: Visit the main page to get initial cookies
            main_headers = base_headers.copy()
            main_headers.update({
                "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
                "priority": "u=0, i",
                "upgrade-insecure-requests": "1",
            })

                            try:
            try:
                                response_text = await response.text()
                async with session.get(f"{cls.url}/?q=DuckDuckGo+AI+Chat&ia=chat&duckai=1",
                                       headers=main_headers,
                                       proxy=proxy) as main_response:
                    main_response.raise_for_status()

                    # Extract fe_version from the page
                    page_content = await main_response.text()
                    fe_version = cls.generate_fe_version()
                                try:
                    try:
                                    response_json = json.loads(response_text)
                        xfe1 = page_content.split('__DDG_BE_VERSION__="', 1)[1].split('"', 1)[0]
                        xfe2 = page_content.split('__DDG_FE_CHAT_HASH__="', 1)[1].split('"', 1)[0]
                                    # Extract challenge data if available
                        fe_version = f"serp_20250510_052906_ET-{xfe2}"
                                    challenge_data = None
                    except Exception:
                                    if response_json.get("type") == "ERR_CHALLENGE" and "cd" in response_json:
                                        challenge_data = response_json["cd"]

                                    if retry_count < cls.max_retries:
                                        retry_count += 1
                                        wait_time = cls.base_delay * (2 ** retry_count) * (1 + random.random())
                                        await asyncio.sleep(wait_time)

                                        # Reset tokens and try again with fresh session
                                        conversation = None
                                        cls._chat_xfe = ""

                                        # Get fresh cookies
                                        cookies = await cls.get_default_cookies(session)

                                        # If we have challenge data, try to use it
                                        if challenge_data and isinstance(challenge_data, dict):
                                            # Extract any useful information from challenge data
                                            # This could be used to build a better response in the future
                        pass

                                        continue
                # Step 2: Get the VQD token from the status endpoint
                                    else:
                status_headers = base_headers.copy()
                                        raise DuckDuckGoChallengeError(f"Challenge error after {cls.max_retries} retries")
                status_headers.update({
                                except json.JSONDecodeError:
                    "accept": "*/*",
                                    # If we can't parse the JSON, assume it's a challenge error anyway
                    "cache-control": "no-store",
                                    if retry_count < cls.max_retries:
                    "priority": "u=1, i",
                                        retry_count += 1
                    "x-vqd-accept": "1",
                                        wait_time = cls.base_delay * (2 ** retry_count) * (1 + random.random())
                })
                                        await asyncio.sleep(wait_time)

                                        # Reset tokens and try again with fresh session
                async with session.get(cls.status_url,
                                        conversation = None
                                        headers=status_headers,
                                        cls._chat_xfe = ""
                                        proxy=proxy) as status_response:
                                        cookies = await cls.get_default_cookies(session)
                    status_response.raise_for_status()
                                        continue
                                    else:
                                        raise DuckDuckGoChallengeError(f"Challenge error after {cls.max_retries} retries")
                            except Exception as e:
                                # If any other error occurs during handling, still try to recover
                                if retry_count < cls.max_retries:
                                    retry_count += 1
                                    wait_time = cls.base_delay * (2 ** retry_count) * (1 + random.random())
                                    await asyncio.sleep(wait_time)

                                    # Reset tokens and try again with fresh session
                    # Get VQD token from headers
                                    conversation = None
                    vqd = status_response.headers.get("x-vqd-4", "")
                                    cls._chat_xfe = ""
                                    cookies = await cls.get_default_cookies(session)
                    if not vqd:
                                    continue
                        # If we couldn't get a VQD token, try to generate one
                                else:
                        vqd = f"4-{random.randint(10**29, 10**30 - 1)}"
                                    raise DuckDuckGoChallengeError(f"Challenge error after {cls.max_retries} retries: {str(e)}")

                # Step 3: Send the chat request
                chat_headers = base_headers.copy()
                chat_headers.update({
                    "accept": "text/event-stream",
                    "content-type": "application/json",
                    "priority": "u=1, i",
                    "x-fe-signals": cls.generate_fe_signals(),
                    "x-fe-version": fe_version,
                    "x-vqd-4": vqd,
                })

                async with session.post(cls.api_endpoint,
                                        json=data,
                                        headers=chat_headers,
                                        proxy=proxy) as response:
                    if response.status != 200:
                        error_text = await response.text()

                        # If we get an ERR_INVALID_VQD error and haven't retried too many times, try again
                        if "ERR_INVALID_VQD" in error_text and retry_count < 3:
                            # Wait a bit before retrying
                            await asyncio.sleep(1)
                            # Try again with a new session
                            async for chunk in cls.create_async_generator(
                                model=model,
                                messages=messages,
                                proxy=proxy,
                                conversation=conversation,
                                return_conversation=return_conversation,
                                retry_count=retry_count + 1,
                                **kwargs
                            ):
                                yield chunk
                            return

                        yield f"Error: HTTP {response.status} - {error_text}"
                        return

                        # For other status codes, use the standard error handler
                        await raise_for_status(response)
                        reason = None
                    full_message = ""

                        # Step 7: Process the streaming response
                    async for line in response.content:
                            line = line.decode("utf-8").strip()
                        line_text = line.decode("utf-8").strip()

                            if line.startswith("data:"):
                        if line_text.startswith("data:"):
                                try:
                            data_content = line_text[5:].strip()
                                    message = json.loads(line[5:].strip())
                                except json.JSONDecodeError:
                                    continue

                                if "action" in message and message["action"] == "error":
                            # Handle [DONE] marker
                                    error_type = message.get("type", "")
                            if data_content == "[DONE]":
                                    if message.get("status") == 429:
                                # Add the assistant's response to the conversation history
                                        if error_type == "ERR_CONVERSATION_LIMIT":
                                if full_message:
                                            raise ConversationLimitError(error_type)
                                    conversation.message_history.append({
                                        raise RateLimitError(error_type)
                                        "role": "assistant",
                                    elif message.get("status") == 418 and error_type == "ERR_CHALLENGE":
                                        "content": full_message
                                        # Handle challenge error by refreshing tokens and retrying
                                    })
                                        if retry_count < cls.max_retries:
                                            # Don't raise here, let the outer exception handler retry
                                            raise DuckDuckGoChallengeError(f"Challenge detected: {error_type}")
                                    raise DuckDuckGoSearchException(error_type)

                                if "message" in message:
                                # Return the conversation if requested
                                    if message["message"]:
                                        yield message["message"]
                                        full_message += message["message"]
                                        reason = "length"
                                    else:
                                        reason = "stop"

                        # Step 8: Update conversation with response information
                        # Always update the VQD tokens from the response headers
                        conversation.vqd = response.headers.get("x-vqd-4", conversation.vqd)
                        conversation.vqd_hash_1 = response.headers.get("x-vqd-hash-1", conversation.vqd_hash_1)

                        # Update cookies
                        conversation.cookies = {
                            n: c.value
                            for n, c in session.cookie_jar.filter_cookies(URL(cls.url)).items()
                        }

                        # If requested, return the updated conversation
                                if return_conversation:
                            conversation.message_history.append({"role": "assistant", "content": full_message})
                                    yield conversation

                        if reason is not None:
                                yield FinishReason("stop")
                            yield FinishReason(reason)

                    # If we got here, the request was successful
                                break

            except (RateLimitError, ResponseStatusError, DuckDuckGoChallengeError) as e:
                            try:
                if ("429" in str(e) or isinstance(e, DuckDuckGoChallengeError)) and retry_count < cls.max_retries:
                                message_data = json.loads(data_content)
                    retry_count += 1
                    wait_time = cls.base_delay * (2 ** retry_count) * (1 + random.random())
                    await asyncio.sleep(wait_time)

                    # For challenge errors, refresh tokens and cookies
                                # Handle error responses
                    if isinstance(e, DuckDuckGoChallengeError):
                                if message_data.get("action") == "error":
                        # Reset conversation to force new token acquisition
                                    error_type = message_data.get("type", "Unknown error")
                        conversation = None
                        # Clear class cache to force refresh
                                    # If we get an ERR_INVALID_VQD error and haven't retried too many times, try again
                        cls._chat_xfe = ""
                                    if error_type == "ERR_INVALID_VQD" and retry_count < 3:
                else:
                                        # Wait a bit before retrying
                    raise
                                        await asyncio.sleep(1)
            except asyncio.TimeoutError as e:
                                        # Try again with a new session
                raise TimeoutError(f"Request timed out: {str(e)}")
                                        async for chunk in cls.create_async_generator(
                                            model=model,
                                            messages=messages,
                                            proxy=proxy,
                                            conversation=conversation,
                                            return_conversation=return_conversation,
                                            retry_count=retry_count + 1,
                                            **kwargs
                                        ):
                                            yield chunk
                                        return

                                    yield f"Error: {error_type}"
                                    break

                                # Extract message content
                                if "message" in message_data:
                                    message_content = message_data.get("message", "")
                                    if message_content:
                                        yield message_content
                                        full_message += message_content

                            except json.JSONDecodeError:
                                continue
        except Exception as e:
            raise
            # If we get an exception and haven't retried too many times, try again
            if retry_count < 3:
                # Wait a bit before retrying
                await asyncio.sleep(1)
                # Try again with a new session
                async for chunk in cls.create_async_generator(
                    model=model,
                    messages=messages,
                    proxy=proxy,
                    conversation=conversation,
                    return_conversation=return_conversation,
                    retry_count=retry_count + 1,
                    **kwargs
                ):
                    yield chunk
            else:
                yield f"Error: {str(e)}"
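For reference, a minimal usage sketch of the simplified DDG provider through the library's client interface; the import paths match g4f's public API, while the chosen model name is only an example resolved through the provider's aliases:

from g4f.client import Client
from g4f.Provider import DDG

client = Client(provider=DDG)
response = client.chat.completions.create(
    model="gpt-4o-mini",  # short alias, mapped to the API model name internally
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)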
@ -8,12 +8,24 @@ class DeepInfraChat(OpenaiTemplate):
    working = True

    default_model = 'deepseek-ai/DeepSeek-V3'
    default_vision_model = 'openbmb/MiniCPM-Llama3-V-2_5'
    default_vision_model = 'microsoft/Phi-4-multimodal-instruct'
    vision_models = [default_vision_model, 'meta-llama/Llama-3.2-90B-Vision-Instruct']
    models = [
        'deepseek-ai/DeepSeek-Prover-V2-671B',
        'Qwen/Qwen3-235B-A22B',
        'Qwen/Qwen3-30B-A3B',
        'Qwen/Qwen3-32B',
        'Qwen/Qwen3-14B',
        'meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8',
        'meta-llama/Llama-4-Scout-17B-16E-Instruct',
        'microsoft/phi-4-reasoning-plus',
        'microsoft/meta-llama/Llama-Guard-4-12B',
        'Qwen/QwQ-32B',
        'deepseek-ai/DeepSeek-V3-0324',
        'google/gemma-3-27b-it',
        'google/gemma-3-12b-it',
        'meta-llama/Meta-Llama-3.1-8B-Instruct',
        'meta-llama/Llama-3.3-70B-Instruct-Turbo',
        'meta-llama/Llama-3.3-70B-Instruct',
        default_model,
        'mistralai/Mistral-Small-24B-Instruct-2501',
        'deepseek-ai/DeepSeek-R1',

@ -23,37 +35,48 @@ class DeepInfraChat(OpenaiTemplate):
        'microsoft/phi-4',
        'microsoft/WizardLM-2-8x22B',
        'Qwen/Qwen2.5-72B-Instruct',
        '01-ai/Yi-34B-Chat',
        'Qwen/Qwen2-72B-Instruct',
        'cognitivecomputations/dolphin-2.6-mixtral-8x7b',
        'cognitivecomputations/dolphin-2.9.1-llama-3-70b',
        'databricks/dbrx-instruct',
        'deepinfra/airoboros-70b',
        'lizpreciatior/lzlv_70b_fp16_hf',
        'microsoft/WizardLM-2-7B',
        'mistralai/Mixtral-8x22B-Instruct-v0.1',
    ] + vision_models
    model_aliases = {
        "deepseek-prover-v2-671b": "deepseek-ai/DeepSeek-Prover-V2-671B",
        "qwen-3-235b": "Qwen/Qwen3-235B-A22B",
        "qwen-3-30b": "Qwen/Qwen3-30B-A3B",
        "qwen-3-32b": "Qwen/Qwen3-32B",
        "qwen-3-14b": "Qwen/Qwen3-14B",
        "llama-4-maverick": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
        "llama-4-maverick-17b": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
        "llama-4-scout": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
        "llama-4-scout-17b": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
        "phi-4-reasoning-plus": "microsoft/phi-4-reasoning-plus",
        #"": "meta-llama/Llama-Guard-4-12B",
        "qwq-32b": "Qwen/QwQ-32B",
        "deepseek-v3": "deepseek-ai/DeepSeek-V3-0324",
        "deepseek-v3-0324": "deepseek-ai/DeepSeek-V3-0324",
        "gemma-3-27b": "google/gemma-3-27b-it",
        "gemma-3-12b": "google/gemma-3-12b-it",
        "phi-4-multimodal": "microsoft/Phi-4-multimodal-instruct",
        "llama-3.1-8b": "meta-llama/Meta-Llama-3.1-8B-Instruct",
        "llama-3.2-90b": "meta-llama/Llama-3.2-90B-Vision-Instruct",
        "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
        "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct",
        "deepseek-v3": default_model,
        "mixtral-small-24b": "mistralai/Mistral-Small-24B-Instruct-2501",
        "deepseek-r1": "deepseek-ai/DeepSeek-R1-Turbo",
        "deepseek-r1-turbo": "deepseek-ai/DeepSeek-R1-Turbo",
        "deepseek-r1": "deepseek-ai/DeepSeek-R1",
        "deepseek-r1-distill-llama": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
        "deepseek-r1-distill-llama-70b": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
        "deepseek-r1-distill-qwen": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
        "deepseek-r1-distill-qwen-32b": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
        "phi-4": "microsoft/phi-4",
        "wizardlm-2-8x22b": "microsoft/WizardLM-2-8x22B",
        "yi-34b": "01-ai/Yi-34B-Chat",
        "qwen-2-72b": "Qwen/Qwen2-72B-Instruct",
        "dolphin-2.6": "cognitivecomputations/dolphin-2.6-mixtral-8x7b",
        "dolphin-2.9": "cognitivecomputations/dolphin-2.9.1-llama-3-70b",
        "dbrx-instruct": "databricks/dbrx-instruct",
        "airoboros-70b": "deepinfra/airoboros-70b",
        "lzlv-70b": "lizpreciatior/lzlv_70b_fp16_hf",
        "wizardlm-2-7b": "microsoft/WizardLM-2-7B",
        "mixtral-8x22b": "mistralai/Mixtral-8x22B-Instruct-v0.1",
        "mixtral-8x22b": "mistralai/Mixtral-8x22B-Instruct-v0.1"
        "minicpm-2.5": "openbmb/MiniCPM-Llama3-V-2_5",
    }
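The expanded alias table maps short, provider-agnostic names onto full DeepInfra model IDs; note that when a key such as "deepseek-v3" appears twice in a Python dict literal, the last assignment wins. A hypothetical resolver illustrating the lookup the template base class is assumed to perform (names here are illustrative, not the provider's actual helper):

MODEL_ALIASES = {
    "qwen-3-32b": "Qwen/Qwen3-32B",
    "deepseek-v3": "deepseek-ai/DeepSeek-V3-0324",
}

def resolve_model(name: str, default: str = "deepseek-ai/DeepSeek-V3") -> str:
    # Return the full model ID for a short alias, pass unknown raw IDs
    # through unchanged, and fall back to the default when no name is given.
    if not name:
        return default
    return MODEL_ALIASES.get(name, name)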
@ -12,7 +12,7 @@ from .helper import format_prompt
class Dynaspark(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://dynaspark.onrender.com"
    login_url = None
    api_endpoint = "https://dynaspark.onrender.com/generate_response"
    api_endpoint = "https://dynaspark.onrender.com/dsai_fuck_u_spammer"

    working = True
    needs_auth = False
@ -1,47 +0,0 @@
from __future__ import annotations

import json
from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..requests.raise_for_status import raise_for_status
from .helper import format_prompt, get_system_prompt

class Goabror(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://goabror.uz"
    api_endpoint = "https://goabror.uz/api/gpt.php"
    working = True

    default_model = 'gpt-4'
    models = [default_model]

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        headers = {
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'accept-language': 'en-US,en;q=0.9',
            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36'
        }
        async with ClientSession(headers=headers) as session:
            params = {
                "user": format_prompt(messages, include_system=False),
                "system": get_system_prompt(messages),
            }
            async with session.get(f"{cls.api_endpoint}", params=params, proxy=proxy) as response:
                await raise_for_status(response)
                text_response = await response.text()
                try:
                    json_response = json.loads(text_response)
                    if "data" in json_response:
                        yield json_response["data"]
                    else:
                        yield text_response
                except json.JSONDecodeError:
                    yield text_response
@ -1,77 +0,0 @@
from __future__ import annotations

from ..typing import AsyncResult, Messages
from .template import OpenaiTemplate

class Jmuz(OpenaiTemplate):
    url = "https://discord.gg/Ew6JzjA2NR"
    api_base = "https://jmuz.me/gpt/api/v2"
    api_key = "prod"
    working = True
    supports_system_message = False

    default_model = "gpt-4o"
    model_aliases = {
        "qwq-32b": "qwq-32b-preview",
        "gemini-1.5-flash": "gemini-flash",
        "gemini-1.5-pro": "gemini-pro",
        "gemini-2.0-flash-thinking": "gemini-thinking",
        "deepseek-chat": "deepseek-v3",
    }

    @classmethod
    def get_models(cls, **kwargs):
        if not cls.models:
            cls.models = super().get_models(api_key=cls.api_key, api_base=cls.api_base)
        return cls.models

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        stream: bool = True,
        api_key: str = None,  # Remove api_key from kwargs
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)
        headers = {
            "Authorization": f"Bearer {cls.api_key}",
            "Content-Type": "application/json",
            "accept": "*/*",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
        }

        started = False
        buffer = ""
        async for chunk in super().create_async_generator(
            model=model,
            messages=messages,
            api_base=cls.api_base,
            api_key=cls.api_key,
            stream=cls.supports_stream,
            headers=headers,
            **kwargs
        ):
            if isinstance(chunk, str):
                buffer += chunk
                if "Join for free".startswith(buffer) or buffer.startswith("Join for free"):
                    if buffer.endswith("\n"):
                        buffer = ""
                    continue
                if "https://discord.gg/".startswith(buffer) or "https://discord.gg/" in buffer:
                    if "..." in buffer:
                        buffer = ""
                    continue
                if "o1-preview".startswith(buffer) or buffer.startswith("o1-preview"):
                    if "\n" in buffer:
                        buffer = ""
                    continue
                if not started:
                    buffer = buffer.lstrip()
                if buffer:
                    started = True
                    yield buffer
                    buffer = ""
            else:
                yield chunk
@ -1,28 +1,190 @@
from __future__ import annotations

from .hf.HuggingChat import HuggingChat
import json
import re
import uuid
from aiohttp import ClientSession, FormData

class LambdaChat(HuggingChat):
from ..typing import AsyncResult, Messages
from ..requests import raise_for_status
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt, get_last_user_message
from ..providers.response import JsonConversation, TitleGeneration, Reasoning, FinishReason

class LambdaChat(AsyncGeneratorProvider, ProviderModelMixin):
    label = "Lambda Chat"
    domain = "lambda.chat"
    url = "https://lambda.chat"
    url = f"https://{domain}"
    conversation_url = f"{url}/conversation"

    working = True
    use_nodriver = False
    needs_auth = False

    default_model = "deepseek-llama3.3-70b"
    reasoning_model = "deepseek-r1"
    image_models = []
    models = [
    fallback_models = [
        default_model,
        reasoning_model,
        "hermes-3-llama-3.1-405b-fp8",
        "hermes3-405b-fp8-128k",
        "llama3.1-nemotron-70b-instruct",
        "lfm-40b",
        "llama3.3-70b-instruct-fp8"
        "llama3.3-70b-instruct-fp8",
        "qwen25-coder-32b-instruct"
    ]
    model_aliases = {
        "deepseek-v3": default_model,
        "hermes-3": "hermes-3-llama-3.1-405b-fp8",
        "hermes-3-405b": "hermes3-405b-fp8-128k",
        "nemotron-70b": "llama3.1-nemotron-70b-instruct",
        "llama-3.3-70b": "llama3.3-70b-instruct-fp8"
        "qwen-2.5-coder-32b": "qwen25-coder-32b-instruct"
    }

    @classmethod
    async def create_async_generator(
        cls, model: str, messages: Messages,
        api_key: str = None,
        proxy: str = None,
        cookies: dict = None,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)
        headers = {
            "Origin": cls.url,
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36",
            "Accept": "*/*",
            "Accept-Language": "en-US,en;q=0.9",
            "Referer": cls.url,
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Priority": "u=1, i",
            "Pragma": "no-cache",
            "Cache-Control": "no-cache"
        }

        # Initialize cookies if not provided
        if cookies is None:
            cookies = {
                "hf-chat": str(uuid.uuid4())  # Generate a session ID
            }

        async with ClientSession(headers=headers, cookies=cookies) as session:
            # Step 1: Create a new conversation
            data = {"model": model}
            async with session.post(cls.conversation_url, json=data, proxy=proxy) as response:
                await raise_for_status(response)
                conversation_response = await response.json()
                conversation_id = conversation_response["conversationId"]

                # Update cookies with any new ones from the response
                for cookie_name, cookie in response.cookies.items():
                    cookies[cookie_name] = cookie.value

            # Step 2: Get data for this conversation to extract message ID
            async with session.get(
                f"{cls.conversation_url}/{conversation_id}/__data.json?x-sveltekit-invalidated=11",
                proxy=proxy
            ) as response:
                await raise_for_status(response)
                response_text = await response.text()

                # Update cookies again
                for cookie_name, cookie in response.cookies.items():
                    cookies[cookie_name] = cookie.value

            # Parse the JSON response to find the message ID
            message_id = None
            try:
                # Try to parse each line as JSON
                for line in response_text.splitlines():
                    if not line.strip():
                        continue

                    try:
                        data_json = json.loads(line)
                        if "type" in data_json and data_json["type"] == "data" and "nodes" in data_json:
                            for node in data_json["nodes"]:
                                if "type" in node and node["type"] == "data" and "data" in node:
                                    # Look for system message ID
                                    for item in node["data"]:
                                        if isinstance(item, dict) and "id" in item and "from" in item and item.get("from") == "system":
                                            message_id = item["id"]
                                            break

                        # If we found the ID, break out of the loop
                        if message_id:
                            break
                    except json.JSONDecodeError:
                        continue

                # If we still don't have a message ID, try to find any UUID in the response
                if not message_id:
                    uuid_pattern = r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
                    uuids = re.findall(uuid_pattern, response_text)
                    if uuids:
                        message_id = uuids[0]

                if not message_id:
                    raise ValueError("Could not find message ID in response")

            except (IndexError, KeyError, ValueError) as e:
                raise RuntimeError(f"Failed to parse conversation data: {str(e)}")

            # Step 3: Send the user message
            user_message = get_last_user_message(messages)

            # Prepare form data exactly as in the curl example
            form_data = FormData()
            form_data.add_field(
                "data",
                json.dumps({
                    "inputs": user_message,
                    "id": message_id,
                    "is_retry": False,
                    "is_continue": False,
                    "web_search": False,
                    "tools": []
                }),
                content_type="application/json"
            )

            async with session.post(
                f"{cls.conversation_url}/{conversation_id}",
                data=form_data,
                proxy=proxy
            ) as response:
                await raise_for_status(response)

                async for chunk in response.content:
                    if not chunk:
                        continue

                    chunk_str = chunk.decode('utf-8', errors='ignore')

                    try:
                        data = json.loads(chunk_str)
                    except json.JSONDecodeError:
                        continue

                    # Handling different types of responses
                    if data.get("type") == "stream" and "token" in data:
                        # Remove null characters from the token
                        token = data["token"].replace("\u0000", "")
                        if token:
                            yield token
                    elif data.get("type") == "title":
                        yield TitleGeneration(data.get("title", ""))
                    elif data.get("type") == "reasoning":
                        subtype = data.get("subtype")
                        token = data.get("token", "").replace("\u0000", "")
                        status = data.get("status", "")

                        if subtype == "stream" and token:
                            yield Reasoning(token=token)
                        elif subtype == "status" and status:
                            yield Reasoning(status=status)
                    elif data.get("type") == "finalAnswer":
                        yield FinishReason("stop")
                        break
                    elif data.get("type") == "status" and data.get("status") == "keepAlive":
                        # Just a keepalive, ignore
                        continue
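Because the new LambdaChat implementation yields structured events alongside plain text tokens, consumers need to branch on the item type. A minimal consumption sketch, assuming the response classes imported above and calling the generator directly:

import asyncio
from g4f.Provider import LambdaChat
from g4f.providers.response import Reasoning, TitleGeneration, FinishReason

async def main():
    messages = [{"role": "user", "content": "Summarize attention in one line."}]
    async for item in LambdaChat.create_async_generator("deepseek-r1", messages):
        if isinstance(item, Reasoning):
            continue  # intermediate reasoning tokens / status updates
        if isinstance(item, (TitleGeneration, FinishReason)):
            continue  # metadata events
        print(item, end="")  # plain answer tokens

asyncio.run(main())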
@ -8,189 +8,271 @@ from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_connector
from ..requests import raise_for_status
from ..errors import RateLimitError

models = {
    "claude-3-5-sonnet-20241022": {
        "id": "claude-3-5-sonnet-20241022",
        "name": "Claude-3.5-Sonnet-V2",
        "name": "claude-3-5-sonnet-20241022",
        "model": "Claude",
        "model": "claude-3-5-sonnet-20241022",
        "provider": "Anthropic",
        "maxLength": 800000,
        "maxLength": 0,
        "tokenLimit": 200000,
        "tokenLimit": 0,
        "context": "200K",
        "context": 0,
        "success_rate": 100,
        "tps": 25.366666666666667,
    },
    "claude-3-5-sonnet-20241022-t": {
        "id": "claude-3-5-sonnet-20241022-t",
        "name": "Claude-3.5-Sonnet-V2-T",
        "name": "claude-3-5-sonnet-20241022-t",
        "model": "Claude",
        "model": "claude-3-5-sonnet-20241022-t",
        "provider": "Anthropic",
        "maxLength": 800000,
        "maxLength": 0,
        "tokenLimit": 200000,
        "tokenLimit": 0,
        "context": "200K",
        "context": 0,
        "success_rate": 100,
        "tps": 39.820754716981135,
    },
    "claude-3-7-sonnet-20250219": {
        "id": "claude-3-7-sonnet-20250219",
        "name": "Claude-3.7-Sonnet",
        "name": "claude-3-7-sonnet-20250219",
        "model": "Claude",
        "model": "claude-3-7-sonnet-20250219",
        "provider": "Anthropic",
        "maxLength": 800000,
        "maxLength": 0,
        "tokenLimit": 200000,
        "tokenLimit": 0,
        "context": "200K",
        "context": 0,
        "success_rate": 100,
        "tps": 47.02970297029703,
    },
    "claude-3-7-sonnet-20250219-t": {
        "id": "claude-3-7-sonnet-20250219-t",
        "name": "Claude-3.7-Sonnet-T",
        "name": "claude-3-7-sonnet-20250219-t",
        "model": "Claude",
        "model": "claude-3-7-sonnet-20250219-t",
        "provider": "Anthropic",
        "maxLength": 800000,
        "maxLength": 0,
        "tokenLimit": 200000,
        "tokenLimit": 0,
        "context": "200K",
        "context": 0,
    },
        "success_rate": 100,
    "claude-3-7-sonnet-20250219-thinking": {
        "tps": 39.04289693593315,
        "id": "claude-3-7-sonnet-20250219-thinking",
        "name": "Claude-3.7-Sonnet-Thinking",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "claude-3-opus-20240229": {
        "id": "claude-3-opus-20240229",
        "name": "Claude-3-Opus",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "claude-3-sonnet-20240229": {
        "id": "claude-3-sonnet-20240229",
        "name": "Claude-3-Sonnet",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "deepseek-r1": {
        "id": "deepseek-r1",
        "name": "DeepSeek-R1",
        "model": "DeepSeek-R1",
        "provider": "DeepSeek",
        "maxLength": 400000,
        "tokenLimit": 100000,
        "context": "128K",
    },
    "deepseek-r1-distill-llama-70b": {
        "id": "deepseek-r1-distill-llama-70b",
        "name": "DeepSeek-R1-70B",
        "model": "DeepSeek-R1-70B",
        "provider": "DeepSeek",
        "maxLength": 400000,
        "tokenLimit": 100000,
        "context": "128K",
    },
    "deepseek-v3": {
        "id": "deepseek-v3",
        "name": "DeepSeek-V3",
        "name": "deepseek-v3",
        "model": "DeepSeek-V3",
        "model": "deepseek-v3",
        "provider": "DeepSeek",
        "maxLength": 400000,
        "maxLength": 0,
        "tokenLimit": 100000,
        "tokenLimit": 0,
        "context": "128K",
        "context": 0,
        "success_rate": 100,
        "tps": 40.484657419083646,
    },
    "gemini-1.0-pro-latest-123": {
        "id": "gemini-1.0-pro-latest-123",
        "name": "gemini-1.0-pro-latest-123",
        "model": "gemini-1.0-pro-latest-123",
        "provider": "Google",
        "maxLength": 0,
        "tokenLimit": 0,
        "context": 0,
        "success_rate": 100,
        "tps": 10,
    },
    "gemini-2.0-flash": {
        "id": "gemini-2.0-flash",
        "name": "Gemini-2.0-Flash",
        "name": "gemini-2.0-flash",
        "model": "Gemini",
        "model": "gemini-2.0-flash",
        "provider": "Google",
        "maxLength": 4000000,
        "maxLength": 0,
        "tokenLimit": 1000000,
        "tokenLimit": 0,
        "context": "1024K",
        "context": 0,
        "success_rate": 100,
        "tps": 216.44162436548223,
    },
    "gemini-2.0-flash-exp": {
        "id": "gemini-2.0-flash-exp",
        "name": "gemini-2.0-flash-exp",
        "model": "gemini-2.0-flash-exp",
        "provider": "Google",
        "maxLength": 0,
        "tokenLimit": 0,
        "context": 0,
        "success_rate": 0,
        "tps": 0,
    },
    "gemini-2.0-flash-thinking-exp": {
        "id": "gemini-2.0-flash-thinking-exp",
        "name": "Gemini-2.0-Flash-Thinking-Exp",
        "name": "gemini-2.0-flash-thinking-exp",
        "model": "Gemini",
        "model": "gemini-2.0-flash-thinking-exp",
        "provider": "Google",
        "maxLength": 4000000,
        "maxLength": 0,
        "tokenLimit": 1000000,
        "tokenLimit": 0,
        "context": "1024K",
        "context": 0,
        "success_rate": 0,
        "tps": 0,
    },
    "gemini-2.0-pro-exp": {
    "gemini-2.5-flash-preview-04-17": {
        "id": "gemini-2.0-pro-exp",
        "id": "gemini-2.5-flash-preview-04-17",
        "name": "Gemini-2.0-Pro-Exp",
        "name": "gemini-2.5-flash-preview-04-17",
        "model": "Gemini",
        "model": "gemini-2.5-flash-preview-04-17",
        "provider": "Google",
        "maxLength": 4000000,
        "maxLength": 0,
        "tokenLimit": 1000000,
        "tokenLimit": 0,
        "context": "1024K",
        "context": 0,
        "success_rate": 100,
        "tps": 189.84010840108402,
    },
    "gpt-4o-2024-08-06": {
    "gemini-2.5-pro-official": {
        "id": "gpt-4o-2024-08-06",
        "id": "gemini-2.5-pro-official",
        "name": "GPT-4o",
        "name": "gemini-2.5-pro-official",
        "model": "ChatGPT",
        "model": "gemini-2.5-pro-official",
        "provider": "Google",
        "maxLength": 0,
        "tokenLimit": 0,
        "context": 0,
        "success_rate": 100,
        "tps": 91.00613496932516,
    },
    "gemini-2.5-pro-preview-03-25": {
        "id": "gemini-2.5-pro-preview-03-25",
        "name": "gemini-2.5-pro-preview-03-25",
        "model": "gemini-2.5-pro-preview-03-25",
        "provider": "Google",
        "maxLength": 0,
        "tokenLimit": 0,
        "context": 0,
        "success_rate": 99.05660377358491,
        "tps": 45.050511247443765,
    },
    "gemini-2.5-pro-preview-05-06": {
        "id": "gemini-2.5-pro-preview-05-06",
        "name": "gemini-2.5-pro-preview-05-06",
        "model": "gemini-2.5-pro-preview-05-06",
        "provider": "Google",
        "maxLength": 0,
        "tokenLimit": 0,
        "context": 0,
        "success_rate": 100,
        "tps": 99.29617834394904,
    },
    "gpt-4-turbo-2024-04-09": {
        "id": "gpt-4-turbo-2024-04-09",
        "name": "gpt-4-turbo-2024-04-09",
        "model": "gpt-4-turbo-2024-04-09",
        "provider": "OpenAI",
        "maxLength": 260000,
        "maxLength": 0,
        "tokenLimit": 126000,
        "tokenLimit": 0,
        "context": "128K",
        "context": 0,
        "success_rate": 100,
        "tps": 1,
    },
    "gpt-4.1": {
        "id": "gpt-4.1",
        "name": "gpt-4.1",
        "model": "gpt-4.1",
        "provider": "OpenAI",
        "maxLength": 0,
        "tokenLimit": 0,
        "context": 0,
        "success_rate": 42.857142857142854,
        "tps": 19.58032786885246,
    },
    "gpt-4.1-mini": {
        "id": "gpt-4.1-mini",
        "name": "gpt-4.1-mini",
        "model": "gpt-4.1-mini",
        "provider": "OpenAI",
        "maxLength": 0,
        "tokenLimit": 0,
        "context": 0,
        "success_rate": 68.75,
        "tps": 12.677576601671309,
    },
    "gpt-4.1-mini-2025-04-14": {
        "id": "gpt-4.1-mini-2025-04-14",
        "name": "gpt-4.1-mini-2025-04-14",
        "model": "gpt-4.1-mini-2025-04-14",
        "provider": "OpenAI",
        "maxLength": 0,
        "tokenLimit": 0,
        "context": 0,
        "success_rate": 94.23076923076923,
        "tps": 8.297687861271676,
    },
    "gpt-4o-2024-11-20": {
        "id": "gpt-4o-2024-11-20",
        "name": "gpt-4o-2024-11-20",
        "model": "gpt-4o-2024-11-20",
        "provider": "OpenAI",
        "maxLength": 0,
        "tokenLimit": 0,
        "context": 0,
        "success_rate": 100,
        "tps": 73.3955223880597,
    },
    "gpt-4o-mini-2024-07-18": {
        "id": "gpt-4o-mini-2024-07-18",
        "name": "GPT-4o-Mini",
        "name": "gpt-4o-mini-2024-07-18",
        "model": "ChatGPT",
        "model": "gpt-4o-mini-2024-07-18",
        "provider": "OpenAI",
        "maxLength": 260000,
        "maxLength": 0,
        "tokenLimit": 126000,
        "tokenLimit": 0,
        "context": "128K",
        "context": 0,
    },
        "success_rate": 100,
    "gpt-4o-mini-free": {
        "tps": 26.874455100261553,
        "id": "gpt-4o-mini-free",
        "name": "GPT-4o-Mini-Free",
        "model": "ChatGPT",
        "provider": "OpenAI",
        "maxLength": 31200,
        "tokenLimit": 7800,
        "context": "8K",
    },
    "grok-3": {
        "id": "grok-3",
        "name": "Grok-3",
        "name": "grok-3",
        "model": "Grok",
        "model": "grok-3",
        "provider": "x.ai",
        "provider": "xAI",
        "maxLength": 800000,
        "maxLength": 0,
        "tokenLimit": 200000,
        "tokenLimit": 0,
        "context": "200K",
        "context": 0,
        "success_rate": 100,
        "tps": 51.110652663165794,
    },
    "grok-3-r1": {
    "grok-3-reason": {
        "id": "grok-3-r1",
        "id": "grok-3-reason",
        "name": "Grok-3-Thinking",
        "name": "grok-3-reason",
        "model": "Grok",
        "model": "grok-3-reason",
        "provider": "x.ai",
        "provider": "xAI",
        "maxLength": 800000,
        "maxLength": 0,
        "tokenLimit": 200000,
        "tokenLimit": 0,
        "context": "200K",
        "context": 0,
        "success_rate": 100,
        "tps": 62.81976744186046,
    },
    "o3-mini": {
    "o3-mini-2025-01-31": {
        "id": "o3-mini",
        "id": "o3-mini-2025-01-31",
        "name": "o3-mini",
        "name": "o3-mini-2025-01-31",
        "model": "o3",
        "model": "o3-mini-2025-01-31",
        "provider": "OpenAI",
        "provider": "Unknown",
        "maxLength": 400000,
        "maxLength": 0,
        "tokenLimit": 100000,
        "tokenLimit": 0,
        "context": "128K",
        "context": 0,
        "success_rate": 100,
        "tps": 125.31410256410257,
    },
    "qwen3-235b-a22b": {
        "id": "qwen3-235b-a22b",
        "name": "qwen3-235b-a22b",
        "model": "qwen3-235b-a22b",
        "provider": "Alibaba",
        "maxLength": 0,
        "tokenLimit": 0,
        "context": 0,
        "success_rate": 100,
        "tps": 25.846153846153847,
    },
}
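Each entry in the rewritten table now carries live quality metadata (`success_rate`, `tps`) instead of the old static context-size fields. A small illustrative helper for ranking entries by that metadata; this is not part of the provider itself:

def best_models(table: dict, min_success: float = 100) -> list[str]:
    # Keep only models reporting at least min_success, then sort by
    # tokens-per-second, fastest first.
    usable = {k: v for k, v in table.items() if v.get("success_rate", 0) >= min_success}
    return sorted(usable, key=lambda k: usable[k].get("tps", 0), reverse=True)

# best_models(models)[:3] would surface the highest-throughput entries,
# e.g. "gemini-2.0-flash" with tps around 216.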
class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://liaobots.site"
    url = "https://liaobots.work"
    working = True
    supports_message_history = True
    supports_system_message = True

    default_model = "gpt-4o-2024-08-06"
    default_model = "grok-3"
    models = list(models.keys())
    model_aliases = {
        # Anthropic
@ -198,25 +280,33 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
        "claude-3.5-sonnet": "claude-3-5-sonnet-20241022-t",
        "claude-3.7-sonnet": "claude-3-7-sonnet-20250219",
        "claude-3.7-sonnet": "claude-3-7-sonnet-20250219-t",
        "claude-3.7-sonnet-thinking": "claude-3-7-sonnet-20250219-thinking",
        "claude-3-opus": "claude-3-opus-20240229",
        "claude-3-sonnet": "claude-3-sonnet-20240229",

        # DeepSeek
        "deepseek-r1": "deepseek-r1-distill-llama-70b",
        #"deepseek-v3": "deepseek-v3",

        # Google
        "gemini-1.0-pro": "gemini-1.0-pro-latest-123",
        "gemini-2.0-flash": "gemini-2.0-flash-exp",
        "gemini-2.0-flash-thinking": "gemini-2.0-flash-thinking-exp",
        "gemini-2.0-pro": "gemini-2.0-pro-exp",
        "gemini-2.5-flash": "gemini-2.5-flash-preview-04-17",
        "gemini-2.5-pro": "gemini-2.5-pro-official",
        "gemini-2.5-pro": "gemini-2.5-pro-preview-03-25",
        "gemini-2.5-pro": "gemini-2.5-pro-preview-05-06",

        # OpenAI
        "gpt-4": default_model,
        "gpt-4-turbo": "gpt-4-turbo-2024-04-09",
        "gpt-4o": default_model,
        "gpt-4.1-mini": "gpt-4.1-mini-2025-04-14",
        "gpt-4": "gpt-4o-2024-11-20",
        "gpt-4o": "gpt-4o-2024-11-20",
        "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
        "gpt-4o-mini": "gpt-4o-mini-free",

        # xAI
        "grok-3-reason": "grok-3-reason",
        "o3-mini": "o3-mini-2025-01-31",
        "qwen-3-235b": "qwen3-235b-a22b",
    }

    _auth_code = ""
    _auth_code = None
    _cookie_jar = None

    @classmethod
@ -238,92 +328,213 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
        model = cls.get_model(model)

        headers = {
            "referer": "https://liaobots.work/",
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "application/json",
            "dnt": "1",
            "origin": "https://liaobots.work",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
            "priority": "u=1, i",
            "referer": "https://liaobots.work/en",
            "sec-ch-ua": "\"Chromium\";v=\"135\", \"Not-A.Brand\";v=\"8\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Linux\"",
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36"
        }

        async with ClientSession(
            headers=headers,
            cookie_jar=cls._cookie_jar,
            connector=get_connector(connector, proxy, True)
        ) as session:
            # First, get a valid auth code
            await cls.get_auth_code(session)

            # Create conversation ID
            conversation_id = str(uuid.uuid4())

            # Prepare request data
            data = {
                "conversationId": str(uuid.uuid4()),
                "conversationId": conversation_id,
                "model": models[model],
                "models": [{
                    "modelId": model,
                    "provider": models[model]["provider"]
                }],
                "search": "false",
                "messages": messages,
                "key": "",
                "prompt": kwargs.get("system_message", "You are a helpful assistant."),
                "prompt": kwargs.get("system_message", "你是 {{model}},一个由 {{provider}} 训练的大型语言模型,请仔细遵循用户的指示。")
            }
            if not cls._auth_code:
                async with session.post(
            # Try to make the chat request
                    "https://liaobots.work/recaptcha/api/login",
                    data={"token": "abcdefghijklmnopqrst"},
                    verify_ssl=False
                ) as response:
                    await raise_for_status(response)
            try:
                # Make the chat request with the current auth code
                async with session.post(
                    "https://liaobots.work/api/user",
                    f"{cls.url}/api/chat",
                    json={"authcode": cls._auth_code},
                    verify_ssl=False
                ) as response:
                    await raise_for_status(response)
                    cls._auth_code = (await response.json(content_type=None))["authCode"]
                    if not cls._auth_code:
                        raise RuntimeError("Empty auth code")
                    cls._cookie_jar = session.cookie_jar
                async with session.post(
                    "https://liaobots.work/api/chat",
                    json=data,
                    headers={"x-auth-code": cls._auth_code},
                    verify_ssl=False
                ) as response:
                    await raise_for_status(response)
                    # Check if we got a streaming response
                    content_type = response.headers.get("Content-Type", "")
                    if "text/event-stream" in content_type:
                        async for line in response.content:
                            if line.startswith(b"data: "):
                                yield json.loads(line[6:]).get("content")
                                try:
                            except:
                                    response_data = json.loads(line[6:])

                                    # Check for error response
                                    if response_data.get("error") is True:
                                        # Raise RateLimitError for payment required or other errors
                                        if "402" in str(response_data.get("res_status", "")):
                                            raise RateLimitError("This model requires payment or credits")
                                        else:
                                            error_msg = response_data.get('message', 'Unknown error')
                                            raise RateLimitError(f"Error: {error_msg}")

                                    # Process normal response
                                    if response_data.get("role") == "assistant" and "content" in response_data:
                                        content = response_data.get("content")
                                        yield content
                                except json.JSONDecodeError:
                                    continue
                    else:
                        # Not a streaming response, might be an error or HTML
                        response_text = await response.text()

                        # If we got HTML, we need to bypass CAPTCHA
                        if response_text.startswith("<!DOCTYPE html>"):
                            await cls.bypass_captcha(session)

                            # Get a fresh auth code
                            await cls.get_auth_code(session)

                            # Try the request again
                            async with session.post(
                    "https://liaobots.work/api/user",
                                f"{cls.url}/api/chat",
                    json={"authcode": "jGDRFOqHcZKAo"},
                    verify_ssl=False
                ) as response:
                    await raise_for_status(response)
                    cls._auth_code = (await response.json(content_type=None))["authCode"]
                    if not cls._auth_code:
                        raise RuntimeError("Empty auth code")
                    cls._cookie_jar = session.cookie_jar
                async with session.post(
                    "https://liaobots.work/api/chat",
                                json=data,
                                headers={"x-auth-code": cls._auth_code},
                                verify_ssl=False
                ) as response:
                            ) as response2:
                    await raise_for_status(response)
                                # Check if we got a streaming response
                    async for line in response.content:
                                content_type = response2.headers.get("Content-Type", "")
                                if "text/event-stream" in content_type:
                                    async for line in response2.content:
                                        if line.startswith(b"data: "):
                            yield json.loads(line[6:]).get("content")
                                            try:
                                                response_data = json.loads(line[6:])

                                                # Check for error response
                                                if response_data.get("error") is True:
                                                    # Raise RateLimitError for payment required or other errors
                                                    if "402" in str(response_data.get("res_status", "")):
                                                        raise RateLimitError("This model requires payment or credits")
                                                    else:
                                                        error_msg = response_data.get('message', 'Unknown error')
                                                        raise RateLimitError(f"Error: {error_msg}")

                                                # Process normal response
                                                if response_data.get("role") == "assistant" and "content" in response_data:
                                                    content = response_data.get("content")
                                                    yield content
                                            except json.JSONDecodeError:
                                                continue
                                else:
                                    raise RateLimitError("Failed to get streaming response")
                        else:
                            raise RateLimitError("Failed to connect to the service")
            except Exception as e:
                # If it's already a RateLimitError, re-raise it
                if isinstance(e, RateLimitError):
                    raise
                # Otherwise, wrap it in a RateLimitError
                raise RateLimitError(f"Error processing request: {str(e)}")

    @classmethod
    async def initialize_auth_code(cls, session: ClientSession) -> None:
    async def bypass_captcha(cls, session: ClientSession) -> None:
        """
        Initialize the auth code by making the necessary login requests.
        Bypass the CAPTCHA verification by directly making the recaptcha API request.
        """
|
||||||
|
try:
|
||||||
|
# First, try the direct recaptcha API request
|
||||||
async with session.post(
|
async with session.post(
|
||||||
"https://liaobots.work/api/user",
|
f"{cls.url}/recaptcha/api/login",
|
||||||
json={"authcode": "pTIQr4FTnVRfr"},
|
json={"token": "abcdefghijklmnopqrst"},
|
||||||
verify_ssl=False
|
verify_ssl=False
|
||||||
) as response:
|
) as response:
|
||||||
await raise_for_status(response)
|
if response.status == 200:
|
||||||
cls._auth_code = (await response.json(content_type=None))["authCode"]
|
try:
|
||||||
if not cls._auth_code:
|
response_text = await response.text()
|
||||||
raise RuntimeError("Empty auth code")
|
|
||||||
|
# Try to parse as JSON
|
||||||
|
try:
|
||||||
|
response_data = json.loads(response_text)
|
||||||
|
|
||||||
|
# Check if we got a successful response
|
||||||
|
if response_data.get("code") == 200:
|
||||||
cls._cookie_jar = session.cookie_jar
|
cls._cookie_jar = session.cookie_jar
|
||||||
|
except json.JSONDecodeError:
|
||||||
|
pass
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
async def ensure_auth_code(cls, session: ClientSession) -> None:
|
async def get_auth_code(cls, session: ClientSession) -> None:
|
||||||
"""
|
"""
|
||||||
Ensure the auth code is initialized, and if not, perform the initialization.
|
Get a valid auth code by sending a request with an empty authcode.
|
||||||
"""
|
"""
|
||||||
if not cls._auth_code:
|
try:
|
||||||
await cls.initialize_auth_code(session)
|
# Send request with empty authcode to get a new one
|
||||||
|
auth_request_data = {
|
||||||
|
"authcode": "",
|
||||||
|
"recommendUrl": "https://liaobots.work/zh"
|
||||||
|
}
|
||||||
|
|
||||||
|
async with session.post(
|
||||||
|
f"{cls.url}/api/user",
|
||||||
|
json=auth_request_data,
|
||||||
|
verify_ssl=False
|
||||||
|
) as response:
|
||||||
|
if response.status == 200:
|
||||||
|
response_text = await response.text()
|
||||||
|
|
||||||
|
try:
|
||||||
|
response_data = json.loads(response_text)
|
||||||
|
|
||||||
|
if "authCode" in response_data:
|
||||||
|
cls._auth_code = response_data["authCode"]
|
||||||
|
cls._cookie_jar = session.cookie_jar
|
||||||
|
return
|
||||||
|
except json.JSONDecodeError:
|
||||||
|
# If we got HTML, it might be the CAPTCHA page
|
||||||
|
if response_text.startswith("<!DOCTYPE html>"):
|
||||||
|
await cls.bypass_captcha(session)
|
||||||
|
|
||||||
|
# Try again after bypassing CAPTCHA
|
||||||
|
async with session.post(
|
||||||
|
f"{cls.url}/api/user",
|
||||||
|
json=auth_request_data,
|
||||||
|
verify_ssl=False
|
||||||
|
) as response2:
|
||||||
|
if response2.status == 200:
|
||||||
|
response_text2 = await response2.text()
|
||||||
|
|
||||||
|
try:
|
||||||
|
response_data2 = json.loads(response_text2)
|
||||||
|
|
||||||
|
if "authCode" in response_data2:
|
||||||
|
cls._auth_code = response_data2["authCode"]
|
||||||
|
cls._cookie_jar = session.cookie_jar
|
||||||
|
return
|
||||||
|
except json.JSONDecodeError:
|
||||||
|
pass
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
# If we're here, we couldn't get a valid auth code
|
||||||
|
# Set a default one as a fallback
|
||||||
|
cls._auth_code = "DvS3A5GTE9f0D" # Fallback to one of the provided auth codes
|
||||||
|
|
|
||||||
|
|
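Note: the rewritten Liaobots flow above replaces the old hard-coded authcode login with a two-step handshake: fetch an authCode from /api/user (with a CAPTCHA bypass and a hard-coded fallback code when that fails), then stream the chat completion from /api/chat with the code in an x-auth-code header. A minimal standalone sketch of that handshake, assuming only aiohttp and the endpoints shown in the diff:

    # Minimal sketch of the Liaobots auth handshake from the diff above.
    # Endpoints and payloads are taken from the new provider code; this is
    # an illustration, not the provider itself.
    import json
    import asyncio
    from aiohttp import ClientSession

    BASE = "https://liaobots.work"

    async def fetch_auth_code(session: ClientSession):
        payload = {"authcode": "", "recommendUrl": f"{BASE}/zh"}
        async with session.post(f"{BASE}/api/user", json=payload, ssl=False) as resp:
            text = await resp.text()
        if text.startswith("<!DOCTYPE html>"):
            # CAPTCHA page: the provider "bypasses" it by posting a dummy token.
            async with session.post(f"{BASE}/recaptcha/api/login",
                                    json={"token": "abcdefghijklmnopqrst"}, ssl=False):
                pass
            async with session.post(f"{BASE}/api/user", json=payload, ssl=False) as resp:
                text = await resp.text()
        try:
            return json.loads(text).get("authCode")
        except json.JSONDecodeError:
            return None

    async def main():
        async with ClientSession() as session:
            code = await fetch_auth_code(session)
            print("authCode:", code)
            # Step 2 (see the diff): POST f"{BASE}/api/chat" with
            # headers={"x-auth-code": code} and read the text/event-stream body.

    asyncio.run(main())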
@@ -1,24 +0,0 @@
-from __future__ import annotations
-
-from .template import OpenaiTemplate
-
-class OIVSCode(OpenaiTemplate):
-    label = "OI VSCode Server"
-    url = "https://oi-vscode-server.onrender.com"
-    api_base = "https://oi-vscode-server-2.onrender.com/v1"
-
-    working = True
-    needs_auth = False
-    supports_stream = True
-    supports_system_message = True
-    supports_message_history = True
-
-    default_model = "gpt-4o-mini-2024-07-18"
-    default_vision_model = default_model
-    vision_models = [default_model, "gpt-4o-mini"]
-    models = vision_models + ["deepseek-ai/DeepSeek-V3"]
-
-    model_aliases = {
-        "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
-        "deepseek-v3": "deepseek-ai/DeepSeek-V3"
-    }
@@ -81,9 +81,22 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
                 await ws.send_str("3")
                 continue
             try:
+                if not message.startswith("42"):
+                    continue
+
+                parsed_data = json.loads(message[2:])
+                message_type = parsed_data[0]
+                data = parsed_data[1]
+
+                # Handle error responses
+                if message_type.endswith("_query_progress") and data.get("status") == "failed":
+                    error_message = data.get("text", "Unknown API error")
+                    raise ResponseError(f"API Error: {error_message}")
+
+                # Handle normal responses
+                if "output" in data:
                     if last_message == 0 and model == cls.default_model:
                         yield "<think>"
-                data = json.loads(message[2:])[1]
                     yield data["output"][last_message:]
                     last_message = len(data["output"])
                     if data["final"]:
@@ -91,5 +104,8 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
                         yield Sources(data["citations"])
                         yield FinishReason("stop")
                         break
+            except ResponseError as e:
+                # Re-raise ResponseError directly
+                raise e
             except Exception as e:
-                raise ResponseError(f"Message: {message}") from e
+                raise ResponseError(f"Error processing message: {message}") from e
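Note: the PerplexityLabs change above hardens the websocket loop: frames are socket.io style, so only messages prefixed with "42" carry a JSON payload of the form [event_type, data], and a *_query_progress event whose status is "failed" is now surfaced as a ResponseError instead of a generic parse failure. A small sketch of that frame handling in isolation (parse_frame and the sample frame are illustrative, not part of the provider):

    # Sketch of "42"-prefixed socket.io frame parsing, as in the diff above.
    import json

    def parse_frame(message: str):
        """Return (event_type, data) for a payload frame, or None for pings."""
        if not message.startswith("42"):
            return None  # handshake/ping frames ("2", "3", ...) carry no payload
        event_type, data = json.loads(message[2:])
        if event_type.endswith("_query_progress") and data.get("status") == "failed":
            raise RuntimeError(f"API Error: {data.get('text', 'Unknown API error')}")
        return event_type, data

    # Illustrative frame; real event names come from the Perplexity API.
    frame = '42["demo_query_progress",{"status":"completed","output":"Hi","final":true}]'
    print(parse_frame(frame))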
@@ -51,33 +51,55 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
     image_models = [default_image_model]
     audio_models = {default_audio_model: []}
    extra_image_models = ["flux-pro", "flux-dev", "flux-schnell", "midjourney", "dall-e-3", "turbo"]
-    vision_models = [default_vision_model, "gpt-4o-mini", "openai", "openai-large", "searchgpt"]
+    vision_models = [default_vision_model, "gpt-4o-mini", "openai", "openai-large", "openai-reasoning", "searchgpt"]
     _models_loaded = False
     # https://github.com/pollinations/pollinations/blob/master/text.pollinations.ai/generateTextPortkey.js#L15
     model_aliases = {
         ### Text Models ###
         "gpt-4o-mini": "openai",
+        "gpt-4.1": "openai",
+        "gpt-4.1-mini": "openai",
+        "gpt-4.1-nano": "openai-fast",
+        "gpt-4.1-nano": "openai-small",
         "gpt-4": "openai-large",
         "gpt-4o": "openai-large",
-        "gpt-4.1": "openai",
-        "gpt-4.1-nano": "openai",
-        "gpt-4.1-mini": "openai-large",
-        "gpt-4.1-xlarge": "openai-xlarge",
+        "gpt-4.1": "openai-large",
+        "gpt-4.1": "openai-xlarge",
         "o4-mini": "openai-reasoning",
+        "gpt-4.1-mini": "openai-roblox",
+        "gpt-4.1-mini": "roblox-rp",
+        "command-r-plus-08-2024": "command-r",
+        "gemini-2.5-flash": "gemini",
+        "gemini-2.0-flash-thinking": "gemini-thinking",
         "qwen-2.5-coder-32b": "qwen-coder",
         "llama-3.3-70b": "llama",
         "llama-4-scout": "llamascout",
-        "mistral-nemo": "mistral",
-        "llama-3.1-8b": "llamalight",
-        "llama-3.3-70b": "llama-scaleway",
-        "phi-4": "phi",
+        "llama-4-scout-17b": "llamascout",
+        "mistral-small-3.1-24b": "mistral",
         "deepseek-r1": "deepseek-reasoning-large",
         "deepseek-r1-distill-llama-70b": "deepseek-reasoning-large",
+        "deepseek-r1-distill-llama-70b": "deepseek-r1-llama",
+        #"mistral-small-3.1-24b": "unity", # Personas
+        #"mirexa": "mirexa", # Personas
+        #"midijourney": "midijourney", # Personas
+        #"rtist": "rtist", # Personas
+        #"searchgpt": "searchgpt",
+        #"evil": "evil", # Personas
+        "deepseek-r1": "deepseek-reasoning",
         "deepseek-r1-distill-qwen-32b": "deepseek-reasoning",
+        "phi-4": "phi",
+        #"pixtral-12b": "pixtral",
+        #"hormoz-8b": "hormoz",
+        "qwq-32b": "qwen-qwq",
+        #"hypnosis-tracy-7b": "hypnosis-tracy", # Personas
+        #"mistral-?": "sur", # Personas
         "deepseek-v3": "deepseek",
-        "llama-3.2-11b": "llama-vision",
+        "deepseek-v3-0324": "deepseek",
+        #"bidara": "bidara", # Personas
+
+        ### Audio Models ###
         "gpt-4o-audio": "openai-audio",
-        "gpt-4o-audio-preview": "openai-audio",
+        #"gpt-4o-audio-preview": "openai-audio",
 
         ### Image Models ###
         "sdxl-turbo": "turbo",
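Note: the PollinationsAI alias table maps public g4f model names (keys) to Pollinations backend names (values); the commented entries are personas deliberately left unexposed. As committed, the literal contains repeated keys ("gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", "deepseek-r1", "deepseek-r1-distill-llama-70b"), and in a Python dict literal the last occurrence silently wins. A quick way to audit a table like this for that pitfall:

    # Detect duplicate keys in a dict literal by parsing its source with ast;
    # a repeated key means every earlier mapping is silently discarded.
    import ast

    source = '''{
        "gpt-4.1": "openai",
        "gpt-4.1": "openai-large",
        "gpt-4.1": "openai-xlarge",
        "deepseek-v3": "deepseek",
    }'''

    node = ast.parse(source, mode="eval").body
    seen, dupes = set(), set()
    for key in node.keys:
        if key.value in seen:      # each key is an ast.Constant for string keys
            dupes.add(key.value)
        seen.add(key.value)

    print("duplicate keys:", dupes)  # -> {'gpt-4.1'}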
@@ -35,13 +35,11 @@ except ImportError as e:
     debug.error("Audio providers not loaded:", e)
 
 try:
-    from .AllenAI import AllenAI
     from .ARTA import ARTA
     from .Blackbox import Blackbox
     from .Chatai import Chatai
     from .ChatGLM import ChatGLM
     from .ChatGpt import ChatGpt
-    from .ChatGptEs import ChatGptEs
     from .Cloudflare import Cloudflare
     from .Copilot import Copilot
     from .DDG import DDG
@@ -53,16 +51,11 @@ except ImportError as e:
 try:
     from .Free2GPT import Free2GPT
     from .FreeGpt import FreeGpt
-    from .FreeRouter import FreeRouter
     from .GizAI import GizAI
-    from .Glider import Glider
-    from .Goabror import Goabror
     from .ImageLabs import ImageLabs
-    from .Jmuz import Jmuz
     from .LambdaChat import LambdaChat
     from .Liaobots import Liaobots
     from .LMArenaProvider import LMArenaProvider
-    from .OIVSCode import OIVSCode
 except ImportError as e:
     debug.error("Providers not loaded (F-L):", e)
 try:
@@ -1,75 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-import json
-
-from ...typing import AsyncResult, Messages
-from ...providers.response import ImageResponse
-from ...errors import ResponseError
-from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..helper import format_image_prompt
-from .raise_for_status import raise_for_status
-
-class BlackForestLabs_Flux1Schnell(AsyncGeneratorProvider, ProviderModelMixin):
-    label = "BlackForestLabs Flux-1-Schnell"
-    url = "https://black-forest-labs-flux-1-schnell.hf.space"
-    api_endpoint = "https://black-forest-labs-flux-1-schnell.hf.space/call/infer"
-
-    working = True
-
-    default_model = "black-forest-labs-flux-1-schnell"
-    default_image_model = default_model
-    model_aliases = {"flux-schnell": default_image_model, "flux": default_image_model}
-    image_models = list(model_aliases.keys())
-    models = image_models
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        prompt: str = None,
-        width: int = 768,
-        height: int = 768,
-        num_inference_steps: int = 2,
-        seed: int = 0,
-        randomize_seed: bool = True,
-        **kwargs
-    ) -> AsyncResult:
-        width = max(32, width - (width % 8))
-        height = max(32, height - (height % 8))
-        prompt = format_image_prompt(messages, prompt)
-        payload = {
-            "data": [
-                prompt,
-                seed,
-                randomize_seed,
-                width,
-                height,
-                num_inference_steps
-            ]
-        }
-        async with ClientSession() as session:
-            async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
-                await raise_for_status(response)
-                response_data = await response.json()
-                event_id = response_data['event_id']
-                while True:
-                    async with session.get(f"{cls.api_endpoint}/{event_id}", proxy=proxy) as status_response:
-                        await raise_for_status(status_response)
-                        while not status_response.content.at_eof():
-                            event = await status_response.content.readuntil(b'\n\n')
-                            if event.startswith(b'event:'):
-                                event_parts = event.split(b'\ndata: ')
-                                if len(event_parts) < 2:
-                                    continue
-                                event_type = event_parts[0].split(b': ')[1]
-                                data = event_parts[1]
-                                if event_type == b'error':
-                                    raise ResponseError(f"Error generating image: {data.decode(errors='ignore')}")
-                                elif event_type == b'complete':
-                                    json_data = json.loads(data)
-                                    image_url = json_data[0]['url']
-                                    yield ImageResponse(images=[image_url], alt=prompt)
-                                    return
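Note: for reference, the provider deleted above drove its Hugging Face Space through Gradio's REST queue: POST /call/infer returns an event_id, and a follow-up GET on /call/infer/{event_id} streams server-sent events until a complete or error event arrives. The polling pattern, reduced to its core (the endpoint is the one from the removed file and may well be offline, which is why the provider was dropped):

    # Core of the Gradio /call polling loop used by the removed provider.
    # Illustrative only; the Space behind this endpoint may no longer respond.
    import json
    import asyncio
    from aiohttp import ClientSession

    API = "https://black-forest-labs-flux-1-schnell.hf.space/call/infer"

    async def generate(prompt: str) -> str:
        async with ClientSession() as session:
            payload = {"data": [prompt, 0, True, 768, 768, 2]}
            async with session.post(API, json=payload) as resp:
                event_id = (await resp.json())["event_id"]
            event = ""
            async with session.get(f"{API}/{event_id}") as stream:
                async for raw in stream.content:
                    line = raw.decode(errors="ignore").strip()
                    if line.startswith("event:"):
                        event = line.split(":", 1)[1].strip()
                    elif line.startswith("data:") and event == "complete":
                        return json.loads(line.split(":", 1)[1])[0]["url"]
                    elif line.startswith("data:") and event == "error":
                        raise RuntimeError(f"Error generating image: {line}")

    # asyncio.run(generate("a lighthouse at dusk"))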
@@ -17,14 +17,15 @@ class CohereForAI_C4AI_Command(AsyncGeneratorProvider, ProviderModelMixin):
     working = True
 
     default_model = "command-a-03-2025"
-    model_aliases = {
-        "command-a": default_model,
-        "command-r-plus": "command-r-plus-08-2024",
-        "command-r": "command-r-08-2024",
-        "command-r": "command-r",
-        "command-r7b": "command-r7b-12-2024",
-    }
-    models = list(model_aliases.keys())
+    models = [
+        default_model,
+        "command-r-plus-08-2024",
+        "command-r-08-2024",
+        "command-r-plus",
+        "command-r",
+        "command-r7b-12-2024",
+        "command-r7b-arabic-02-2025",
+    ]
 
     @classmethod
     def get_model(cls, model: str, **kwargs) -> str:
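Note: moving CohereForAI_C4AI_Command from a model_aliases dict to a plain models list means get_model no longer has to translate every public name: the usual g4f resolution order is an exact match against models first, then an alias lookup. A sketch of that order (an assumption about ProviderModelMixin's behaviour, not a copy of it):

    # Hypothetical helper mirroring g4f's usual model resolution order.
    def resolve_model(model, models, aliases, default):
        if not model:
            return default
        if model in models:
            return model          # exact match, no alias needed
        if model in aliases:
            return aliases[model]
        raise ValueError(f"Model not supported: {model}")

    models = ["command-a-03-2025", "command-r-plus-08-2024", "command-r-08-2024",
              "command-r-plus", "command-r", "command-r7b-12-2024",
              "command-r7b-arabic-02-2025"]
    print(resolve_model("command-r", models, {}, models[0]))  # -> command-r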
@@ -15,7 +15,7 @@ from ... import debug
 from .DeepseekAI_JanusPro7b import get_zerogpu_token
 from .raise_for_status import raise_for_status
 
-class Microsoft_Phi_4(AsyncGeneratorProvider, ProviderModelMixin):
+class Microsoft_Phi_4_Multimodal(AsyncGeneratorProvider, ProviderModelMixin):
     label = "Microsoft Phi-4"
     space = "microsoft/phi-4-multimodal"
     url = f"https://huggingface.co/spaces/{space}"
@@ -29,9 +29,9 @@ class Microsoft_Phi_4(AsyncGeneratorProvider, ProviderModelMixin):
 
     default_model = "phi-4-multimodal"
     default_vision_model = default_model
-    model_aliases = {"phi-4": default_vision_model}
-    vision_models = list(model_aliases.keys())
+    vision_models = [default_vision_model]
     models = vision_models
+    model_aliases = {"phi-4": default_vision_model}
 
     @classmethod
     def run(cls, method: str, session: StreamSession, prompt: str, conversation: JsonConversation, media: list = None):
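Note: the rename to Microsoft_Phi_4_Multimodal is code-facing only: model_aliases still maps "phi-4" to the default vision model, so callers that select the model by name keep working. Roughly, assuming g4f's client API is used as usual (only the import path below comes from this diff):

    # Selecting the renamed provider through its stable "phi-4" alias.
    from g4f.client import Client
    from g4f.Provider.hf_space import Microsoft_Phi_4_Multimodal

    client = Client(provider=Microsoft_Phi_4_Multimodal)
    response = client.chat.completions.create(
        model="phi-4",  # alias still resolves to "phi-4-multimodal"
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response.choices[0].message.content)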
@@ -31,7 +31,15 @@ class Qwen_Qwen_3(AsyncGeneratorProvider, ProviderModelMixin):
         "qwen3-1.7b",
         "qwen3-0.6b",
     }
-    model_aliases = {model: model for model in models}
+    model_aliases = {
+        "qwen-3-235b": default_model,
+        "qwen-3-32b": "qwen3-32b",
+        "qwen-3-30b": "qwen3-30b-a3b",
+        "qwen-3-14b": "qwen3-14b",
+        "qwen-3-4b": "qwen3-4b",
+        "qwen-3-1.7b": "qwen3-1.7b",
+        "qwen-3-0.6b": "qwen3-0.6b",
+    }
 
     @classmethod
     async def create_async_generator(
@@ -19,7 +19,10 @@ class Voodoohop_Flux1Schnell(AsyncGeneratorProvider, ProviderModelMixin):
 
     default_model = "voodoohop-flux-1-schnell"
     default_image_model = default_model
-    model_aliases = {"flux-schnell": default_model, "flux": default_model}
+    model_aliases = {
+        "flux-schnell": default_image_model,
+        "flux": default_image_model
+    }
     image_models = list(model_aliases.keys())
     models = image_models
@@ -7,10 +7,9 @@ from ...errors import ResponseError
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 
 from .BlackForestLabs_Flux1Dev import BlackForestLabs_Flux1Dev
-from .BlackForestLabs_Flux1Schnell import BlackForestLabs_Flux1Schnell
 from .CohereForAI_C4AI_Command import CohereForAI_C4AI_Command
 from .DeepseekAI_JanusPro7b import DeepseekAI_JanusPro7b
-from .Microsoft_Phi_4 import Microsoft_Phi_4
+from .Microsoft_Phi_4_Multimodal import Microsoft_Phi_4_Multimodal
 from .Qwen_QVQ_72B import Qwen_QVQ_72B
 from .Qwen_Qwen_2_5 import Qwen_Qwen_2_5
 from .Qwen_Qwen_2_5M import Qwen_Qwen_2_5M
@@ -30,10 +29,9 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
     default_vision_model = Qwen_QVQ_72B.default_model
     providers = [
         BlackForestLabs_Flux1Dev,
-        BlackForestLabs_Flux1Schnell,
         CohereForAI_C4AI_Command,
         DeepseekAI_JanusPro7b,
-        Microsoft_Phi_4,
+        Microsoft_Phi_4_Multimodal,
         Qwen_QVQ_72B,
         Qwen_Qwen_2_5,
         Qwen_Qwen_2_5M,
g4f/Provider/needs_auth/BlackboxPro.py (new file, 1355 lines)
File diff suppressed because it is too large.
@@ -64,9 +64,8 @@ class GigaChat(AsyncGeneratorProvider, ProviderModelMixin):
     supports_system_message = True
     supports_stream = True
     needs_auth = True
-    default_model = "GigaChat:latest"
-    models = [default_model, "GigaChat-Plus", "GigaChat-Pro"]
-    model_aliases = {"gigachat": default_model}
+    default_model = "GigaChat"
+    models = ["GigaChat-2", "GigaChat-2-Pro", "GigaChat-2-Max", default_model, "GigaChat-Pro", "GigaChat-Max"]
 
     @classmethod
     async def create_async_generator(
@@ -1,5 +1,6 @@
 from .Anthropic import Anthropic
 from .BingCreateImages import BingCreateImages
+from .BlackboxPro import BlackboxPro
 from .CablyAI import CablyAI
 from .Cerebras import Cerebras
 from .CopilotAccount import CopilotAccount
@@ -2,13 +2,13 @@ from __future__ import annotations
 import json
 from uuid import uuid4
 from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages, MediaListType
-from ..image import to_bytes, is_accepted_format, to_data_uri
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..requests.raise_for_status import raise_for_status
-from ..providers.response import FinishReason, JsonConversation
-from .helper import format_prompt, get_last_user_message, format_image_prompt
-from ..tools.media import merge_media
+from ...typing import AsyncResult, Messages, MediaListType
+from ...image import to_bytes, is_accepted_format, to_data_uri
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...requests.raise_for_status import raise_for_status
+from ...providers.response import FinishReason, JsonConversation
+from ..helper import format_prompt, get_last_user_message, format_image_prompt
+from ...tools.media import merge_media
 
 
 class Conversation(JsonConversation):
@@ -29,7 +29,7 @@ class AllenAI(AsyncGeneratorProvider, ProviderModelMixin):
     login_url = None
     api_endpoint = "https://olmo-api.allen.ai/v4/message/stream"
 
-    working = True
+    working = False
     needs_auth = False
     use_nodriver = False
     supports_stream = True
@@ -10,16 +10,16 @@ try:
 except ImportError:
     has_curl_cffi = False
 
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-from ..errors import MissingRequirementsError
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+from ...errors import MissingRequirementsError
 
 class ChatGptEs(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://chatgpt.es"
     api_endpoint = "https://chatgpt.es/wp-admin/admin-ajax.php"
 
-    working = True
+    working = False
     supports_stream = True
     supports_system_message = False
     supports_message_history = False
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from .template import OpenaiTemplate
+from ..template import OpenaiTemplate
 
 class FreeRouter(OpenaiTemplate):
     label = "CablyAI FreeRouter"
@@ -1,12 +1,12 @@
 from __future__ import annotations
 
-from .template import OpenaiTemplate
+from ..template import OpenaiTemplate
 
 class Glider(OpenaiTemplate):
     label = "Glider"
     url = "https://glider.so"
     api_endpoint = "https://glider.so/api/chat"
-    working = True
+    working = False
 
     default_model = 'chat-llama-3-1-70b'
     models = [
@@ -17,7 +17,7 @@ class RubiksAI(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://rubiks.ai"
     api_endpoint = "https://rubiks.ai/search/api/"
 
-    working = True
+    working = False
     supports_stream = True
     supports_system_message = True
     supports_message_history = True
@@ -5,15 +5,19 @@ from .AiChats import AiChats
 from .Airforce import Airforce
 from .AutonomousAI import AutonomousAI
 from .AIUncensored import AIUncensored
+from .AllenAI import AllenAI
 from .AmigoChat import AmigoChat
 from .Aura import Aura
 from .Chatgpt4o import Chatgpt4o
 from .Chatgpt4Online import Chatgpt4Online
+from .ChatGptEs import ChatGptEs
 from .ChatgptFree import ChatgptFree
 from .ChatGptt import ChatGptt
 from .DarkAI import DarkAI
 from .FlowGpt import FlowGpt
 from .FreeNetfly import FreeNetfly
+from .FreeRouter import FreeRouter
+from .Glider import Glider
 from .GPROChat import GPROChat
 from .Koala import Koala
 from .MagickPen import MagickPen
g4f/models.py (664 lines changed)
File diff suppressed because it is too large.
@@ -10,7 +10,7 @@ from ..Provider.hf_space import HuggingSpace
 from .. import Provider
 from .. import models
 from ..Provider import Cloudflare, Gemini, Grok, DeepSeekAPI, PerplexityLabs, LambdaChat, PollinationsAI, FreeRouter
-from ..Provider import Microsoft_Phi_4, DeepInfraChat, Blackbox, EdgeTTS, gTTS, MarkItDown, HarProvider
+from ..Provider import Microsoft_Phi_4_Multimodal, DeepInfraChat, Blackbox, EdgeTTS, gTTS, MarkItDown, HarProvider
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 
 class AnyProvider(AsyncGeneratorProvider, ProviderModelMixin):
@@ -100,7 +100,7 @@ class AnyProvider(AsyncGeneratorProvider, ProviderModelMixin):
                 cls.image_models.extend([clean_name(model) for model in provider.image_models])
                 cls.vision_models.extend([clean_name(model) for model in provider.vision_models])
                 cls.video_models.extend([clean_name(model) for model in provider.video_models])
-        for provider in [Microsoft_Phi_4, PollinationsAI]:
+        for provider in [Microsoft_Phi_4_Multimodal, PollinationsAI]:
             if provider.working and getattr(provider, "parent", provider.__name__) not in ignored:
                 cls.audio_models.update(provider.audio_models)
         cls.models_count.update({model: all_models.count(model) for model in all_models if all_models.count(model) > cls.models_count.get(model, 0)})
@@ -137,7 +137,7 @@ class AnyProvider(AsyncGeneratorProvider, ProviderModelMixin):
         if "audio" in kwargs or "audio" in kwargs.get("modalities", []):
             providers = [PollinationsAI, EdgeTTS, gTTS]
         elif has_audio:
-            providers = [PollinationsAI, Microsoft_Phi_4, MarkItDown]
+            providers = [PollinationsAI, Microsoft_Phi_4_Multimodal, MarkItDown]
         elif has_image:
             providers = models.default_vision.best_provider.providers
         else:
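Note: AnyProvider's dispatch shown above picks the provider pool by modality: explicit audio output requests go to PollinationsAI/EdgeTTS/gTTS, audio inputs to PollinationsAI/Microsoft_Phi_4_Multimodal/MarkItDown, image inputs to the default vision providers, and everything else to the text pool. Condensed into a standalone function (names as in the diff; the surrounding class machinery is omitted):

    # Condensed sketch of AnyProvider's modality dispatch.
    def pick_providers(kwargs, has_audio, has_image, audio_out, audio_in, vision, text):
        if "audio" in kwargs or "audio" in kwargs.get("modalities", []):
            return audio_out   # [PollinationsAI, EdgeTTS, gTTS]
        if has_audio:
            return audio_in    # [PollinationsAI, Microsoft_Phi_4_Multimodal, MarkItDown]
        if has_image:
            return vision      # models.default_vision.best_provider.providers
        return text

    print(pick_providers({"modalities": ["audio"]}, False, False,
                         ["audio-out"], ["audio-in"], ["vision"], ["text"]))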