mirror of
https://github.com/xtekky/gpt4free.git
synced 2025-12-06 02:30:41 -08:00
- Changed default model in commit.py from "gpt-4o" to "claude-3.7-sonnet" - Fixed ARTA provider by adding proper auth token handling and form data submission - Updated Blackbox provider to use OpenRouter models instead of premium models - Improved DDG provider with simplified authentication and better error handling - Updated DeepInfraChat provider with new models and aliases - Removed non-working providers: Goabror, Jmuz, OIVSCode, AllenAI, ChatGptEs, FreeRouter, Glider - Moved non-working providers to the not_working directory - Added BlackboxPro provider in needs_auth directory with premium model support - Updated Liaobots provider with new models and improved authentication - Renamed Microsoft_Phi_4 to Microsoft_Phi_4_Multimodal for clarity - Updated LambdaChat provider with direct API implementation instead of HuggingChat - Updated models.py with new model definitions and provider mappings - Removed BlackForestLabs_Flux1Schnell from HuggingSpace providers - Updated model aliases across multiple providers for better compatibility - Fixed Dynaspark provider endpoint URL to prevent spam detection
137 lines
5.8 KiB
Python
from __future__ import annotations
|
|
|
|
import aiohttp
|
|
import json
|
|
import uuid
|
|
|
|
from ...typing import AsyncResult, Messages
|
|
from ...providers.response import Reasoning, JsonConversation
|
|
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
|
|
from ..helper import get_last_user_message
|
|
from ... import debug
|
|
|
|
|
|
class Qwen_Qwen_3(AsyncGeneratorProvider, ProviderModelMixin):
    """Provider backed by the public Qwen-3 demo Gradio space on Hugging Face.

    Submits the last user message (plus an aggregated system prompt) to the
    space's Gradio queue API and streams the result back as an async
    generator, emitting ``Reasoning`` items for the model's "thinking"
    output and plain strings for the final answer text.
    """

    label = "Qwen Qwen-3"
    url = "https://qwen-qwen3-demo.hf.space"
    api_endpoint = "https://qwen-qwen3-demo.hf.space/gradio_api/queue/join?__theme=system"

    working = True
    supports_stream = True
    supports_system_message = True

    default_model = "qwen3-235b-a22b"
    # Model identifiers accepted verbatim by the demo space.
    models = {
        default_model,
        "qwen3-32b",
        "qwen3-30b-a3b",
        "qwen3-14b",
        "qwen3-8b",
        "qwen3-4b",
        "qwen3-1.7b",
        "qwen3-0.6b",
    }
    # Public alias -> internal model id; one alias per entry in `models`.
    model_aliases = {
        "qwen-3-235b": default_model,
        "qwen-3-32b": "qwen3-32b",
        "qwen-3-30b": "qwen3-30b-a3b",
        "qwen-3-14b": "qwen3-14b",
        # Fix: "qwen3-8b" is in `models` but previously had no alias.
        "qwen-3-8b": "qwen3-8b",
        "qwen-3-4b": "qwen3-4b",
        "qwen-3-1.7b": "qwen3-1.7b",
        "qwen-3-0.6b": "qwen3-0.6b",
    }

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        conversation: JsonConversation = None,
        thinking_budget: int = 38,
        **kwargs
    ) -> AsyncResult:
        """Stream a completion from the Qwen-3 demo space.

        Args:
            model: Model name or alias; resolved via ``cls.get_model``.
            messages: Chat history; system messages are joined into the
                system prompt, only the last user message is submitted.
            proxy: Optional HTTP proxy URL forwarded to aiohttp requests.
            conversation: Existing conversation carrying ``session_hash``;
                a fresh one is created when ``None``.
            thinking_budget: Value passed through to the space's
                "thinking_budget" setting (space-defined semantics).

        Yields:
            ``Reasoning`` objects for thinking/status updates and plain
            strings for answer content.
        """
        if conversation is None:
            # The space keys the event stream on this opaque session hash.
            conversation = JsonConversation(session_hash=str(uuid.uuid4()).replace('-', ''))

        headers_join = {
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:136.0) Gecko/20100101 Firefox/136.0',
            'Accept': '*/*',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate, br, zstd',
            'Referer': f'{cls.url}/?__theme=system',
            'content-type': 'application/json',
            'Origin': cls.url,
            'Connection': 'keep-alive',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-origin',
            'Pragma': 'no-cache',
            'Cache-Control': 'no-cache',
        }

        # Collapse all system messages into one prompt; fall back to a default.
        sys_prompt = "\n".join([message['content'] for message in messages if message['role'] == 'system'])
        sys_prompt = sys_prompt if sys_prompt else "You are a helpful and harmless assistant."

        # fn_index/trigger_id target the chat-submit handler of the Gradio app.
        payload_join = {"data": [
            get_last_user_message(messages),
            {"thinking_budget": thinking_budget, "model": cls.get_model(model), "sys_prompt": sys_prompt}, None, None],
            "event_data": None, "fn_index": 13, "trigger_id": 31, "session_hash": conversation.session_hash
        }

        async with aiohttp.ClientSession() as session:
            # Send join request to enqueue the generation job.
            # Fix: `proxy` was accepted but never passed to the requests.
            async with session.post(cls.api_endpoint, headers=headers_join, json=payload_join, proxy=proxy) as response:
                # A missing 'event_id' (KeyError) signals a failed join; the
                # id itself is not needed because the data stream is keyed on
                # session_hash instead.
                event_id = (await response.json())['event_id']

            # Prepare data stream request
            url_data = f'{cls.url}/gradio_api/queue/data'

            headers_data = {
                'Accept': 'text/event-stream',
                'Accept-Language': 'en-US,en;q=0.5',
                'Referer': f'{cls.url}/?__theme=system',
                'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:136.0) Gecko/20100101 Firefox/136.0',
            }

            params_data = {
                'session_hash': conversation.session_hash,
            }

            # Stream server-sent events for this session until completion.
            async with session.get(url_data, headers=headers_data, params=params_data, proxy=proxy) as response:
                is_thinking = False
                async for line in response.content:
                    decoded_line = line.decode('utf-8')
                    if not decoded_line.startswith('data: '):
                        continue
                    try:
                        json_data = json.loads(decoded_line[6:])

                        # Incremental generation updates arrive as
                        # 'process_generating' messages; the interesting
                        # payload sits at output.data[5].
                        if json_data.get('msg') == 'process_generating':
                            if 'output' in json_data and 'data' in json_data['output'] and len(
                                    json_data['output']['data']) > 5:
                                updates = json_data['output']['data'][5]
                                for update in updates:
                                    if isinstance(update[2], dict):
                                        # Structured chunk: 'tool' chunks are
                                        # thinking output, 'text' chunks end it.
                                        if update[2].get('type') == 'tool':
                                            yield Reasoning(update[2].get('content'),
                                                            status=update[2].get('options', {}).get('title'))
                                            is_thinking = True
                                        elif update[2].get('type') == 'text':
                                            yield Reasoning(update[2].get('content'))
                                            is_thinking = False
                                    elif isinstance(update, list) and isinstance(update[1], list) and len(
                                            update[1]) > 4:
                                        # Path-style delta: update[1][4] names
                                        # the field being appended to.
                                        if update[1][4] == "content":
                                            yield Reasoning(update[2]) if is_thinking else update[2]
                                        elif update[1][4] == "options":
                                            if update[2] != "done":
                                                yield Reasoning(status=update[2])
                                            is_thinking = False

                        # 'process_completed' marks the end of the stream.
                        if json_data.get('msg') == 'process_completed':
                            break

                    except json.JSONDecodeError:
                        debug.log("Could not parse JSON:", decoded_line)