Mirror of https://github.com/xtekky/gpt4free.git, synced 2025-12-06 02:30:41 -08:00
- Changed default model in commit.py from "gpt-4o" to "claude-3.7-sonnet"
- Fixed ARTA provider by adding proper auth token handling and form data submission
- Updated Blackbox provider to use OpenRouter models instead of premium models
- Improved DDG provider with simplified authentication and better error handling
- Updated DeepInfraChat provider with new models and aliases
- Removed non-working providers: Goabror, Jmuz, OIVSCode, AllenAI, ChatGptEs, FreeRouter, Glider
- Moved non-working providers to the not_working directory
- Added BlackboxPro provider in needs_auth directory with premium model support
- Updated Liaobots provider with new models and improved authentication
- Renamed Microsoft_Phi_4 to Microsoft_Phi_4_Multimodal for clarity
- Updated LambdaChat provider with direct API implementation instead of HuggingChat
- Updated models.py with new model definitions and provider mappings
- Removed BlackForestLabs_Flux1Schnell from HuggingSpace providers
- Updated model aliases across multiple providers for better compatibility
- Fixed Dynaspark provider endpoint URL to prevent spam detection
190 lines · 8 KiB · Python
from __future__ import annotations

import json
import re
import uuid

from aiohttp import ClientSession, FormData

from ..typing import AsyncResult, Messages
from ..requests import raise_for_status
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt, get_last_user_message
from ..providers.response import JsonConversation, TitleGeneration, Reasoning, FinishReason

class LambdaChat(AsyncGeneratorProvider, ProviderModelMixin):
    label = "Lambda Chat"
    url = "https://lambda.chat"
    conversation_url = f"{url}/conversation"

    working = True

    default_model = "deepseek-llama3.3-70b"
    reasoning_model = "deepseek-r1"
    models = [
        default_model,
        reasoning_model,
        "hermes-3-llama-3.1-405b-fp8",
        "hermes3-405b-fp8-128k",
        "llama3.1-nemotron-70b-instruct",
        "lfm-40b",
        "llama3.3-70b-instruct-fp8",
        "qwen25-coder-32b-instruct"
    ]
    model_aliases = {
        "deepseek-v3": default_model,
        "hermes-3": "hermes-3-llama-3.1-405b-fp8",
        "hermes-3-405b": "hermes3-405b-fp8-128k",
        "nemotron-70b": "llama3.1-nemotron-70b-instruct",
        "qwen-2.5-coder-32b": "qwen25-coder-32b-instruct"
    }
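
    # Model aliases are resolved by ProviderModelMixin.get_model(), so for
    # example cls.get_model("hermes-3") maps to "hermes-3-llama-3.1-405b-fp8".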

    @classmethod
    async def create_async_generator(
        cls, model: str, messages: Messages,
        api_key: str = None,
        proxy: str = None,
        cookies: dict = None,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)
        headers = {
            "Origin": cls.url,
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36",
            "Accept": "*/*",
            "Accept-Language": "en-US,en;q=0.9",
            "Referer": cls.url,
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Priority": "u=1, i",
            "Pragma": "no-cache",
            "Cache-Control": "no-cache"
        }
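
        # Header values are copied from a real Chrome session; the Origin,
        # Referer and Sec-Fetch-* fields in particular are assumed to be what
        # the server inspects, so they are kept verbatim.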

        # Initialize cookies if not provided
        if cookies is None:
            cookies = {
                "hf-chat": str(uuid.uuid4())  # Generate a session ID
            }

        async with ClientSession(headers=headers, cookies=cookies) as session:
            # Step 1: Create a new conversation
            data = {"model": model}
            async with session.post(cls.conversation_url, json=data, proxy=proxy) as response:
                await raise_for_status(response)
                conversation_response = await response.json()
                conversation_id = conversation_response["conversationId"]

                # Update cookies with any new ones from the response
                for cookie_name, cookie in response.cookies.items():
                    cookies[cookie_name] = cookie.value
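
            # The create-conversation endpoint is expected to respond with JSON
            # of the form {"conversationId": "<uuid>"}. The "hf-chat" session
            # cookie suggests lambda.chat is built on Hugging Face's
            # open-source chat-ui, whose browser client this code mimics.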

            # Step 2: Get data for this conversation to extract the message ID
            async with session.get(
                f"{cls.conversation_url}/{conversation_id}/__data.json?x-sveltekit-invalidated=11",
                proxy=proxy
            ) as response:
                await raise_for_status(response)
                response_text = await response.text()

                # Update cookies again
                for cookie_name, cookie in response.cookies.items():
                    cookies[cookie_name] = cookie.value

            # Parse the JSON response to find the message ID
            message_id = None
            try:
                # Try to parse each line as JSON
                for line in response_text.splitlines():
                    if not line.strip():
                        continue

                    try:
                        data_json = json.loads(line)
                        if data_json.get("type") == "data" and "nodes" in data_json:
                            for node in data_json["nodes"]:
                                if node.get("type") == "data" and "data" in node:
                                    # Look for the system message ID
                                    for item in node["data"]:
                                        if isinstance(item, dict) and item.get("from") == "system" and "id" in item:
                                            message_id = item["id"]
                                            break

                                # If we found the ID, break out of the node loop
                                if message_id:
                                    break
                    except json.JSONDecodeError:
                        continue

                    # Stop scanning lines once the ID has been found
                    if message_id:
                        break

                # If we still don't have a message ID, fall back to the first
                # UUID that appears anywhere in the response
                if not message_id:
                    uuid_pattern = r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
                    uuids = re.findall(uuid_pattern, response_text)
                    if uuids:
                        message_id = uuids[0]

                if not message_id:
                    raise ValueError("Could not find message ID in response")

            except (IndexError, KeyError, ValueError) as e:
                raise RuntimeError(f"Failed to parse conversation data: {e}") from e
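
            # Note: __data.json is SvelteKit's serialized page-data endpoint,
            # which returns one JSON object per line. Its node layout is not a
            # documented API, hence the defensive line-by-line parsing above
            # and the bare-UUID fallback.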

            # Step 3: Send the user message
            user_message = get_last_user_message(messages)

            # Prepare the form data exactly as the web client sends it
            form_data = FormData()
            form_data.add_field(
                "data",
                json.dumps({
                    "inputs": user_message,
                    "id": message_id,
                    "is_retry": False,
                    "is_continue": False,
                    "web_search": False,
                    "tools": []
                }),
                content_type="application/json"
            )
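
            # On the wire this produces a multipart/form-data body with a
            # single part named "data" carrying the JSON payload, e.g.:
            #   Content-Disposition: form-data; name="data"
            #   Content-Type: application/json
            #
            #   {"inputs": "Hi", "id": "<message_id>", "is_retry": false, ...}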

            async with session.post(
                f"{cls.conversation_url}/{conversation_id}",
                data=form_data,
                proxy=proxy
            ) as response:
                await raise_for_status(response)

                async for chunk in response.content:
                    if not chunk:
                        continue

                    chunk_str = chunk.decode("utf-8", errors="ignore")

                    try:
                        data = json.loads(chunk_str)
                    except json.JSONDecodeError:
                        continue

                    # Handle the different event types in the stream
                    if data.get("type") == "stream" and "token" in data:
                        # Remove null characters from the token
                        token = data["token"].replace("\u0000", "")
                        if token:
                            yield token
                    elif data.get("type") == "title":
                        yield TitleGeneration(data.get("title", ""))
                    elif data.get("type") == "reasoning":
                        subtype = data.get("subtype")
                        token = data.get("token", "").replace("\u0000", "")
                        status = data.get("status", "")

                        if subtype == "stream" and token:
                            yield Reasoning(token=token)
                        elif subtype == "status" and status:
                            yield Reasoning(status=status)
                    elif data.get("type") == "finalAnswer":
                        yield FinishReason("stop")
                        break
                    elif data.get("type") == "status" and data.get("status") == "keepAlive":
                        # Just a keepalive event; ignore it
                        continue
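
# A minimal usage sketch (not part of the provider): it assumes this module
# lives in g4f's Provider package and that Messages is a list of
# {"role": ..., "content": ...} dicts, as elsewhere in g4f:
#
#     import asyncio
#     import g4f.Provider
#
#     async def main():
#         async for chunk in g4f.Provider.LambdaChat.create_async_generator(
#             model="deepseek-r1",
#             messages=[{"role": "user", "content": "Hello"}],
#         ):
#             if isinstance(chunk, str):  # skip Reasoning/Title/FinishReason objects
#                 print(chunk, end="", flush=True)
#
#     asyncio.run(main())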