Mirror of https://github.com/xtekky/gpt4free.git (synced 2025-12-06 02:30:41 -08:00)

Compare commits: 25 commits
| SHA1 |
|---|
| 957d73a76e |
| 468dc7bd67 |
| 1fd9b8d116 |
| ed84c2dc6b |
| cda4634d34 |
| 1b3628dfee |
| 0d9c00b4e3 |
| 21113c51a6 |
| 098b2401ea |
| 04e300d7a6 |
| c364425250 |
| f57663cbe8 |
| da4d7d118d |
| 07883bc9f0 |
| f0ea4c5b95 |
| 7b32f89eca |
| d76e56a66f |
| 6be76e3e84 |
| 688640b764 |
| 2e6d417d02 |
| 7771cf3d43 |
| 32215bb7bb |
| 05c108d3f6 |
| 18fda760cb |
| 9c7fc9fe4a |
20 changed files with 1104 additions and 349 deletions
.gitignore (vendored): 1 change
```diff
@@ -9,3 +9,4 @@ g4f.egg-info
 models/models.json
 pyvenv.cfg
 lib64
+/.idea
```
Docker Compose (slim service):
```diff
@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   g4f-slim:
     container_name: g4f-slim
```
Docker Compose (main image):
```diff
@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   gpt4free:
     image: hlohaus789/g4f:latest
```
Dockerfile:
```diff
@@ -19,7 +19,7 @@ RUN if [ "$G4F_VERSION" = "" ] ; then \
 RUN apt-get -qqy update \
   && apt-get -qqy upgrade \
   && apt-get -qyy autoremove \
-  && apt-get -qqy install python3 python-is-python3 pip ffmpeg flac libavcodec-extra build-essential python3-dev \
+  && apt-get -qqy install python3 python-is-python3 pip ffmpeg flac libavcodec-extra \
   && apt-get -qyy remove openjdk-11-jre-headless \
   && apt-get -qyy autoremove \
   && apt-get -qyy clean \
```
g4f/Provider/GradientNetwork.py (new file, 107 lines):
```python
from __future__ import annotations

import json

from ..typing import AsyncResult, Messages
from ..providers.response import Reasoning, JsonResponse
from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin


class GradientNetwork(AsyncGeneratorProvider, ProviderModelMixin):
    """
    Provider for chat.gradient.network
    Supports streaming text generation with Qwen and GPT OSS models.
    """
    label = "Gradient Network"
    url = "https://chat.gradient.network"
    api_endpoint = "https://chat.gradient.network/api/generate"

    working = True
    needs_auth = False
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = "GPT OSS 120B"
    models = [
        default_model,
        "Qwen3 235B",
    ]
    model_aliases = {
        "qwen-3-235b": "Qwen3 235B",
        "qwen3-235b": "Qwen3 235B",
        "gpt-oss-120b": "GPT OSS 120B",
    }

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        enable_thinking: bool = True,
        **kwargs
    ) -> AsyncResult:
        """
        Create an async generator for streaming chat responses.

        Args:
            model: The model name to use
            messages: List of message dictionaries
            proxy: Optional proxy URL
            enable_thinking: Enable the thinking/analysis channel (maps to enableThinking in the API)
            **kwargs: Additional arguments

        Yields:
            str: Content chunks from the response
            Reasoning: Reasoning content when enable_thinking is True
        """
        model = cls.get_model(model)

        headers = {
            "Accept": "application/x-ndjson",
            "Content-Type": "application/json",
            "Origin": cls.url,
            "Referer": f"{cls.url}/",
        }

        payload = {
            "clusterMode": "nvidia" if "GPT OSS" in model else "hybrid",
            "model": model,
            "messages": messages,
        }
        if enable_thinking:
            payload["enableThinking"] = enable_thinking
        async with StreamSession(headers=headers, proxy=proxy, impersonate="chrome") as session:
            async with session.post(
                cls.api_endpoint,
                json=payload,
            ) as response:
                response.raise_for_status()

                async for line in response.iter_lines():
                    if not line:
                        continue

                    try:
                        data = json.loads(line)
                        yield JsonResponse.from_dict(data)
                        msg_type = data.get("type")

                        if msg_type == "reply":
                            # Response chunks carry content or reasoningContent
                            reply_data = data.get("data", {})
                            content = reply_data.get("content")
                            reasoning_content = reply_data.get("reasoningContent")

                            if reasoning_content:
                                yield Reasoning(reasoning_content)
                            if content:
                                yield content

                        # Skip clusterInfo and blockUpdate GPU visualization messages

                    except json.JSONDecodeError:
                        # Skip non-JSON lines (may be partial data or empty)
                        continue
```
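A quick way to smoke-test the new provider is to drive its generator directly. This is a sketch rather than documented usage: the message shape and the isinstance filtering follow the base-provider conventions visible in this diff, and "gpt-oss-120b" is resolved through model_aliases.

```python
import asyncio

from g4f.Provider import GradientNetwork
from g4f.providers.response import Reasoning


async def main() -> None:
    messages = [{"role": "user", "content": "Explain NDJSON in one sentence."}]
    async for chunk in GradientNetwork.create_async_generator(
        "gpt-oss-120b",        # alias, resolved to "GPT OSS 120B"
        messages,
        enable_thinking=True,  # forwarded as enableThinking in the payload
    ):
        if isinstance(chunk, Reasoning):
            print(f"[thinking] {chunk}")
        elif isinstance(chunk, str):
            print(chunk, end="")  # plain content; JsonResponse objects are skipped


asyncio.run(main())
```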
g4f/Provider/ItalyGPT.py (new file, 46 lines):
```python
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..typing import AsyncResult, Messages
from ..requests import DEFAULT_HEADERS
from aiohttp import ClientSession


class ItalyGPT(AsyncGeneratorProvider, ProviderModelMixin):
    label = "ItalyGPT"
    url = "https://italygpt.it"
    working = True
    supports_system_message = True
    supports_message_history = True

    default_model = "gpt-4o"
    models = [default_model]

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        stream: bool = True,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)
        headers = {
            **DEFAULT_HEADERS,
            "content-type": "application/json",
            "origin": "https://italygpt.it",
            "referer": "https://italygpt.it/",
        }
        payload = {
            "messages": messages,
            "stream": stream,
        }
        async with ClientSession() as session:
            async with session.post(
                f"{cls.url}/api/chat",
                json=payload,
                headers=headers,
                proxy=proxy,
            ) as resp:
                resp.raise_for_status()
                async for chunk in resp.content.iter_any():
                    if chunk:
                        yield chunk.decode()
```
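One caveat with `resp.content.iter_any()` in the handler above: chunks can split multi-byte UTF-8 sequences, so a bare `chunk.decode()` may raise mid-stream. A minimal, hypothetical hardening using an incremental decoder (not part of the commit):

```python
import codecs


async def iter_text(resp):
    """Yield decoded text from a streamed aiohttp response without splitting UTF-8 sequences."""
    decoder = codecs.getincrementaldecoder("utf-8")(errors="replace")
    async for chunk in resp.content.iter_any():
        if chunk:
            text = decoder.decode(chunk)  # buffers trailing partial sequences
            if text:
                yield text
```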
Qwen provider:
```diff
@@ -1,22 +1,29 @@
 from __future__ import annotations
 
 import asyncio
 import datetime
 import hashlib
 import hmac
 import json
 import mimetypes
 import re
 import uuid
 from time import time
-from typing import Literal, Optional
+from typing import Literal, Optional, Dict
 from urllib.parse import quote
 
-from ..errors import RateLimitError, ResponseError
-from ..typing import AsyncResult, Messages, MediaListType
-from ..providers.response import JsonConversation, Reasoning, Usage, ImageResponse, FinishReason
-from ..requests import sse_stream
-from ..tools.media import merge_media
+import aiohttp
 
 from g4f.image import to_bytes, detect_file_type
 from g4f.requests import raise_for_status
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import get_last_user_message
 from .. import debug
+from ..errors import RateLimitError, ResponseError
+from ..providers.response import JsonConversation, Reasoning, Usage, ImageResponse, FinishReason
+from ..requests import sse_stream
+from ..requests.aiohttp import StreamSession
+from ..tools.media import merge_media
+from ..typing import AsyncResult, Messages, MediaListType
 
 try:
     import curl_cffi
```
```diff
@@ -25,6 +32,56 @@ try:
 except ImportError:
     has_curl_cffi = False
 
+# Global variables to manage Qwen Image Cache
+ImagesCache: Dict[str, dict] = {}
+
+
+def get_oss_headers(method: str, date_str: str, sts_data: dict, content_type: str) -> dict[str, str]:
+    bucket_name = sts_data.get('bucketname', 'qwen-webui-prod')
+    file_path = sts_data.get('file_path', '')
+    access_key_id = sts_data.get('access_key_id')
+    access_key_secret = sts_data.get('access_key_secret')
+    security_token = sts_data.get('security_token')
+    headers = {
+        'Content-Type': content_type,
+        'x-oss-content-sha256': 'UNSIGNED-PAYLOAD',
+        'x-oss-date': date_str,
+        'x-oss-security-token': security_token,
+        'x-oss-user-agent': 'aliyun-sdk-js/6.23.0 Chrome 132.0.0.0 on Windows 10 64-bit'
+    }
+    headers_lower = {k.lower(): v for k, v in headers.items()}
+
+    canonical_headers_list = []
+    signed_headers_list = []
+    required_headers = ['content-md5', 'content-type', 'x-oss-content-sha256', 'x-oss-date',
+                        'x-oss-security-token', 'x-oss-user-agent']
+    for header_name in sorted(required_headers):
+        if header_name in headers_lower:
+            canonical_headers_list.append(f"{header_name}:{headers_lower[header_name]}")
+            signed_headers_list.append(header_name)
+
+    canonical_headers = '\n'.join(canonical_headers_list) + '\n'
+    canonical_uri = f"/{bucket_name}/{quote(file_path, safe='/')}"
+
+    canonical_request = f"{method}\n{canonical_uri}\n\n{canonical_headers}\n\nUNSIGNED-PAYLOAD"
+
+    date_parts = date_str.split('T')
+    date_scope = f"{date_parts[0]}/ap-southeast-1/oss/aliyun_v4_request"
+    string_to_sign = f"OSS4-HMAC-SHA256\n{date_str}\n{date_scope}\n{hashlib.sha256(canonical_request.encode()).hexdigest()}"
+
+    def sign(key, msg):
+        return hmac.new(key, msg.encode() if isinstance(msg, str) else msg, hashlib.sha256).digest()
+
+    date_key = sign(f"aliyun_v4{access_key_secret}".encode(), date_parts[0])
+    region_key = sign(date_key, "ap-southeast-1")
+    service_key = sign(region_key, "oss")
+    signing_key = sign(service_key, "aliyun_v4_request")
+    signature = hmac.new(signing_key, string_to_sign.encode(), hashlib.sha256).hexdigest()
+
+    headers['authorization'] = f"OSS4-HMAC-SHA256 Credential={access_key_id}/{date_scope},Signature={signature}"
+    return headers
+
+
 text_models = [
     'qwen3-max-preview', 'qwen-plus-2025-09-11', 'qwen3-235b-a22b', 'qwen3-coder-plus', 'qwen3-30b-a3b',
     'qwen3-coder-30b-a3b-instruct', 'qwen-max-latest', 'qwen-plus-2025-01-25', 'qwq-32b', 'qwen-turbo-2025-02-11',
```
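For orientation, get_oss_headers implements the Aliyun OSS V4 scheme: the secret is prefixed with `aliyun_v4`, then folded through date, region, and service to derive the signing key. A standalone sketch of just that chain, with placeholder values (illustrative only, not real credentials or a real canonical request):

```python
import hashlib
import hmac


def _sign(key: bytes, msg: str) -> bytes:
    return hmac.new(key, msg.encode(), hashlib.sha256).digest()


# Placeholder inputs, mirroring the names used above
access_key_secret = "EXAMPLE_SECRET"
date_str = "20251206T023041Z"
day = date_str.split("T")[0]
date_scope = f"{day}/ap-southeast-1/oss/aliyun_v4_request"
string_to_sign = (f"OSS4-HMAC-SHA256\n{date_str}\n{date_scope}\n"
                  + hashlib.sha256(b"<canonical request>").hexdigest())

signing_key = _sign(_sign(_sign(_sign(f"aliyun_v4{access_key_secret}".encode(), day),
                                "ap-southeast-1"), "oss"), "aliyun_v4_request")
signature = hmac.new(signing_key, string_to_sign.encode(), hashlib.sha256).hexdigest()
print(signature)  # 64-character hex digest placed in the Authorization header
```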
```diff
@@ -60,19 +117,19 @@ class Qwen(AsyncGeneratorProvider, ProviderModelMixin):
     active_by_default = True
     supports_stream = True
     supports_message_history = False
 
+    image_cache = True
     _models_loaded = True
     image_models = image_models
     text_models = text_models
     vision_models = vision_models
-    models = models
+    models: list[str] = models
     default_model = "qwen3-235b-a22b"
 
     _midtoken: str = None
     _midtoken_uses: int = 0
 
     @classmethod
-    def get_models(cls) -> list[str]:
+    def get_models(cls, **kwargs) -> list[str]:
         if not cls._models_loaded and has_curl_cffi:
             response = curl_cffi.get(f"{cls.url}/api/models")
             if response.ok:
```
```diff
@@ -97,34 +154,106 @@ class Qwen(AsyncGeneratorProvider, ProviderModelMixin):
         return cls.models
 
     @classmethod
-    async def prepare_files(cls, media, chat_type="") -> list:
+    async def prepare_files(cls, media, session: aiohttp.ClientSession, headers=None) -> list:
+        if headers is None:
+            headers = {}
         files = []
-        for _file, file_name in media:
-            file_type, _ = mimetypes.guess_type(file_name)
-            file_class: Literal["default", "vision", "video", "audio", "document"] = "default"
-            _type: Literal["file", "image", "video", "audio"] = "file"
-            showType: Literal["file", "image", "video", "audio"] = "file"
-            if isinstance(_file, str) and _file.startswith('http'):
-                if chat_type == "image_edit" or (file_type and file_type.startswith("image")):
-                    file_class = "vision"
-            if "image" in file_type:
-                _type = "image"
-                if not file_type:
-                    # Try to infer from file extension, fallback to generic
-                    ext = file_name.split('.')[-1].lower() if '.' in file_name else ''
-                    file_type = mimetypes.types_map.get(f'.{ext}', 'application/octet-stream')
-                showType = "image"
-                file_class = "vision"
-            files.append(
-                {
-                    "type": _type,
-                    "showType": showType,
-                    "file_class": file_class,
-                    "url": _file
-                }
-            )
+        for index, (_file, file_name) in enumerate(media):
+            data_bytes = to_bytes(_file)
+            # Check Cache
+            hasher = hashlib.md5()
+            hasher.update(data_bytes)
+            image_hash = hasher.hexdigest()
+            file = ImagesCache.get(image_hash)
+            if cls.image_cache and file:
+                debug.log("Using cached image")
+                files.append(file)
+                continue
+
+            extension, file_type = detect_file_type(data_bytes)
+            file_name = file_name or f"file-{len(data_bytes)}{extension}"
+            file_size = len(data_bytes)
+
+            # Get File Url
+            async with session.post(
+                f'{cls.url}/api/v2/files/getstsToken',
+                json={"filename": file_name,
+                      "filesize": file_size, "filetype": file_type},
+                headers=headers
+            ) as r:
+                await raise_for_status(r, "Create file failed")
+                res_data = await r.json()
+                data = res_data.get("data")
+
+                if res_data["success"] is False:
+                    raise RateLimitError(f"{data['code']}:{data['details']}")
+                file_url = data.get("file_url")
+                file_id = data.get("file_id")
+
+            # Put File into Url
+            str_date = datetime.datetime.now(datetime.UTC).strftime('%Y%m%dT%H%M%SZ')
+            headers = get_oss_headers('PUT', str_date, data, file_type)
+            async with session.put(
+                file_url.split("?")[0],
+                data=data_bytes,
+                headers=headers
+            ) as response:
+                await raise_for_status(response)
+
+            file_class: Literal["default", "vision", "video", "audio", "document"]
+            _type: Literal["file", "image", "video", "audio"]
+            show_type: Literal["file", "image", "video", "audio"]
+            if not file_type:
+                # Try to infer from the file extension, falling back to a generic type
+                ext = file_name.split('.')[-1].lower() if '.' in file_name else ''
+                file_type = mimetypes.types_map.get(f'.{ext}', 'application/octet-stream')
+            if "image" in file_type:
+                _type = "image"
+                show_type = "image"
+                file_class = "vision"
+            elif "video" in file_type:
+                _type = "video"
+                show_type = "video"
+                file_class = "video"
+            elif "audio" in file_type:
+                _type = "audio"
+                show_type = "audio"
+                file_class = "audio"
+            else:
+                _type = "file"
+                show_type = "file"
+                file_class = "document"
+
+            file = {
+                "type": _type,
+                "file": {
+                    "created_at": int(time() * 1000),
+                    "data": {},
+                    "filename": file_name,
+                    "hash": None,
+                    "id": file_id,
+                    "meta": {
+                        "name": file_name,
+                        "size": file_size,
+                        "content_type": file_type
+                    },
+                    "update_at": int(time() * 1000),
+                },
+                "id": file_id,
+                "url": file_url,
+                "name": file_name,
+                "collection_name": "",
+                "progress": 0,
+                "status": "uploaded",
+                "greenNet": "success",
+                "size": file_size,
+                "error": "",
+                "itemId": str(uuid.uuid4()),
+                "file_type": file_type,
+                "showType": show_type,
+                "file_class": file_class,
+                "uploadTaskId": str(uuid.uuid4())
+            }
+            ImagesCache[image_hash] = file
+            files.append(file)
         return files
 
     @classmethod
```
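The new module-level ImagesCache keys uploads by an MD5 of the raw bytes, so re-sending the same image skips the STS-token and PUT round-trips. The pattern in isolation, as a sketch (the `upload` callable stands in for the network path above):

```python
import hashlib
from typing import Callable, Dict

ImagesCache: Dict[str, dict] = {}


def get_or_upload(data: bytes, upload: Callable[[bytes], dict]) -> dict:
    key = hashlib.md5(data).hexdigest()  # content identifies the upload, not the filename
    cached = ImagesCache.get(key)
    if cached is not None:
        return cached                    # cache hit: reuse the earlier file record
    record = upload(data)                # cache miss: pay for the token + PUT round-trips
    ImagesCache[key] = record
    return record
```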
```diff
@@ -135,7 +264,6 @@ class Qwen(AsyncGeneratorProvider, ProviderModelMixin):
         media: MediaListType = None,
         conversation: JsonConversation = None,
         proxy: str = None,
-        timeout: int = 120,
         stream: bool = True,
         enable_thinking: bool = True,
         chat_type: Literal[
```
```diff
@@ -157,7 +285,7 @@ class Qwen(AsyncGeneratorProvider, ProviderModelMixin):
         """
 
         model_name = cls.get_model(model)
 
+        token = kwargs.get("token")
         headers = {
             'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36',
             'Accept': '*/*',
```
```diff
@@ -169,13 +297,24 @@ class Qwen(AsyncGeneratorProvider, ProviderModelMixin):
             'Sec-Fetch-Mode': 'cors',
             'Sec-Fetch-Site': 'same-origin',
             'Connection': 'keep-alive',
-            'Authorization': 'Bearer',
+            'Authorization': f'Bearer {token}' if token else "Bearer",
             'Source': 'web'
         }
 
         prompt = get_last_user_message(messages)
 
-        async with aiohttp.ClientSession(headers=headers) as session:
+        _timeout = kwargs.get("timeout")
+        if isinstance(_timeout, aiohttp.ClientTimeout):
+            timeout = _timeout
+        else:
+            total = float(_timeout) if isinstance(_timeout, (int, float)) else 5 * 60
+            timeout = aiohttp.ClientTimeout(total=total)
+        async with StreamSession(headers=headers) as session:
+            try:
+                async with session.get('https://chat.qwen.ai/api/v1/auths/', proxy=proxy) as user_info_res:
+                    user_info_res.raise_for_status()
+                    debug.log(await user_info_res.json())
+            except:
+                ...
             for attempt in range(5):
                 try:
                     if not cls._midtoken:
```
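The kwargs handling above accepts either a ready aiohttp.ClientTimeout or a plain number of seconds, defaulting to five minutes. The same logic as a reusable helper (a sketch; the provider inlines it):

```python
import aiohttp


def normalize_timeout(value, default_total: float = 5 * 60) -> aiohttp.ClientTimeout:
    """Coerce a ClientTimeout, a number of seconds, or None into a ClientTimeout."""
    if isinstance(value, aiohttp.ClientTimeout):
        return value
    if isinstance(value, (int, float)):
        return aiohttp.ClientTimeout(total=float(value))
    return aiohttp.ClientTimeout(total=default_total)
```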
```diff
@@ -221,7 +360,8 @@ class Qwen(AsyncGeneratorProvider, ProviderModelMixin):
                     files = []
                     media = list(merge_media(media, messages))
                     if media:
-                        files = await cls.prepare_files(media, chat_type=chat_type)
+                        files = await cls.prepare_files(media, session=session,
+                                                        headers=req_headers)
 
                     msg_payload = {
                         "stream": stream,
```
Yupp provider:
```diff
@@ -1,41 +1,51 @@
+import asyncio
+import hashlib
 import json
+import os
+import re
 import time
 import uuid
-import re
-import os
-import asyncio
 
 import aiohttp
 
-from ..typing import AsyncResult, Messages, Optional, Dict, Any, List
-from ..providers.base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..providers.response import Reasoning, PlainTextResponse, PreviewResponse, JsonConversation, ImageResponse, ProviderInfo
-from ..errors import RateLimitError, ProviderException, MissingAuthError
-from .helper import get_last_user_message
-from .yupp.models import YuppModelManager
 from ..cookies import get_cookies
 from ..debug import log
+from ..errors import RateLimitError, ProviderException, MissingAuthError
 from ..image import is_accepted_format, to_bytes
+from ..providers.base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..providers.response import Reasoning, PlainTextResponse, PreviewResponse, JsonConversation, ImageResponse, \
+    ProviderInfo, FinishReason, JsonResponse
+from ..requests.aiohttp import StreamSession
 from ..tools.auth import AuthManager
 from ..tools.media import merge_media
+from .yupp.models import YuppModelManager
+from .helper import get_last_user_message
+from ..typing import AsyncResult, Messages, Optional, Dict, Any, List
 
 # Global variables to manage Yupp accounts
-YUPP_ACCOUNTS: List[Dict[str, Any]] = []
+YUPP_ACCOUNT = Dict[str, Any]
+YUPP_ACCOUNTS: List[YUPP_ACCOUNT] = []
 account_rotation_lock = asyncio.Lock()
 
+# Global variables to manage Yupp Image Cache
+ImagesCache: Dict[str, dict] = {}
 
 
 class YuppAccount:
     """Yupp account representation"""
 
     def __init__(self, token: str, is_valid: bool = True, error_count: int = 0, last_used: float = 0):
         self.token = token
         self.is_valid = is_valid
         self.error_count = error_count
         self.last_used = last_used
 
 
 def load_yupp_accounts(tokens_str: str):
     """Load Yupp accounts from token string"""
     global YUPP_ACCOUNTS
     if not tokens_str:
         return
 
     tokens = [token.strip() for token in tokens_str.split(',') if token.strip()]
     YUPP_ACCOUNTS = [
         {
```
```diff
@@ -47,6 +57,7 @@ def load_yupp_accounts(tokens_str: str):
         for token in tokens
     ]
 
+
 def create_headers() -> Dict[str, str]:
     """Create headers for requests"""
     return {
```
```diff
@@ -59,7 +70,8 @@ def create_headers() -> Dict[str, str]:
         "Sec-Fetch-Site": "same-origin",
     }
 
-async def get_best_yupp_account() -> Optional[Dict[str, Any]]:
+
+async def get_best_yupp_account() -> Optional[YUPP_ACCOUNT]:
     """Get the best available Yupp account using smart selection algorithm"""
     max_error_count = int(os.getenv("MAX_ERROR_COUNT", "3"))
     error_cooldown = int(os.getenv("ERROR_COOLDOWN", "300"))
```
```diff
@@ -93,7 +105,8 @@ async def get_best_yupp_account() -> Optional[YUPP_ACCOUNT]:
         account["last_used"] = now
         return account
 
-async def claim_yupp_reward(session: aiohttp.ClientSession, account: Dict[str, Any], reward_id: str):
+
+async def claim_yupp_reward(session: aiohttp.ClientSession, account: YUPP_ACCOUNT, reward_id: str):
     """Claim Yupp reward asynchronously"""
     try:
         log_debug(f"Claiming reward {reward_id}...")
```
```diff
@@ -102,6 +115,8 @@ async def claim_yupp_reward(session: aiohttp.ClientSession, account: YUPP_ACCOUNT, reward_id: str):
         headers = {
             "Content-Type": "application/json",
             "Cookie": f"__Secure-yupp.session-token={account['token']}",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36 Edg/137.0.0.0",
         }
         async with session.post(url, json=payload, headers=headers) as response:
             response.raise_for_status()
```
```diff
@@ -113,7 +128,8 @@ async def claim_yupp_reward(session: aiohttp.ClientSession, account: YUPP_ACCOUNT, reward_id: str):
         log_debug(f"Failed to claim reward {reward_id}. Error: {e}")
         return None
 
-async def make_chat_private(session: aiohttp.ClientSession, account: Dict[str, Any], chat_id: str) -> bool:
+
+async def make_chat_private(session: aiohttp.ClientSession, account: YUPP_ACCOUNT, chat_id: str) -> bool:
     """Set a Yupp chat's sharing status to PRIVATE"""
     try:
         log_debug(f"Setting chat {chat_id} to PRIVATE...")
```
```diff
@@ -129,6 +145,8 @@ async def make_chat_private(session: aiohttp.ClientSession, account: YUPP_ACCOUNT, chat_id: str) -> bool:
         headers = {
             "Content-Type": "application/json",
             "Cookie": f"__Secure-yupp.session-token={account['token']}",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36 Edg/137.0.0.0",
         }
 
         async with session.post(url, json=payload, headers=headers) as response:
```
```diff
@@ -148,6 +166,7 @@ async def make_chat_private(session: aiohttp.ClientSession, account: YUPP_ACCOUNT, chat_id: str) -> bool:
         log_debug(f"Failed to make chat {chat_id} private: {e}")
         return False
 
+
 def log_debug(message: str):
     """Debug logging"""
     if os.getenv("DEBUG_MODE", "false").lower() == "true":
```
```diff
@@ -155,6 +174,7 @@ def log_debug(message: str):
     else:
         log(f"[Yupp] {message}")
 
+
 def format_messages_for_yupp(messages: Messages) -> str:
     """Format multi-turn conversation for Yupp single-turn format"""
     if not messages:
```
```diff
@@ -191,6 +211,7 @@ def format_messages_for_yupp(messages: Messages) -> str:
 
     return result
 
+
 class Yupp(AsyncGeneratorProvider, ProviderModelMixin):
     """
     Yupp.ai Provider for g4f
```
```diff
@@ -202,6 +223,7 @@ class Yupp(AsyncGeneratorProvider, ProviderModelMixin):
     working = True
     active_by_default = True
     supports_stream = True
+    image_cache = True
 
     @classmethod
     def get_models(cls, api_key: str = None, **kwargs) -> List[str]:
```
```diff
@@ -218,9 +240,66 @@ class Yupp(AsyncGeneratorProvider, ProviderModelMixin):
         cls.models_tags = {model.get("name"): manager.processor.generate_tags(model) for model in models}
         cls.models = [model.get("name") for model in models]
         cls.image_models = [model.get("name") for model in models if model.get("isImageGeneration")]
-        cls.vision_models = [model.get("name") for model in models if "image/*" in model.get("supportedAttachmentMimeTypes", [])]
+        cls.vision_models = [model.get("name") for model in models if
+                             "image/*" in model.get("supportedAttachmentMimeTypes", [])]
         return cls.models
 
+    @classmethod
+    async def prepare_files(cls, media, session: aiohttp.ClientSession, account: YUPP_ACCOUNT) -> list:
+        files = []
+        if not media:
+            return files
+        for file, name in media:
+            data = to_bytes(file)
+            hasher = hashlib.md5()
+            hasher.update(data)
+            image_hash = hasher.hexdigest()
+            file = ImagesCache.get(image_hash)
+            if cls.image_cache and file:
+                log_debug("Using cached image")
+                files.append(file)
+                continue
+            presigned_resp = await session.post(
+                "https://yupp.ai/api/trpc/chat.createPresignedURLForUpload?batch=1",
+                json={
+                    "0": {"json": {"fileName": name, "fileSize": len(data), "contentType": is_accepted_format(data)}}},
+                headers={"Content-Type": "application/json",
+                         "Cookie": f"__Secure-yupp.session-token={account['token']}",
+                         "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36 Edg/137.0.0.0",
+                         }
+            )
+            presigned_resp.raise_for_status()
+            upload_info = (await presigned_resp.json())[0]["result"]["data"]["json"]
+            upload_url = upload_info["signedUrl"]
+
+            await session.put(
+                upload_url,
+                data=data,
+                headers={
+                    "Content-Type": is_accepted_format(data),
+                    "Content-Length": str(len(data))
+                }
+            )
+
+            attachment_resp = await session.post(
+                "https://yupp.ai/api/trpc/chat.createAttachmentForUploadedFile?batch=1",
+                json={"0": {"json": {"fileName": name, "contentType": is_accepted_format(data),
+                                     "fileId": upload_info["fileId"]}}},
+                cookies={"__Secure-yupp.session-token": account["token"]}
+            )
+            attachment_resp.raise_for_status()
+            attachment = (await attachment_resp.json())[0]["result"]["data"]["json"]
+            file = {
+                "fileName": attachment["file_name"],
+                "contentType": attachment["content_type"],
+                "attachmentId": attachment["attachment_id"],
+                "chatMessageId": ""
+            }
+            ImagesCache[image_hash] = file
+            files.append(file)
+        return files
+
     @classmethod
     async def create_async_generator(
         cls,
```
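The extracted prepare_files follows the usual presigned-upload handshake: request a signed URL for the file metadata, PUT the raw bytes to it, then register the uploaded file as a chat attachment. A condensed sketch of that three-step flow (endpoint names as in the code above; error handling elided):

```python
async def upload_attachment(session, token: str, name: str, data: bytes, content_type: str) -> dict:
    """Outline of the three-step Yupp upload handshake used by prepare_files."""
    cookies = {"__Secure-yupp.session-token": token}
    # 1. Request a presigned URL for the file metadata.
    resp = await session.post(
        "https://yupp.ai/api/trpc/chat.createPresignedURLForUpload?batch=1",
        json={"0": {"json": {"fileName": name, "fileSize": len(data), "contentType": content_type}}},
        cookies=cookies,
    )
    upload_info = (await resp.json())[0]["result"]["data"]["json"]
    # 2. PUT the raw bytes directly to the signed URL.
    await session.put(upload_info["signedUrl"], data=data,
                      headers={"Content-Type": content_type, "Content-Length": str(len(data))})
    # 3. Register the uploaded file as a chat attachment.
    resp = await session.post(
        "https://yupp.ai/api/trpc/chat.createAttachmentForUploadedFile?batch=1",
        json={"0": {"json": {"fileName": name, "contentType": content_type, "fileId": upload_info["fileId"]}}},
        cookies=cookies,
    )
    return (await resp.json())[0]["result"]["data"]["json"]
```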
```diff
@@ -254,7 +333,8 @@ class Yupp(AsyncGeneratorProvider, ProviderModelMixin):
         else:
             prompt = get_last_user_message(messages, prompt)
 
-        log_debug(f"Use url_uuid: {url_uuid}, Formatted prompt length: {len(prompt)}, Is new conversation: {is_new_conversation}")
+        log_debug(
+            f"Use url_uuid: {url_uuid}, Formatted prompt length: {len(prompt)}, Is new conversation: {is_new_conversation}")
 
         # Try all accounts with rotation
         max_attempts = len(YUPP_ACCOUNTS)
```
```diff
@@ -264,46 +344,16 @@ class Yupp(AsyncGeneratorProvider, ProviderModelMixin):
                 raise ProviderException("No valid Yupp accounts available")
 
             try:
-                async with aiohttp.ClientSession() as session:
+                async with StreamSession() as session:
                     turn_id = str(uuid.uuid4())
-                    files = []
 
                     # Handle media attachments
                     media = kwargs.get("media")
                     if media:
-                        for file, name in list(merge_media(media, messages)):
-                            data = to_bytes(file)
-                            presigned_resp = await session.post(
-                                "https://yupp.ai/api/trpc/chat.createPresignedURLForUpload?batch=1",
-                                json={"0": {"json": {"fileName": name, "fileSize": len(data), "contentType": is_accepted_format(data)}}},
-                                headers={"Content-Type": "application/json", "Cookie": f"__Secure-yupp.session-token={account['token']}"}
-                            )
-                            presigned_resp.raise_for_status()
-                            upload_info = (await presigned_resp.json())[0]["result"]["data"]["json"]
-                            upload_url = upload_info["signedUrl"]
-
-                            await session.put(
-                                upload_url,
-                                data=data,
-                                headers={
-                                    "Content-Type": is_accepted_format(data),
-                                    "Content-Length": str(len(data))
-                                }
-                            )
-
-                            attachment_resp = await session.post(
-                                "https://yupp.ai/api/trpc/chat.createAttachmentForUploadedFile?batch=1",
-                                json={"0": {"json": {"fileName": name, "contentType": is_accepted_format(data), "fileId": upload_info["fileId"]}}},
-                                cookies={"__Secure-yupp.session-token": account["token"]}
-                            )
-                            attachment_resp.raise_for_status()
-                            attachment = (await attachment_resp.json())[0]["result"]["data"]["json"]
-                            files.append({
-                                "fileName": attachment["file_name"],
-                                "contentType": attachment["content_type"],
-                                "attachmentId": attachment["attachment_id"],
-                                "chatMessageId": ""
-                            })
+                        media_ = list(merge_media(media, messages))
+                        files = await cls.prepare_files(media_, session=session, account=account)
+                    else:
+                        files = []
                     mode = "image" if model in cls.image_models else "text"
 
                     # Build payload and URL - FIXED: Use consistent url_uuid handling
```
```diff
@@ -346,20 +396,28 @@ class Yupp(AsyncGeneratorProvider, ProviderModelMixin):
                         "content-type": "text/plain;charset=UTF-8",
                         "next-action": next_action,
                         "cookie": f"__Secure-yupp.session-token={account['token']}",
+                        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36 Edg/137.0.0.0",
                     }
 
                     log_debug(f"Sending request to: {url}")
                     log_debug(f"Payload structure: {type(payload)}, length: {len(str(payload))}")
 
+                    _timeout = kwargs.get("timeout")
+                    if isinstance(_timeout, aiohttp.ClientTimeout):
+                        timeout = _timeout
+                    else:
+                        total = float(_timeout) if isinstance(_timeout, (int, float)) else 5 * 60
+                        timeout = aiohttp.ClientTimeout(total=total)
                     # Send request
-                    async with session.post(url, json=payload, headers=headers, proxy=proxy) as response:
+                    async with session.post(url, json=payload, headers=headers, proxy=proxy,
+                                            timeout=timeout) as response:
                         response.raise_for_status()
 
                         # Make chat private in background
                         asyncio.create_task(make_chat_private(session, account, url_uuid))
 
                         # Process stream
-                        async for chunk in cls._process_stream_response(response.content, account, session, prompt, model):
+                        async for chunk in cls._process_stream_response(response.content, account, session, prompt,
+                                                                        model):
                             yield chunk
 
                         return
```
```diff
@@ -377,6 +435,18 @@ class Yupp(AsyncGeneratorProvider, ProviderModelMixin):
                     else:
                         account["error_count"] += 1
                     continue
+            except aiohttp.ClientResponseError as e:
+                log_debug(f"Account ...{account['token'][-4:]} failed: {str(e)}")
+                # No available Yupp credits
+                if e.status == 500 and 'Internal Server Error' in e.message:
+                    account["is_valid"] = False
+                # Needs a User-Agent
+                # elif e.status == 429 and 'Too Many Requests' in e.message:
+                #     account["is_valid"] = False
+                else:
+                    async with account_rotation_lock:
+                        account["error_count"] += 1
+                raise ProviderException(f"Yupp request failed: {str(e)}") from e
             except Exception as e:
                 log_debug(f"Unexpected error with account ...{account['token'][-4:]}: {str(e)}")
                 async with account_rotation_lock:
```
```diff
@@ -389,7 +459,7 @@ class Yupp(AsyncGeneratorProvider, ProviderModelMixin):
     async def _process_stream_response(
             cls,
             response_content,
-            account: Dict[str, Any],
+            account: YUPP_ACCOUNT,
             session: aiohttp.ClientSession,
             prompt: str,
             model_id: str
```
```diff
@@ -399,54 +469,161 @@ class Yupp(AsyncGeneratorProvider, ProviderModelMixin):
         line_pattern = re.compile(b"^([0-9a-fA-F]+):(.*)")
         target_stream_id = None
         reward_info = None
+        # Stream segmentation buffers
         is_thinking = False
-        thinking_content = ""
+        thinking_content = ""  # model's "thinking" channel (if activated later)
         normal_content = ""
+        quick_content = ""  # quick-response short message
+        variant_text = ""  # variant model output (comparison stream)
+        stream = {
+            "target": [],
+            "variant": [],
+            "quick": [],
+            "thinking": [],
+            "extra": []
+        }
+        # Holds leftStream / rightStream definitions to determine target/variant
+        select_stream = [None, None]
+        # State for capturing a multi-line <think> + <yapp> block (fa-style)
+        capturing_ref_id: Optional[str] = None
+        capturing_lines: List[bytes] = []
+
+        # Storage for special referenced blocks like $fa
+        think_blocks: Dict[str, str] = {}
+        image_blocks: Dict[str, str] = {}
 
         def extract_ref_id(ref):
             """Extract ID from reference string, e.g. from '$@123' extract '123'"""
             return ref[2:] if ref and isinstance(ref, str) and ref.startswith("$@") else None
 
+        def extract_ref_name(ref: str) -> Optional[str]:
+            """Extract a simple ref name, e.g. from '$fa' extract 'fa'"""
+            if not isinstance(ref, str):
+                return None
+            if ref.startswith("$@"):
+                return ref[2:]
+            if ref.startswith("$") and len(ref) > 1:
+                return ref[1:]
+            return None
+
         def is_valid_content(content: str) -> bool:
             """Check if content is valid"""
             if not content or content in [None, "", "$undefined"]:
                 return False
             return True
 
-        async def process_content_chunk(content: str, chunk_id: str, line_count: int):
-            """Process single content chunk"""
-            nonlocal is_thinking, thinking_content, normal_content, session
+        async def process_content_chunk(content: str, chunk_id: str, line_count: int, *, for_target: bool = False):
+            """
+            Process a single content chunk from a stream.
+
+            If for_target=True, the chunk belongs to the target model output.
+            """
+            nonlocal is_thinking, thinking_content, normal_content, variant_text, session
 
             if not is_valid_content(content):
                 return
 
+            # Handle image-gen chunks
             if '<yapp class="image-gen">' in content:
-                content = content.split('<yapp class="image-gen">').pop().split('</yapp>')[0]
+                img_block = content.split('<yapp class="image-gen">').pop().split('</yapp>')[0]
                 url = "https://yupp.ai/api/trpc/chat.getSignedImage"
-                async with session.get(url, params={"batch": "1", "input": json.dumps({"0": {"json": {"imageId": json.loads(content).get("image_id")}}})}) as resp:
+                async with session.get(
+                    url,
+                    params={
+                        "batch": "1",
+                        "input": json.dumps(
+                            {"0": {"json": {"imageId": json.loads(img_block).get("image_id")}}}
+                        )
+                    }
+                ) as resp:
                     resp.raise_for_status()
                     data = await resp.json()
-                    yield ImageResponse(data[0]["result"]["data"]["json"]["signed_url"], prompt)
+                    img = ImageResponse(
+                        data[0]["result"]["data"]["json"]["signed_url"],
+                        prompt
+                    )
+                    yield img
                 return
 
-            # log_debug(f"Processing chunk #{line_count} with content: '{content[:50]}...'")
-
+            # Optional: thinking-mode support (disabled by default)
             if is_thinking:
                 yield Reasoning(content)
             else:
+                if for_target:
+                    normal_content += content
                 yield content
 
+        def finalize_capture_block(ref_id: str, lines: List[bytes]):
+            """Parse a captured <think> + <yapp> block for a given ref ID."""
+            text = b"".join(lines).decode("utf-8", errors="ignore")
+
+            # Extract <think>...</think>
+            think_start = text.find("<think>")
+            think_end = text.find("</think>")
+            if think_start != -1 and think_end != -1 and think_end > think_start:
+                inner = text[think_start + len("<think>"):think_end].strip()
+                if inner:
+                    think_blocks[ref_id] = inner
+
+            # Extract <yapp class="image-gen">...</yapp>
+            yapp_start = text.find('<yapp class="image-gen">')
+            if yapp_start != -1:
+                yapp_end = text.find("</yapp>", yapp_start)
+                if yapp_end != -1:
+                    yapp_block = text[yapp_start:yapp_end + len("</yapp>")]
+                    image_blocks[ref_id] = yapp_block
+
         try:
             line_count = 0
             quick_response_id = None
             variant_stream_id = None
+            is_started: bool = False
+            variant_image: Optional[ImageResponse] = None
             variant_text = ""
 
+            # "a" is used as the default, then extracted from "1"
+            reward_id = "a"
+            routing_id = "e"
+            turn_id = None
+            persisted_turn_id = None
+            left_message_id = None
+            right_message_id = None
+            nudge_new_chat_id = None
+            nudge_new_chat = False
             async for line in response_content:
                 line_count += 1
+                # If we are currently capturing a think/image block for some ref ID
+                if capturing_ref_id is not None:
+                    capturing_lines.append(line)
+
+                    # Check if this line closes the <yapp> block; after that, the block is complete
+                    if b"</yapp>" in line:
+                        # There may be trailing content such as '2:{...}' after </yapp> on the same line
+                        idx = line.find(b"</yapp>")
+                        suffix = line[idx + len(b"</yapp>"):]
+
+                        # Finalize the captured block for this ref ID
+                        finalize_capture_block(capturing_ref_id, capturing_lines)
+                        capturing_ref_id = None
+                        capturing_lines = []
+
+                        # If there is trailing content (e.g. '2:{"curr":"$fa"...}')
+                        if suffix.strip():
+                            # Process the suffix as a new "line" in the same iteration
+                            line = suffix
+                        else:
+                            # Nothing more on this line
+                            continue
+                    else:
+                        # Still inside the captured block; skip normal processing
+                        continue
+
+                # Detect the start of a <think> block assigned to a ref like 'fa:...<think>'
+                if b"<think>" in line:
+                    m = line_pattern.match(line)
+                    if m:
+                        capturing_ref_id = m.group(1).decode()
+                        capturing_lines = [line]
+                        # Skip normal parsing; the rest of the block is captured until </yapp>
+                        continue
+
                 match = line_pattern.match(line)
                 if not match:
```
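The parser above works on what look like Next.js server-action stream lines of the form `<hex id>:<json payload>`, where payloads chain to their continuation chunk through `$@<id>` references. A minimal illustration of the two helpers' behavior, with an invented sample line:

```python
import json
import re

line_pattern = re.compile(b"^([0-9a-fA-F]+):(.*)")

sample = b'2f:{"curr":"Hello","next":"$@30"}'   # hypothetical stream line
match = line_pattern.match(sample)
chunk_id, payload = match.group(1).decode(), json.loads(match.group(2))

ref = payload["next"]
next_id = ref[2:] if ref.startswith("$@") else None  # what extract_ref_id does
print(chunk_id, payload["curr"], next_id)            # 2f Hello 30
```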
```diff
@@ -455,13 +632,16 @@ class Yupp(AsyncGeneratorProvider, ProviderModelMixin):
                 chunk_id, chunk_data = match.groups()
                 chunk_id = chunk_id.decode()
 
+                if nudge_new_chat_id and chunk_id == nudge_new_chat_id:
+                    nudge_new_chat = chunk_data.decode()
+                    continue
+
                 try:
                     data = json.loads(chunk_data) if chunk_data != b"{}" else {}
                 except json.JSONDecodeError:
                     continue
 
                 # Process reward info
-                if chunk_id == "a":
+                if chunk_id == reward_id and isinstance(data, dict) and "unclaimedRewardInfo" in data:
                     reward_info = data
                     log_debug(f"Found reward info")
```
```diff
@@ -471,14 +651,30 @@ class Yupp(AsyncGeneratorProvider, ProviderModelMixin):
                     if isinstance(data, dict):
                         left_stream = data.get("leftStream", {})
                         right_stream = data.get("rightStream", {})
-                        quick_response_id = extract_ref_id(data.get("quickResponse", {}).get("stream", {}).get("next"))
-                        select_stream = [left_stream, right_stream]
+                        select_stream = [left_stream, right_stream]
+                        if data.get("quickResponse", {}) != "$undefined":
+                            quick_response_id = extract_ref_id(
+                                data.get("quickResponse", {}).get("stream", {}).get("next"))
+
+                elif chunk_id == "e":
+                    if data.get("turnId", {}) != "$undefined":
+                        turn_id = extract_ref_id(data.get("turnId", {}).get("next"))
+                    if data.get("persistedTurn", {}) != "$undefined":
+                        persisted_turn_id = extract_ref_id(data.get("persistedTurn", {}).get("next"))
+                    if data.get("leftMessageId", {}) != "$undefined":
+                        left_message_id = extract_ref_id(data.get("leftMessageId", {}).get("next"))
+                    if data.get("rightMessageId", {}) != "$undefined":
+                        right_message_id = extract_ref_id(data.get("rightMessageId", {}).get("next"))
+
+                    reward_id = extract_ref_id(data.get("pendingRewardActionResult", "")) or reward_id
+                    routing_id = extract_ref_id(data.get("routingResultPromise", "")) or routing_id
+                    nudge_new_chat_id = extract_ref_id(data.get("nudgeNewChatPromise", "")) or nudge_new_chat_id
+                # Routing / model selection block
+                elif chunk_id == routing_id:
+                    yield PlainTextResponse(line.decode(errors="ignore"))
                     if isinstance(data, dict):
                         provider_info = cls.get_dict()
                         provider_info['model'] = model_id
+                        # Determine target & variant stream IDs
                         for i, selection in enumerate(data.get("modelSelections", [])):
                             if selection.get("selectionSource") == "USER_SELECTED":
                                 target_stream_id = extract_ref_id(select_stream[i].get("next"))
```
```diff
@@ -499,43 +695,105 @@ class Yupp(AsyncGeneratorProvider, ProviderModelMixin):
                         target_stream_id = extract_ref_id(data.get("next"))
                         content = data.get("curr", "")
                         if content:
-                            async for chunk in process_content_chunk(content, chunk_id, line_count):
-                                yield chunk
+                            # Handle special "$fa" / "$<id>" references
+                            ref_name = extract_ref_name(content)
+                            if ref_name and (ref_name in think_blocks or ref_name in image_blocks):
+                                # Thinking block
+                                if ref_name in think_blocks:
+                                    t_text = think_blocks[ref_name]
+                                    if t_text:
+                                        reasoning = Reasoning(t_text)
+                                        # thinking_content += t_text
+                                        stream["thinking"].append(reasoning)
+                                        # yield reasoning
+
+                                # Image-gen block
+                                if ref_name in image_blocks:
+                                    img_block_text = image_blocks[ref_name]
+                                    async for chunk in process_content_chunk(
+                                        img_block_text,
+                                        ref_name,
+                                        line_count,
+                                        for_target=True
+                                    ):
+                                        stream["target"].append(chunk)
+                                        is_started = True
+                                        yield chunk
+
+                            else:
+                                # Normal textual chunk
+                                async for chunk in process_content_chunk(
+                                    content,
+                                    chunk_id,
+                                    line_count,
+                                    for_target=True
+                                ):
+                                    stream["target"].append(chunk)
+                                    is_started = True
+                                    yield chunk
+                # Variant stream (comparison)
                 elif variant_stream_id and chunk_id == variant_stream_id:
+                    yield PlainTextResponse("[Variant] " + line.decode(errors="ignore"))
                     if isinstance(data, dict):
                         variant_stream_id = extract_ref_id(data.get("next"))
                         content = data.get("curr", "")
                         if content:
-                            async for chunk in process_content_chunk(content, chunk_id, line_count):
+                            async for chunk in process_content_chunk(
+                                content,
+                                chunk_id,
+                                line_count,
+                                for_target=False
+                            ):
+                                stream["variant"].append(chunk)
                                 if isinstance(chunk, ImageResponse):
                                     yield PreviewResponse(str(chunk))
                                 else:
                                     variant_text += str(chunk)
+                                    if not is_started:
+                                        yield PreviewResponse(variant_text)
 
+                # Quick response (short preview)
                 elif quick_response_id and chunk_id == quick_response_id:
+                    yield PlainTextResponse("[Quick] " + line.decode(errors="ignore"))
                     if isinstance(data, dict):
                         content = data.get("curr", "")
                         if content:
-                            async for chunk in process_content_chunk(content, chunk_id, line_count):
+                            async for chunk in process_content_chunk(
+                                content,
+                                chunk_id,
+                                line_count,
+                                for_target=False
+                            ):
+                                stream["quick"].append(chunk)
+                                quick_content += content
                                 yield PreviewResponse(content)
 
+                elif chunk_id in [turn_id, persisted_turn_id]:
+                    ...
+                elif chunk_id == right_message_id:
+                    ...
+                elif chunk_id == left_message_id:
+                    ...
+                # Miscellaneous extra content
                 elif isinstance(data, dict) and "curr" in data:
                     content = data.get("curr", "")
                     if content:
-                        async for chunk in process_content_chunk(content, chunk_id, line_count):
-                            yield chunk
+                        async for chunk in process_content_chunk(
+                            content,
+                            chunk_id,
+                            line_count,
+                            for_target=False
+                        ):
+                            stream["extra"].append(chunk)
+                            if isinstance(chunk, str) and "<streaming stopped unexpectedly" in chunk:
+                                yield FinishReason(chunk)
+
+                    yield PlainTextResponse("[Extra] " + line.decode(errors="ignore"))
+
+            if variant_image is not None:
+                yield variant_image
+            elif variant_text:
+                yield PreviewResponse(variant_text)
+
+            yield JsonResponse(**stream)
             log_debug(f"Finished processing {line_count} lines")
 
         except:
             raise
```
Provider registry:
```diff
@@ -48,6 +48,8 @@ from .Copilot import Copilot
 from .DeepInfra import DeepInfra
 from .EasyChat import EasyChat
 from .GLM import GLM
+from .GradientNetwork import GradientNetwork
+from .ItalyGPT import ItalyGPT
 from .LambdaChat import LambdaChat
 from .Mintlify import Mintlify
 from .OIVSCodeSer import OIVSCodeSer2, OIVSCodeSer0501
```
g4f/Provider/hf_space/BAAI_Ling.py (new file, 112 lines):
```python
from __future__ import annotations

import aiohttp
import json
import uuid

from ...typing import AsyncResult, Messages
from ...providers.response import JsonConversation
from ...requests.raise_for_status import raise_for_status
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt, get_last_user_message, get_system_prompt
from ... import debug


class BAAI_Ling(AsyncGeneratorProvider, ProviderModelMixin):
    label = "Ling & Ring Playground"
    url = "https://cafe3310-ling-playground.hf.space"
    api_endpoint = f"{url}/gradio_api/queue/join"

    working = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = False

    default_model = "ling-1t"
    model_aliases = {
        "ling": default_model,
    }
    models = ['ling-mini-2.0', 'ling-1t', 'ling-flash-2.0', 'ring-1t', 'ring-flash-2.0', 'ring-mini-2.0']

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        conversation: JsonConversation = None,
        **kwargs
    ) -> AsyncResult:
        is_new_conversation = conversation is None or not hasattr(conversation, 'session_hash')
        if is_new_conversation:
            conversation = JsonConversation(session_hash=str(uuid.uuid4()).replace('-', '')[:12])

        model = cls.get_model(model)
        prompt = format_prompt(messages) if is_new_conversation else get_last_user_message(messages)

        headers = {
            'accept': '*/*',
            'accept-language': 'en-US,en;q=0.9',
            'content-type': 'application/json',
            'origin': cls.url,
            'referer': f'{cls.url}/',
            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36'
        }

        payload = {
            "data": [
                prompt,
                [
                    [
                        None,
                        "Hello! I'm Ling. Try selecting a scenario and a message example below to get started."
                    ]
                ],
                get_system_prompt(messages),
                1,
                model
            ],
            "event_data": None,
            "fn_index": 11,
            "trigger_id": 14,
            "session_hash": conversation.session_hash
        }

        async with aiohttp.ClientSession() as session:
            async with session.post(cls.api_endpoint, headers=headers, json=payload, proxy=proxy) as response:
                await raise_for_status(response)
                # The response body must be consumed for the request to complete
                await response.json()

            data_url = f'{cls.url}/gradio_api/queue/data?session_hash={conversation.session_hash}'
            headers_data = {
                'accept': 'text/event-stream',
                'referer': f'{cls.url}/',
                'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36'
            }

            async with session.get(data_url, headers=headers_data, proxy=proxy) as response:
                full_response = ""
                async for line in response.content:
                    decoded_line = line.decode('utf-8')
                    if decoded_line.startswith('data: '):
                        try:
                            json_data = json.loads(decoded_line[6:])
                            if json_data.get('msg') == 'process_generating':
                                if 'output' in json_data and 'data' in json_data['output']:
                                    output_data = json_data['output']['data']
                                    if output_data and len(output_data) > 0:
                                        parts = output_data[0][0]
                                        if len(parts) == 2:
                                            new_text = output_data[0][1].pop()
                                            full_response += new_text
                                            yield new_text
                                        if len(parts) > 2:
                                            new_text = parts[2]
                                            full_response += new_text
                                            yield new_text

                            elif json_data.get('msg') == 'process_completed':
                                break

                        except json.JSONDecodeError:
                            debug.log("Could not parse JSON:", decoded_line)
```
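BAAI_Ling talks to a Gradio Space through the standard two-step queue protocol: POST the inputs to /gradio_api/queue/join with a session_hash, then read server-sent events from /gradio_api/queue/data for the same hash. A trimmed sketch of just that handshake (payload fields as in the provider above; output parsing omitted):

```python
import json
import uuid

import aiohttp


async def gradio_stream(base_url: str, payload_data: list, fn_index: int, trigger_id: int):
    """Join a Gradio queue, then yield 'data:' events until the run completes."""
    session_hash = uuid.uuid4().hex[:12]
    body = {"data": payload_data, "event_data": None, "fn_index": fn_index,
            "trigger_id": trigger_id, "session_hash": session_hash}
    async with aiohttp.ClientSession() as session:
        async with session.post(f"{base_url}/gradio_api/queue/join", json=body) as resp:
            await resp.json()  # the join response must be consumed
        async with session.get(f"{base_url}/gradio_api/queue/data",
                               params={"session_hash": session_hash}) as resp:
            async for line in resp.content:
                decoded = line.decode("utf-8")
                if decoded.startswith("data: "):
                    event = json.loads(decoded[6:])
                    yield event
                    if event.get("msg") == "process_completed":
                        break
```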
hf_space provider registry:
```diff
@@ -6,6 +6,7 @@ from ...typing import AsyncResult, Messages, MediaListType
 from ...errors import ResponseError
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 
+from .BAAI_Ling import BAAI_Ling
 from .BlackForestLabs_Flux1Dev import BlackForestLabs_Flux1Dev
 from .BlackForestLabs_Flux1KontextDev import BlackForestLabs_Flux1KontextDev
 from .CohereForAI_C4AI_Command import CohereForAI_C4AI_Command
```
```diff
@@ -27,6 +28,7 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
     default_image_model = BlackForestLabs_Flux1Dev.default_model
     default_vision_model = Microsoft_Phi_4_Multimodal.default_model
     providers = [
+        BAAI_Ling,
         BlackForestLabs_Flux1Dev,
         BlackForestLabs_Flux1KontextDev,
         CohereForAI_C4AI_Command,
```
Gemini provider:
```diff
@@ -71,6 +71,7 @@ models = {
     "gemini-2.0-flash-thinking": {"x-goog-ext-525001261-jspb": '[null,null,null,null,"9c17b1863f581b8a"]'},
     "gemini-2.0-flash-thinking-with-apps": {"x-goog-ext-525001261-jspb": '[null,null,null,null,"f8f8f5ea629f5d37"]'},
     # Currently used models
+    "gemini-3-pro": {"x-goog-ext-525001261-jspb": '[1,null,null,null,"9d8ca3786ebdfbea",null,null,0,[4]]'},
     "gemini-2.5-pro": {"x-goog-ext-525001261-jspb": '[1,null,null,null,"61530e79959ab139",null,null,null,[4]]'},
     "gemini-2.5-flash": {"x-goog-ext-525001261-jspb": '[1,null,null,null,"9ec249fc9ad08861",null,null,null,[4]]'},
     "gemini-audio": {}
```
```diff
@@ -89,7 +90,7 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
     default_vision_model = default_model
     image_models = [default_image_model]
     models = [
-        default_model, "gemini-2.5-flash", "gemini-2.5-pro"
+        default_model, "gemini-3-pro", "gemini-2.5-flash", "gemini-2.5-pro"
     ]
 
     synthesize_content_type = "audio/vnd.wav"
```
GeminiCLI provider:
```diff
@@ -500,6 +500,7 @@ class GeminiCLI(AsyncGeneratorProvider, ProviderModelMixin):
     models = [
         "gemini-2.5-pro",
         "gemini-2.5-flash",
+        "gemini-3-pro-preview"
     ]
 
     working = True
```
Grok provider:
```diff
@@ -92,12 +92,26 @@ class Grok(AsyncAuthedProvider, ProviderModelMixin):
             while True:
                 if has_headers:
                     break
-                textarea = await page.select("textarea", 180)
-                await textarea.send_keys("Hello")
-                await asyncio.sleep(1)
-                button = await page.select("button[type='submit']")
-                if button:
-                    await button.click()
+                input_element = None
+                try:
+                    input_element = await page.select("div.ProseMirror", 2)
+                except Exception:
+                    pass
+                if not input_element:
+                    try:
+                        input_element = await page.select("textarea", 180)
+                    except Exception:
+                        pass
+                if input_element:
+                    try:
+                        await input_element.click()
+                        await input_element.send_keys("Hello")
+                        await asyncio.sleep(0.5)
+                        submit_btn = await page.select("button[type='submit']", 2)
+                        if submit_btn:
+                            await submit_btn.click()
+                    except Exception:
+                        pass
                 await asyncio.sleep(1)
             auth_result.cookies = {}
             for c in await page.send(nodriver.cdp.network.get_cookies([cls.url])):
```
LMArena provider:
```diff
@@ -642,7 +642,7 @@ class LMArena(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
 
         if not cls._models_loaded:
             cls.get_models()
-        is_image_model = model in image_models
+        is_image_model = model in cls.image_models
         if not model:
             model = cls.default_model
         if model in cls.model_aliases:
```
OpenaiChat provider:
```diff
@@ -1,18 +1,20 @@
 from __future__ import annotations
 
-import os
-import re
 import asyncio
-import uuid
-import json
 import base64
-import time
 import hashlib
+import json
+import os
 import random
-from typing import AsyncIterator, Iterator, Optional, Generator, Dict, Union, List, Any
+import re
+import time
+import uuid
 from copy import copy
+from typing import AsyncIterator, Iterator, Optional, Generator, Dict, Union, List, Any
 
 try:
     import nodriver
 
     has_nodriver = True
 except ImportError:
     has_nodriver = False
```
```diff
@@ -22,15 +24,17 @@ from ...typing import AsyncResult, Messages, Cookies, MediaListType
 from ...requests.raise_for_status import raise_for_status
 from ...requests import StreamSession
 from ...requests import get_nodriver_session
-from ...image import ImageRequest, to_image, to_bytes, is_accepted_format, detect_file_type
+from ...image import ImageRequest, to_image, to_bytes, detect_file_type
 from ...errors import MissingAuthError, NoValidHarFileError, ModelNotFoundError
-from ...providers.response import JsonConversation, FinishReason, SynthesizeData, AuthResult, ImageResponse, ImagePreview, ResponseType, JsonRequest, format_link
+from ...providers.response import JsonConversation, FinishReason, SynthesizeData, AuthResult, ImageResponse, \
+    ImagePreview, ResponseType, JsonRequest, format_link
 from ...providers.response import TitleGeneration, RequestLogin, Reasoning
 from ...tools.media import merge_media
 from ..helper import format_cookies, format_media_prompt, to_string
 from ..openai.models import default_model, default_image_model, models, image_models, text_models, model_aliases
 from ..openai.har_file import get_request_config
-from ..openai.har_file import RequestConfig, arkReq, arkose_url, start_url, conversation_url, backend_url, prepare_url, backend_anon_url
+from ..openai.har_file import RequestConfig, arkReq, arkose_url, start_url, conversation_url, backend_url, prepare_url, \
+    backend_anon_url
 from ..openai.proofofwork import generate_proof_token
 from ..openai.new import get_requirements_token, get_config
 from ... import debug
```
```diff
@@ -87,6 +91,9 @@ UPLOAD_HEADERS = {
     "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
 }
 
+ImagesCache: Dict[str, dict] = {}
+
 
 class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
     """A class for creating and managing conversations with OpenAI chat service"""
 
```
```diff
@@ -95,6 +102,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
     working = True
     active_by_default = True
     use_nodriver = True
+    image_cache = True
     supports_gpt_4 = True
     supports_message_history = True
     supports_system_message = True
```
```diff
@@ -131,7 +139,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
         session: StreamSession,
         auth_result: AuthResult,
         media: MediaListType,
-    ) -> list[ImageRequest]:
+    ) -> List[ImageRequest]:
         """
         Upload an image to the service and get the download URL
 
```
@@ -143,11 +151,20 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
         Returns:
             An ImageRequest object that contains the download URL, file name, and other data
         """
-        async def upload_file(file, image_name=None):
+
+        async def upload_file(file, image_name=None) -> ImageRequest:
             debug.log(f"Uploading file: {image_name}")
             file_data = {}

             data_bytes = to_bytes(file)
+            # Check Cache
+            hasher = hashlib.md5()
+            hasher.update(data_bytes)
+            image_hash = hasher.hexdigest()
+            cache_file = ImagesCache.get(image_hash)
+            if cls.image_cache and cache_file:
+                debug.log("Using cached image")
+                return ImageRequest(cache_file)
             extension, mime_type = detect_file_type(data_bytes)
             if "image" in mime_type:
                 # Convert the image to a PIL Image object
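The new cache path above keys each upload by the MD5 digest of the raw file bytes, so re-sending byte-identical media skips the upload round trip entirely. A minimal standalone sketch of the same idea — the cache dict and the `do_upload` callback here are illustrative stand-ins, not the provider's actual API:

```python
import hashlib
from typing import Callable, Dict

_upload_cache: Dict[str, dict] = {}  # content hash -> upload metadata (mirrors ImagesCache)

def upload_once(data: bytes, do_upload: Callable[[bytes], dict]) -> dict:
    """Upload `data` unless a byte-identical payload was already uploaded."""
    key = hashlib.md5(data).hexdigest()
    cached = _upload_cache.get(key)
    if cached is not None:
        return cached.copy()           # identical bytes: reuse the stored metadata
    result = do_upload(data)           # e.g. returns {"download_url": ...}
    _upload_cache[key] = result.copy()
    return result
```

Hashing the bytes rather than the filename means two different names for the same image still hit the cache, which matches the diff's choice to hash `data_bytes` before any format detection.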
@@ -202,9 +219,10 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                 await raise_for_status(response, "Get download url failed")
                 uploaded_data = await response.json()
                 file_data["download_url"] = uploaded_data["download_url"]
+                ImagesCache[image_hash] = file_data.copy()
                 return ImageRequest(file_data)

-        medias = []
+        medias: List["ImageRequest"] = []
         for item in media:
             item = item if isinstance(item, tuple) else (item,)
             __uploaded_media = await upload_file(*item)
@@ -242,7 +260,8 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
             "id": str(uuid.uuid4()),
             "author": {"role": message["role"]},
             "content": {"content_type": "text", "parts": [to_string(message["content"])]},
-            "metadata": {"serialization_metadata": {"custom_symbol_offsets": []}, **({"system_hints": system_hints} if system_hints else {})},
+            "metadata": {"serialization_metadata": {"custom_symbol_offsets": []},
+                         **({"system_hints": system_hints} if system_hints else {})},
             "create_time": time.time(),
         } for message in messages]
         # Check if there is an image response
@@ -283,7 +302,9 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
         return messages

     @classmethod
-    async def get_generated_image(cls, session: StreamSession, auth_result: AuthResult, element: Union[dict, str], prompt: str = None, conversation_id: str = None) -> ImagePreview|ImageResponse|None:
+    async def get_generated_image(cls, session: StreamSession, auth_result: AuthResult, element: Union[dict, str],
+                                  prompt: str = None, conversation_id: str = None,
+                                  status: Optional[str] = None) -> ImagePreview | ImageResponse | None:
         download_urls = []
         is_sediment = False
         if prompt is None:
@@ -303,7 +324,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
         if is_sediment:
             url = f"{cls.url}/backend-api/conversation/{conversation_id}/attachment/{element}/download"
         else:
-            url =f"{cls.url}/backend-api/files/{element}/download"
+            url = f"{cls.url}/backend-api/files/{element}/download"
         try:
             async with session.get(url, headers=auth_result.headers) as response:
                 cls._update_request_args(auth_result, session)
@@ -319,7 +340,11 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
             debug.error("OpenaiChat: Download image failed")
             debug.error(e)
         if download_urls:
-            return ImagePreview(download_urls, prompt, {"headers": auth_result.headers}) if is_sediment else ImageResponse(download_urls, prompt, {"headers": auth_result.headers})
+            # status = None, finished_successfully
+            if is_sediment and status is None:
+                return ImagePreview(download_urls, prompt, {"status": status, "headers": auth_result.headers})
+            else:
+                return ImageResponse(download_urls, prompt, {"status": status, "headers": auth_result.headers})

     @classmethod
     async def create_authed(
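The new `status` parameter threads the stream's `/message/status` value into the image handling, so a sediment asset that is still rendering (`status is None`) comes back as a streaming `ImagePreview`, while anything else (e.g. `finished_successfully`) becomes the final `ImageResponse`. A reduced sketch of that dispatch, with stub classes standing in for the g4f response types:

```python
from typing import List, Optional

class ImagePreview:       # stand-in for g4f's streaming preview type
    def __init__(self, urls: List[str], prompt: str, options: dict):
        self.urls, self.prompt, self.options = urls, prompt, options

class ImageResponse(ImagePreview):  # stand-in for the final response type
    pass

def wrap_images(urls: List[str], prompt: str, headers: dict,
                is_sediment: bool, status: Optional[str]) -> ImagePreview:
    # status stays None while generation is running; the backend sets it,
    # e.g. to "finished_successfully", once the asset is complete.
    if is_sediment and status is None:
        return ImagePreview(urls, prompt, {"status": status, "headers": headers})
    return ImageResponse(urls, prompt, {"status": status, "headers": headers})
```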
@@ -446,7 +471,8 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                 f"{cls.url}/backend-anon/sentinel/chat-requirements"
                 if cls._api_key is None else
                 f"{cls.url}/backend-api/sentinel/chat-requirements",
-                json={"p": None if not getattr(auth_result, "proof_token", None) else get_requirements_token(getattr(auth_result, "proof_token", None))},
+                json={"p": None if not getattr(auth_result, "proof_token", None) else get_requirements_token(
+                    getattr(auth_result, "proof_token", None))},
                 headers=cls._headers
             ) as response:
                 if response.status in (401, 403):
@@ -476,23 +502,25 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                     proof_token=proof_token
                 )
             # [debug.log(text) for text in (
-            #f"Arkose: {'False' if not need_arkose else auth_result.arkose_token[:12]+'...'}",
-            #f"Proofofwork: {'False' if proofofwork is None else proofofwork[:12]+'...'}",
-            #f"AccessToken: {'False' if cls._api_key is None else cls._api_key[:12]+'...'}",
+            # f"Arkose: {'False' if not need_arkose else auth_result.arkose_token[:12]+'...'}",
+            # f"Proofofwork: {'False' if proofofwork is None else proofofwork[:12]+'...'}",
+            # f"AccessToken: {'False' if cls._api_key is None else cls._api_key[:12]+'...'}",
             # )]
             data = {
                 "action": "next",
                 "parent_message_id": conversation.message_id,
                 "model": model,
-                "timezone_offset_min":-120,
-                "timezone":"Europe/Berlin",
-                "conversation_mode":{"kind":"primary_assistant"},
-                "enable_message_followups":True,
+                "timezone_offset_min": -120,
+                "timezone": "Europe/Berlin",
+                "conversation_mode": {"kind": "primary_assistant"},
+                "enable_message_followups": True,
                 "system_hints": ["search"] if web_search else None,
-                "supports_buffering":True,
-                "supported_encodings":["v1"],
-                "client_contextual_info":{"is_dark_mode":False,"time_since_loaded":random.randint(20, 500),"page_height":578,"page_width":1850,"pixel_ratio":1,"screen_height":1080,"screen_width":1920},
-                "paragen_cot_summary_display_override":"allow"
+                "supports_buffering": True,
+                "supported_encodings": ["v1"],
+                "client_contextual_info": {"is_dark_mode": False, "time_since_loaded": random.randint(20, 500),
+                                           "page_height": 578, "page_width": 1850, "pixel_ratio": 1,
+                                           "screen_height": 1080, "screen_width": 1920},
+                "paragen_cot_summary_display_override": "allow"
             }
             if temporary:
                 data["history_and_training_disabled"] = True
@@ -512,7 +540,8 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                         new_messages = []
                     else:
                         new_messages.append(message)
-                data["messages"] = cls.create_messages(new_messages, image_requests, ["search"] if web_search else None)
+                data["messages"] = cls.create_messages(new_messages, image_requests,
+                                                       ["search"] if web_search else None)
             yield JsonRequest.from_dict(data)
             headers = {
                 **cls._headers,
@@ -521,7 +550,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                 "openai-sentinel-chat-requirements-token": chat_token,
                 **({} if conduit_token is None else {"x-conduit-token": conduit_token})
             }
-            #if cls.request_config.arkose_token:
+            # if cls.request_config.arkose_token:
             #     headers["openai-sentinel-arkose-token"] = cls.request_config.arkose_token
             if proofofwork is not None:
                 headers["openai-sentinel-proof-token"] = proofofwork
@@ -548,10 +577,12 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                         if match.group(0) in matches:
                             continue
                         matches.append(match.group(0))
-                        generated_image = await cls.get_generated_image(session, auth_result, match.group(0), prompt)
+                        generated_image = await cls.get_generated_image(session, auth_result, match.group(0),
+                                                                        prompt)
                         if generated_image is not None:
                             yield generated_image
-                    async for chunk in cls.iter_messages_line(session, auth_result, line, conversation, sources, references):
+                    async for chunk in cls.iter_messages_line(session, auth_result, line, conversation, sources,
+                                                              references):
                         if isinstance(chunk, str):
                             chunk = chunk.replace("\ue203", "").replace("\ue204", "").replace("\ue206", "")
                             buffer += chunk
@@ -582,9 +613,11 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                                     return f"})"

                                 if is_video_embedding:
-                                    if reference.get("url", "") and reference.get("thumbnail_url", ""):
+                                    if reference.get("url", "") and reference.get("thumbnail_url",
+                                                                                  ""):
                                         return f"[]({reference['url']})"
-                                    video_match = re.match(r"video\n(.*?)\nturn[0-9]+", match.group(0))
+                                    video_match = re.match(r"video\n(.*?)\nturn[0-9]+",
+                                                           match.group(0))
                                     if video_match:
                                         return video_match.group(1)
                                     return ""
@@ -595,7 +628,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                                 })
                                 if source_index is not None and len(sources.list) > source_index:
                                     link = sources.list[source_index]["url"]
-                                    return f"[[{source_index+1}]]({link})"
+                                    return f"[[{source_index + 1}]]({link})"
                                 return f""

                             def products_replacer(match: re.Match[str]):
@@ -612,7 +645,9 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                                 return ""

                             sequence_content = match.group(1)
-                            sequence_content = sequence_content.replace("\ue200", "").replace("\ue202", "\n").replace("\ue201", "")
+                            sequence_content = sequence_content.replace("\ue200", "").replace("\ue202",
+                                                                                              "\n").replace(
+                                "\ue201", "")
                             sequence_content = sequence_content.replace("navlist\n", "#### ")

                             # Handle search, news, view and image citations
@@ -623,12 +658,15 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                                 citation_replacer,
                                 sequence_content
                             )
-                            sequence_content = re.sub(r'products\n(.*)', products_replacer, sequence_content)
-                            sequence_content = re.sub(r'product_entity\n\[".*","(.*)"\]', lambda x: x.group(1), sequence_content)
+                            sequence_content = re.sub(r'products\n(.*)', products_replacer,
+                                                      sequence_content)
+                            sequence_content = re.sub(r'product_entity\n\[".*","(.*)"\]',
+                                                      lambda x: x.group(1), sequence_content)
                             return sequence_content

                         # process only completed sequences and do not touch start of next not completed sequence
-                        buffer = re.sub(r'\ue200(.*?)\ue201', sequence_replacer, buffer, flags=re.DOTALL)
+                        buffer = re.sub(r'\ue200(.*?)\ue201', sequence_replacer, buffer,
+                                        flags=re.DOTALL)

                        if buffer.find(u"\ue200") != -1:  # still have uncompleted sequence
                            continue
@@ -647,7 +685,8 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
         if sources.list:
             yield sources
         if conversation.generated_images:
-            yield ImageResponse(conversation.generated_images.urls, conversation.prompt, {"headers": auth_result.headers})
+            yield ImageResponse(conversation.generated_images.urls, conversation.prompt,
+                                {"headers": auth_result.headers})
             conversation.generated_images = None
             conversation.prompt = None
         if return_conversation:
@@ -667,7 +706,9 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
             yield FinishReason(conversation.finish_reason)

     @classmethod
-    async def iter_messages_line(cls, session: StreamSession, auth_result: AuthResult, line: bytes, fields: Conversation, sources: OpenAISources, references: ContentReferences) -> AsyncIterator:
+    async def iter_messages_line(cls, session: StreamSession, auth_result: AuthResult, line: bytes,
+                                 fields: Conversation, sources: OpenAISources,
+                                 references: ContentReferences) -> AsyncIterator:
         if not line.startswith(b"data: "):
             return
         elif line.startswith(b"data: [DONE]"):
@@ -706,7 +747,12 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                 elif m.get("p") == "/message/metadata/image_gen_title":
                     fields.prompt = m.get("v")
                 elif m.get("p") == "/message/content/parts/0/asset_pointer":
-                    generated_images = fields.generated_images = await cls.get_generated_image(session, auth_result, m.get("v"), fields.prompt, fields.conversation_id)
+                    status = next(filter(lambda x: x.get("p") == '/message/status', v), {}).get('v', None)
+                    generated_images = fields.generated_images = await cls.get_generated_image(session, auth_result,
+                                                                                               m.get("v"),
+                                                                                               fields.prompt,
+                                                                                               fields.conversation_id,
+                                                                                               status)
                     if generated_images is not None:
                         if buffer:
                             yield buffer
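The added `status` lookup scans the patch list `v` for the first entry whose `"p"` path is `/message/status`; the `{}` default keeps the trailing `.get` from raising when no such entry exists. The first-match-or-default idiom in isolation:

```python
# next(filter(...), default) returns the first matching element, or the default.
patches = [
    {"p": "/message/content/parts/0", "v": "Hello"},
    {"p": "/message/status", "v": "finished_successfully"},
]
status = next(filter(lambda x: x.get("p") == "/message/status", patches), {}).get("v", None)
assert status == "finished_successfully"

status = next(filter(lambda x: x.get("p") == "/message/status", []), {}).get("v", None)
assert status is None  # the empty-dict default keeps the .get chain safe
```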
@@ -735,34 +781,41 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                     if match and m.get("o") == "append" and isinstance(m.get("v"), dict):
                         idx = int(match.group(1))
                         references.merge_reference(idx, m.get("v"))
-                elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+/fallback_items$", m.get("p")) and isinstance(m.get("v"), list):
+                elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+/fallback_items$",
+                                             m.get("p")) and isinstance(m.get("v"), list):
                     for link in m.get("v", []) or []:
                         sources.add_source(link)
-                elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+/items$", m.get("p")) and isinstance(m.get("v"), list):
+                elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+/items$",
+                                             m.get("p")) and isinstance(m.get("v"), list):
                     for link in m.get("v", []) or []:
                         sources.add_source(link)
-                elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+/refs$", m.get("p")) and isinstance(m.get("v"), list):
+                elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+/refs$",
+                                             m.get("p")) and isinstance(m.get("v"), list):
                     match = re.match(r"^/message/metadata/content_references/(\d+)/refs$", m.get("p"))
                     if match:
                         idx = int(match.group(1))
                         references.update_reference(idx, m.get("o"), "refs", m.get("v"))
-                elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+/alt$", m.get("p")) and isinstance(m.get("v"), list):
+                elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+/alt$",
+                                             m.get("p")) and isinstance(m.get("v"), list):
                     match = re.match(r"^/message/metadata/content_references/(\d+)/alt$", m.get("p"))
                     if match:
                         idx = int(match.group(1))
                         references.update_reference(idx, m.get("o"), "alt", m.get("v"))
-                elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+/prompt_text$", m.get("p")) and isinstance(m.get("v"), list):
+                elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+/prompt_text$",
+                                             m.get("p")) and isinstance(m.get("v"), list):
                     match = re.match(r"^/message/metadata/content_references/(\d+)/prompt_text$", m.get("p"))
                     if match:
                         idx = int(match.group(1))
                         references.update_reference(idx, m.get("o"), "prompt_text", m.get("v"))
-                elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+/refs/\d+$", m.get("p")) and isinstance(m.get("v"), dict):
+                elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+/refs/\d+$",
+                                             m.get("p")) and isinstance(m.get("v"), dict):
                     match = re.match(r"^/message/metadata/content_references/(\d+)/refs/(\d+)$", m.get("p"))
                     if match:
                         reference_idx = int(match.group(1))
                         ref_idx = int(match.group(2))
                         references.update_reference(reference_idx, m.get("o"), "refs", m.get("v"), ref_idx)
-                elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+/images$", m.get("p")) and isinstance(m.get("v"), list):
+                elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+/images$",
+                                             m.get("p")) and isinstance(m.get("v"), list):
                     match = re.match(r"^/message/metadata/content_references/(\d+)/images$", m.get("p"))
                     if match:
                         idx = int(match.group(1))
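Each of these branches matches a JSON-patch-style pointer such as `/message/metadata/content_references/3/refs/1`, then re-matches with capture groups to pull out the numeric indices that select which reference slot to update. The extraction step on its own:

```python
import re

path = "/message/metadata/content_references/3/refs/1"
if re.match(r"^/message/metadata/content_references/\d+/refs/\d+$", path):
    match = re.match(r"^/message/metadata/content_references/(\d+)/refs/(\d+)$", path)
    reference_idx, ref_idx = int(match.group(1)), int(match.group(2))
    assert (reference_idx, ref_idx) == (3, 1)
```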
@@ -785,10 +838,11 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
             fields.recipient = m.get("recipient", fields.recipient)
             if fields.recipient == "all":
                 c = m.get("content", {})
-                if c.get("content_type") == "text" and m.get("author", {}).get("role") == "tool" and "initial_text" in m.get("metadata", {}):
+                if c.get("content_type") == "text" and m.get("author", {}).get(
+                        "role") == "tool" and "initial_text" in m.get("metadata", {}):
                     fields.is_thinking = True
                     yield Reasoning(status=m.get("metadata", {}).get("initial_text"))
-                #if c.get("content_type") == "multimodal_text":
+                # if c.get("content_type") == "multimodal_text":
                 #    for part in c.get("parts"):
                 #        if isinstance(part, dict) and part.get("content_type") == "image_asset_pointer":
                 #            yield await cls.get_generated_image(session, auth_result, part, fields.prompt, fields.conversation_id)
@@ -825,7 +879,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
         headers: dict = None,
         **kwargs
     ) -> AsyncIterator:
-        if cls._expires is not None and (cls._expires - 60*10) < time.time():
+        if cls._expires is not None and (cls._expires - 60 * 10) < time.time():
             cls._headers = cls._api_key = None
         if cls._headers is None or headers is not None:
             cls._headers = {} if headers is None else headers
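The reformatted expiry check is unchanged in behavior: cached headers and the API key are dropped ten minutes (`60 * 10` seconds) before the token's actual expiry, so no request starts with a nearly-expired credential. The pattern in isolation:

```python
import time
from typing import Optional

EXPIRY_MARGIN = 60 * 10  # refresh ten minutes early

def needs_refresh(expires_at: Optional[float]) -> bool:
    return expires_at is not None and (expires_at - EXPIRY_MARGIN) < time.time()

assert needs_refresh(time.time() + 60)        # one minute left: refresh now
assert not needs_refresh(time.time() + 3600)  # a full hour left: keep using it
assert not needs_refresh(None)                # no expiry recorded: nothing to do
```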
@@ -858,6 +912,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
     async def nodriver_auth(cls, proxy: str = None):
         async with get_nodriver_session(proxy=proxy) as browser:
             page = await browser.get(cls.url)
+
             def on_request(event: nodriver.cdp.network.RequestWillBeSent, page=None):
                 if event.request.url == start_url or event.request.url.startswith(conversation_url):
                     if cls.request_config.headers is None:
@@ -867,7 +922,8 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                 elif event.request.url in (backend_url, backend_anon_url):
                     if "OpenAI-Sentinel-Proof-Token" in event.request.headers:
                         cls.request_config.proof_token = json.loads(base64.b64decode(
-                            event.request.headers["OpenAI-Sentinel-Proof-Token"].split("gAAAAAB", 1)[-1].split("~")[0].encode()
+                            event.request.headers["OpenAI-Sentinel-Proof-Token"].split("gAAAAAB", 1)[-1].split("~")[
+                                0].encode()
                         ).decode())
                     if "OpenAI-Sentinel-Turnstile-Token" in event.request.headers:
                         cls.request_config.turnstile_token = event.request.headers["OpenAI-Sentinel-Turnstile-Token"]
@@ -881,6 +937,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                         arkBody=event.request.post_data,
                         userAgent=event.request.headers.get("User-Agent")
                     )
+
             await page.send(nodriver.cdp.network.enable())
             page.add_handler(nodriver.cdp.network.RequestWillBeSent, on_request)
             await page.reload()
@@ -912,7 +969,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                 if cls._api_key is not None or not cls.needs_auth:
                     break
                 await asyncio.sleep(1)
-            debug.log(f"OpenaiChat: Access token: {'False' if cls._api_key is None else cls._api_key[:12]+'...'}")
+            debug.log(f"OpenaiChat: Access token: {'False' if cls._api_key is None else cls._api_key[:12] + '...'}")
             while True:
                 if cls.request_config.proof_token:
                     break
@@ -970,11 +1027,14 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
         if cls._cookies:
             cls._headers["cookie"] = format_cookies(cls._cookies)

+
 class Conversation(JsonConversation):
     """
     Class to encapsulate response fields.
     """
-    def __init__(self, conversation_id: str = None, message_id: str = None, user_id: str = None, finish_reason: str = None, parent_message_id: str = None, is_thinking: bool = False):
+
+    def __init__(self, conversation_id: str = None, message_id: str = None, user_id: str = None,
+                 finish_reason: str = None, parent_message_id: str = None, is_thinking: bool = False):
         self.conversation_id = conversation_id
         self.message_id = message_id
         self.finish_reason = finish_reason
@@ -987,6 +1047,7 @@ class Conversation(JsonConversation):
         self.prompt = None
         self.generated_images: ImagePreview = None

+
 def get_cookies(
     urls: Optional[Iterator[str]] = None
 ) -> Generator[Dict, Dict, Dict[str, str]]:
@@ -1000,6 +1061,7 @@ def get_cookies(
     json = yield cmd_dict
     return {c["name"]: c["value"] for c in json['cookies']} if 'cookies' in json else {}

+
 class OpenAISources(ResponseType):
     list: List[Dict[str, str]]

@@ -1038,11 +1100,11 @@ class OpenAISources(ResponseType):
         if not self.list:
             return ""
         return "\n\n\n\n" + ("\n>\n".join([
-            f"> [{idx+1}] {format_link(link['url'], link.get('title', ''))}"
+            f"> [{idx + 1}] {format_link(link['url'], link.get('title', ''))}"
             for idx, link in enumerate(self.list)
         ]))

-    def get_ref_info(self, source: Dict[str, str]) -> dict[str, str|int] | None:
+    def get_ref_info(self, source: Dict[str, str]) -> dict[str, str | int] | None:
         ref_index = source.get("ref_id", {}).get("ref_index", None)
         ref_type = source.get("ref_id", {}).get("ref_type", None)
         if isinstance(ref_index, int):
@@ -1062,7 +1124,7 @@ class OpenAISources(ResponseType):

         return None

-    def find_by_ref_info(self, ref_info: dict[str, str|int]):
+    def find_by_ref_info(self, ref_info: dict[str, str | int]):
         for idx, source in enumerate(self.list):
             source_ref_info = self.get_ref_info(source)
             if (source_ref_info and
@@ -1078,13 +1140,14 @@ class OpenAISources(ResponseType):
                 return source, idx
         return None, None

-    def get_index(self, ref_info: dict[str, str|int]) -> int | None:
+    def get_index(self, ref_info: dict[str, str | int]) -> int | None:
         _, index = self.find_by_ref_info(ref_info)
         if index is not None:
             return index

         return None

+
 class ContentReferences:
     def __init__(self) -> None:
         self.list: List[Dict[str, Any]] = []
@@ -1098,7 +1161,7 @@ class ContentReferences:

         self.list[idx] = {**self.list[idx], **reference_part}

-    def update_reference(self, idx: int, operation: str, field: str, value: Any, ref_idx = None) -> None:
+    def update_reference(self, idx: int, operation: str, field: str, value: Any, ref_idx=None) -> None:
         while len(self.list) <= idx:
             self.list.append({})

@@ -1126,7 +1189,7 @@ class ContentReferences:
         self,
         source: Dict[str, str],
         target_ref_info: Dict[str, Union[str, int]]
-    ) -> dict[str, str|int] | None:
+    ) -> dict[str, str | int] | None:
         for idx, ref_info in enumerate(source.get("refs", [])) or []:
             if not isinstance(ref_info, dict):
                 continue
@@ -440,7 +440,7 @@ class Backend_Api(Api):
                     os.remove(copyfile)
                 continue
             if not is_media and result:
-                with open(os.path.join(bucket_dir, f"{filename}.md"), 'w') as f:
+                with open(os.path.join(bucket_dir, f"{filename}.md"), 'w', encoding="utf-8") as f:
                     f.write(f"{result}\n")
                 filenames.append(f"{filename}.md")
             if is_media:
@@ -477,7 +477,7 @@ class Backend_Api(Api):
             except OSError:
                 shutil.copyfile(copyfile, newfile)
                 os.remove(copyfile)
-        with open(os.path.join(bucket_dir, "files.txt"), 'w') as f:
+        with open(os.path.join(bucket_dir, "files.txt"), 'w', encoding="utf-8") as f:
            for filename in filenames:
                f.write(f"{filename}\n")
        return {"bucket_id": bucket_id, "files": filenames, "media": media}
@@ -572,7 +572,7 @@ class Backend_Api(Api):
         share_id = secure_filename(share_id)
         bucket_dir = get_bucket_dir(share_id)
         os.makedirs(bucket_dir, exist_ok=True)
-        with open(os.path.join(bucket_dir, "chat.json"), 'w') as f:
+        with open(os.path.join(bucket_dir, "chat.json"), 'w', encoding="utf-8") as f:
             json.dump(chat_data, f)
         self.chat_cache[share_id] = updated
         return {"share_id": share_id}
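All three writes in this file now pin `encoding="utf-8"` because `open(..., 'w')` otherwise uses the platform's locale encoding (often cp1252 on Windows), which makes non-ASCII chat content raise `UnicodeEncodeError` or round-trip incorrectly. A small demonstration of why the explicit encoding matters:

```python
# With the default locale encoding this write can fail on Windows;
# pinning utf-8 makes the file portable across platforms.
text = "résumé – 日本語"
with open("note.md", "w", encoding="utf-8") as f:
    f.write(text)
with open("note.md", "r", encoding="utf-8") as f:
    assert f.read() == text
```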
@@ -9,6 +9,8 @@ from pathlib import Path
 from typing import Optional
 from urllib.parse import urlparse

+import requests
+
 try:
     from PIL import Image, ImageOps
     has_requirements = True
@@ -383,6 +385,13 @@ def to_bytes(image: ImageType) -> bytes:
                 return Path(path).read_bytes()
             else:
                 raise FileNotFoundError(f"File not found: {path}")
+        else:
+            resp = requests.get(image, headers={
+                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36 Edg/137.0.0.0",
+            })
+            if resp.ok and is_accepted_format(resp.content):
+                return resp.content
+            raise ValueError("Invalid image url. Expected bytes, str, or PIL Image.")
     else:
         raise ValueError("Invalid image format. Expected bytes, str, or PIL Image.")
 elif isinstance(image, Image.Image):
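The new branch teaches `to_bytes` to accept a plain http(s) URL: the body is fetched with a browser-like User-Agent and only returned when `is_accepted_format` recognizes the magic bytes, so an HTML error page never masquerades as an image. A self-contained sketch of the same guard, validating with Pillow instead of the module's own helper:

```python
import io

import requests
from PIL import Image

def fetch_image_bytes(url: str) -> bytes:
    """Download an image URL and verify the payload really decodes as an image."""
    resp = requests.get(url, headers={"User-Agent": "Mozilla/5.0"}, timeout=30)
    resp.raise_for_status()
    try:
        Image.open(io.BytesIO(resp.content)).verify()  # cheap header/structure check
    except Exception as exc:
        raise ValueError(f"URL did not return a decodable image: {url}") from exc
    return resp.content
```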
@@ -31,17 +31,21 @@ class StreamResponse(ClientResponse):
             except json.JSONDecodeError:
                 continue

-class StreamSession():
+class StreamSession:
     def __init__(
         self,
-        headers: dict = {},
+        headers=None,
         timeout: int = None,
         connector: BaseConnector = None,
         proxy: str = None,
-        proxies: dict = {},
+        proxies=None,
         impersonate = None,
         **kwargs
     ):
+        if proxies is None:
+            proxies = {}
+        if headers is None:
+            headers = {}
         if impersonate:
             headers = {
                 **DEFAULT_HEADERS,
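Swapping `headers: dict = {}` and `proxies: dict = {}` for `None` defaults fixes Python's classic mutable-default pitfall: a `{}` default is created once at function definition time and shared by every call that omits the argument, so one session's mutations can leak into the next. A minimal demonstration:

```python
def broken(headers={}):           # one dict shared across all calls
    headers.setdefault("count", 0)
    headers["count"] += 1
    return headers

def fixed(headers=None):          # fresh dict per call, as in the diff
    if headers is None:
        headers = {}
    headers.setdefault("count", 0)
    headers["count"] += 1
    return headers

assert broken() == {"count": 1}
assert broken() == {"count": 2}   # state leaked between unrelated calls
assert fixed() == {"count": 1}
assert fixed() == {"count": 1}    # each call gets its own dict
```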
@@ -49,7 +53,7 @@ class StreamSession():
             }
         connect = None
         if isinstance(timeout, tuple):
-            connect, timeout = timeout;
+            connect, timeout = timeout
         if timeout is not None:
             timeout = ClientTimeout(timeout, connect)
         if proxy is None:
@@ -18,4 +18,3 @@ python-multipart
 a2wsgi
 python-dotenv
 ddgs
-aiofile