Mirror of https://github.com/xtekky/gpt4free.git, synced 2025-12-06 02:30:41 -08:00
Compare commits
25 commits
| Author | SHA1 | Date |
|---|---|---|
|  | 957d73a76e |  |
|  | 468dc7bd67 |  |
|  | 1fd9b8d116 |  |
|  | ed84c2dc6b |  |
|  | cda4634d34 |  |
|  | 1b3628dfee |  |
|  | 0d9c00b4e3 |  |
|  | 21113c51a6 |  |
|  | 098b2401ea |  |
|  | 04e300d7a6 |  |
|  | c364425250 |  |
|  | f57663cbe8 |  |
|  | da4d7d118d |  |
|  | 07883bc9f0 |  |
|  | f0ea4c5b95 |  |
|  | 7b32f89eca |  |
|  | d76e56a66f |  |
|  | 6be76e3e84 |  |
|  | 688640b764 |  |
|  | 2e6d417d02 |  |
|  | 7771cf3d43 |  |
|  | 32215bb7bb |  |
|  | 05c108d3f6 |  |
|  | 18fda760cb |  |
|  | 9c7fc9fe4a |  |
20 changed files with 1104 additions and 349 deletions
1 .gitignore (vendored)

@@ -9,3 +9,4 @@ g4f.egg-info
 models/models.json
 pyvenv.cfg
 lib64
+/.idea

@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   g4f-slim:
     container_name: g4f-slim

@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   gpt4free:
     image: hlohaus789/g4f:latest

@@ -19,7 +19,7 @@ RUN if [ "$G4F_VERSION" = "" ] ; then \
 RUN apt-get -qqy update \
   && apt-get -qqy upgrade \
   && apt-get -qyy autoremove \
-  && apt-get -qqy install python3 python-is-python3 pip ffmpeg flac libavcodec-extra build-essential python3-dev \
+  && apt-get -qqy install python3 python-is-python3 pip ffmpeg flac libavcodec-extra \
   && apt-get -qyy remove openjdk-11-jre-headless \
   && apt-get -qyy autoremove \
   && apt-get -qyy clean \

107 g4f/Provider/GradientNetwork.py (new file)

@@ -0,0 +1,107 @@
from __future__ import annotations

import json

from ..typing import AsyncResult, Messages
from ..providers.response import Reasoning, JsonResponse
from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin


class GradientNetwork(AsyncGeneratorProvider, ProviderModelMixin):
    """
    Provider for chat.gradient.network
    Supports streaming text generation with Qwen and GPT OSS models.
    """
    label = "Gradient Network"
    url = "https://chat.gradient.network"
    api_endpoint = "https://chat.gradient.network/api/generate"

    working = True
    needs_auth = False
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = "GPT OSS 120B"
    models = [
        default_model,
        "Qwen3 235B",
    ]
    model_aliases = {
        "qwen-3-235b": "Qwen3 235B",
        "qwen3-235b": "Qwen3 235B",
        "gpt-oss-120b": "GPT OSS 120B",
    }

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        enable_thinking: bool = True,
        **kwargs
    ) -> AsyncResult:
        """
        Create an async generator for streaming chat responses.

        Args:
            model: The model name to use
            messages: List of message dictionaries
            proxy: Optional proxy URL
            enable_thinking: Enable the thinking/analysis channel (maps to enableThinking in API)
            **kwargs: Additional arguments

        Yields:
            str: Content chunks from the response
            Reasoning: Reasoning content when enable_thinking is True
        """
        model = cls.get_model(model)

        headers = {
            "Accept": "application/x-ndjson",
            "Content-Type": "application/json",
            "Origin": cls.url,
            "Referer": f"{cls.url}/",
        }

        payload = {
            "clusterMode": "nvidia" if "GPT OSS" in model else "hybrid",
            "model": model,
            "messages": messages,
        }
        if enable_thinking:
            payload["enableThinking"] = enable_thinking
        async with StreamSession(headers=headers, proxy=proxy, impersonate="chrome") as session:
            async with session.post(
                cls.api_endpoint,
                json=payload,
            ) as response:
                response.raise_for_status()

                async for line in response.iter_lines():
                    if not line:
                        continue

                    try:
                        data = json.loads(line)
                        yield JsonResponse.from_dict(data)
                        msg_type = data.get("type")

                        if msg_type == "reply":
                            # Response chunks with content or reasoningContent
                            reply_data = data.get("data", {})
                            content = reply_data.get("content")
                            reasoning_content = reply_data.get("reasoningContent")

                            if reasoning_content:
                                yield Reasoning(reasoning_content)
                            if content:
                                yield content

                        # Skip clusterInfo and blockUpdate GPU visualization messages

                    except json.JSONDecodeError:
                        # Skip non-JSON lines (may be partial data or empty)
                        continue
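For orientation, a minimal driver sketch for the provider above, assuming g4f's usual provider interface (a direct call to the `create_async_generator` classmethod, with alias resolution via `model_aliases`); the prompt is illustrative:

import asyncio
from g4f.Provider.GradientNetwork import GradientNetwork

async def main():
    messages = [{"role": "user", "content": "Explain NDJSON in one sentence."}]
    async for chunk in GradientNetwork.create_async_generator(
        "gpt-oss-120b",        # resolved to "GPT OSS 120B" via model_aliases
        messages,
        enable_thinking=True,  # reasoning arrives as Reasoning objects, text as str
    ):
        print(chunk, end="", flush=True)

asyncio.run(main())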
46 g4f/Provider/ItalyGPT.py (new file)

@@ -0,0 +1,46 @@
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..typing import AsyncResult, Messages
from ..requests import DEFAULT_HEADERS
from aiohttp import ClientSession


class ItalyGPT(AsyncGeneratorProvider, ProviderModelMixin):
    label = "ItalyGPT"
    url = "https://italygpt.it"
    working = True
    supports_system_message = True
    supports_message_history = True

    default_model = "gpt-4o"
    models = [default_model]

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        stream: bool = True,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)
        headers = {
            **DEFAULT_HEADERS,
            "content-type": "application/json",
            "origin": "https://italygpt.it",
            "referer": "https://italygpt.it/",
        }
        payload = {
            "messages": messages,
            "stream": stream,
        }
        async with ClientSession() as session:
            async with session.post(
                f"{cls.url}/api/chat",
                json=payload,
                headers=headers,
                proxy=proxy,
            ) as resp:
                resp.raise_for_status()
                async for chunk in resp.content.iter_any():
                    if chunk:
                        yield chunk.decode()

@@ -1,22 +1,29 @@
 from __future__ import annotations

 import asyncio
+import datetime
+import hashlib
+import hmac
 import json
-import mimetypes
 import re
 import uuid
 from time import time
-from typing import Literal, Optional
+from typing import Literal, Optional, Dict
+from urllib.parse import quote

 import aiohttp
-from ..errors import RateLimitError, ResponseError
-from ..typing import AsyncResult, Messages, MediaListType
-from ..providers.response import JsonConversation, Reasoning, Usage, ImageResponse, FinishReason
-from ..requests import sse_stream
-from ..tools.media import merge_media
+from g4f.image import to_bytes, detect_file_type
+from g4f.requests import raise_for_status
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import get_last_user_message
 from .. import debug
+from ..errors import RateLimitError, ResponseError
+from ..providers.response import JsonConversation, Reasoning, Usage, ImageResponse, FinishReason
+from ..requests import sse_stream
+from ..requests.aiohttp import StreamSession
+from ..tools.media import merge_media
+from ..typing import AsyncResult, Messages, MediaListType

 try:
     import curl_cffi
@@ -25,6 +32,56 @@ try:
 except ImportError:
     has_curl_cffi = False

+# Global variables to manage Qwen Image Cache
+ImagesCache: Dict[str, dict] = {}
+
+
+def get_oss_headers(method: str, date_str: str, sts_data: dict, content_type: str) -> dict[str, str]:
+    bucket_name = sts_data.get('bucketname', 'qwen-webui-prod')
+    file_path = sts_data.get('file_path', '')
+    access_key_id = sts_data.get('access_key_id')
+    access_key_secret = sts_data.get('access_key_secret')
+    security_token = sts_data.get('security_token')
+    headers = {
+        'Content-Type': content_type,
+        'x-oss-content-sha256': 'UNSIGNED-PAYLOAD',
+        'x-oss-date': date_str,
+        'x-oss-security-token': security_token,
+        'x-oss-user-agent': 'aliyun-sdk-js/6.23.0 Chrome 132.0.0.0 on Windows 10 64-bit'
+    }
+    headers_lower = {k.lower(): v for k, v in headers.items()}
+
+    canonical_headers_list = []
+    signed_headers_list = []
+    required_headers = ['content-md5', 'content-type', 'x-oss-content-sha256', 'x-oss-date', 'x-oss-security-token',
+                        'x-oss-user-agent']
+    for header_name in sorted(required_headers):
+        if header_name in headers_lower:
+            canonical_headers_list.append(f"{header_name}:{headers_lower[header_name]}")
+            signed_headers_list.append(header_name)
+
+    canonical_headers = '\n'.join(canonical_headers_list) + '\n'
+    canonical_uri = f"/{bucket_name}/{quote(file_path, safe='/')}"
+
+    canonical_request = f"{method}\n{canonical_uri}\n\n{canonical_headers}\n\nUNSIGNED-PAYLOAD"
+
+    date_parts = date_str.split('T')
+    date_scope = f"{date_parts[0]}/ap-southeast-1/oss/aliyun_v4_request"
+    string_to_sign = f"OSS4-HMAC-SHA256\n{date_str}\n{date_scope}\n{hashlib.sha256(canonical_request.encode()).hexdigest()}"
+
+    def sign(key, msg):
+        return hmac.new(key, msg.encode() if isinstance(msg, str) else msg, hashlib.sha256).digest()
+
+    date_key = sign(f"aliyun_v4{access_key_secret}".encode(), date_parts[0])
+    region_key = sign(date_key, "ap-southeast-1")
+    service_key = sign(region_key, "oss")
+    signing_key = sign(service_key, "aliyun_v4_request")
+    signature = hmac.new(signing_key, string_to_sign.encode(), hashlib.sha256).hexdigest()
+
+    headers['authorization'] = f"OSS4-HMAC-SHA256 Credential={access_key_id}/{date_scope},Signature={signature}"
+    return headers
+
+
 text_models = [
     'qwen3-max-preview', 'qwen-plus-2025-09-11', 'qwen3-235b-a22b', 'qwen3-coder-plus', 'qwen3-30b-a3b',
     'qwen3-coder-30b-a3b-instruct', 'qwen-max-latest', 'qwen-plus-2025-01-25', 'qwq-32b', 'qwen-turbo-2025-02-11',
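The `get_oss_headers` helper added above implements Alibaba Cloud's OSS V4 request signing: a canonical request is hashed, then the signing key is derived by chaining HMAC-SHA256 over the date, region, service, and a fixed terminator. A standalone sketch of that derivation (values are illustrative; the real secret and token come from the `getstsToken` response):

import hashlib
import hmac

def sign(key: bytes, msg: str) -> bytes:
    return hmac.new(key, msg.encode(), hashlib.sha256).digest()

access_key_secret = "example-secret"   # hypothetical; supplied by the STS token endpoint
date, region, service = "20250101", "ap-southeast-1", "oss"

# Key derivation chain: secret -> date -> region -> service -> terminator
date_key = sign(f"aliyun_v4{access_key_secret}".encode(), date)
region_key = sign(date_key, region)
service_key = sign(region_key, service)
signing_key = sign(service_key, "aliyun_v4_request")

string_to_sign = "OSS4-HMAC-SHA256\n..."   # built from the hashed canonical request
signature = hmac.new(signing_key, string_to_sign.encode(), hashlib.sha256).hexdigest()
print(signature)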
@@ -60,19 +117,19 @@ class Qwen(AsyncGeneratorProvider, ProviderModelMixin):
     active_by_default = True
     supports_stream = True
     supports_message_history = False
+    image_cache = True
     _models_loaded = True
     image_models = image_models
     text_models = text_models
     vision_models = vision_models
-    models = models
+    models: list[str] = models
     default_model = "qwen3-235b-a22b"

     _midtoken: str = None
     _midtoken_uses: int = 0

     @classmethod
-    def get_models(cls) -> list[str]:
+    def get_models(cls, **kwargs) -> list[str]:
         if not cls._models_loaded and has_curl_cffi:
             response = curl_cffi.get(f"{cls.url}/api/models")
             if response.ok:
@@ -97,34 +154,106 @@ class Qwen(AsyncGeneratorProvider, ProviderModelMixin):
         return cls.models

     @classmethod
-    async def prepare_files(cls, media, chat_type="")->list:
+    async def prepare_files(cls, media, session: aiohttp.ClientSession, headers=None) -> list:
+        if headers is None:
+            headers = {}
         files = []
-        for _file, file_name in media:
-            file_type, _ = mimetypes.guess_type(file_name)
-            file_class: Literal["default", "vision", "video", "audio", "document"] = "default"
-            _type: Literal["file", "image", "video", "audio"] = "file"
-            showType: Literal["file", "image", "video", "audio"] = "file"
-
-            if isinstance(_file, str) and _file.startswith('http'):
-                if chat_type == "image_edit" or (file_type and file_type.startswith("image")):
-                    file_class = "vision"
-                    _type = "image"
-                    if not file_type:
-                        # Try to infer from file extension, fallback to generic
-                        ext = file_name.split('.')[-1].lower() if '.' in file_name else ''
-                        file_type = mimetypes.types_map.get(f'.{ext}', 'application/octet-stream')
-                    showType = "image"
-
-                files.append(
-                    {
-                        "type": _type,
-                        "name": file_name,
-                        "file_type": file_type,
-                        "showType": showType,
-                        "file_class": file_class,
-                        "url": _file
-                    }
-                )
+        for index, (_file, file_name) in enumerate(media):
+            data_bytes = to_bytes(_file)
+            # Check Cache
+            hasher = hashlib.md5()
+            hasher.update(data_bytes)
+            image_hash = hasher.hexdigest()
+            file = ImagesCache.get(image_hash)
+            if cls.image_cache and file:
+                debug.log("Using cached image")
+                files.append(file)
+                continue
+
+            extension, file_type = detect_file_type(data_bytes)
+            file_name = file_name or f"file-{len(data_bytes)}{extension}"
+            file_size = len(data_bytes)
+
+            # Get File Url
+            async with session.post(
+                f'{cls.url}/api/v2/files/getstsToken',
+                json={"filename": file_name,
+                      "filesize": file_size, "filetype": file_type},
+                headers=headers
+            ) as r:
+                await raise_for_status(r, "Create file failed")
+                res_data = await r.json()
+                data = res_data.get("data")
+
+            if res_data["success"] is False:
+                raise RateLimitError(f"{data['code']}:{data['details']}")
+            file_url = data.get("file_url")
+            file_id = data.get("file_id")
+
+            # Put File into Url
+            str_date = datetime.datetime.now(datetime.UTC).strftime('%Y%m%dT%H%M%SZ')
+            headers = get_oss_headers('PUT', str_date, data, file_type)
+            async with session.put(
+                file_url.split("?")[0],
+                data=data_bytes,
+                headers=headers
+            ) as response:
+                await raise_for_status(response)
+
+            file_class: Literal["default", "vision", "video", "audio", "document"]
+            _type: Literal["file", "image", "video", "audio"]
+            show_type: Literal["file", "image", "video", "audio"]
+            if "image" in file_type:
+                _type = "image"
+                show_type = "image"
+                file_class = "vision"
+            elif "video" in file_type:
+                _type = "video"
+                show_type = "video"
+                file_class = "video"
+            elif "audio" in file_type:
+                _type = "audio"
+                show_type = "audio"
+                file_class = "audio"
+            else:
+                _type = "file"
+                show_type = "file"
+                file_class = "document"
+
+            file = {
+                "type": _type,
+                "file": {
+                    "created_at": int(time() * 1000),
+                    "data": {},
+                    "filename": file_name,
+                    "hash": None,
+                    "id": file_id,
+                    "meta": {
+                        "name": file_name,
+                        "size": file_size,
+                        "content_type": file_type
+                    },
+                    "update_at": int(time() * 1000),
+                },
+                "id": file_id,
+                "url": file_url,
+                "name": file_name,
+                "collection_name": "",
+                "progress": 0,
+                "status": "uploaded",
+                "greenNet": "success",
+                "size": file_size,
+                "error": "",
+                "itemId": str(uuid.uuid4()),
+                "file_type": file_type,
+                "showType": show_type,
+                "file_class": file_class,
+                "uploadTaskId": str(uuid.uuid4())
+            }
+            ImagesCache[image_hash] = file
+            files.append(file)
         return files

     @classmethod
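The rewritten `prepare_files` keys its `ImagesCache` on an MD5 digest of the raw bytes, so an identical attachment skips the STS and upload round-trips on later requests. The pattern in isolation (a sketch; `upload` stands in for the provider-specific flow and is hypothetical):

import hashlib
from typing import Callable, Dict

cache: Dict[str, dict] = {}

def get_or_upload(data: bytes, upload: Callable[[bytes], dict]) -> dict:
    # md5 matches the diff; it serves as a cache key, not a security measure
    key = hashlib.md5(data).hexdigest()
    if key in cache:
        return cache[key]          # identical bytes: reuse the stored file record
    entry = upload(data)           # caller-supplied upload routine (hypothetical)
    cache[key] = entry
    return entry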
@@ -135,7 +264,6 @@ class Qwen(AsyncGeneratorProvider, ProviderModelMixin):
         media: MediaListType = None,
         conversation: JsonConversation = None,
         proxy: str = None,
-        timeout: int = 120,
         stream: bool = True,
         enable_thinking: bool = True,
         chat_type: Literal[
@@ -157,7 +285,7 @@ class Qwen(AsyncGeneratorProvider, ProviderModelMixin):
         """

         model_name = cls.get_model(model)
+        token = kwargs.get("token")
         headers = {
             'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36',
             'Accept': '*/*',
@@ -169,13 +297,24 @@ class Qwen(AsyncGeneratorProvider, ProviderModelMixin):
             'Sec-Fetch-Mode': 'cors',
             'Sec-Fetch-Site': 'same-origin',
             'Connection': 'keep-alive',
-            'Authorization': 'Bearer',
+            'Authorization': f'Bearer {token}' if token else "Bearer",
             'Source': 'web'
         }

         prompt = get_last_user_message(messages)
-        async with aiohttp.ClientSession(headers=headers) as session:
+        _timeout = kwargs.get("timeout")
+        if isinstance(_timeout, aiohttp.ClientTimeout):
+            timeout = _timeout
+        else:
+            total = float(_timeout) if isinstance(_timeout, (int, float)) else 5 * 60
+            timeout = aiohttp.ClientTimeout(total=total)
+        async with StreamSession(headers=headers) as session:
+            try:
+                async with session.get('https://chat.qwen.ai/api/v1/auths/', proxy=proxy) as user_info_res:
+                    user_info_res.raise_for_status()
+                    debug.log(await user_info_res.json())
+            except:
+                ...
             for attempt in range(5):
                 try:
                     if not cls._midtoken:
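The new timeout handling accepts either a ready-made `aiohttp.ClientTimeout` or a bare number via `kwargs`, falling back to five minutes. Extracted as a helper it would read like this (a sketch, not part of the diff):

import aiohttp

def coerce_timeout(value, default_total: float = 5 * 60) -> aiohttp.ClientTimeout:
    if isinstance(value, aiohttp.ClientTimeout):
        return value               # already a full timeout spec
    total = float(value) if isinstance(value, (int, float)) else default_total
    return aiohttp.ClientTimeout(total=total)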
@@ -221,7 +360,8 @@ class Qwen(AsyncGeneratorProvider, ProviderModelMixin):
                     files = []
                     media = list(merge_media(media, messages))
                     if media:
-                        files = await cls.prepare_files(media, chat_type=chat_type)
+                        files = await cls.prepare_files(media, session=session,
+                                                        headers=req_headers)

                     msg_payload = {
                         "stream": stream,

@@ -1,41 +1,51 @@
+import asyncio
+import hashlib
 import json
+import os
+import re
 import time
 import uuid
-import re
-import os
-import asyncio
 import aiohttp

-from ..typing import AsyncResult, Messages, Optional, Dict, Any, List
-from ..providers.base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..providers.response import Reasoning, PlainTextResponse, PreviewResponse, JsonConversation, ImageResponse, ProviderInfo
-from ..errors import RateLimitError, ProviderException, MissingAuthError
+from .helper import get_last_user_message
+from .yupp.models import YuppModelManager
+
 from ..cookies import get_cookies
+from ..debug import log
+from ..errors import RateLimitError, ProviderException, MissingAuthError
+from ..image import is_accepted_format, to_bytes
+from ..providers.base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..providers.response import Reasoning, PlainTextResponse, PreviewResponse, JsonConversation, ImageResponse, \
+    ProviderInfo, FinishReason, JsonResponse
+from ..requests.aiohttp import StreamSession
 from ..tools.auth import AuthManager
 from ..tools.media import merge_media
-from ..image import is_accepted_format, to_bytes
-from .yupp.models import YuppModelManager
-from .helper import get_last_user_message
-from ..debug import log
+from ..typing import AsyncResult, Messages, Optional, Dict, Any, List

 # Global variables to manage Yupp accounts
-YUPP_ACCOUNTS: List[Dict[str, Any]] = []
+YUPP_ACCOUNT = Dict[str, Any]
+YUPP_ACCOUNTS: List[YUPP_ACCOUNT] = []
 account_rotation_lock = asyncio.Lock()

+# Global variables to manage Yupp Image Cache
+ImagesCache: Dict[str, dict] = {}
+

 class YuppAccount:
     """Yupp account representation"""

     def __init__(self, token: str, is_valid: bool = True, error_count: int = 0, last_used: float = 0):
         self.token = token
         self.is_valid = is_valid
         self.error_count = error_count
         self.last_used = last_used


 def load_yupp_accounts(tokens_str: str):
     """Load Yupp accounts from token string"""
     global YUPP_ACCOUNTS
     if not tokens_str:
         return

     tokens = [token.strip() for token in tokens_str.split(',') if token.strip()]
     YUPP_ACCOUNTS = [
         {
@@ -47,6 +57,7 @@ def load_yupp_accounts(tokens_str: str):
         for token in tokens
     ]

+
 def create_headers() -> Dict[str, str]:
     """Create headers for requests"""
     return {
@@ -59,7 +70,8 @@ def create_headers() -> Dict[str, str]:
         "Sec-Fetch-Site": "same-origin",
     }

-async def get_best_yupp_account() -> Optional[Dict[str, Any]]:
+
+async def get_best_yupp_account() -> Optional[YUPP_ACCOUNT]:
     """Get the best available Yupp account using smart selection algorithm"""
     max_error_count = int(os.getenv("MAX_ERROR_COUNT", "3"))
     error_cooldown = int(os.getenv("ERROR_COOLDOWN", "300"))
@@ -70,10 +82,10 @@ async def get_best_yupp_account() -> Optional[Dict[str, Any]]:
         acc
         for acc in YUPP_ACCOUNTS
         if acc["is_valid"]
         and (
             acc["error_count"] < max_error_count
             or now - acc["last_used"] > error_cooldown
         )
     ]

     if not valid_accounts:
@@ -82,8 +94,8 @@ async def get_best_yupp_account() -> Optional[Dict[str, Any]]:
     # Reset error count for accounts in cooldown
     for acc in valid_accounts:
         if (
             acc["error_count"] >= max_error_count
             and now - acc["last_used"] > error_cooldown
         ):
             acc["error_count"] = 0
@@ -93,7 +105,8 @@ async def get_best_yupp_account() -> Optional[Dict[str, Any]]:
     account["last_used"] = now
     return account

-async def claim_yupp_reward(session: aiohttp.ClientSession, account: Dict[str, Any], reward_id: str):
+
+async def claim_yupp_reward(session: aiohttp.ClientSession, account: YUPP_ACCOUNT, reward_id: str):
     """Claim Yupp reward asynchronously"""
     try:
         log_debug(f"Claiming reward {reward_id}...")
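The selection logic across the hunks above filters out invalid accounts, lets an account back in once its cooldown has elapsed, and then resets its error count. The final ranking step is not visible in the shown hunks, so the tie-break below (least recently used) is an assumption; field names follow the YUPP_ACCOUNT dicts:

import time

def pick_account(accounts: list, max_errors: int = 3, cooldown: float = 300):
    now = time.time()
    usable = [
        a for a in accounts
        if a["is_valid"] and (a["error_count"] < max_errors or now - a["last_used"] > cooldown)
    ]
    if not usable:
        return None
    for a in usable:
        # An account that sat out its cooldown gets a fresh start
        if a["error_count"] >= max_errors and now - a["last_used"] > cooldown:
            a["error_count"] = 0
    best = min(usable, key=lambda a: a["last_used"])  # assumed tie-break: least recently used
    best["last_used"] = now
    return best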
@@ -102,6 +115,8 @@ async def claim_yupp_reward(session: aiohttp.ClientSession, account: Dict[str, Any], reward_id: str):
         headers = {
             "Content-Type": "application/json",
             "Cookie": f"__Secure-yupp.session-token={account['token']}",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36 Edg/137.0.0.0",
+
         }
         async with session.post(url, json=payload, headers=headers) as response:
             response.raise_for_status()
@@ -113,7 +128,8 @@ async def claim_yupp_reward(session: aiohttp.ClientSession, account: Dict[str, Any], reward_id: str):
         log_debug(f"Failed to claim reward {reward_id}. Error: {e}")
         return None

-async def make_chat_private(session: aiohttp.ClientSession, account: Dict[str, Any], chat_id: str) -> bool:
+
+async def make_chat_private(session: aiohttp.ClientSession, account: YUPP_ACCOUNT, chat_id: str) -> bool:
     """Set a Yupp chat's sharing status to PRIVATE"""
     try:
         log_debug(f"Setting chat {chat_id} to PRIVATE...")
@@ -129,14 +145,16 @@ async def make_chat_private(session: aiohttp.ClientSession, account: Dict[str, Any], chat_id: str) -> bool:
         headers = {
             "Content-Type": "application/json",
             "Cookie": f"__Secure-yupp.session-token={account['token']}",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36 Edg/137.0.0.0",
+
         }

         async with session.post(url, json=payload, headers=headers) as response:
             response.raise_for_status()
             data = await response.json()
             if (
                 isinstance(data, list) and len(data) > 0
                 and "json" in data[0].get("result", {}).get("data", {})
             ):
                 log_debug(f"Chat {chat_id} is now PRIVATE ✅")
                 return True
@@ -148,6 +166,7 @@ async def make_chat_private(session: aiohttp.ClientSession, account: Dict[str, Any], chat_id: str) -> bool:
         log_debug(f"Failed to make chat {chat_id} private: {e}")
         return False

+
 def log_debug(message: str):
     """Debug logging"""
     if os.getenv("DEBUG_MODE", "false").lower() == "true":
@@ -155,11 +174,12 @@ def log_debug(message: str):
     else:
         log(f"[Yupp] {message}")

+
 def format_messages_for_yupp(messages: Messages) -> str:
     """Format multi-turn conversation for Yupp single-turn format"""
     if not messages:
         return ""

     if len(messages) == 1 and isinstance(messages[0].get("content"), str):
         return messages[0].get("content", "").strip()
@@ -191,6 +211,7 @@ def format_messages_for_yupp(messages: Messages) -> str:

     return result

+
 class Yupp(AsyncGeneratorProvider, ProviderModelMixin):
     """
     Yupp.ai Provider for g4f
@@ -202,7 +223,8 @@ class Yupp(AsyncGeneratorProvider, ProviderModelMixin):
     working = True
     active_by_default = True
     supports_stream = True
+    image_cache = True

     @classmethod
     def get_models(cls, api_key: str = None, **kwargs) -> List[str]:
         if not cls.models:
@@ -218,16 +240,73 @@ class Yupp(AsyncGeneratorProvider, ProviderModelMixin):
             cls.models_tags = {model.get("name"): manager.processor.generate_tags(model) for model in models}
             cls.models = [model.get("name") for model in models]
             cls.image_models = [model.get("name") for model in models if model.get("isImageGeneration")]
-            cls.vision_models = [model.get("name") for model in models if "image/*" in model.get("supportedAttachmentMimeTypes", [])]
+            cls.vision_models = [model.get("name") for model in models if
+                                 "image/*" in model.get("supportedAttachmentMimeTypes", [])]
         return cls.models

+    @classmethod
+    async def prepare_files(cls, media, session: aiohttp.ClientSession, account: YUPP_ACCOUNT) -> list:
+        files = []
+        if not media:
+            return files
+        for file, name in media:
+            data = to_bytes(file)
+            hasher = hashlib.md5()
+            hasher.update(data)
+            image_hash = hasher.hexdigest()
+            file = ImagesCache.get(image_hash)
+            if cls.image_cache and file:
+                log_debug("Using cached image")
+                files.append(file)
+                continue
+            presigned_resp = await session.post(
+                "https://yupp.ai/api/trpc/chat.createPresignedURLForUpload?batch=1",
+                json={
+                    "0": {"json": {"fileName": name, "fileSize": len(data), "contentType": is_accepted_format(data)}}},
+                headers={"Content-Type": "application/json",
+                         "Cookie": f"__Secure-yupp.session-token={account['token']}",
+                         "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36 Edg/137.0.0.0",
+                         }
+            )
+            presigned_resp.raise_for_status()
+            upload_info = (await presigned_resp.json())[0]["result"]["data"]["json"]
+            upload_url = upload_info["signedUrl"]
+
+            await session.put(
+                upload_url,
+                data=data,
+                headers={
+                    "Content-Type": is_accepted_format(data),
+                    "Content-Length": str(len(data))
+                }
+            )
+
+            attachment_resp = await session.post(
+                "https://yupp.ai/api/trpc/chat.createAttachmentForUploadedFile?batch=1",
+                json={"0": {"json": {"fileName": name, "contentType": is_accepted_format(data),
+                                     "fileId": upload_info["fileId"]}}},
+                cookies={"__Secure-yupp.session-token": account["token"]}
+            )
+            attachment_resp.raise_for_status()
+            attachment = (await attachment_resp.json())[0]["result"]["data"]["json"]
+            file = {
+                "fileName": attachment["file_name"],
+                "contentType": attachment["content_type"],
+                "attachmentId": attachment["attachment_id"],
+                "chatMessageId": ""
+            }
+            ImagesCache[image_hash] = file
+            files.append(file)
+        return files
+
     @classmethod
     async def create_async_generator(
         cls,
         model: str,
         messages: Messages,
         proxy: str = None,
         **kwargs,
     ) -> AsyncResult:
         """
         Create async completion using Yupp.ai API with account rotation
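Stripped of caching and headers, the new `prepare_files` is a three-step protocol: request a presigned URL, PUT the raw bytes to it, then register the uploaded file as a chat attachment. A reduced sketch (payload shapes follow the diff; `session` is assumed to behave like the `StreamSession` used here, whose `post`/`put` are awaitable; error handling omitted):

async def upload_attachment(session, token: str, name: str, data: bytes, content_type: str) -> dict:
    cookies = {"__Secure-yupp.session-token": token}
    # 1) ask for a presigned upload URL
    r = await session.post(
        "https://yupp.ai/api/trpc/chat.createPresignedURLForUpload?batch=1",
        json={"0": {"json": {"fileName": name, "fileSize": len(data), "contentType": content_type}}},
        cookies=cookies,
    )
    info = (await r.json())[0]["result"]["data"]["json"]
    # 2) PUT the raw bytes to the signed URL
    await session.put(info["signedUrl"], data=data, headers={"Content-Type": content_type})
    # 3) register the upload as a chat attachment
    r = await session.post(
        "https://yupp.ai/api/trpc/chat.createAttachmentForUploadedFile?batch=1",
        json={"0": {"json": {"fileName": name, "contentType": content_type, "fileId": info["fileId"]}}},
        cookies=cookies,
    )
    return (await r.json())[0]["result"]["data"]["json"]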
@@ -246,15 +325,16 @@ class Yupp(AsyncGeneratorProvider, ProviderModelMixin):
         conversation = kwargs.get("conversation")
         url_uuid = conversation.url_uuid if conversation else None
         is_new_conversation = url_uuid is None

         prompt = kwargs.get("prompt")
         if prompt is None:
             if is_new_conversation:
                 prompt = format_messages_for_yupp(messages)
             else:
                 prompt = get_last_user_message(messages, prompt)

-        log_debug(f"Use url_uuid: {url_uuid}, Formatted prompt length: {len(prompt)}, Is new conversation: {is_new_conversation}")
+        log_debug(
+            f"Use url_uuid: {url_uuid}, Formatted prompt length: {len(prompt)}, Is new conversation: {is_new_conversation}")

         # Try all accounts with rotation
         max_attempts = len(YUPP_ACCOUNTS)
@@ -264,46 +344,16 @@ class Yupp(AsyncGeneratorProvider, ProviderModelMixin):
                 raise ProviderException("No valid Yupp accounts available")

             try:
-                async with aiohttp.ClientSession() as session:
+                async with StreamSession() as session:
                     turn_id = str(uuid.uuid4())
-                    files = []

                     # Handle media attachments
                     media = kwargs.get("media")
                     if media:
-                        for file, name in list(merge_media(media, messages)):
-                            data = to_bytes(file)
-                            presigned_resp = await session.post(
-                                "https://yupp.ai/api/trpc/chat.createPresignedURLForUpload?batch=1",
-                                json={"0": {"json": {"fileName": name, "fileSize": len(data), "contentType": is_accepted_format(data)}}},
-                                headers={"Content-Type": "application/json", "Cookie": f"__Secure-yupp.session-token={account['token']}"}
-                            )
-                            presigned_resp.raise_for_status()
-                            upload_info = (await presigned_resp.json())[0]["result"]["data"]["json"]
-                            upload_url = upload_info["signedUrl"]
-
-                            await session.put(
-                                upload_url,
-                                data=data,
-                                headers={
-                                    "Content-Type": is_accepted_format(data),
-                                    "Content-Length": str(len(data))
-                                }
-                            )
-
-                            attachment_resp = await session.post(
-                                "https://yupp.ai/api/trpc/chat.createAttachmentForUploadedFile?batch=1",
-                                json={"0": {"json": {"fileName": name, "contentType": is_accepted_format(data), "fileId": upload_info["fileId"]}}},
-                                cookies={"__Secure-yupp.session-token": account["token"]}
-                            )
-                            attachment_resp.raise_for_status()
-                            attachment = (await attachment_resp.json())[0]["result"]["data"]["json"]
-                            files.append({
-                                "fileName": attachment["file_name"],
-                                "contentType": attachment["content_type"],
-                                "attachmentId": attachment["attachment_id"],
-                                "chatMessageId": ""
-                            })
+                        media_ = list(merge_media(media, messages))
+                        files = await cls.prepare_files(media_, session=session, account=account)
+                    else:
+                        files = []

                     mode = "image" if model in cls.image_models else "text"

                     # Build payload and URL - FIXED: Use consistent url_uuid handling
@@ -346,20 +396,28 @@ class Yupp(AsyncGeneratorProvider, ProviderModelMixin):
                         "content-type": "text/plain;charset=UTF-8",
                         "next-action": next_action,
                         "cookie": f"__Secure-yupp.session-token={account['token']}",
+                        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36 Edg/137.0.0.0",
                     }

                     log_debug(f"Sending request to: {url}")
                     log_debug(f"Payload structure: {type(payload)}, length: {len(str(payload))}")
+                    _timeout = kwargs.get("timeout")
+                    if isinstance(_timeout, aiohttp.ClientTimeout):
+                        timeout = _timeout
+                    else:
+                        total = float(_timeout) if isinstance(_timeout, (int, float)) else 5 * 60
+                        timeout = aiohttp.ClientTimeout(total=total)
                     # Send request
-                    async with session.post(url, json=payload, headers=headers, proxy=proxy) as response:
+                    async with session.post(url, json=payload, headers=headers, proxy=proxy,
+                                            timeout=timeout) as response:
                         response.raise_for_status()

                         # Make chat private in background
                         asyncio.create_task(make_chat_private(session, account, url_uuid))

                         # Process stream
-                        async for chunk in cls._process_stream_response(response.content, account, session, prompt, model):
+                        async for chunk in cls._process_stream_response(response.content, account, session, prompt,
+                                                                        model):
                             yield chunk

                         return
@@ -377,6 +435,18 @@ class Yupp(AsyncGeneratorProvider, ProviderModelMixin):
                     else:
                         account["error_count"] += 1
                     continue
+            except aiohttp.ClientResponseError as e:
+                log_debug(f"Account ...{account['token'][-4:]} failed: {str(e)}")
+                # No Available Yupp credits
+                if e.status == 500 and 'Internal Server Error' in e.message:
+                    account["is_valid"] = False
+                # Need User-Agent
+                # elif e.status == 429 and 'Too Many Requests' in e.message:
+                #     account["is_valid"] = False
+                else:
+                    async with account_rotation_lock:
+                        account["error_count"] += 1
+                raise ProviderException(f"Yupp request failed: {str(e)}") from e
             except Exception as e:
                 log_debug(f"Unexpected error with account ...{account['token'][-4:]}: {str(e)}")
                 async with account_rotation_lock:
@@ -387,98 +457,224 @@ class Yupp(AsyncGeneratorProvider, ProviderModelMixin):

     @classmethod
     async def _process_stream_response(
         cls,
         response_content,
-        account: Dict[str, Any],
+        account: YUPP_ACCOUNT,
         session: aiohttp.ClientSession,
         prompt: str,
         model_id: str
     ) -> AsyncResult:
         """Process Yupp stream response asynchronously"""

         line_pattern = re.compile(b"^([0-9a-fA-F]+):(.*)")
         target_stream_id = None
         reward_info = None
+        # Stream segmentation buffers
         is_thinking = False
-        thinking_content = ""
+        thinking_content = ""  # model's "thinking" channel (if activated later)
         normal_content = ""
+        quick_content = ""  # quick-response short message
+        variant_text = ""  # variant model output (comparison stream)
+        stream = {
+            "target": [],
+            "variant": [],
+            "quick": [],
+            "thinking": [],
+            "extra": []
+        }
+        # Holds leftStream / rightStream definitions to determine target/variant
         select_stream = [None, None]
+        # State for capturing a multi-line <think> + <yapp> block (fa-style)
+        capturing_ref_id: Optional[str] = None
+        capturing_lines: List[bytes] = []
+
+        # Storage for special referenced blocks like $fa
+        think_blocks: Dict[str, str] = {}
+        image_blocks: Dict[str, str] = {}

         def extract_ref_id(ref):
             """Extract ID from reference string, e.g., from '$@123' extract '123'"""
             return ref[2:] if ref and isinstance(ref, str) and ref.startswith("$@") else None

+        def extract_ref_name(ref: str) -> Optional[str]:
+            """Extract simple ref name from '$fa' → 'fa'"""
+            if not isinstance(ref, str):
+                return None
+            if ref.startswith("$@"):
+                return ref[2:]
+            if ref.startswith("$") and len(ref) > 1:
+                return ref[1:]
+            return None
+
         def is_valid_content(content: str) -> bool:
             """Check if content is valid"""
             if not content or content in [None, "", "$undefined"]:
                 return False
             return True

-        async def process_content_chunk(content: str, chunk_id: str, line_count: int):
-            """Process single content chunk"""
-            nonlocal is_thinking, thinking_content, normal_content, session
+        async def process_content_chunk(content: str, chunk_id: str, line_count: int, *, for_target: bool = False):
+            """
+            Process a single content chunk from a stream.
+
+            - If for_target=True → chunk belongs to the target model output.
+            """
+            nonlocal is_thinking, thinking_content, normal_content, variant_text, session

             if not is_valid_content(content):
                 return

+            # Handle image-gen chunks
             if '<yapp class="image-gen">' in content:
-                content = content.split('<yapp class="image-gen">').pop().split('</yapp>')[0]
+                img_block = content.split('<yapp class="image-gen">').pop().split('</yapp>')[0]
                 url = "https://yupp.ai/api/trpc/chat.getSignedImage"
-                async with session.get(url, params={"batch": "1", "input": json.dumps({"0": {"json": {"imageId": json.loads(content).get("image_id")}}})}) as resp:
+                async with session.get(
+                    url,
+                    params={
+                        "batch": "1",
+                        "input": json.dumps(
+                            {"0": {"json": {"imageId": json.loads(img_block).get("image_id")}}}
+                        )
+                    }
+                ) as resp:
                     resp.raise_for_status()
                     data = await resp.json()
-                    yield ImageResponse(data[0]["result"]["data"]["json"]["signed_url"], prompt)
+                    img = ImageResponse(
+                        data[0]["result"]["data"]["json"]["signed_url"],
+                        prompt
+                    )
+                    yield img
                 return
-            # log_debug(f"Processing chunk #{line_count} with content: '{content[:50]}...'")
+            # Optional: thinking-mode support (disabled by default)

             if is_thinking:
                 yield Reasoning(content)
             else:
-                normal_content += content
+                if for_target:
+                    normal_content += content
                 yield content

+        def finalize_capture_block(ref_id: str, lines: List[bytes]):
+            """Parse captured <think> + <yapp> block for a given ref ID."""
+            text = b"".join(lines).decode("utf-8", errors="ignore")
+
+            # Extract <think>...</think>
+            think_start = text.find("<think>")
+            think_end = text.find("</think>")
+            if think_start != -1 and think_end != -1 and think_end > think_start:
+                inner = text[think_start + len("<think>"):think_end].strip()
+                if inner:
+                    think_blocks[ref_id] = inner
+
+            # Extract <yapp class="image-gen">...</yapp>
+            yapp_start = text.find('<yapp class="image-gen">')
+            if yapp_start != -1:
+                yapp_end = text.find("</yapp>", yapp_start)
+                if yapp_end != -1:
+                    yapp_block = text[yapp_start:yapp_end + len("</yapp>")]
+                    image_blocks[ref_id] = yapp_block
+
         try:
             line_count = 0
             quick_response_id = None
             variant_stream_id = None
             is_started: bool = False
             variant_image: Optional[ImageResponse] = None
-            variant_text = ""
+            # "a" use as default then extract from "1"
+            reward_id = "a"
+            routing_id = "e"
+            turn_id = None
+            persisted_turn_id = None
+            left_message_id = None
+            right_message_id = None
+            nudge_new_chat_id = None
+            nudge_new_chat = False
             async for line in response_content:
                 line_count += 1
+                # If we are currently capturing a think/image block for some ref ID
+                if capturing_ref_id is not None:
+                    capturing_lines.append(line)
+
+                    # Check if this line closes the <yapp> block; after that, block is complete
+                    if b"</yapp>" in line:  # or b':{"curr"' in line:
+                        # We may have trailing "2:{...}" after </yapp> on the same line
+                        # Get id using re
+                        idx = line.find(b"</yapp>")
+                        suffix = line[idx + len(b"</yapp>"):]
+
+                        # Finalize captured block for this ref ID
+                        finalize_capture_block(capturing_ref_id, capturing_lines)
+                        capturing_ref_id = None
+                        capturing_lines = []
+
+                        # If there is trailing content (e.g. '2:{"curr":"$fa"...}')
+                        if suffix.strip():
+                            # Process suffix as a new "line" in the same iteration
+                            line = suffix
+                        else:
+                            # Nothing more on this line
+                            continue
+                    else:
+                        # Still inside captured block; skip normal processing
+                        continue
+
+                # Detect start of a <think> block assigned to a ref like 'fa:...<think>'
+                if b"<think>" in line:
+                    m = line_pattern.match(line)
+                    if m:
+                        capturing_ref_id = m.group(1).decode()
+                        capturing_lines = [line]
+                        # Skip normal parsing; the rest of the block will be captured until </yapp>
+                        continue
+
                 match = line_pattern.match(line)
                 if not match:
                     continue

                 chunk_id, chunk_data = match.groups()
                 chunk_id = chunk_id.decode()

+                if nudge_new_chat_id and chunk_id == nudge_new_chat_id:
+                    nudge_new_chat = chunk_data.decode()
+                    continue
+
                 try:
                     data = json.loads(chunk_data) if chunk_data != b"{}" else {}
                 except json.JSONDecodeError:
                     continue

                 # Process reward info
-                if chunk_id == "a":
+                if chunk_id == reward_id and isinstance(data, dict) and "unclaimedRewardInfo" in data:
                     reward_info = data
                     log_debug(f"Found reward info")

                 # Process initial setup
                 elif chunk_id == "1":
                     yield PlainTextResponse(line.decode(errors="ignore"))
                     if isinstance(data, dict):
                         left_stream = data.get("leftStream", {})
                         right_stream = data.get("rightStream", {})
-                        quick_response_id = extract_ref_id(data.get("quickResponse", {}).get("stream", {}).get("next"))
+                        if data.get("quickResponse", {}) != "$undefined":
+                            quick_response_id = extract_ref_id(
+                                data.get("quickResponse", {}).get("stream", {}).get("next"))
+
+                        if data.get("turnId", {}) != "$undefined":
+                            turn_id = extract_ref_id(data.get("turnId", {}).get("next"))
+                        if data.get("persistedTurn", {}) != "$undefined":
+                            persisted_turn_id = extract_ref_id(data.get("persistedTurn", {}).get("next"))
+                        if data.get("leftMessageId", {}) != "$undefined":
+                            left_message_id = extract_ref_id(data.get("leftMessageId", {}).get("next"))
+                        if data.get("rightMessageId", {}) != "$undefined":
+                            right_message_id = extract_ref_id(data.get("rightMessageId", {}).get("next"))
+
+                        reward_id = extract_ref_id(data.get("pendingRewardActionResult", "")) or reward_id
+                        routing_id = extract_ref_id(data.get("routingResultPromise", "")) or routing_id
+                        nudge_new_chat_id = extract_ref_id(data.get("nudgeNewChatPromise", "")) or nudge_new_chat_id
                         select_stream = [left_stream, right_stream]

-                elif chunk_id == "e":
+                # Routing / model selection block
+                elif chunk_id == routing_id:
                     yield PlainTextResponse(line.decode(errors="ignore"))
                     if isinstance(data, dict):
                         provider_info = cls.get_dict()
                         provider_info['model'] = model_id
+                        # Determine target & variant stream IDs
                         for i, selection in enumerate(data.get("modelSelections", [])):
                             if selection.get("selectionSource") == "USER_SELECTED":
                                 target_stream_id = extract_ref_id(select_stream[i].get("next"))
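Every body line this parser consumes is framed as `<hex id>:<json>`, with `next` fields chaining chunks through `$@<id>` references (and bare `$<name>` pointing at captured think/image blocks). A self-contained sketch of that framing, using the same regex as the diff:

import json
import re

line_pattern = re.compile(rb"^([0-9a-fA-F]+):(.*)")

def parse_line(line: bytes):
    """Split one stream line into (chunk_id, decoded JSON payload or None)."""
    m = line_pattern.match(line)
    if not m:
        return None, None
    chunk_id, payload = m.groups()
    try:
        return chunk_id.decode(), json.loads(payload)
    except json.JSONDecodeError:
        return chunk_id.decode(), None

cid, data = parse_line(b'2:{"curr":"Hello","next":"$@3"}')
assert cid == "2" and data["next"] == "$@3"   # "$@3" chains to the chunk with id 3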
@@ -491,7 +687,7 @@ class Yupp(AsyncGeneratorProvider, ProviderModelMixin):
                                 provider_info["variantUrl"] = selection.get("externalUrl")
                                 log_debug(f"Found variant stream ID: {variant_stream_id}")
                         yield ProviderInfo.from_dict(provider_info)

                 # Process target stream content
                 elif target_stream_id and chunk_id == target_stream_id:
                     yield PlainTextResponse(line.decode(errors="ignore"))
@@ -499,49 +695,111 @@ class Yupp(AsyncGeneratorProvider, ProviderModelMixin):
                         target_stream_id = extract_ref_id(data.get("next"))
                         content = data.get("curr", "")
                         if content:
-                            async for chunk in process_content_chunk(content, chunk_id, line_count):
-                                is_started = True
-                                yield chunk
+                            # Handle special "$fa" / "$<id>" reference
+                            ref_name = extract_ref_name(content)
+                            if ref_name and (ref_name in think_blocks or ref_name in image_blocks):
+                                # Thinking block
+                                if ref_name in think_blocks:
+                                    t_text = think_blocks[ref_name]
+                                    if t_text:
+                                        reasoning = Reasoning(t_text)
+                                        # thinking_content += t_text
+                                        stream["thinking"].append(reasoning)
+                                        # yield reasoning
+
+                                # Image-gen block
+                                if ref_name in image_blocks:
+                                    img_block_text = image_blocks[ref_name]
+                                    async for chunk in process_content_chunk(
+                                        img_block_text,
+                                        ref_name,
+                                        line_count,
+                                        for_target=True
+                                    ):
+                                        stream["target"].append(chunk)
+                                        is_started = True
+                                        yield chunk
+                            else:
+                                # Normal textual chunk
+                                async for chunk in process_content_chunk(
+                                    content,
+                                    chunk_id,
+                                    line_count,
+                                    for_target=True
+                                ):
+                                    stream["target"].append(chunk)
+                                    is_started = True
+                                    yield chunk

+                # Variant stream (comparison)
                 elif variant_stream_id and chunk_id == variant_stream_id:
                     yield PlainTextResponse("[Variant] " + line.decode(errors="ignore"))
                     if isinstance(data, dict):
                         variant_stream_id = extract_ref_id(data.get("next"))
                         content = data.get("curr", "")
                         if content:
-                            async for chunk in process_content_chunk(content, chunk_id, line_count):
+                            async for chunk in process_content_chunk(
+                                content,
+                                chunk_id,
+                                line_count,
+                                for_target=False
+                            ):
+                                stream["variant"].append(chunk)
                                 if isinstance(chunk, ImageResponse):
                                     yield PreviewResponse(str(chunk))
                                 else:
                                     variant_text += str(chunk)
                                     if not is_started:
                                         yield PreviewResponse(variant_text)

+                # Quick response (short preview)
                 elif quick_response_id and chunk_id == quick_response_id:
                     yield PlainTextResponse("[Quick] " + line.decode(errors="ignore"))
                     if isinstance(data, dict):
                         content = data.get("curr", "")
                         if content:
+                            async for chunk in process_content_chunk(
+                                content,
+                                chunk_id,
+                                line_count,
+                                for_target=False
+                            ):
+                                stream["quick"].append(chunk)
+                            quick_content += content
                             yield PreviewResponse(content)

+                elif chunk_id in [turn_id, persisted_turn_id]:
+                    ...
+                elif chunk_id == right_message_id:
+                    ...
+                elif chunk_id == left_message_id:
|
||||||
|
...
|
||||||
|
# Miscellaneous extra content
|
||||||
elif isinstance(data, dict) and "curr" in data:
|
elif isinstance(data, dict) and "curr" in data:
|
||||||
content = data.get("curr", "")
|
content = data.get("curr", "")
|
||||||
if content:
|
if content:
|
||||||
|
async for chunk in process_content_chunk(
|
||||||
|
content,
|
||||||
|
chunk_id,
|
||||||
|
line_count,
|
||||||
|
for_target=False
|
||||||
|
):
|
||||||
|
stream["extra"].append(chunk)
|
||||||
|
if isinstance(chunk, str) and "<streaming stopped unexpectedly" in chunk:
|
||||||
|
yield FinishReason(chunk)
|
||||||
|
|
||||||
yield PlainTextResponse("[Extra] " + line.decode(errors="ignore"))
|
yield PlainTextResponse("[Extra] " + line.decode(errors="ignore"))
|
||||||
|
|
||||||
if variant_image is not None:
|
if variant_image is not None:
|
||||||
yield variant_image
|
yield variant_image
|
||||||
elif variant_text:
|
elif variant_text:
|
||||||
yield PreviewResponse(variant_text)
|
yield PreviewResponse(variant_text)
|
||||||
|
yield JsonResponse(**stream)
|
||||||
log_debug(f"Finished processing {line_count} lines")
|
log_debug(f"Finished processing {line_count} lines")
|
||||||
|
|
||||||
except:
|
except:
|
||||||
raise
|
raise
|
||||||
|
|
||||||
finally:
|
finally:
|
||||||
# Claim reward in background
|
# Claim reward in background
|
||||||
if reward_info and "unclaimedRewardInfo" in reward_info:
|
if reward_info and "unclaimedRewardInfo" in reward_info:
|
||||||
reward_id = reward_info["unclaimedRewardInfo"].get("rewardId")
|
reward_id = reward_info["unclaimedRewardInfo"].get("rewardId")
|
||||||
if reward_id:
|
if reward_id:
|
||||||
await claim_yupp_reward(session, account, reward_id)
|
await claim_yupp_reward(session, account, reward_id)
|
||||||
|
|
|
||||||
|
|
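Note on the Yupp changes above: the provider now follows several named references (routing, turn, reward, nudge) through the server stream instead of hard-coding chunk "e". A minimal sketch of what the `extract_ref_id` helper used throughout could look like, assuming references arrive as strings such as "$@1a" pointing at a later chunk keyed "1a:" (the helper name comes from the diff; the exact reference format is an assumption):

import re
from typing import Optional

def extract_ref_id(value) -> Optional[str]:
    # Hypothetical: accept "$@1a"-style stream references and return the
    # hex chunk id, so later stream lines keyed "1a:" can be matched.
    if isinstance(value, str):
        match = re.match(r"^\$@?([0-9a-fA-F]+)$", value)
        if match:
            return match.group(1)
    return None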
@ -48,6 +48,8 @@ from .Copilot import Copilot
from .DeepInfra import DeepInfra
from .EasyChat import EasyChat
from .GLM import GLM
+from .GradientNetwork import GradientNetwork
+from .ItalyGPT import ItalyGPT
from .LambdaChat import LambdaChat
from .Mintlify import Mintlify
from .OIVSCodeSer import OIVSCodeSer2, OIVSCodeSer0501
112
g4f/Provider/hf_space/BAAI_Ling.py
Normal file

@ -0,0 +1,112 @@
from __future__ import annotations

import aiohttp
import json
import uuid

from ...typing import AsyncResult, Messages
from ...providers.response import JsonConversation
from ...requests.raise_for_status import raise_for_status
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt, get_last_user_message, get_system_prompt
from ... import debug

class BAAI_Ling(AsyncGeneratorProvider, ProviderModelMixin):
    label = "Ling & Ring Playground"
    url = "https://cafe3310-ling-playground.hf.space"
    api_endpoint = f"{url}/gradio_api/queue/join"

    working = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = False

    default_model = "ling-1t"
    model_aliases = {
        "ling": default_model,
    }
    models = ['ling-mini-2.0', 'ling-1t', 'ling-flash-2.0', 'ring-1t', 'ring-flash-2.0', 'ring-mini-2.0']

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        conversation: JsonConversation = None,
        **kwargs
    ) -> AsyncResult:
        is_new_conversation = conversation is None or not hasattr(conversation, 'session_hash')
        if is_new_conversation:
            conversation = JsonConversation(session_hash=str(uuid.uuid4()).replace('-', '')[:12])

        model = cls.get_model(model)
        prompt = format_prompt(messages) if is_new_conversation else get_last_user_message(messages)

        headers = {
            'accept': '*/*',
            'accept-language': 'en-US,en;q=0.9',
            'content-type': 'application/json',
            'origin': cls.url,
            'referer': f'{cls.url}/',
            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36'
        }

        payload = {
            "data": [
                prompt,
                [
                    [
                        None,
                        "Hello! I'm Ling. Try selecting a scenario and a message example below to get started."
                    ]
                ],
                get_system_prompt(messages),
                1,
                model
            ],
            "event_data": None,
            "fn_index": 11,
            "trigger_id": 14,
            "session_hash": conversation.session_hash
        }

        async with aiohttp.ClientSession() as session:
            async with session.post(cls.api_endpoint, headers=headers, json=payload, proxy=proxy) as response:
                await raise_for_status(response)
                # Response body must be consumed for the request to complete
                await response.json()

            data_url = f'{cls.url}/gradio_api/queue/data?session_hash={conversation.session_hash}'
            headers_data = {
                'accept': 'text/event-stream',
                'referer': f'{cls.url}/',
                'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36'
            }

            async with session.get(data_url, headers=headers_data, proxy=proxy) as response:
                full_response = ""
                async for line in response.content:
                    decoded_line = line.decode('utf-8')
                    if decoded_line.startswith('data: '):
                        try:
                            json_data = json.loads(decoded_line[6:])
                            if json_data.get('msg') == 'process_generating':
                                if 'output' in json_data and 'data' in json_data['output']:
                                    output_data = json_data['output']['data']
                                    if output_data and len(output_data) > 0:
                                        parts = output_data[0][0]
                                        if len(parts) == 2:
                                            new_text = output_data[0][1].pop()
                                            full_response += new_text
                                            yield new_text
                                        if len(parts) > 2:
                                            new_text = parts[2]
                                            full_response += new_text
                                            yield new_text
                            elif json_data.get('msg') == 'process_completed':
                                break
                        except json.JSONDecodeError:
                            debug.log("Could not parse JSON:", decoded_line)
@ -6,6 +6,7 @@ from ...typing import AsyncResult, Messages, MediaListType
from ...errors import ResponseError
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin

+from .BAAI_Ling import BAAI_Ling
from .BlackForestLabs_Flux1Dev import BlackForestLabs_Flux1Dev
from .BlackForestLabs_Flux1KontextDev import BlackForestLabs_Flux1KontextDev
from .CohereForAI_C4AI_Command import CohereForAI_C4AI_Command

@ -27,6 +28,7 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
    default_image_model = BlackForestLabs_Flux1Dev.default_model
    default_vision_model = Microsoft_Phi_4_Multimodal.default_model
    providers = [
+       BAAI_Ling,
        BlackForestLabs_Flux1Dev,
        BlackForestLabs_Flux1KontextDev,
        CohereForAI_C4AI_Command,
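The new BAAI_Ling space is registered with HuggingSpace above, so once this lands it should be reachable through the regular client. A minimal usage sketch, assuming the project's AsyncClient interface and the "ling-1t" default model from the file:

import asyncio
from g4f.client import AsyncClient

async def main():
    client = AsyncClient()
    response = await client.chat.completions.create(
        model="ling-1t",  # default model of the new BAAI_Ling space
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response.choices[0].message.content)

asyncio.run(main())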
@ -71,6 +71,7 @@ models = {
    "gemini-2.0-flash-thinking": {"x-goog-ext-525001261-jspb": '[null,null,null,null,"9c17b1863f581b8a"]'},
    "gemini-2.0-flash-thinking-with-apps": {"x-goog-ext-525001261-jspb": '[null,null,null,null,"f8f8f5ea629f5d37"]'},
    # Currently used models
+   "gemini-3-pro": {"x-goog-ext-525001261-jspb": '[1,null,null,null,"9d8ca3786ebdfbea",null,null,0,[4]]'},
    "gemini-2.5-pro": {"x-goog-ext-525001261-jspb": '[1,null,null,null,"61530e79959ab139",null,null,null,[4]]'},
    "gemini-2.5-flash": {"x-goog-ext-525001261-jspb": '[1,null,null,null,"9ec249fc9ad08861",null,null,null,[4]]'},
    "gemini-audio": {}

@ -89,7 +90,7 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
    default_vision_model = default_model
    image_models = [default_image_model]
    models = [
-       default_model, "gemini-2.5-flash", "gemini-2.5-pro"
+       default_model, "gemini-3-pro", "gemini-2.5-flash", "gemini-2.5-pro"
    ]

    synthesize_content_type = "audio/vnd.wav"
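For context: each entry in the Gemini `models` map carries the opaque `x-goog-ext-525001261-jspb` request header that selects the backend model, so enabling gemini-3-pro is just a matter of supplying its header value. A hypothetical illustration of how such a header would be merged into a request (the surrounding request construction is not shown in this hunk and is assumed):

# Sketch only: the per-model header from the map above is merged into the
# request headers before calling the Gemini web endpoint.
model_header = {"x-goog-ext-525001261-jspb": '[1,null,null,null,"9d8ca3786ebdfbea",null,null,0,[4]]'}
headers = {"content-type": "application/x-www-form-urlencoded;charset=UTF-8", **model_header}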
@ -500,6 +500,7 @@ class GeminiCLI(AsyncGeneratorProvider, ProviderModelMixin):
    models = [
        "gemini-2.5-pro",
        "gemini-2.5-flash",
+       "gemini-3-pro-preview"
    ]

    working = True
@ -92,12 +92,26 @@ class Grok(AsyncAuthedProvider, ProviderModelMixin):
            while True:
                if has_headers:
                    break
-               textarea = await page.select("textarea", 180)
-               await textarea.send_keys("Hello")
-               await asyncio.sleep(1)
-               button = await page.select("button[type='submit']")
-               if button:
-                   await button.click()
+               input_element = None
+               try:
+                   input_element = await page.select("div.ProseMirror", 2)
+               except Exception:
+                   pass
+               if not input_element:
+                   try:
+                       input_element = await page.select("textarea", 180)
+                   except Exception:
+                       pass
+               if input_element:
+                   try:
+                       await input_element.click()
+                       await input_element.send_keys("Hello")
+                       await asyncio.sleep(0.5)
+                       submit_btn = await page.select("button[type='submit']", 2)
+                       if submit_btn:
+                           await submit_btn.click()
+                   except Exception:
+                       pass
                await asyncio.sleep(1)
            auth_result.cookies = {}
            for c in await page.send(nodriver.cdp.network.get_cookies([cls.url])):
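The Grok login change replaces the single textarea lookup with a try-each-selector fallback: the ProseMirror editor is probed first with a short timeout, then the legacy textarea. The same pattern could be factored into a small helper; a sketch under the assumption of nodriver's `page.select(selector, timeout)` API used above:

async def select_first(page, selectors, timeout: int = 2):
    # Hypothetical helper: return the first element matching any selector,
    # or None if none appears within the timeout.
    for selector in selectors:
        try:
            element = await page.select(selector, timeout)
            if element:
                return element
        except Exception:
            continue
    return None

# input_element = await select_first(page, ["div.ProseMirror", "textarea"])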
@ -642,7 +642,7 @@ class LMArena(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):

        if not cls._models_loaded:
            cls.get_models()
-       is_image_model = model in image_models
+       is_image_model = model in cls.image_models
        if not model:
            model = cls.default_model
        if model in cls.model_aliases:
@ -1,18 +1,20 @@
from __future__ import annotations

-import os
-import re
import asyncio
-import uuid
-import json
import base64
-import time
+import hashlib
+import json
+import os
import random
-from typing import AsyncIterator, Iterator, Optional, Generator, Dict, Union, List, Any
+import re
+import time
+import uuid
from copy import copy
+from typing import AsyncIterator, Iterator, Optional, Generator, Dict, Union, List, Any

try:
    import nodriver

    has_nodriver = True
except ImportError:
    has_nodriver = False

@ -22,15 +24,17 @@ from ...typing import AsyncResult, Messages, Cookies, MediaListType
from ...requests.raise_for_status import raise_for_status
from ...requests import StreamSession
from ...requests import get_nodriver_session
-from ...image import ImageRequest, to_image, to_bytes, is_accepted_format, detect_file_type
+from ...image import ImageRequest, to_image, to_bytes, detect_file_type
from ...errors import MissingAuthError, NoValidHarFileError, ModelNotFoundError
-from ...providers.response import JsonConversation, FinishReason, SynthesizeData, AuthResult, ImageResponse, ImagePreview, ResponseType, JsonRequest, format_link
+from ...providers.response import JsonConversation, FinishReason, SynthesizeData, AuthResult, ImageResponse, \
+    ImagePreview, ResponseType, JsonRequest, format_link
from ...providers.response import TitleGeneration, RequestLogin, Reasoning
from ...tools.media import merge_media
from ..helper import format_cookies, format_media_prompt, to_string
from ..openai.models import default_model, default_image_model, models, image_models, text_models, model_aliases
from ..openai.har_file import get_request_config
-from ..openai.har_file import RequestConfig, arkReq, arkose_url, start_url, conversation_url, backend_url, prepare_url, backend_anon_url
+from ..openai.har_file import RequestConfig, arkReq, arkose_url, start_url, conversation_url, backend_url, prepare_url, \
+    backend_anon_url
from ..openai.proofofwork import generate_proof_token
from ..openai.new import get_requirements_token, get_config
from ... import debug
@ -87,6 +91,9 @@ UPLOAD_HEADERS = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
}

+ImagesCache: Dict[str, dict] = {}


class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
    """A class for creating and managing conversations with OpenAI chat service"""

@ -95,6 +102,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
    working = True
    active_by_default = True
    use_nodriver = True
+   image_cache = True
    supports_gpt_4 = True
    supports_message_history = True
    supports_system_message = True
@ -127,11 +135,11 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):

    @classmethod
    async def upload_files(
        cls,
        session: StreamSession,
        auth_result: AuthResult,
        media: MediaListType,
-   ) -> list[ImageRequest]:
+   ) -> List[ImageRequest]:
        """
        Upload an image to the service and get the download URL

@ -143,11 +151,20 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
        Returns:
        An ImageRequest object that contains the download URL, file name, and other data
        """
-       async def upload_file(file, image_name=None):
+       async def upload_file(file, image_name=None) -> ImageRequest:
            debug.log(f"Uploading file: {image_name}")
            file_data = {}

            data_bytes = to_bytes(file)
+           # Check Cache
+           hasher = hashlib.md5()
+           hasher.update(data_bytes)
+           image_hash = hasher.hexdigest()
+           cache_file = ImagesCache.get(image_hash)
+           if cls.image_cache and cache_file:
+               debug.log("Using cached image")
+               return ImageRequest(cache_file)
            extension, mime_type = detect_file_type(data_bytes)
            if "image" in mime_type:
                # Convert the image to a PIL Image object

@ -181,30 +198,31 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
            # Put the image bytes to the upload URL and check the status
            await asyncio.sleep(1)
            async with session.put(
                file_data["upload_url"],
                data=data_bytes,
                headers={
                    **UPLOAD_HEADERS,
                    "Content-Type": file_data["mime_type"],
                    "x-ms-blob-type": "BlockBlob",
                    "x-ms-version": "2020-04-08",
                    "Origin": "https://chatgpt.com",
                }
            ) as response:
                await raise_for_status(response)
            # Post the file ID to the service and get the download URL
            async with session.post(
                f"{cls.url}/backend-api/files/{file_data['file_id']}/uploaded",
                json={},
                headers=auth_result.headers
            ) as response:
                cls._update_request_args(auth_result, session)
                await raise_for_status(response, "Get download url failed")
                uploaded_data = await response.json()
                file_data["download_url"] = uploaded_data["download_url"]
+               ImagesCache[image_hash] = file_data.copy()
            return ImageRequest(file_data)

-       medias = []
+       medias: List["ImageRequest"] = []
        for item in media:
            item = item if isinstance(item, tuple) else (item,)
            __uploaded_media = await upload_file(*item)
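The upload path above now deduplicates media by content: the md5 of the raw bytes keys the module-level ImagesCache, so re-sending the same image skips the whole upload round trip. The keying itself is just this (same calls as in upload_file above):

import hashlib

def image_cache_key(data_bytes: bytes) -> str:
    # Identical bytes always hash to the same digest, so a repeated upload
    # can return the cached file record instead of re-uploading.
    return hashlib.md5(data_bytes).hexdigest()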
@ -242,7 +260,8 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
            "id": str(uuid.uuid4()),
            "author": {"role": message["role"]},
            "content": {"content_type": "text", "parts": [to_string(message["content"])]},
-           "metadata": {"serialization_metadata": {"custom_symbol_offsets": []}, **({"system_hints": system_hints} if system_hints else {})},
+           "metadata": {"serialization_metadata": {"custom_symbol_offsets": []},
+                        **({"system_hints": system_hints} if system_hints else {})},
            "create_time": time.time(),
        } for message in messages]
        # Check if there is an image response

@ -256,11 +275,11 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                    "size_bytes": image_request.get("file_size"),
                    "width": image_request.get("width"),
                }
                for image_request in image_requests
                # Add For Images Only
                if image_request.get("use_case") == "multimodal"
            ],
            messages[-1]["content"]["parts"][0]]
        }
        # Add the metadata object with the attachments
        messages[-1]["metadata"] = {
@ -278,12 +297,14 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                    else {}
                ),
            }
            for image_request in image_requests]
        }
        return messages

    @classmethod
-   async def get_generated_image(cls, session: StreamSession, auth_result: AuthResult, element: Union[dict, str], prompt: str = None, conversation_id: str = None) -> ImagePreview|ImageResponse|None:
+   async def get_generated_image(cls, session: StreamSession, auth_result: AuthResult, element: Union[dict, str],
+                                 prompt: str = None, conversation_id: str = None,
+                                 status: Optional[str] = None) -> ImagePreview | ImageResponse | None:
        download_urls = []
        is_sediment = False
        if prompt is None:

@ -292,7 +313,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
            except KeyError:
                pass
        if "asset_pointer" in element:
            element = element["asset_pointer"]
        if isinstance(element, str) and element.startswith("file-service://"):
            element = element.split("file-service://", 1)[-1]
        elif isinstance(element, str) and element.startswith("sediment://"):

@ -303,7 +324,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
        if is_sediment:
            url = f"{cls.url}/backend-api/conversation/{conversation_id}/attachment/{element}/download"
        else:
-           url =f"{cls.url}/backend-api/files/{element}/download"
+           url = f"{cls.url}/backend-api/files/{element}/download"
        try:
            async with session.get(url, headers=auth_result.headers) as response:
                cls._update_request_args(auth_result, session)

@ -319,27 +340,31 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
            debug.error("OpenaiChat: Download image failed")
            debug.error(e)
        if download_urls:
-           return ImagePreview(download_urls, prompt, {"headers": auth_result.headers}) if is_sediment else ImageResponse(download_urls, prompt, {"headers": auth_result.headers})
+           # status = None, finished_successfully
+           if is_sediment and status is None:
+               return ImagePreview(download_urls, prompt, {"status": status, "headers": auth_result.headers})
+           else:
+               return ImageResponse(download_urls, prompt, {"status": status, "headers": auth_result.headers})

    @classmethod
    async def create_authed(
        cls,
        model: str,
        messages: Messages,
        auth_result: AuthResult,
        proxy: str = None,
        timeout: int = 360,
        auto_continue: bool = False,
        action: Optional[str] = None,
        conversation: Conversation = None,
        media: MediaListType = None,
        return_conversation: bool = True,
        web_search: bool = False,
        prompt: str = None,
        conversation_mode: Optional[dict] = None,
        temporary: Optional[bool] = None,
        conversation_id: Optional[str] = None,
        **kwargs
    ) -> AsyncResult:
        """
        Create an asynchronous generator for the conversation.
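With the new `status` parameter, a sediment asset that is still streaming (no status yet) comes back as an ImagePreview, while anything with a final status becomes an ImageResponse. A sketch of how a consumer could treat the two, assuming only the response types already imported at the top of the file:

from g4f.providers.response import ImagePreview, ImageResponse

async def collect_final_image(stream):
    # Previews are transient in-progress frames; keep only the last
    # ImageResponse, which carries the finished download URLs.
    final = None
    async for chunk in stream:
        if isinstance(chunk, ImagePreview):
            print("preview:", chunk)
        elif isinstance(chunk, ImageResponse):
            final = chunk
    return final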
@ -367,12 +392,12 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
        if action is None:
            action = "next"
        async with StreamSession(
            proxy=proxy,
            impersonate="chrome",
            timeout=timeout
        ) as session:
            image_requests = None
            media = merge_media(media, messages)
            if not cls.needs_auth and not media:
                if cls._headers is None:
                    cls._create_request_args(cls._cookies)

@ -436,18 +461,19 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                if temporary:
                    data["history_and_training_disabled"] = True
                async with session.post(
                    prepare_url,
                    json=data,
                    headers=cls._headers
                ) as response:
                    await raise_for_status(response)
                    conduit_token = (await response.json())["conduit_token"]
            async with session.post(
                f"{cls.url}/backend-anon/sentinel/chat-requirements"
                if cls._api_key is None else
                f"{cls.url}/backend-api/sentinel/chat-requirements",
-               json={"p": None if not getattr(auth_result, "proof_token", None) else get_requirements_token(getattr(auth_result, "proof_token", None))},
+               json={"p": None if not getattr(auth_result, "proof_token", None) else get_requirements_token(
+                   getattr(auth_result, "proof_token", None))},
                headers=cls._headers
            ) as response:
                if response.status in (401, 403):
                    raise MissingAuthError(f"Response status: {response.status}")

@ -456,10 +482,10 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                await raise_for_status(response)
                chat_requirements = await response.json()
                need_turnstile = chat_requirements.get("turnstile", {}).get("required", False)
                need_arkose = chat_requirements.get("arkose", {}).get("required", False)
                chat_token = chat_requirements.get("token")

            # if need_arkose and cls.request_config.arkose_token is None:
            #     await get_request_config(proxy)
            #     cls._create_request_args(auth_result.cookies, auth_result.headers)
            #     cls._set_api_key(auth_result.access_token)
@ -476,23 +502,25 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                proof_token=proof_token
            )
            # [debug.log(text) for text in (
-           #f"Arkose: {'False' if not need_arkose else auth_result.arkose_token[:12]+'...'}",
-           #f"Proofofwork: {'False' if proofofwork is None else proofofwork[:12]+'...'}",
-           #f"AccessToken: {'False' if cls._api_key is None else cls._api_key[:12]+'...'}",
+           # f"Arkose: {'False' if not need_arkose else auth_result.arkose_token[:12]+'...'}",
+           # f"Proofofwork: {'False' if proofofwork is None else proofofwork[:12]+'...'}",
+           # f"AccessToken: {'False' if cls._api_key is None else cls._api_key[:12]+'...'}",
            # )]
            data = {
                "action": "next",
                "parent_message_id": conversation.message_id,
                "model": model,
-               "timezone_offset_min":-120,
-               "timezone":"Europe/Berlin",
-               "conversation_mode":{"kind":"primary_assistant"},
-               "enable_message_followups":True,
+               "timezone_offset_min": -120,
+               "timezone": "Europe/Berlin",
+               "conversation_mode": {"kind": "primary_assistant"},
+               "enable_message_followups": True,
                "system_hints": ["search"] if web_search else None,
-               "supports_buffering":True,
-               "supported_encodings":["v1"],
-               "client_contextual_info":{"is_dark_mode":False,"time_since_loaded":random.randint(20, 500),"page_height":578,"page_width":1850,"pixel_ratio":1,"screen_height":1080,"screen_width":1920},
-               "paragen_cot_summary_display_override":"allow"
+               "supports_buffering": True,
+               "supported_encodings": ["v1"],
+               "client_contextual_info": {"is_dark_mode": False, "time_since_loaded": random.randint(20, 500),
+                                          "page_height": 578, "page_width": 1850, "pixel_ratio": 1,
+                                          "screen_height": 1080, "screen_width": 1920},
+               "paragen_cot_summary_display_override": "allow"
            }
            if temporary:
                data["history_and_training_disabled"] = True

@ -512,7 +540,8 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                        new_messages = []
                    else:
                        new_messages.append(message)
-               data["messages"] = cls.create_messages(new_messages, image_requests, ["search"] if web_search else None)
+               data["messages"] = cls.create_messages(new_messages, image_requests,
+                                                      ["search"] if web_search else None)
            yield JsonRequest.from_dict(data)
            headers = {
                **cls._headers,
@ -521,18 +550,18 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                "openai-sentinel-chat-requirements-token": chat_token,
                **({} if conduit_token is None else {"x-conduit-token": conduit_token})
            }
-           #if cls.request_config.arkose_token:
+           # if cls.request_config.arkose_token:
            #     headers["openai-sentinel-arkose-token"] = cls.request_config.arkose_token
            if proofofwork is not None:
                headers["openai-sentinel-proof-token"] = proofofwork
            if need_turnstile and getattr(auth_result, "turnstile_token", None) is not None:
                headers['openai-sentinel-turnstile-token'] = auth_result.turnstile_token
            async with session.post(
                backend_anon_url
                if cls._api_key is None else
                backend_url,
                json=data,
                headers=headers
            ) as response:
                cls._update_request_args(auth_result, session)
                if response.status in (401, 403, 429, 500):

@ -548,10 +577,12 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                            if match.group(0) in matches:
                                continue
                            matches.append(match.group(0))
-                           generated_image = await cls.get_generated_image(session, auth_result, match.group(0), prompt)
+                           generated_image = await cls.get_generated_image(session, auth_result, match.group(0),
+                                                                           prompt)
                            if generated_image is not None:
                                yield generated_image
-                   async for chunk in cls.iter_messages_line(session, auth_result, line, conversation, sources, references):
+                   async for chunk in cls.iter_messages_line(session, auth_result, line, conversation, sources,
+                                                             references):
                        if isinstance(chunk, str):
                            chunk = chunk.replace("\ue203", "").replace("\ue204", "").replace("\ue206", "")
                            buffer += chunk
@ -561,9 +592,9 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                    def citation_replacer(match: re.Match[str]):
                        ref_type = match.group(1)
                        ref_index = int(match.group(2))
                        if ((ref_type == "image" and is_image_embedding) or
                                is_video_embedding or
                                ref_type == "forecast"):

                            reference = references.get_reference({
                                "ref_index": ref_index,

@ -571,7 +602,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                            })
                            if not reference:
                                return ""

                            if ref_type == "forecast":
                                if reference.get("alt"):
                                    return reference.get("alt")

@ -580,11 +611,13 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):

                            if is_image_embedding and reference.get("content_url", ""):
                                return f"})"

                            if is_video_embedding:
-                               if reference.get("url", "") and reference.get("thumbnail_url", ""):
+                               if reference.get("url", "") and reference.get("thumbnail_url",
+                                                                             ""):
                                    return f"[]({reference['url']})"
-                               video_match = re.match(r"video\n(.*?)\nturn[0-9]+", match.group(0))
+                               video_match = re.match(r"video\n(.*?)\nturn[0-9]+",
+                                                      match.group(0))
                                if video_match:
                                    return video_match.group(1)
                            return ""

@ -595,9 +628,9 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                        })
                        if source_index is not None and len(sources.list) > source_index:
                            link = sources.list[source_index]["url"]
-                           return f"[[{source_index+1}]]({link})"
+                           return f"[[{source_index + 1}]]({link})"
                        return f""

                    def products_replacer(match: re.Match[str]):
                        try:
                            products_data = json.loads(match.group(1))

@ -612,25 +645,30 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                            return ""

                        sequence_content = match.group(1)
-                       sequence_content = sequence_content.replace("\ue200", "").replace("\ue202", "\n").replace("\ue201", "")
+                       sequence_content = sequence_content.replace("\ue200", "").replace("\ue202",
+                                                                                         "\n").replace(
+                           "\ue201", "")
                        sequence_content = sequence_content.replace("navlist\n", "#### ")

                        # Handle search, news, view and image citations
                        is_image_embedding = sequence_content.startswith("i\nturn")
                        is_video_embedding = sequence_content.startswith("video\n")
                        sequence_content = re.sub(
                            r'(?:cite\nturn[0-9]+|forecast\nturn[0-9]+|video\n.*?\nturn[0-9]+|i?\n?turn[0-9]+)(search|news|view|image|forecast)(\d+)',
                            citation_replacer,
                            sequence_content
                        )
-                       sequence_content = re.sub(r'products\n(.*)', products_replacer, sequence_content)
-                       sequence_content = re.sub(r'product_entity\n\[".*","(.*)"\]', lambda x: x.group(1), sequence_content)
+                       sequence_content = re.sub(r'products\n(.*)', products_replacer,
+                                                 sequence_content)
+                       sequence_content = re.sub(r'product_entity\n\[".*","(.*)"\]',
+                                                 lambda x: x.group(1), sequence_content)
                        return sequence_content

                    # process only completed sequences and do not touch start of next not completed sequence
-                   buffer = re.sub(r'\ue200(.*?)\ue201', sequence_replacer, buffer, flags=re.DOTALL)
-                   if buffer.find(u"\ue200") != -1: # still have uncompleted sequence
+                   buffer = re.sub(r'\ue200(.*?)\ue201', sequence_replacer, buffer,
+                                   flags=re.DOTALL)
+                   if buffer.find(u"\ue200") != -1:  # still have uncompleted sequence
                        continue
                    else:
                        # do not yield to consume rest part of special sequence
@ -647,7 +685,8 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                if sources.list:
                    yield sources
                if conversation.generated_images:
-                   yield ImageResponse(conversation.generated_images.urls, conversation.prompt, {"headers": auth_result.headers})
+                   yield ImageResponse(conversation.generated_images.urls, conversation.prompt,
+                                       {"headers": auth_result.headers})
                    conversation.generated_images = None
                    conversation.prompt = None
                if return_conversation:

@ -667,7 +706,9 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
            yield FinishReason(conversation.finish_reason)

    @classmethod
-   async def iter_messages_line(cls, session: StreamSession, auth_result: AuthResult, line: bytes, fields: Conversation, sources: OpenAISources, references: ContentReferences) -> AsyncIterator:
+   async def iter_messages_line(cls, session: StreamSession, auth_result: AuthResult, line: bytes,
+                                fields: Conversation, sources: OpenAISources,
+                                references: ContentReferences) -> AsyncIterator:
        if not line.startswith(b"data: "):
            return
        elif line.startswith(b"data: [DONE]"):

@ -706,9 +747,14 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                elif m.get("p") == "/message/metadata/image_gen_title":
                    fields.prompt = m.get("v")
                elif m.get("p") == "/message/content/parts/0/asset_pointer":
-                   generated_images = fields.generated_images = await cls.get_generated_image(session, auth_result, m.get("v"), fields.prompt, fields.conversation_id)
+                   status = next(filter(lambda x: x.get("p") == '/message/status', v), {}).get('v', None)
+                   generated_images = fields.generated_images = await cls.get_generated_image(session, auth_result,
+                                                                                              m.get("v"),
+                                                                                              fields.prompt,
+                                                                                              fields.conversation_id,
+                                                                                              status)
                    if generated_images is not None:
                        if buffer:
                            yield buffer
                        yield generated_images
                elif m.get("p") == "/message/metadata/search_result_groups":
@ -735,41 +781,48 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                    if match and m.get("o") == "append" and isinstance(m.get("v"), dict):
                        idx = int(match.group(1))
                        references.merge_reference(idx, m.get("v"))
-               elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+/fallback_items$", m.get("p")) and isinstance(m.get("v"), list):
+               elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+/fallback_items$",
+                                            m.get("p")) and isinstance(m.get("v"), list):
                    for link in m.get("v", []) or []:
                        sources.add_source(link)
-               elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+/items$", m.get("p")) and isinstance(m.get("v"), list):
+               elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+/items$",
+                                            m.get("p")) and isinstance(m.get("v"), list):
                    for link in m.get("v", []) or []:
                        sources.add_source(link)
-               elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+/refs$", m.get("p")) and isinstance(m.get("v"), list):
+               elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+/refs$",
+                                            m.get("p")) and isinstance(m.get("v"), list):
                    match = re.match(r"^/message/metadata/content_references/(\d+)/refs$", m.get("p"))
                    if match:
                        idx = int(match.group(1))
                        references.update_reference(idx, m.get("o"), "refs", m.get("v"))
-               elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+/alt$", m.get("p")) and isinstance(m.get("v"), list):
+               elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+/alt$",
+                                            m.get("p")) and isinstance(m.get("v"), list):
                    match = re.match(r"^/message/metadata/content_references/(\d+)/alt$", m.get("p"))
                    if match:
                        idx = int(match.group(1))
                        references.update_reference(idx, m.get("o"), "alt", m.get("v"))
-               elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+/prompt_text$", m.get("p")) and isinstance(m.get("v"), list):
+               elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+/prompt_text$",
+                                            m.get("p")) and isinstance(m.get("v"), list):
                    match = re.match(r"^/message/metadata/content_references/(\d+)/prompt_text$", m.get("p"))
                    if match:
                        idx = int(match.group(1))
                        references.update_reference(idx, m.get("o"), "prompt_text", m.get("v"))
-               elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+/refs/\d+$", m.get("p")) and isinstance(m.get("v"), dict):
+               elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+/refs/\d+$",
+                                            m.get("p")) and isinstance(m.get("v"), dict):
                    match = re.match(r"^/message/metadata/content_references/(\d+)/refs/(\d+)$", m.get("p"))
                    if match:
                        reference_idx = int(match.group(1))
                        ref_idx = int(match.group(2))
                        references.update_reference(reference_idx, m.get("o"), "refs", m.get("v"), ref_idx)
-               elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+/images$", m.get("p")) and isinstance(m.get("v"), list):
+               elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+/images$",
+                                            m.get("p")) and isinstance(m.get("v"), list):
                    match = re.match(r"^/message/metadata/content_references/(\d+)/images$", m.get("p"))
                    if match:
                        idx = int(match.group(1))
                        references.update_reference(idx, m.get("o"), "images", m.get("v"))
                elif m.get("p") == "/message/metadata/finished_text":
                    fields.is_thinking = False
                    if buffer:
                        yield buffer
                    yield Reasoning(status=m.get("v"))
                elif m.get("p") == "/message/metadata" and fields.recipient == "all":

@ -785,10 +838,11 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                fields.recipient = m.get("recipient", fields.recipient)
                if fields.recipient == "all":
                    c = m.get("content", {})
-                   if c.get("content_type") == "text" and m.get("author", {}).get("role") == "tool" and "initial_text" in m.get("metadata", {}):
+                   if c.get("content_type") == "text" and m.get("author", {}).get(
+                           "role") == "tool" and "initial_text" in m.get("metadata", {}):
                        fields.is_thinking = True
                        yield Reasoning(status=m.get("metadata", {}).get("initial_text"))
-                   #if c.get("content_type") == "multimodal_text":
+                   # if c.get("content_type") == "multimodal_text":
                    #     for part in c.get("parts"):
                    #         if isinstance(part, dict) and part.get("content_type") == "image_asset_pointer":
                    #             yield await cls.get_generated_image(session, auth_result, part, fields.prompt, fields.conversation_id)
@ -803,13 +857,13 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
|
||||||
@classmethod
|
@classmethod
|
||||||
async def synthesize(cls, params: dict) -> AsyncIterator[bytes]:
|
async def synthesize(cls, params: dict) -> AsyncIterator[bytes]:
|
||||||
async with StreamSession(
|
async with StreamSession(
|
||||||
impersonate="chrome",
|
impersonate="chrome",
|
||||||
timeout=0
|
timeout=0
|
||||||
) as session:
|
) as session:
|
||||||
async with session.get(
|
async with session.get(
|
||||||
f"{cls.url}/backend-api/synthesize",
|
f"{cls.url}/backend-api/synthesize",
|
||||||
params=params,
|
params=params,
|
||||||
headers=cls._headers
|
headers=cls._headers
|
||||||
) as response:
|
) as response:
|
||||||
await raise_for_status(response)
|
await raise_for_status(response)
|
||||||
async for chunk in response.iter_content():
|
async for chunk in response.iter_content():
|
||||||
|
|
@@ -817,15 +871,15 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):

     @classmethod
     async def login(
         cls,
         proxy: str = None,
         api_key: str = None,
         proof_token: str = None,
         cookies: Cookies = None,
         headers: dict = None,
         **kwargs
     ) -> AsyncIterator:
-        if cls._expires is not None and (cls._expires - 60*10) < time.time():
+        if cls._expires is not None and (cls._expires - 60 * 10) < time.time():
             cls._headers = cls._api_key = None
         if cls._headers is None or headers is not None:
             cls._headers = {} if headers is None else headers
@@ -858,6 +912,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
     async def nodriver_auth(cls, proxy: str = None):
         async with get_nodriver_session(proxy=proxy) as browser:
             page = await browser.get(cls.url)

             def on_request(event: nodriver.cdp.network.RequestWillBeSent, page=None):
                 if event.request.url == start_url or event.request.url.startswith(conversation_url):
                     if cls.request_config.headers is None:
@@ -866,9 +921,10 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                         cls.request_config.headers[key.lower()] = value
                 elif event.request.url in (backend_url, backend_anon_url):
                     if "OpenAI-Sentinel-Proof-Token" in event.request.headers:
                         cls.request_config.proof_token = json.loads(base64.b64decode(
-                            event.request.headers["OpenAI-Sentinel-Proof-Token"].split("gAAAAAB", 1)[-1].split("~")[0].encode()
+                            event.request.headers["OpenAI-Sentinel-Proof-Token"].split("gAAAAAB", 1)[-1].split("~")[
+                                0].encode()
                         ).decode())
                     if "OpenAI-Sentinel-Turnstile-Token" in event.request.headers:
                         cls.request_config.turnstile_token = event.request.headers["OpenAI-Sentinel-Turnstile-Token"]
                     if "Authorization" in event.request.headers:
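Note: the proof-token rewrap above is cosmetic, but the decode it performs deserves a spelled-out version: the Sentinel header embeds base64-encoded JSON between the "gAAAAAB" prefix and the first "~". A standalone sketch with a fabricated token (the real header contents are opaque):

    import base64, json

    def decode_proof_token(header_value: str):
        """Extract the JSON payload from an OpenAI-Sentinel-Proof-Token header."""
        payload = header_value.split("gAAAAAB", 1)[-1].split("~")[0]
        return json.loads(base64.b64decode(payload.encode()).decode())

    # Fabricated token whose payload decodes to the JSON list [1, "a"]:
    token = "gAAAAAB" + base64.b64encode(b'[1, "a"]').decode() + "~rest"
    assert decode_proof_token(token) == [1, "a"]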
@@ -881,6 +937,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                             arkBody=event.request.post_data,
                             userAgent=event.request.headers.get("User-Agent")
                         )

             await page.send(nodriver.cdp.network.enable())
             page.add_handler(nodriver.cdp.network.RequestWillBeSent, on_request)
             await page.reload()
@@ -912,7 +969,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                 if cls._api_key is not None or not cls.needs_auth:
                     break
                 await asyncio.sleep(1)
-            debug.log(f"OpenaiChat: Access token: {'False' if cls._api_key is None else cls._api_key[:12]+'...'}")
+            debug.log(f"OpenaiChat: Access token: {'False' if cls._api_key is None else cls._api_key[:12] + '...'}")
             while True:
                 if cls.request_config.proof_token:
                     break
@@ -970,11 +1027,14 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
             if cls._cookies:
                 cls._headers["cookie"] = format_cookies(cls._cookies)


 class Conversation(JsonConversation):
     """
     Class to encapsulate response fields.
     """
-    def __init__(self, conversation_id: str = None, message_id: str = None, user_id: str = None, finish_reason: str = None, parent_message_id: str = None, is_thinking: bool = False):
+
+    def __init__(self, conversation_id: str = None, message_id: str = None, user_id: str = None,
+                 finish_reason: str = None, parent_message_id: str = None, is_thinking: bool = False):
         self.conversation_id = conversation_id
         self.message_id = message_id
         self.finish_reason = finish_reason
@@ -987,8 +1047,9 @@ class Conversation(JsonConversation):
         self.prompt = None
         self.generated_images: ImagePreview = None


 def get_cookies(
     urls: Optional[Iterator[str]] = None
 ) -> Generator[Dict, Dict, Dict[str, str]]:
     params = {}
     if urls is not None:
@@ -1000,6 +1061,7 @@ def get_cookies(
     json = yield cmd_dict
     return {c["name"]: c["value"] for c in json['cookies']} if 'cookies' in json else {}


 class OpenAISources(ResponseType):
     list: List[Dict[str, str]]

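Note: get_cookies, closed out in the hunk above, is a bidirectional generator: it yields a CDP command dict and expects the caller to send the browser's JSON reply back in, then returns the cookies as a plain dict. A driver sketch using a faked reply instead of a live CDP connection, assuming the function as shown:

    gen = get_cookies(["https://chatgpt.com"])
    cmd = next(gen)  # the CDP command dict to execute in the browser
    try:
        gen.send({"cookies": [{"name": "sid", "value": "abc"}]})  # faked browser reply
    except StopIteration as stop:
        assert stop.value == {"sid": "abc"}  # the generator's return value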
@@ -1025,7 +1087,7 @@ class OpenAISources(ResponseType):
         if existing_source and idx is not None:
             self.list[idx] = source
             return

         existing_source, idx = self.find_by_url(source["url"])
         if existing_source and idx is not None:
             self.list[idx] = source
@@ -1038,53 +1100,54 @@ class OpenAISources(ResponseType):
         if not self.list:
             return ""
         return "\n\n\n\n" + ("\n>\n".join([
-            f"> [{idx+1}] {format_link(link['url'], link.get('title', ''))}"
+            f"> [{idx + 1}] {format_link(link['url'], link.get('title', ''))}"
             for idx, link in enumerate(self.list)
         ]))

-    def get_ref_info(self, source: Dict[str, str]) -> dict[str, str|int] | None:
+    def get_ref_info(self, source: Dict[str, str]) -> dict[str, str | int] | None:
         ref_index = source.get("ref_id", {}).get("ref_index", None)
         ref_type = source.get("ref_id", {}).get("ref_type", None)
         if isinstance(ref_index, int):
             return {
                 "ref_index": ref_index,
                 "ref_type": ref_type,
             }

         for ref_info in source.get('refs') or []:
             ref_index = ref_info.get("ref_index", None)
             ref_type = ref_info.get("ref_type", None)
             if isinstance(ref_index, int):
                 return {
                     "ref_index": ref_index,
                     "ref_type": ref_type,
                 }

         return None

-    def find_by_ref_info(self, ref_info: dict[str, str|int]):
+    def find_by_ref_info(self, ref_info: dict[str, str | int]):
         for idx, source in enumerate(self.list):
             source_ref_info = self.get_ref_info(source)
             if (source_ref_info and
                     source_ref_info["ref_index"] == ref_info["ref_index"] and
                     source_ref_info["ref_type"] == ref_info["ref_type"]):
                 return source, idx

         return None, None

     def find_by_url(self, url: str):
         for idx, source in enumerate(self.list):
             if source["url"] == url:
                 return source, idx
         return None, None

-    def get_index(self, ref_info: dict[str, str|int]) -> int | None:
+    def get_index(self, ref_info: dict[str, str | int]) -> int | None:
         _, index = self.find_by_ref_info(ref_info)
         if index is not None:
             return index

         return None


 class ContentReferences:
     def __init__(self) -> None:
         self.list: List[Dict[str, Any]] = []
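Note: OpenAISources resolves a citation in two passes, first by its (ref_index, ref_type) pair via get_ref_info/find_by_ref_info, then by URL via find_by_url. A toy reproduction of the matching rule with fabricated data (not the class's real constructor):

    sources = [
        {"url": "https://a.example", "ref_id": {"ref_index": 0, "ref_type": "search"}},
        {"url": "https://b.example", "refs": [{"ref_index": 1, "ref_type": "search"}]},
    ]

    def find(ref_info):
        # A source can carry its identity either as a single ref_id or a refs list.
        for idx, s in enumerate(sources):
            candidates = [s["ref_id"]] if "ref_id" in s else s.get("refs", [])
            if any(r.get("ref_index") == ref_info["ref_index"] and
                   r.get("ref_type") == ref_info["ref_type"] for r in candidates):
                return s, idx
        return None, None

    assert find({"ref_index": 1, "ref_type": "search"})[1] == 1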
@@ -1098,16 +1161,16 @@ class ContentReferences:

         self.list[idx] = {**self.list[idx], **reference_part}

-    def update_reference(self, idx: int, operation: str, field: str, value: Any, ref_idx = None) -> None:
+    def update_reference(self, idx: int, operation: str, field: str, value: Any, ref_idx=None) -> None:
         while len(self.list) <= idx:
             self.list.append({})

         if operation == "append" or operation == "add":
             if not isinstance(self.list[idx].get(field, None), list):
                 self.list[idx][field] = []
             if isinstance(value, list):
                 self.list[idx][field].extend(value)
             else:
                 self.list[idx][field].append(value)

         if operation == "replace" and ref_idx is not None:
@@ -1123,10 +1186,10 @@ class ContentReferences:
             self.list[idx][field] = value

     def get_ref_info(
         self,
         source: Dict[str, str],
         target_ref_info: Dict[str, Union[str, int]]
-    ) -> dict[str, str|int] | None:
+    ) -> dict[str, str | int] | None:
         for idx, ref_info in enumerate(source.get("refs", [])) or []:
             if not isinstance(ref_info, dict):
                 continue
@@ -1134,11 +1197,11 @@ class ContentReferences:
             ref_index = ref_info.get("ref_index", None)
             ref_type = ref_info.get("ref_type", None)
             if isinstance(ref_index, int) and isinstance(ref_type, str):
                 if (not target_ref_info or
                         (target_ref_info["ref_index"] == ref_index and
                          target_ref_info["ref_type"] == ref_type)):
                     return {
                         "ref_index": ref_index,
                         "ref_type": ref_type,
                         "idx": idx
                     }
@@ -1149,9 +1212,9 @@ class ContentReferences:
         for reference in self.list:
             reference_ref_info = self.get_ref_info(reference, ref_info)

             if (not reference_ref_info or
                     reference_ref_info["ref_index"] != ref_info["ref_index"] or
                     reference_ref_info["ref_type"] != ref_info["ref_type"]):
                 continue

             if ref_info["ref_type"] != "image":
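Note: update_reference (hunks above) grows the backing list on demand, so patches can arrive out of order, and "append"/"add" accumulate into a list-valued field. An illustration of the append path, assuming ContentReferences exactly as shown:

    refs = ContentReferences()
    refs.update_reference(2, "append", "parts", "first")             # list auto-grows to index 2
    refs.update_reference(2, "append", "parts", ["second", "third"])  # list values are extended
    assert refs.list[2]["parts"] == ["first", "second", "third"]
    assert refs.list[0] == {} and refs.list[1] == {}                  # padding entries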
@@ -440,7 +440,7 @@ class Backend_Api(Api):
                         os.remove(copyfile)
                     continue
                 if not is_media and result:
-                    with open(os.path.join(bucket_dir, f"{filename}.md"), 'w') as f:
+                    with open(os.path.join(bucket_dir, f"{filename}.md"), 'w', encoding="utf-8") as f:
                         f.write(f"{result}\n")
                     filenames.append(f"{filename}.md")
                 if is_media:
@@ -477,7 +477,7 @@ class Backend_Api(Api):
                 except OSError:
                     shutil.copyfile(copyfile, newfile)
                     os.remove(copyfile)
-        with open(os.path.join(bucket_dir, "files.txt"), 'w') as f:
+        with open(os.path.join(bucket_dir, "files.txt"), 'w', encoding="utf-8") as f:
            for filename in filenames:
                f.write(f"{filename}\n")
        return {"bucket_id": bucket_id, "files": filenames, "media": media}
@@ -572,7 +572,7 @@ class Backend_Api(Api):
         share_id = secure_filename(share_id)
         bucket_dir = get_bucket_dir(share_id)
         os.makedirs(bucket_dir, exist_ok=True)
-        with open(os.path.join(bucket_dir, "chat.json"), 'w') as f:
+        with open(os.path.join(bucket_dir, "chat.json"), 'w', encoding="utf-8") as f:
             json.dump(chat_data, f)
         self.chat_cache[share_id] = updated
         return {"share_id": share_id}
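Note: all three Backend_Api changes above are the same one-line fix. Without an explicit encoding, open(..., 'w') falls back to locale.getpreferredencoding(), which is cp1252 on many Windows setups, and non-ASCII chat content then raises UnicodeEncodeError. A minimal demonstration:

    # Portable: always writes UTF-8, regardless of the host locale.
    with open("chat.json", "w", encoding="utf-8") as f:
        f.write('{"text": "café and “smart quotes”"}')

    # Omitting encoding= is equivalent to encoding=locale.getpreferredencoding(False);
    # on a cp1252 system the same write raises UnicodeEncodeError.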
@@ -9,6 +9,8 @@ from pathlib import Path
 from typing import Optional
 from urllib.parse import urlparse

+import requests
+
 try:
     from PIL import Image, ImageOps
     has_requirements = True
@@ -383,6 +385,13 @@ def to_bytes(image: ImageType) -> bytes:
                 return Path(path).read_bytes()
             else:
                 raise FileNotFoundError(f"File not found: {path}")
+        else:
+            resp = requests.get(image, headers={
+                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36 Edg/137.0.0.0",
+            })
+            if resp.ok and is_accepted_format(resp.content):
+                return resp.content
+            raise ValueError("Invalid image url. Expected bytes, str, or PIL Image.")
         else:
             raise ValueError("Invalid image format. Expected bytes, str, or PIL Image.")
     elif isinstance(image, Image.Image):
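Note: the new to_bytes branch treats a string that is neither a data URI nor an existing local file as an HTTP(S) URL: it is fetched with a browser User-Agent and validated by is_accepted_format before the raw bytes are returned. A hedged usage sketch; the import path and URL are assumptions:

    from g4f.image import to_bytes  # assumed module location

    data = to_bytes("https://example.com/cat.png")  # placeholder URL, fetched remotely
    assert isinstance(data, bytes)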
@@ -31,17 +31,21 @@ class StreamResponse(ClientResponse):
         except json.JSONDecodeError:
             continue

-class StreamSession():
+class StreamSession:
     def __init__(
         self,
-        headers: dict = {},
+        headers=None,
         timeout: int = None,
         connector: BaseConnector = None,
         proxy: str = None,
-        proxies: dict = {},
+        proxies=None,
         impersonate = None,
         **kwargs
     ):
+        if proxies is None:
+            proxies = {}
+        if headers is None:
+            headers = {}
         if impersonate:
             headers = {
                 **DEFAULT_HEADERS,
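Note: the headers/proxies change above fixes Python's classic mutable-default pitfall: a dict default is created once at function definition and shared by every call that omits the argument. A self-contained demonstration of the bug and the fix:

    def bad(headers: dict = {}):
        headers["count"] = headers.get("count", 0) + 1
        return headers

    assert bad() == {"count": 1}
    assert bad() == {"count": 2}   # same dict object, mutated across calls

    def good(headers: dict = None):
        headers = {} if headers is None else headers
        headers["count"] = headers.get("count", 0) + 1
        return headers

    assert good() == {"count": 1}
    assert good() == {"count": 1}  # fresh dict on each call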
@@ -49,7 +53,7 @@ class StreamSession():
         }
         connect = None
         if isinstance(timeout, tuple):
-            connect, timeout = timeout;
+            connect, timeout = timeout
         if timeout is not None:
             timeout = ClientTimeout(timeout, connect)
         if proxy is None:
@@ -18,4 +18,3 @@ python-multipart
 a2wsgi
 python-dotenv
 ddgs
-aiofile