feat: add LM Arena provider, async‑ify Copilot & surface follow‑up suggestions

* **Provider/Blackbox.py**
  * Raise `RateLimitError` when `"You have reached your request limit for the hour"` substring is detected
* **Provider/Copilot.py**
  * Convert class to `AsyncGeneratorProvider`; rename `create_completion` → `create_async_generator`
  * Swap `curl_cffi.requests.Session` for `AsyncSession`; reduce default timeout to **30 s**
  * Fully async websocket flow (`await session.ws_connect`, `await wss.send/recv/close`)
  * Emit new response types: `TitleGeneration`, `SourceLink`, aggregated `Sources`
  * Track request completion with `done` flag; collect citations in `sources` dict
* **Provider/DuckDuckGo.py**
  * Replace `duckduckgo_search.DDGS` with `duckai.DuckAI`
  * Change base class to `AbstractProvider`; drop nodriver‑based auth
* **Provider/PollinationsAI.py**
  * Re‑build text/audio model lists, de‑duplicating while preserving order; remove unused `extra_text_models`
  * Fix image seed logic (attempt counter is now 1‑indexed; only the first attempt reuses a given seed, retries get a fresh random one); propagate streaming `error` field via `ResponseError`
* **Provider/hf_space**
  * **New file** `LMArenaProvider.py` implementing async queue/stream client
  * Register `LMArenaProvider` in `hf_space/__init__.py`; delete `G4F` import
* **Provider/needs_auth/CopilotAccount.py**
  * Inherit order changed to `Copilot, AsyncAuthedProvider`
  * Refactor token & cookie propagation; add `cookies_to_dict` helper
* **Provider/needs_auth/OpenaiChat.py**
  * Parse reasoning thoughts/summary; yield `Reasoning` responses
  * Tighten access‑token validation and nodriver JS evaluations (`return_by_value`)
  * Extend `Conversation` with `p` and `thoughts_summary`
* **providers/response.py**
  * Add `SourceLink` response class that renders a single formatted citation link
* **providers/base_provider.py**
  * Serialize `AuthResult` via `json.dump` with a custom `default` handler for non‑serializable fields
  * Gracefully skip empty cache files when loading auth data
* **image/copy_images.py**
  * Ignore file extensions longer than 4 chars when inferring type
* **requests/__init__.py**
  * Use `return_by_value=True` for `navigator.userAgent` extraction
* **models.py**
  * Remove `G4F` from model provider lists; update `janus_pro_7b` best providers
* **GUI server/api.py**
  * Stream `SuggestedFollowups` to client (`"suggestions"` event)
* **GUI static assets**
  * **style.css**: bold chat title, add `.suggestions` styles, remove padding from `.chat-body`
  * **chat.v1.js**
    * Capture `suggestions` packets, render buttons, and send as quick replies
    * Re‑order finish‑reason logic; adjust token count placement and system‑prompt toggling
  * **chat-top-panel / footer** interactions updated accordingly
* **gui/client/static/js/chat.v1.js** & **css**: further UI refinements (scroll handling, token counting, hide‑prompt toggle)
* Minor updates across multiple files to match new async interfaces and headers (`userAgent`, `raise_for_status`)
Author: hlohaus
Date: 2025-04-17 01:21:58 +02:00
Parent: 323765d810
Commit: 06546649db
19 changed files with 473 additions and 212 deletions

View file: commit-message generator script

@@ -26,6 +26,10 @@ from typing import Optional, Dict, Any, List, Tuple
from g4f.client import Client
from g4f.models import ModelUtils
import g4f.Provider
from g4f import debug
debug.logging = True
# Constants
DEFAULT_MODEL = "claude-3.7-sonnet"
@@ -184,16 +188,21 @@ def generate_commit_message(diff_text: str, model: str = DEFAULT_MODEL) -> Optio
# Make API call
response = client.chat.completions.create(
prompt,
model=model,
messages=[{"role": "user", "content": prompt}]
stream=True,
)
# Stop spinner and clear line
spinner.set()
sys.stdout.write("\r" + " " * 50 + "\r")
sys.stdout.flush()
return response.choices[0].message.content.strip()
content = []
for chunk in response:
# Stop spinner and clear line
if spinner:
spinner.set()
print(" " * 50 + "\n", flush=True)
spinner = None
if isinstance(chunk.choices[0].delta.content, str):
content.append(chunk.choices[0].delta.content)
print(chunk.choices[0].delta.content, end="", flush=True)
return "".join(content).strip()
except Exception as e:
# Stop spinner if it's running
if 'spinner' in locals() and spinner:
@@ -306,11 +315,6 @@ def main():
print("Failed to generate commit message after multiple attempts.")
sys.exit(1)
print("\nGenerated commit message:")
print("-" * 50)
print(commit_message)
print("-" * 50)
if args.edit:
print("\nOpening editor to modify commit message...")
commit_message = edit_commit_message(commit_message)
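
For context, a minimal sketch of the streaming pattern the tool now uses; `g4f.client.Client` follows the OpenAI-style chat API, and the model name and prompt here are placeholders:

```python
from g4f.client import Client

client = Client()
response = client.chat.completions.create(
    model="claude-3.7-sonnet",
    messages=[{"role": "user", "content": "Write a commit message for this diff: ..."}],
    stream=True,
)

# With stream=True the call returns an iterator of chunks; each delta.content
# may be None, so only string pieces are collected and echoed.
parts = []
for chunk in response:
    delta = chunk.choices[0].delta.content
    if isinstance(delta, str):
        parts.append(delta)
        print(delta, end="", flush=True)
commit_message = "".join(parts).strip()
```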

View file: g4f/Provider/Blackbox.py

@@ -19,7 +19,7 @@ from ..cookies import get_cookies_dir
from .helper import format_image_prompt
from ..providers.response import JsonConversation, ImageResponse
from ..tools.media import merge_media
from ..errors import PaymentRequiredError
from ..errors import RateLimitError
from .. import debug
class Conversation(JsonConversation):
@@ -690,8 +690,8 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
async for chunk in response.content.iter_any():
if chunk:
chunk_text = chunk.decode()
if chunk_text == "You have reached your request limit for the hour":
raise PaymentRequiredError(chunk_text)
if "You have reached your request limit for the hour" in chunk_text:
raise RateLimitError(chunk_text)
full_response.append(chunk_text)
# Only yield chunks for non-image models
if model != cls.default_image_model:
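
A short, hedged sketch of what the new exception buys callers: the hourly-limit message now surfaces as a typed rate-limit error that can be caught and backed off (model name and backoff interval are illustrative only):

```python
import asyncio
from g4f.Provider import Blackbox
from g4f.errors import RateLimitError

async def ask(messages: list) -> None:
    try:
        async for chunk in Blackbox.create_async_generator("blackboxai", messages):
            print(chunk, end="")
    except RateLimitError:
        # Hourly window exhausted: back off instead of treating it as a payment failure.
        await asyncio.sleep(60)
```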

View file: g4f/Provider/Copilot.py

@@ -3,11 +3,10 @@ from __future__ import annotations
import os
import json
import asyncio
import base64
from urllib.parse import quote
try:
from curl_cffi.requests import Session
from curl_cffi.requests import AsyncSession
from curl_cffi import CurlWsFlag
has_curl_cffi = True
except ImportError:
@@ -18,14 +17,12 @@ try:
except ImportError:
has_nodriver = False
from .base_provider import AbstractProvider, ProviderModelMixin
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt_max_length
from .openai.har_file import get_headers, get_har_files
from ..typing import CreateResult, Messages, MediaListType
from ..typing import AsyncResult, Messages, MediaListType
from ..errors import MissingRequirementsError, NoValidHarFileError, MissingAuthError
from ..requests.raise_for_status import raise_for_status
from ..providers.response import BaseConversation, JsonConversation, RequestLogin, ImageResponse, FinishReason, SuggestedFollowups
from ..providers.asyncio import get_running_loop
from ..providers.response import BaseConversation, JsonConversation, RequestLogin, ImageResponse, FinishReason, SuggestedFollowups, TitleGeneration, Sources, SourceLink
from ..tools.media import merge_media
from ..requests import get_nodriver
from ..image import to_bytes, is_accepted_format
@@ -38,7 +35,7 @@ class Conversation(JsonConversation):
def __init__(self, conversation_id: str):
self.conversation_id = conversation_id
class Copilot(AbstractProvider, ProviderModelMixin):
class Copilot(AsyncGeneratorProvider, ProviderModelMixin):
label = "Microsoft Copilot"
url = "https://copilot.microsoft.com"
@@ -62,20 +59,20 @@ class Copilot(AbstractProvider, ProviderModelMixin):
_cookies: dict = None
@classmethod
def create_completion(
async def create_async_generator(
cls,
model: str,
messages: Messages,
stream: bool = False,
proxy: str = None,
timeout: int = 900,
timeout: int = 30,
prompt: str = None,
media: MediaListType = None,
conversation: BaseConversation = None,
return_conversation: bool = False,
api_key: str = None,
**kwargs
) -> CreateResult:
) -> AsyncResult:
if not has_curl_cffi:
raise MissingRequirementsError('Install or update "curl_cffi" package | pip install -U curl_cffi')
model = cls.get_model(model)
@@ -91,14 +88,13 @@ class Copilot(AbstractProvider, ProviderModelMixin):
debug.log(f"Copilot: {h}")
if has_nodriver:
yield RequestLogin(cls.label, os.environ.get("G4F_LOGIN_URL", ""))
get_running_loop(check_nested=True)
cls._access_token, cls._cookies = asyncio.run(get_access_token_and_cookies(cls.url, proxy))
cls._access_token, cls._cookies = await get_access_token_and_cookies(cls.url, proxy)
else:
raise h
websocket_url = f"{websocket_url}&accessToken={quote(cls._access_token)}"
headers = {"authorization": f"Bearer {cls._access_token}"}
with Session(
async with AsyncSession(
timeout=timeout,
proxy=proxy,
impersonate="chrome",
@@ -107,31 +103,17 @@ class Copilot(AbstractProvider, ProviderModelMixin):
) as session:
if cls._access_token is not None:
cls._cookies = session.cookies.jar if hasattr(session.cookies, "jar") else session.cookies
# if cls._access_token is None:
# try:
# url = "https://copilot.microsoft.com/cl/eus-sc/collect"
# headers = {
# "Accept": "application/x-clarity-gzip",
# "referrer": "https://copilot.microsoft.com/onboarding"
# }
# response = session.post(url, headers=headers, data=get_clarity())
# clarity_token = json.loads(response.text.split(" ", maxsplit=1)[-1])[0]["value"]
# debug.log(f"Copilot: Clarity Token: ...{clarity_token[-12:]}")
# except Exception as e:
# debug.log(f"Copilot: {e}")
# else:
# clarity_token = None
response = session.get("https://copilot.microsoft.com/c/api/user")
response = await session.get("https://copilot.microsoft.com/c/api/user")
if response.status_code == 401:
raise MissingAuthError("Status 401: Invalid access token")
raise_for_status(response)
response.raise_for_status()
user = response.json().get('firstName')
if user is None:
cls._access_token = None
debug.log(f"Copilot: User: {user or 'null'}")
if conversation is None:
response = session.post(cls.conversation_url)
raise_for_status(response)
response = await session.post(cls.conversation_url)
response.raise_for_status()
conversation_id = response.json().get("id")
conversation = Conversation(conversation_id)
if return_conversation:
@@ -146,28 +128,24 @@ class Copilot(AbstractProvider, ProviderModelMixin):
debug.log(f"Copilot: Use conversation: {conversation_id}")
uploaded_images = []
media, _ = [(None, None), *merge_media(media, messages)].pop()
if media:
for media, _ in merge_media(media, messages):
if not isinstance(media, str):
data = to_bytes(media)
response = session.post(
response = await session.post(
"https://copilot.microsoft.com/c/api/attachments",
headers={"content-type": is_accepted_format(data)},
headers={
"content-type": is_accepted_format(data),
"content-length": str(len(data)),
},
data=data
)
raise_for_status(response)
response.raise_for_status()
media = response.json().get("url")
uploaded_images.append({"type":"image", "url": media})
wss = session.ws_connect(cls.websocket_url)
# if clarity_token is not None:
# wss.send(json.dumps({
# "event": "challengeResponse",
# "token": clarity_token,
# "method":"clarity"
# }).encode(), CurlWsFlag.TEXT)
wss.send(json.dumps({"event":"setOptions","supportedCards":["weather","local","image","sports","video","ads","finance"],"ads":{"supportedTypes":["multimedia","product","tourActivity","propertyPromotion","text"]}}));
wss.send(json.dumps({
wss = await session.ws_connect(cls.websocket_url, timeout=3)
await wss.send(json.dumps({"event":"setOptions","supportedCards":["weather","local","image","sports","video","ads","finance"],"ads":{"supportedTypes":["multimedia","product","tourActivity","propertyPromotion","text"]}}));
await wss.send(json.dumps({
"event": "send",
"conversationId": conversation_id,
"content": [*uploaded_images, {
@@ -177,20 +155,20 @@ class Copilot(AbstractProvider, ProviderModelMixin):
"mode": "reasoning" if "Think" in model else "chat",
}).encode(), CurlWsFlag.TEXT)
is_started = False
done = False
msg = None
image_prompt: str = None
last_msg = None
sources = {}
try:
while True:
while not wss.closed:
try:
msg = wss.recv()[0]
msg = json.loads(msg)
msg = await asyncio.wait_for(wss.recv(), 3 if done else timeout)
msg = json.loads(msg[0])
except:
break
last_msg = msg
if msg.get("event") == "appendText":
is_started = True
yield msg.get("text")
elif msg.get("event") == "generatingImage":
image_prompt = msg.get("prompt")
@@ -198,20 +176,28 @@ class Copilot(AbstractProvider, ProviderModelMixin):
yield ImageResponse(msg.get("url"), image_prompt, {"preview": msg.get("thumbnailUrl")})
elif msg.get("event") == "done":
yield FinishReason("stop")
break
done = True
elif msg.get("event") == "suggestedFollowups":
yield SuggestedFollowups(msg.get("suggestions"))
break
elif msg.get("event") == "replaceText":
yield msg.get("text")
elif msg.get("event") == "titleUpdate":
yield TitleGeneration(msg.get("title"))
elif msg.get("event") == "citation":
sources[msg.get("url")] = msg
yield SourceLink(list(sources.keys()).index(msg.get("url")), msg.get("url"))
elif msg.get("event") == "error":
raise RuntimeError(f"Error: {msg}")
elif msg.get("event") not in ["received", "startMessage", "citation", "partCompleted"]:
elif msg.get("event") not in ["received", "startMessage", "partCompleted"]:
debug.log(f"Copilot Message: {msg}")
if not is_started:
if not done:
raise RuntimeError(f"Invalid response: {last_msg}")
if sources:
yield Sources(sources.values())
finally:
wss.close()
if not wss.closed:
await wss.close()
async def get_access_token_and_cookies(url: str, proxy: str = None, target: str = "ChatAI",):
browser, stop_browser = await get_nodriver(proxy=proxy, user_data_dir="copilot")
@@ -263,9 +249,4 @@ def readHAR(url: str):
if api_key is None:
raise NoValidHarFileError("No access token found in .har files")
return api_key, cookies
def get_clarity() -> bytes:
#{"e":["0.7.58",5,7284,4779,"n59ae4ieqq","aln5en","1upufhz",1,0,0],"a":[[7323,12,65,217,324],[7344,12,65,214,329],[7385,12,65,211,334],[7407,12,65,210,337],[7428,12,65,209,338],[7461,12,65,209,339],[7497,12,65,209,339],[7531,12,65,208,340],[7545,12,65,208,342],[11654,13,65,208,342],[11728,14,65,208,342],[11728,9,65,208,342,17535,19455,0,0,0,"Annehmen",null,"52w7wqv1r.8ovjfyrpu",1],[7284,4,1,393,968,393,968,0,0,231,310,939,0],[12063,0,2,147,3,4,4,18,5,1,10,79,25,15],[12063,36,6,[11938,0]]]}
body = base64.b64decode("H4sIAAAAAAAAA23RwU7DMAwG4HfJ2aqS2E5ibjxH1cMOnQYqYZvUTQPx7vyJRGGAemj01XWcP+9udg+j80MetDhSyrEISc5GrqrtZnmaTydHbrdUnSsWYT2u+8Obo0Ce/IQvaDBmjkwhUlKKIRNHmQgosqEArWPRDQMx90rxeUMPzB1j+UJvwNIxhTvsPcXyX1T+rizE4juK3mEEhpAUg/JvzW1/+U/tB1LATmhqotoiweMea50PLy2vui4LOY3XfD1dwnkor5fn/e18XBFgm6fHjSzZmCyV7d3aRByAEYextaTHEH3i5pgKGVP/s+DScE5PuLKIpW6FnCi1gY3Rbpqmj0/DI/+L7QEAAA==")
return body
return api_key, cookies
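
With `create_completion` gone, callers drive Copilot with `async for` and can branch on the new response types; a minimal sketch (the prompt is a placeholder):

```python
import asyncio
from g4f.Provider import Copilot
from g4f.providers.response import (
    Sources, SourceLink, SuggestedFollowups, TitleGeneration,
)

async def main() -> None:
    messages = [{"role": "user", "content": "Summarize today's AI news"}]
    async for chunk in Copilot.create_async_generator("Copilot", messages):
        if isinstance(chunk, TitleGeneration):
            print(f"[title] {chunk.title}")
        elif isinstance(chunk, (SourceLink, Sources)):
            print(str(chunk))           # formatted citation link(s)
        elif isinstance(chunk, SuggestedFollowups):
            print(f"[follow-ups] {chunk.suggestions}")
        elif isinstance(chunk, str):
            print(chunk, end="")        # plain text tokens

asyncio.run(main())
```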

View file: g4f/Provider/DuckDuckGo.py

@@ -3,28 +3,21 @@ from __future__ import annotations
import asyncio
try:
from duckduckgo_search import DDGS
from duckduckgo_search.exceptions import DuckDuckGoSearchException, RatelimitException, ConversationLimitException
from duckai import DuckAI
has_requirements = True
except ImportError:
has_requirements = False
try:
import nodriver
has_nodriver = True
except ImportError:
has_nodriver = False
from ..typing import AsyncResult, Messages
from ..requests import get_nodriver
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..typing import CreateResult, Messages
from .base_provider import AbstractProvider, ProviderModelMixin
from .helper import get_last_user_message
class DuckDuckGo(AsyncGeneratorProvider, ProviderModelMixin):
class DuckDuckGo(AbstractProvider, ProviderModelMixin):
label = "Duck.ai (duckduckgo_search)"
url = "https://duckduckgo.com/aichat"
api_base = "https://duckduckgo.com/duckchat/v1/"
working = False
working = has_requirements
supports_stream = True
supports_system_message = True
supports_message_history = True
@@ -32,7 +25,7 @@ class DuckDuckGo(AsyncGeneratorProvider, ProviderModelMixin):
default_model = "gpt-4o-mini"
models = [default_model, "meta-llama/Llama-3.3-70B-Instruct-Turbo", "claude-3-haiku-20240307", "o3-mini", "mistralai/Mistral-Small-24B-Instruct-2501"]
ddgs: DDGS = None
duck_ai: DuckAI = None
model_aliases = {
"gpt-4": "gpt-4o-mini",
@@ -42,44 +35,17 @@ class DuckDuckGo(AsyncGeneratorProvider, ProviderModelMixin):
}
@classmethod
async def create_async_generator(
def create_completion(
cls,
model: str,
messages: Messages,
proxy: str = None,
timeout: int = 60,
**kwargs
) -> AsyncResult:
) -> CreateResult:
if not has_requirements:
raise ImportError("duckduckgo_search is not installed. Install it with `pip install duckduckgo-search`.")
if cls.ddgs is None:
cls.ddgs = DDGS(proxy=proxy, timeout=timeout)
if has_nodriver:
await cls.nodriver_auth(proxy=proxy)
raise ImportError("duckai is not installed. Install it with `pip install -U duckai`.")
if cls.duck_ai is None:
cls.duck_ai = DuckAI(proxy=proxy, timeout=timeout)
model = cls.get_model(model)
for chunk in cls.ddgs.chat_yield(get_last_user_message(messages), model, timeout):
yield chunk
@classmethod
async def nodriver_auth(cls, proxy: str = None):
browser, stop_browser = await get_nodriver(proxy=proxy)
try:
page = browser.main_tab
def on_request(event: nodriver.cdp.network.RequestWillBeSent, page=None):
if cls.api_base in event.request.url:
if "X-Vqd-4" in event.request.headers:
cls.ddgs._chat_vqd = event.request.headers["X-Vqd-4"]
if "X-Vqd-Hash-1" in event.request.headers:
cls.ddgs._chat_vqd_hash = event.request.headers["X-Vqd-Hash-1"]
if "F-Fe-Version" in event.request.headers:
cls.ddgs._chat_xfe = event.request.headers["F-Fe-Version" ]
await page.send(nodriver.cdp.network.enable())
page.add_handler(nodriver.cdp.network.RequestWillBeSent, on_request)
page = await browser.get(cls.url)
while True:
if cls.ddgs._chat_vqd:
break
await asyncio.sleep(1)
await page.close()
finally:
stop_browser()
yield cls.duck_ai.chat(get_last_user_message(messages), model, timeout)
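
The provider is now a thin synchronous wrapper; a sketch of the underlying call, assuming `duckai` keeps the positional `DuckAI.chat(keywords, model, timeout)` signature used above. Note that `chat()` returns the whole reply at once, which is why the provider yields a single chunk:

```python
from duckai import DuckAI

duck = DuckAI(timeout=60)
reply = duck.chat("What is the capital of France?", "gpt-4o-mini", 60)
print(reply)
```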

View file: g4f/Provider/PollinationsAI.py

@@ -52,8 +52,8 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
audio_models = [default_audio_model]
extra_image_models = ["flux-pro", "flux-dev", "flux-schnell", "midjourney", "dall-e-3", "turbo"]
vision_models = [default_vision_model, "gpt-4o-mini", "openai", "openai-large", "searchgpt"]
extra_text_models = vision_models
_models_loaded = False
# https://github.com/pollinations/pollinations/blob/master/text.pollinations.ai/generateTextPortkey.js#L15
model_aliases = {
### Text Models ###
"gpt-4o-mini": "openai",
@@ -100,43 +100,32 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
cls.image_models = all_image_models
# Update of text models
text_response = requests.get("https://text.pollinations.ai/models")
text_response.raise_for_status()
models = text_response.json()
# Purpose of text models
cls.text_models = [
model.get("name")
for model in models
if "input_modalities" in model and "text" in model["input_modalities"]
]
# Purpose of audio models
cls.audio_models = {
model.get("name"): model.get("voices")
for model in models
if model.get("audio")
if "output_modalities" in model and "audio" in model["output_modalities"]
}
# Create a set of unique text models starting with default model
unique_text_models = {cls.default_model}
unique_text_models = cls.text_models.copy()
# Add models from vision_models
unique_text_models.update(cls.vision_models)
unique_text_models.extend(cls.vision_models)
# Add models from the API response
for model in models:
model_name = model.get("name")
if model_name and "input_modalities" in model and "text" in model["input_modalities"]:
unique_text_models.add(model_name)
unique_text_models.append(model_name)
# Convert to list and update text_models
cls.text_models = list(unique_text_models)
# Update extra_text_models with unique vision models
cls.extra_text_models = [model for model in cls.vision_models if model != cls.default_model]
cls.text_models = list(dict.fromkeys(unique_text_models))
cls._models_loaded = True
except Exception as e:
@@ -148,12 +137,10 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
debug.error(f"Failed to fetch models: {e}")
# Return unique models across all categories
all_models = set(cls.text_models)
all_models.update(cls.image_models)
all_models.update(cls.audio_models.keys())
result = list(all_models)
return result
all_models = cls.text_models.copy()
all_models.extend(cls.image_models)
all_models.extend(cls.audio_models.keys())
return list(dict.fromkeys(all_models))
@classmethod
async def create_async_generator(
@@ -265,15 +252,15 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
query = "&".join(f"{k}={quote_plus(str(v))}" for k, v in params.items() if v is not None)
prompt = quote_plus(prompt)[:2048-256-len(query)]
url = f"{cls.image_api_endpoint}prompt/{prompt}?{query}"
def get_image_url(i: int = 0, seed: Optional[int] = None):
if i == 0:
def get_image_url(i: int, seed: Optional[int] = None):
if i == 1:
if not cache and seed is None:
seed = random.randint(0, 2**32)
else:
seed = random.randint(0, 2**32)
return f"{url}&seed={seed}" if seed else url
async with ClientSession(headers=DEFAULT_HEADERS, connector=get_connector(proxy=proxy)) as session:
async def get_image(i: int = 0, seed: Optional[int] = None):
async def get_image(i: int, seed: Optional[int] = None):
async with session.get(get_image_url(i, seed), allow_redirects=False) as response:
try:
await raise_for_status(response)
@@ -343,6 +330,8 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
if line[6:].startswith(b"[DONE]"):
break
result = json.loads(line[6:])
if "error" in result:
raise ResponseError(result["error"].get("message", result["error"]))
if "usage" in result:
yield Usage(**result["usage"])
choices = result.get("choices", [{}])
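
The recurring `list(dict.fromkeys(...))` idiom is the reason for swapping sets out: it de-duplicates while preserving insertion order, so curated models keep their position in the list. A quick illustration:

```python
models = ["openai", "flux", "openai", "midjourney", "flux"]

print(list(set(models)))            # order is arbitrary, e.g. ['midjourney', 'flux', 'openai']
print(list(dict.fromkeys(models)))  # ['openai', 'flux', 'midjourney'], first occurrence wins
```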

View file: g4f/Provider/hf_space/LMArenaProvider.py (new file)

@@ -0,0 +1,251 @@
from __future__ import annotations
import json
import uuid
import asyncio
from ...typing import AsyncResult, Messages
from ...requests import StreamSession, raise_for_status
from ...providers.response import FinishReason
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin
from ..helper import format_prompt
from ... import debug
class LMArenaProvider(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
label = "LM Arena"
url = "https://lmarena.ai"
api_endpoint = "/queue/join?"
working = True
default_model = "chatgpt-4o-latest-20250326"
model_aliases = {"gpt-4o": default_model}
models = [
default_model,
"gpt-4.1-2025-04-14",
"gemini-2.5-pro-exp-03-25",
"llama-4-maverick-03-26-experimental",
"grok-3-preview-02-24",
"claude-3-7-sonnet-20250219",
"claude-3-7-sonnet-20250219-thinking-32k",
"deepseek-v3-0324",
"llama-4-maverick-17b-128e-instruct",
"gpt-4.1-mini-2025-04-14",
"gpt-4.1-nano-2025-04-14",
"gemini-2.0-flash-thinking-exp-01-21",
"gemini-2.0-flash-001",
"gemini-2.0-flash-lite-preview-02-05",
"gemma-3-27b-it",
"gemma-3-12b-it",
"gemma-3-4b-it",
"deepseek-r1",
"claude-3-5-sonnet-20241022",
"o3-mini",
"llama-3.3-70b-instruct",
"gpt-4o-mini-2024-07-18",
"gpt-4o-2024-11-20",
"gpt-4o-2024-08-06",
"gpt-4o-2024-05-13",
"command-a-03-2025",
"qwq-32b",
"p2l-router-7b",
"claude-3-5-haiku-20241022",
"claude-3-5-sonnet-20240620",
"doubao-1.5-pro-32k-250115",
"doubao-1.5-vision-pro-32k-250115",
"mistral-small-24b-instruct-2501",
"phi-4",
"amazon-nova-pro-v1.0",
"amazon-nova-lite-v1.0",
"amazon-nova-micro-v1.0",
"cobalt-exp-beta-v3",
"cobalt-exp-beta-v4",
"qwen-max-2025-01-25",
"qwen-plus-0125-exp",
"qwen2.5-vl-32b-instruct",
"qwen2.5-vl-72b-instruct",
"gemini-1.5-pro-002",
"gemini-1.5-flash-002",
"gemini-1.5-flash-8b-001",
"gemini-1.5-pro-001",
"gemini-1.5-flash-001",
"llama-3.1-405b-instruct-bf16",
"llama-3.3-nemotron-49b-super-v1",
"llama-3.1-nemotron-ultra-253b-v1",
"llama-3.1-nemotron-70b-instruct",
"llama-3.1-70b-instruct",
"llama-3.1-8b-instruct",
"hunyuan-standard-2025-02-10",
"hunyuan-large-2025-02-10",
"hunyuan-standard-vision-2024-12-31",
"hunyuan-turbo-0110",
"hunyuan-turbos-20250226",
"mistral-large-2411",
"pixtral-large-2411",
"mistral-large-2407",
"llama-3.1-nemotron-51b-instruct",
"granite-3.1-8b-instruct",
"granite-3.1-2b-instruct",
"step-2-16k-exp-202412",
"step-2-16k-202502",
"step-1o-vision-32k-highres",
"yi-lightning",
"glm-4-plus",
"glm-4-plus-0111",
"jamba-1.5-large",
"jamba-1.5-mini",
"gemma-2-27b-it",
"gemma-2-9b-it",
"gemma-2-2b-it",
"eureka-chatbot",
"claude-3-haiku-20240307",
"claude-3-sonnet-20240229",
"claude-3-opus-20240229",
"nemotron-4-340b",
"llama-3-70b-instruct",
"llama-3-8b-instruct",
"qwen2.5-plus-1127",
"qwen2.5-coder-32b-instruct",
"qwen2.5-72b-instruct",
"qwen-max-0919",
"qwen-vl-max-1119",
"qwen-vl-max-0809",
"llama-3.1-tulu-3-70b",
"olmo-2-0325-32b-instruct",
"gpt-3.5-turbo-0125",
"reka-core-20240904",
"reka-flash-20240904",
"c4ai-aya-expanse-32b",
"c4ai-aya-expanse-8b",
"c4ai-aya-vision-32b",
"command-r-plus-08-2024",
"command-r-08-2024",
"codestral-2405",
"mixtral-8x22b-instruct-v0.1",
"mixtral-8x7b-instruct-v0.1",
"pixtral-12b-2409",
"ministral-8b-2410"]
_args: dict = None
@staticmethod
def _random_session_hash():
return str(uuid.uuid4())
@classmethod
def _build_payloads(cls, model_id: str, session_hash: str, messages: Messages, max_tokens: int, temperature: float, top_p: float):
first_payload = {
"data": [
None,
model_id,
{"text": format_prompt(messages), "files": []},
{
"text_models": [model_id],
"all_text_models": [model_id],
"vision_models": [],
"all_vision_models": [],
"image_gen_models": [],
"all_image_gen_models": [],
"search_models": [],
"all_search_models": [],
"models": [model_id],
"all_models": [model_id],
"arena_type": "text-arena"
}
],
"event_data": None,
"fn_index": 117,
"trigger_id": 159,
"session_hash": session_hash
}
second_payload = {
"data": [],
"event_data": None,
"fn_index": 118,
"trigger_id": 159,
"session_hash": session_hash
}
third_payload = {
"data": [None, temperature, top_p, max_tokens],
"event_data": None,
"fn_index": 119,
"trigger_id": 159,
"session_hash": session_hash
}
return first_payload, second_payload, third_payload
@classmethod
async def create_async_generator(
cls, model: str, messages: Messages,
max_tokens: int = 2048,
temperature: float = 0.7,
top_p: float = 1,
proxy: str = None,
**kwargs
) -> AsyncResult:
if not model:
model = cls.default_model
if model in cls.model_aliases:
model = cls.model_aliases[model]
session_hash = cls._random_session_hash()
headers = {
"Content-Type": "application/json",
"Accept": "application/json"
}
async with StreamSession(impersonate="chrome", headers=headers) as session:
first_payload, second_payload, third_payload = cls._build_payloads(model, session_hash, messages, max_tokens, temperature, top_p)
# Long stream GET
async def long_stream():
# POST 1
async with session.post(f"{cls.url}{cls.api_endpoint}", json=first_payload, proxy=proxy) as response:
await raise_for_status(response)
# POST 2
async with session.post(f"{cls.url}{cls.api_endpoint}", json=second_payload, proxy=proxy) as response:
await raise_for_status(response)
# POST 3
async with session.post(f"{cls.url}{cls.api_endpoint}", json=third_payload, proxy=proxy) as response:
await raise_for_status(response)
stream_url = f"{cls.url}/queue/data?session_hash={session_hash}"
async with session.get(stream_url, headers={"Accept": "text/event-stream"}, proxy=proxy) as response:
await raise_for_status(response)
text_position = 0
count = 0
async for line in response.iter_lines():
if line.startswith(b"data: "):
try:
msg = json.loads(line[6:])
except Exception as e:
raise RuntimeError(f"Failed to decode JSON from stream: {line}", e)
if msg.get("msg") == "process_generating":
data = msg["output"]["data"][1]
if data:
data = data[0]
if len(data) > 2:
if isinstance(data[2], list):
data[2] = data[2][-1]
content = data[2][text_position:].rstrip("▌")
if content:
count += 1
yield count, content
text_position += len(content)
elif msg.get("msg") == "close_stream":
break
elif msg.get("msg") not in ("process_completed", "process_starts", "estimation"):
debug.log(f"Unexpected message: {msg}")
count = 0
async for count, chunk in long_stream():
yield chunk
if count == 0:
await asyncio.sleep(10)
async for count, chunk in long_stream():
yield chunk
if count == 0:
raise RuntimeError("No response from server.")
if count == max_tokens:
yield FinishReason("length")
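
A minimal sketch of exercising the new provider directly; `"gpt-4o"` resolves through `model_aliases` to the default model, and the prompt is a placeholder:

```python
import asyncio
from g4f.Provider.hf_space import LMArenaProvider

async def main() -> None:
    messages = [{"role": "user", "content": "Explain beam search in two sentences."}]
    async for chunk in LMArenaProvider.create_async_generator("gpt-4o", messages):
        if isinstance(chunk, str):  # skip FinishReason and other marker objects
            print(chunk, end="", flush=True)

asyncio.run(main())
```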

View file: g4f/Provider/hf_space/__init__.py

@@ -10,7 +10,7 @@ from .BlackForestLabs_Flux1Dev import BlackForestLabs_Flux1Dev
from .BlackForestLabs_Flux1Schnell import BlackForestLabs_Flux1Schnell
from .CohereForAI_C4AI_Command import CohereForAI_C4AI_Command
from .DeepseekAI_JanusPro7b import DeepseekAI_JanusPro7b
from .G4F import G4F
from .LMArenaProvider import LMArenaProvider
from .Microsoft_Phi_4 import Microsoft_Phi_4
from .Qwen_QVQ_72B import Qwen_QVQ_72B
from .Qwen_Qwen_2_5 import Qwen_Qwen_2_5
@@ -33,7 +33,7 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
BlackForestLabs_Flux1Schnell,
CohereForAI_C4AI_Command,
DeepseekAI_JanusPro7b,
G4F,
LMArenaProvider,
Microsoft_Phi_4,
Qwen_QVQ_72B,
Qwen_Qwen_2_5,

View file: g4f/Provider/needs_auth/CopilotAccount.py

@@ -10,10 +10,7 @@ from ...typing import AsyncResult, Messages
from ...errors import NoValidHarFileError
from ... import debug
def cookies_to_dict():
return Copilot._cookies if isinstance(Copilot._cookies, dict) else {c.name: c.value for c in Copilot._cookies}
class CopilotAccount(AsyncAuthedProvider, Copilot):
class CopilotAccount(Copilot, AsyncAuthedProvider):
needs_auth = True
use_nodriver = True
parent = "Copilot"
@@ -23,17 +20,17 @@ class CopilotAccount(AsyncAuthedProvider, Copilot):
@classmethod
async def on_auth_async(cls, proxy: str = None, **kwargs) -> AsyncIterator:
try:
Copilot._access_token, Copilot._cookies = readHAR(cls.url)
cls._access_token, cls._cookies = readHAR(cls.url)
except NoValidHarFileError as h:
debug.log(f"Copilot: {h}")
if has_nodriver:
yield RequestLogin(cls.label, os.environ.get("G4F_LOGIN_URL", ""))
Copilot._access_token, Copilot._cookies = await get_access_token_and_cookies(cls.url, proxy)
cls._access_token, cls._cookies = await get_access_token_and_cookies(cls.url, proxy)
else:
raise h
yield AuthResult(
api_key=Copilot._access_token,
cookies=cookies_to_dict()
api_key=cls._access_token,
cookies=cls.cookies_to_dict()
)
@classmethod
@@ -44,9 +41,12 @@ class CopilotAccount(AsyncAuthedProvider, Copilot):
auth_result: AuthResult,
**kwargs
) -> AsyncResult:
Copilot._access_token = getattr(auth_result, "api_key")
Copilot._cookies = getattr(auth_result, "cookies")
Copilot.needs_auth = cls.needs_auth
for chunk in Copilot.create_completion(model, messages, **kwargs):
cls._access_token = getattr(auth_result, "api_key")
cls._cookies = getattr(auth_result, "cookies")
async for chunk in cls.create_async_generator(model, messages, **kwargs):
yield chunk
auth_result.cookies = cookies_to_dict()
auth_result.cookies = cls.cookies_to_dict()
@classmethod
def cookies_to_dict(cls):
return cls._cookies if isinstance(cls._cookies, dict) else {c.name: c.value for c in cls._cookies}
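
The swapped base order matters because Python resolves attributes left to right along the MRO; a toy illustration (class names reused only for the analogy):

```python
class Copilot:
    supports_stream = True

class AsyncAuthedProvider:
    supports_stream = False

class CopilotAccount(Copilot, AsyncAuthedProvider):
    pass

# MRO: CopilotAccount -> Copilot -> AsyncAuthedProvider -> object
print(CopilotAccount.supports_stream)  # True: Copilot's attributes win the lookup
```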

View file: g4f/Provider/needs_auth/MicrosoftDesigner.py

@@ -146,7 +146,7 @@ async def get_access_token_and_user_agent(url: str, proxy: str = None):
browser, stop_browser = await get_nodriver(proxy=proxy, user_data_dir="designer")
try:
page = await browser.get(url)
user_agent = await page.evaluate("navigator.userAgent")
user_agent = await page.evaluate("navigator.userAgent", return_by_value=True)
access_token = None
while access_token is None:
access_token = await page.evaluate("""

View file: g4f/Provider/needs_auth/OpenaiChat.py

@@ -489,6 +489,16 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
if "type" in line:
if line["type"] == "title_generation":
yield TitleGeneration(line["title"])
fields.p = line.get("p", fields.p)
if fields.p.startswith("/message/content/thoughts"):
if fields.p.endswith("/content"):
if fields.thoughts_summary:
yield Reasoning(token="", status=fields.thoughts_summary)
fields.thoughts_summary = ""
yield Reasoning(token=line.get("v"))
elif fields.p.endswith("/summary"):
fields.thoughts_summary += line.get("v")
return
if "v" in line:
v = line.get("v")
if isinstance(v, str) and fields.is_recipient:
@@ -502,7 +512,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
for entry in [p.get("entries") for p in m.get("v")]:
for link in entry:
sources.add_source(link)
elif re.match(r"^/message/metadata/content_references/\d+$", m.get("p")):
elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+$", m.get("p")):
sources.add_source(m.get("v"))
elif m.get("p") == "/message/metadata/finished_text":
fields.is_thinking = False
@@ -578,11 +588,14 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
cls._set_api_key(api_key)
else:
try:
await get_request_config(cls.request_config, proxy)
cls.request_config = await get_request_config(cls.request_config, proxy)
if cls.request_config is None:
cls.request_config = RequestConfig()
cls._create_request_args(cls.request_config.cookies, cls.request_config.headers)
if cls.request_config.access_token is not None or cls.needs_auth:
if not cls._set_api_key(cls.request_config.access_token):
raise NoValidHarFileError(f"Access token is not valid: {cls.request_config.access_token}")
if cls.needs_auth and cls.request_config.access_token is None:
raise NoValidHarFileError(f"Missing access token")
if not cls._set_api_key(cls.request_config.access_token):
raise NoValidHarFileError(f"Access token is not valid: {cls.request_config.access_token}")
except NoValidHarFileError:
if has_nodriver:
if cls._api_key is None:
@@ -622,15 +635,18 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
await page.send(nodriver.cdp.network.enable())
page.add_handler(nodriver.cdp.network.RequestWillBeSent, on_request)
page = await browser.get(cls.url)
user_agent = await page.evaluate("window.navigator.userAgent")
await page.select("#prompt-textarea", 240)
await page.evaluate("document.getElementById('prompt-textarea').innerText = 'Hello'")
await page.select("[data-testid=\"send-button\"]", 30)
user_agent = await page.evaluate("window.navigator.userAgent", return_by_value=True)
while not await page.evaluate("document.getElementById('prompt-textarea').id"):
await asyncio.sleep(1)
while not await page.evaluate("document.querySelector('[data-testid=\"send-button\"]').type"):
await asyncio.sleep(1)
await page.evaluate("document.querySelector('[data-testid=\"send-button\"]').click()")
while True:
body = await page.evaluate("JSON.stringify(window.__remixContext)")
body = await page.evaluate("JSON.stringify(window.__remixContext)", return_by_value=True)
if hasattr(body, "value"):
body = body.value
if body:
match = re.search(r'"accessToken":"(.*?)"', body)
match = re.search(r'"accessToken":"(.+?)"', body)
if match:
cls._api_key = match.group(1)
break
@@ -674,6 +690,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
@classmethod
def _set_api_key(cls, api_key: str):
cls._api_key = api_key
if api_key:
exp = api_key.split(".")[1]
exp = (exp + "=" * (4 - len(exp) % 4)).encode()
@@ -681,11 +698,11 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
debug.log(f"OpenaiChat: API key expires at\n {cls._expires} we have:\n {time.time()}")
if time.time() > cls._expires:
debug.log(f"OpenaiChat: API key is expired")
return False
else:
cls._api_key = api_key
cls._headers["authorization"] = f"Bearer {api_key}"
return True
return False
return True
@classmethod
def _update_cookie_header(cls):
@@ -704,6 +721,8 @@ class Conversation(JsonConversation):
self.parent_message_id = message_id if parent_message_id is None else parent_message_id
self.user_id = user_id
self.is_thinking = is_thinking
self.p = None
self.thoughts_summary = ""
def get_cookies(
urls: Optional[Iterator[str]] = None
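
The tightened validation decodes the JWT `exp` claim before accepting a key, as the `_set_api_key` hunk shows. A self-contained sketch of the same check (padding arithmetic simplified):

```python
import base64
import json
import time

def token_is_fresh(api_key: str) -> bool:
    # A JWT is three base64url segments; the middle one carries the claims.
    payload = api_key.split(".")[1]
    payload += "=" * (-len(payload) % 4)  # restore the padding JWTs strip
    claims = json.loads(base64.urlsafe_b64decode(payload))
    return time.time() < claims["exp"]
```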

View file: g4f/Provider/openai/har_file.py

@@ -86,8 +86,6 @@ def readHAR(request_config: RequestConfig):
request_config.cookies = {c['name']: c['value'] for c in v['request']['cookies']}
except Exception as e:
debug.log(f"Error on read headers: {e}")
if request_config.proof_token is None:
raise NoValidHarFileError("No proof_token found in .har files")
def get_headers(entry) -> dict:
return {h['name'].lower(): h['value'] for h in entry['request']['headers'] if h['name'].lower() not in ['content-length', 'cookie'] and not h['name'].startswith(':')}
@@ -152,8 +150,9 @@ def getN() -> str:
return base64.b64encode(timestamp.encode()).decode()
async def get_request_config(request_config: RequestConfig, proxy: str) -> RequestConfig:
if request_config.proof_token is None:
readHAR(request_config)
readHAR(request_config)
if request_config.arkose_request is not None:
request_config.arkose_token = await sendRequest(genArkReq(request_config.arkose_request), proxy)
if request_config.proof_token is None:
raise NoValidHarFileError("No proof_token found in .har files")
return request_config

View file: g4f/gui/client/static/css/style.css

@@ -1533,6 +1533,7 @@ form textarea {
.chat-top-panel .convo-title {
margin: 0 10px;
font-size: 14px;
font-weight: bold;
text-align: center;
flex: 1;
}
@@ -1581,7 +1582,6 @@ form textarea {
}
.chat-body {
flex: 1;
padding: 10px;
overflow-y: auto;
display: flex;
flex-direction: column;
@@ -1613,11 +1613,25 @@ form textarea {
border: 1px dashed var(--conversations);
box-shadow: 1px 1px 1px 0px rgba(0,0,0,0.75);
}
.white .chat-footer .send-buttons button {
.suggestions {
display: flex;
gap: 6px;
flex-wrap: wrap;
}
.suggestions .suggestion {
background: var(--blur-bg);
color: var(--colour-3);
padding: 10px;
margin: 0 2px 0 4px;
border-radius: 5px;
cursor: pointer;
border: 1px dashed var(--conversations);
}
.white .chat-footer .send-buttons button, .white .suggestions .suggestion {
border-style: solid;
border-color: var(--blur-border);
}
.chat-footer .send-buttons button:hover {
.chat-footer .send-buttons button:hover, .suggestions .suggestion:hover {
border-style: solid;
box-shadow: none;
background-color: var(--button-hover);

View file: g4f/gui/client/static/js/chat.v1.js

@@ -48,6 +48,7 @@ let wakeLock = null;
let countTokensEnabled = true;
let reloadConversation = true;
let privateConversation = null;
let suggestions = null;
userInput.addEventListener("blur", () => {
document.documentElement.scrollTop = 0;
@@ -933,9 +934,7 @@ async function add_message_chunk(message, message_id, provider, scroll, finish_m
} else if (message.type == "login") {
update_message(content_map, message_id, markdown_render(message.login), scroll);
} else if (message.type == "finish") {
if (!finish_storage[message_id]) {
finish_storage[message_id] = message.finish;
}
finish_storage[message_id] = message.finish;
} else if (message.type == "usage") {
usage_storage[message_id] = message.usage;
} else if (message.type == "reasoning") {
@@ -958,6 +957,8 @@ async function add_message_chunk(message, message_id, provider, scroll, finish_m
Object.entries(message.parameters).forEach(([key, value]) => {
parameters_storage[provider][key] = value;
});
} else if (message.type == "suggestions") {
suggestions = message.suggestions;
}
}
@@ -998,6 +999,9 @@ const ask_gpt = async (message_id, message_index = -1, regenerate = false, provi
if (scroll) {
await lazy_scroll_to_bottom();
}
let suggestions_el = chatBody.querySelector('.suggestions');
suggestions_el ? suggestions_el.remove() : null;
if (countTokensEnabled) {
let count_total = chatBody.querySelector('.count_total');
count_total ? count_total.parentElement.removeChild(count_total) : null;
@@ -1507,7 +1511,7 @@ const load_conversation = async (conversation, scroll=true) => {
} else if (reason == "stop" && buffer.split("```").length - 1 % 2 === 1) {
reason = "length";
}
if (reason == "length" || reason == "max_tokens" || reason == "error") {
if (reason != "stop") {
actions.push("continue")
}
}
@@ -1578,8 +1582,23 @@ const load_conversation = async (conversation, scroll=true) => {
</div>
`);
});
chatBody.innerHTML = elements.join("");
if (countTokensEnabled && window.GPTTokenizer_cl100k_base) {
if (suggestions) {
const suggestions_el = document.createElement("div");
suggestions_el.classList.add("suggestions");
suggestions.forEach((suggestion)=> {
const el = document.createElement("button");
el.classList.add("suggestion");
el.innerHTML = `<span>${escapeHtml(suggestion)}</span> <i class="fa-solid fa-turn-up"></i>`;
el.onclick = async () => {
await handle_ask(true, suggestion);
}
suggestions_el.appendChild(el);
});
chatBody.appendChild(suggestions_el);
suggestions = null;
} else if (countTokensEnabled && window.GPTTokenizer_cl100k_base) {
const has_media = messages.filter((item)=>Array.isArray(item.content)).length > 0;
if (!has_media) {
const filtered = prepare_messages(messages, null, true, false);
@@ -1587,13 +1606,15 @@ const load_conversation = async (conversation, scroll=true) => {
last_model = last_model?.startsWith("gpt-3") ? "gpt-3.5-turbo" : "gpt-4"
let count_total = GPTTokenizer_cl100k_base?.encodeChat(filtered, last_model).length
if (count_total > 0) {
elements.push(`<div class="count_total">(${count_total} total tokens)</div>`);
const count_total_el = document.createElement("div");
count_total_el.classList.add("count_total");
count_total_el.innerText = `(${count_total} total tokens)`;
chatBody.appendChild(count_total_el);
}
}
}
}
chatBody.innerHTML = elements.join("");
await register_message_buttons();
highlight(chatBody);
regenerate_button.classList.remove("regenerate-hidden");
@@ -2484,8 +2505,14 @@ async function on_api() {
const hide_systemPrompt = document.getElementById("hide-systemPrompt")
const slide_systemPrompt_icon = document.querySelector(".slide-header i");
document.querySelector(".slide-header")?.addEventListener("click", () => {
const checked = slide_systemPrompt_icon.classList.contains("fa-angles-up");
chatPrompt.classList[checked ? "add": "remove"]("hidden");
slide_systemPrompt_icon.classList[checked ? "remove": "add"]("fa-angles-up");
slide_systemPrompt_icon.classList[checked ? "add": "remove"]("fa-angles-down");
});
if (hide_systemPrompt.checked) {
chatPrompt.classList.add("hidden");
slide_systemPrompt_icon.click();
}
hide_systemPrompt.addEventListener('change', async (event) => {
if (event.target.checked) {
@@ -2494,12 +2521,6 @@ async function on_api() {
chatPrompt.classList.remove("hidden");
}
});
document.querySelector(".slide-header")?.addEventListener("click", () => {
const checked = slide_systemPrompt_icon.classList.contains("fa-angles-up");
chatPrompt.classList[checked ? "add": "remove"]("hidden");
slide_systemPrompt_icon.classList[checked ? "remove": "add"]("fa-angles-up");
slide_systemPrompt_icon.classList[checked ? "add": "remove"]("fa-angles-down");
});
const userInputHeight = document.getElementById("message-input-height");
if (userInputHeight) {
if (userInputHeight.value) {

View file: g4f/gui/server/api.py

@@ -215,6 +215,8 @@ class Api:
yield self._format_json("content", chunk.to_string())
elif isinstance(chunk, AudioResponse):
yield self._format_json("content", str(chunk))
elif isinstance(chunk, SuggestedFollowups):
yield self._format_json("suggestions", chunk.suggestions)
elif isinstance(chunk, DebugResponse):
yield self._format_json("log", chunk.log)
elif isinstance(chunk, RawResponse):
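
On the wire this adds one more JSON event to the response stream. A sketch of the shape the GUI consumes, inferred from the `chat.v1.js` handler above, which reads `message.type` and `message.suggestions`:

```python
# Hypothetical payload for a "suggestions" event; the field names match what the
# frontend handler reads, while the suggestion texts are invented examples.
event = {
    "type": "suggestions",
    "suggestions": ["Tell me more", "Show an example", "Summarize that"],
}
```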

View file: g4f/image/copy_images.py

@@ -28,7 +28,7 @@ def get_media_extension(media: str) -> str:
extension = os.path.splitext(path)[1]
if not extension:
extension = os.path.splitext(media)[1]
if not extension:
if not extension or len(extension) > 4:
return ""
if extension[1:] not in EXTENSIONS_MAP:
raise ValueError(f"Unsupported media extension: {extension} in: {media}")
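
A quick illustration of the new guard: names with no real suffix, or with an implausibly long one, now fall through to the empty string instead of raising `ValueError`:

```python
import os

for name in ["photo.jpg", "data.backup", "https://host/image"]:
    ext = os.path.splitext(name)[1]
    print(repr(ext if ext and len(ext) <= 4 else ""))
# '.jpg'  -- kept (4 chars)
# ''      -- '.backup' is longer than 4 chars
# ''      -- no extension at all
```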

View file: g4f/models.py

@@ -18,7 +18,6 @@ from .Provider import (
Free2GPT,
FreeGpt,
HuggingSpace,
G4F,
Grok,
DeepseekAI_JanusPro7b,
Glider,
@@ -535,7 +534,7 @@ deepseek_r1 = Model(
janus_pro_7b = VisionModel(
name = DeepseekAI_JanusPro7b.default_model,
base_provider = 'DeepSeek',
best_provider = IterListProvider([DeepseekAI_JanusPro7b, G4F])
best_provider = IterListProvider([DeepseekAI_JanusPro7b])
)
### x.ai ###
@@ -985,7 +984,7 @@ demo_models = {
llama_3_2_11b.name: [llama_3_2_11b, [HuggingChat]],
qwen_2_vl_7b.name: [qwen_2_vl_7b, [HuggingFaceAPI]],
deepseek_r1.name: [deepseek_r1, [HuggingFace, PollinationsAI]],
janus_pro_7b.name: [janus_pro_7b, [HuggingSpace, G4F]],
janus_pro_7b.name: [janus_pro_7b, [HuggingSpace]],
command_r.name: [command_r, [HuggingSpace]],
command_r_plus.name: [command_r_plus, [HuggingSpace]],
command_r7b.name: [command_r7b, [HuggingSpace]],

View file: g4f/providers/base_provider.py

@@ -425,9 +425,14 @@ class AsyncAuthedProvider(AsyncGeneratorProvider, AuthFileMixin):
if auth_result is not None:
cache_file.parent.mkdir(parents=True, exist_ok=True)
try:
cache_file.write_text(json.dumps(auth_result.get_dict()))
except TypeError:
raise RuntimeError(f"Failed to save: {auth_result.get_dict()}")
def toJSON(obj):
if hasattr(obj, "get_dict"):
return obj.get_dict()
return str(obj)
with cache_file.open("w") as cache_file:
json.dump(auth_result, cache_file, default=toJSON)
except TypeError as e:
raise RuntimeError(f"Failed to save: {auth_result.get_dict()}\n{type(e).__name__}: {e}")
elif cache_file.exists():
cache_file.unlink()
@@ -443,7 +448,9 @@ class AsyncAuthedProvider(AsyncGeneratorProvider, AuthFileMixin):
try:
if cache_file.exists():
with cache_file.open("r") as f:
auth_result = AuthResult(**json.load(f))
data = f.read()
if data:
auth_result = AuthResult(**json.loads(data))
else:
raise MissingAuthError
yield from to_sync_generator(cls.create_authed(model, messages, auth_result, **kwargs))
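
The `default=` hook is the standard `json` escape hatch: it is called for any object the encoder cannot serialize, so nested response objects degrade to their dict form (or a string) instead of aborting the cache write. A self-contained sketch:

```python
import json

class FakeAuthResult:  # stand-in for g4f's AuthResult
    def get_dict(self):
        return {"api_key": "...", "cookies": {"session": "abc"}}

def to_json(obj):
    # Mirrors the commit's fallback: prefer get_dict(), else stringify.
    if hasattr(obj, "get_dict"):
        return obj.get_dict()
    return str(obj)

print(json.dumps(FakeAuthResult(), default=to_json))
# {"api_key": "...", "cookies": {"session": "abc"}}
```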

View file: g4f/providers/response.py

@@ -240,6 +240,15 @@ class Sources(ResponseType):
for idx, link in enumerate(self.list)
]))
class SourceLink(ResponseType):
def __init__(self, title: str, url: str) -> None:
self.title = title
self.url = url
def __str__(self) -> str:
title = f"[{self.title}]"
return f" {format_link(self.url, title)}"
class YouTube(HiddenResponse):
def __init__(self, ids: List[str]) -> None:
"""Initialize with a list of YouTube IDs."""

View file: g4f/requests/__init__.py

@@ -103,7 +103,7 @@ async def get_args_from_nodriver(
else:
await browser.cookies.set_all(get_cookie_params_from_dict(cookies, url=url, domain=domain))
page = await browser.get(url)
user_agent = str(await page.evaluate("window.navigator.userAgent"))
user_agent = await page.evaluate("window.navigator.userAgent", return_by_value=True)
await page.wait_for("body:not(.no-js)", timeout=timeout)
if wait_for is not None:
await page.wait_for(wait_for, timeout=timeout)