Mirror of https://github.com/xtekky/gpt4free.git
* **Provider/Blackbox.py**
* Raise `RateLimitError` when the `"You have reached your request limit for the hour"` substring is detected (see the first sketch after this list)
* **Provider/Copilot.py**
* Convert class to `AsyncGeneratorProvider`; rename `create_completion` → `create_async_generator`
* Swap `curl_cffi.requests.Session` for `AsyncSession`; reduce default timeout to **30 s**
* Fully async websocket flow (`await session.ws_connect`, `await wss.send/recv/close`)
* Emit new response types: `TitleGeneration`, `SourceLink`, aggregated `Sources`
* Track request completion with a `done` flag; collect citations in a `sources` dict (see the Copilot sketch after this list)
* **Provider/DuckDuckGo.py**
* Replace `duckduckgo_search.DDGS` with `duckai.DuckAI`
* Change base class to `AbstractProvider`; drop nodriver‑based auth
* **Provider/PollinationsAI.py**
* Re‑build text/audio model lists ensuring uniqueness; remove unused `extra_text_models`
* Fix image seed logic (`i==1` for first retry); propagate streaming `error` field via `ResponseError`
* **Provider/hf_space**
* **New file** `LMArenaProvider.py` implementing an async queue/stream client
* Register `LMArenaProvider` in `hf_space/__init__.py`; delete the `G4F` import
* **Provider/needs_auth/CopilotAccount.py**
* Change the inheritance order to `Copilot, AsyncAuthedProvider`
* Refactor token and cookie propagation; add a `cookies_to_dict` helper (sketched below)
* **Provider/needs_auth/OpenaiChat.py**
* Parse reasoning thoughts/summary; yield `Reasoning` responses (sketched below)
* Tighten access‑token validation and nodriver JS evaluations (`return_by_value`)
* Extend `Conversation` with `p` and `thoughts_summary`
* **providers/response.py**
* Add a `SourceLink` response class returning a single formatted citation link (sketched below)
* **providers/base_provider.py**
* Serialize `AuthResult` with a custom `json.dump` handler for non‑serializable fields (sketched below)
* Gracefully skip empty cache files when loading auth data
* **image/copy_images.py**
* Ignore file extensions longer than 4 characters when inferring the file type (sketched below)
* **requests/__init__.py**
* Use `return_by_value=True` for `navigator.userAgent` extraction (sketched below)
* **models.py**
* Remove `G4F` from model provider lists; update `janus_pro_7b` best providers
* **GUI server/api.py**
* Stream `SuggestedFollowups` to the client as a `"suggestions"` event (sketched below)
* **GUI static assets**
* **style.css**: bold chat title, add `.suggestions` styles, remove padding from `.chat-body`
* **chat.v1.js**
* Capture `suggestions` packets, render buttons, and send as quick replies
* Re‑order finish‑reason logic; adjust token count placement and system‑prompt toggling
* Update **chat-top-panel / footer** interactions accordingly
* **gui/client/static/js/chat.v1.js** & **css**: further UI refinements (scroll handling, token counting, hide-prompt toggle)
* Minor updates across multiple files to match new async interfaces and headers (`userAgent`, `raise_for_status`)
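
The sketches that follow are minimal reconstructions from the changelog above, not the actual diffs. First, the Blackbox rate-limit check; `RateLimitError` is a real class in `g4f.errors`, while the helper and the streaming context around it are illustrative:

```python
from g4f.errors import RateLimitError

RATE_LIMIT_MARKER = "You have reached your request limit for the hour"

def check_chunk(text: str) -> str:
    """Raise instead of yielding when Blackbox streams its hourly-limit message."""
    if RATE_LIMIT_MARKER in text:
        raise RateLimitError(RATE_LIMIT_MARKER)
    return text
```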
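For the Copilot conversion, the new flow is an async generator driving `curl_cffi`'s `AsyncSession.ws_connect` and yielding as frames arrive. The event names and payload shapes below are simplified assumptions; the real provider also handles auth, images, `TitleGeneration`, and emits `SourceLink`/`Sources` from the collected citations:

```python
import json
from curl_cffi.requests import AsyncSession

async def create_async_generator(url: str, prompt: str, timeout: int = 30):
    """Illustrative websocket loop: send the prompt, stream text until 'done'."""
    async with AsyncSession(timeout=timeout) as session:
        wss = await session.ws_connect(url)
        await wss.send(json.dumps({"event": "send", "text": prompt}).encode())
        done = False   # flipped when the service signals the turn is complete
        sources = {}   # url -> citation payload, aggregated for a final Sources event
        while not done:
            raw = (await wss.recv())[0]   # curl_cffi recv returns (bytes, frame info)
            msg = json.loads(raw)
            if msg.get("event") == "appendText":
                yield msg["text"]
            elif msg.get("event") == "citation":
                sources[msg["url"]] = msg
            elif msg.get("event") == "done":
                done = True
        await wss.close()
```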
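The `cookies_to_dict` helper for `CopilotAccount` is easy to picture: normalize whatever cookie container the browser layer produced into a plain `name -> value` mapping. This version is a guess at the shape, not the repo's exact code:

```python
def cookies_to_dict(cookies) -> dict:
    """Accept a dict or an iterable of cookie objects; return {name: value}."""
    if isinstance(cookies, dict):
        return cookies
    return {cookie.name: cookie.value for cookie in cookies}
```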
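For OpenaiChat, thought/summary segments in the stream become `Reasoning` responses instead of plain text. `Reasoning` does exist in `g4f.providers.response`; the event shape in this dispatch sketch is hypothetical:

```python
from g4f.providers.response import Reasoning

def map_stream_part(part: dict):
    """Surface reasoning segments as Reasoning objects; pass text through as-is."""
    if "thoughts" in part:              # hypothetical key for a reasoning segment
        return Reasoning(part["thoughts"])
    return part.get("text", "")
```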
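A standalone sketch of what the new `SourceLink` response class plausibly looks like; the real one subclasses g4f's response base type, omitted here to keep the example self-contained:

```python
class SourceLink:
    """One citation rendered as a markdown link appended to the message text."""
    def __init__(self, title: str, url: str):
        self.title = title
        self.url = url

    def __str__(self) -> str:
        return f" [[{self.title}]({self.url})]"

print(f"The answer.{SourceLink('Example', 'https://example.com')}")
# -> The answer. [[Example](https://example.com)]
```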
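Both `base_provider.py` fixes can be shown in one place: `json.dump` with a `default=` hook so non-serializable `AuthResult` fields degrade to strings instead of raising, plus an empty-file guard when reading the cache back. Function and attribute names here are assumptions:

```python
import json
from pathlib import Path

def save_auth(cache_file: Path, auth_result) -> None:
    """Persist auth data; stringify anything the encoder can't handle (default=str)."""
    with cache_file.open("w") as f:
        json.dump(vars(auth_result), f, default=str)

def load_auth(cache_file: Path) -> dict | None:
    """Return cached auth data, skipping cache files that exist but are empty."""
    if not cache_file.is_file() or cache_file.stat().st_size == 0:
        return None
    with cache_file.open() as f:
        return json.load(f)
```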
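The `copy_images.py` guard prevents a query-string tail from being mistaken for a file extension when the media type is inferred from a URL. A self-contained sketch with a hypothetical helper name:

```python
import os

def infer_extension(filename: str) -> str | None:
    """Treat the suffix as an extension only if it is 1-4 characters long."""
    ext = os.path.splitext(filename)[1].lstrip(".")
    return ext if 0 < len(ext) <= 4 else None

assert infer_extension("photo.jpeg") == "jpeg"
assert infer_extension("download.php?token=abcdef123") is None  # tail too long
```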
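The `requests/__init__.py` change tells nodriver to marshal the JS evaluation result back as a plain value rather than a remote object handle. A sketch of that flow; treat the exact `evaluate` signature as stated by the changelog rather than verified here:

```python
import nodriver

async def get_user_agent() -> str:
    """Read navigator.userAgent from a browser tab, returned by value."""
    browser = await nodriver.start()
    tab = await browser.get("about:blank")
    user_agent = await tab.evaluate("navigator.userAgent", return_by_value=True)
    browser.stop()
    return user_agent
```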
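Finally, the GUI server forwards `SuggestedFollowups` chunks to the browser as a `"suggestions"` event, which `chat.v1.js` turns into quick-reply buttons. The JSON-line framing below is an assumption about the transport, kept dependency-free by matching on the type name:

```python
import json

def to_event(chunk) -> str:
    """Serialize one streamed chunk as a JSON line for the GUI client."""
    if type(chunk).__name__ == "SuggestedFollowups":
        return json.dumps({"type": "suggestions", "suggestions": list(chunk.suggestions)})
    return json.dumps({"type": "content", "content": str(chunk)})
```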
**Provider/DuckDuckGo.py** (51 lines, no EOL, 1.6 KiB, Python):
```python
from __future__ import annotations

import asyncio

try:
    from duckai import DuckAI
    has_requirements = True
except ImportError:
    has_requirements = False

from ..typing import CreateResult, Messages
from .base_provider import AbstractProvider, ProviderModelMixin
from .helper import get_last_user_message

class DuckDuckGo(AbstractProvider, ProviderModelMixin):
    label = "Duck.ai (duckduckgo_search)"
    url = "https://duckduckgo.com/aichat"
    api_base = "https://duckduckgo.com/duckchat/v1/"

    working = has_requirements
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = "gpt-4o-mini"
    models = [default_model, "meta-llama/Llama-3.3-70B-Instruct-Turbo", "claude-3-haiku-20240307", "o3-mini", "mistralai/Mistral-Small-24B-Instruct-2501"]

    duck_ai: DuckAI = None

    model_aliases = {
        "gpt-4": "gpt-4o-mini",
        "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
        "claude-3-haiku": "claude-3-haiku-20240307",
        "mixtral-small-24b": "mistralai/Mistral-Small-24B-Instruct-2501",
    }

    @classmethod
    def create_completion(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        timeout: int = 60,
        **kwargs
    ) -> CreateResult:
        if not has_requirements:
            raise ImportError("duckai is not installed. Install it with `pip install -U duckai`.")
        if cls.duck_ai is None:
            cls.duck_ai = DuckAI(proxy=proxy, timeout=timeout)
        model = cls.get_model(model)
        yield cls.duck_ai.chat(get_last_user_message(messages), model, timeout)
```
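
Since `create_completion` is written as a generator, callers iterate it even though this provider yields a single complete answer. A minimal usage sketch (message shape follows g4f's `Messages` convention):

```python
messages = [{"role": "user", "content": "What is the capital of France?"}]
for chunk in DuckDuckGo.create_completion(model="gpt-4o-mini", messages=messages):
    print(chunk)
```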