Mirror of https://github.com/xtekky/gpt4free.git, synced 2025-12-05 18:20:35 -08:00.
- **Docs**
- `docs/file.md`: update upload instructions to use inline `bucket` content parts instead of `tool_calls/bucket_tool`.
- `docs/media.md`: add asynchronous audio transcription example, detailed explanation, and notes.
- **New audio provider**
- Add `g4f/Provider/audio/EdgeTTS.py` implementing Edge Text‑to‑Speech (`EdgeTTS`).
- Create `g4f/Provider/audio/__init__.py` for provider export.
- Register provider in `g4f/Provider/__init__.py`.
- **Refactor image → media**
- Introduce `generated_media/` directory and `get_media_dir()` helper in `g4f/image/copy_images.py`; add `ensure_media_dir()`; keep back‑compat with legacy `generated_images/`.
- Replace `images_dir` references with `get_media_dir()` across:
- `g4f/api/__init__.py`
- `g4f/client/stubs.py`
- `g4f/gui/server/api.py`
- `g4f/gui/server/backend_api.py`
- `g4f/image/copy_images.py`
- Rename CLI/API config field/flag from `image_provider` to `media_provider` (`g4f/cli.py`, `g4f/api/__init__.py`, `g4f/client/__init__.py`).
- Extend `g4f/image/__init__.py`
- add `MEDIA_TYPE_MAP`, `get_extension()`
- revise `is_allowed_extension()`, `to_input_audio()` to support wider media types.
- **Provider adjustments**
- `g4f/Provider/ARTA.py`: swap `raise_error()` parameter order.
- `g4f/Provider/Cloudflare.py`: drop unused `MissingRequirementsError` import; move `get_args_from_nodriver()` inside try; handle `FileNotFoundError`.
- **Core enhancements**
- `g4f/providers/any_provider.py`: use `default_model` instead of literal `"default"`; broaden model/provider matching; update model list cleanup.
- `g4f/models.py`: safeguard provider count logic when model name is falsy.
- `g4f/providers/base_provider.py`: catch `json.JSONDecodeError` when reading auth cache, delete corrupted file.
- `g4f/providers/response.py`: allow `AudioResponse` to accept extra kwargs.
- **Misc**
- Remove obsolete `g4f/image.py`.
- `g4f/Provider/Cloudflare.py`, `g4f/client/types.py`: minor whitespace and import tidy‑ups.
144 lines · 6.1 KiB · Python
from __future__ import annotations
|
|
|
|
import asyncio
|
|
import json
|
|
|
|
from ..typing import AsyncResult, Messages, Cookies
|
|
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin, get_running_loop
|
|
from ..requests import Session, StreamSession, get_args_from_nodriver, raise_for_status, merge_cookies
|
|
from ..requests import DEFAULT_HEADERS, has_nodriver, has_curl_cffi
|
|
from ..providers.response import FinishReason, Usage
|
|
from ..errors import ResponseStatusError, ModelNotFoundError
|
|
from .. import debug
|
|
from .helper import render_messages
|
|
|
|
class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
    """Provider for the Cloudflare AI playground (playground.ai.cloudflare.com).

    Streams chat completions from Cloudflare Workers AI models. Request
    arguments (headers/cookies) come from a cached auth file when present,
    otherwise from nodriver browser automation when available, otherwise
    from default headers with Chrome impersonation.
    """
    label = "Cloudflare AI"
    url = "https://playground.ai.cloudflare.com"
    working = has_curl_cffi
    use_nodriver = True
    api_endpoint = "https://playground.ai.cloudflare.com/api/inference"
    models_url = "https://playground.ai.cloudflare.com/api/models"
    supports_stream = True
    supports_system_message = True
    supports_message_history = True
    default_model = "@cf/meta/llama-3.3-70b-instruct-fp8-fast"
    # Short alias -> full Workers AI model id.
    # NOTE: the previous literal repeated several keys ("llama-2-7b",
    # "llama-3-8b", "llama-3.1-8b"); a dict literal keeps only the last
    # occurrence, so the shadowed entries were dead code and are dropped here.
    # The effective mapping is unchanged.
    model_aliases = {
        "llama-2-7b": "@cf/meta/llama-2-7b-chat-int8",
        "llama-3-8b": "@hf/meta-llama/meta-llama-3-8b-instruct",
        "llama-3.1-8b": "@cf/meta/llama-3.1-8b-instruct-fp8",
        "llama-3.2-1b": "@cf/meta/llama-3.2-1b-instruct",
        "llama-4-scout": "@cf/meta/llama-4-scout-17b-16e-instruct",
        "deepseek-math-7b": "@cf/deepseek-ai/deepseek-math-7b-instruct",
        "deepseek-r1-qwen-32b": "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
        "falcon-7b": "@cf/tiiuae/falcon-7b-instruct",
        "qwen-1.5-7b": "@cf/qwen/qwen1.5-7b-chat-awq",
        "qwen-2.5-coder": "@cf/qwen/qwen2.5-coder-32b-instruct",
    }
    fallback_models = list(model_aliases.keys())
    # Cached request arguments (headers / cookies / impersonation);
    # None until first initialized.
    _args: dict = None

    @classmethod
    def get_models(cls) -> list:
        """Fetch the live model list from the playground API, caching it on the class.

        Falls back to nodriver-acquired arguments on an HTTP error, and to the
        static ``fallback_models`` when nodriver is unavailable or fails.

        Returns:
            The list of cleaned model names (``cls.models``).
        """
        def read_models():
            with Session(**cls._args) as session:
                response = session.get(cls.models_url)
                cls._args["cookies"] = merge_cookies(cls._args["cookies"], response)
                raise_for_status(response)
                json_data = response.json()

                def clean_name(name: str) -> str:
                    # Strip the vendor prefix and quantization/variant suffixes
                    # to produce a short user-facing model id. Suffix order is
                    # preserved from the original .replace() chain.
                    name = name.split("/")[-1]
                    for suffix in ("-instruct", "-17b-16e", "-chat", "-fp8",
                                   "-fast", "-int8", "-awq", "-qvq", "-r1"):
                        name = name.replace(suffix, "")
                    return name

                model_map = {clean_name(model.get("name")): model.get("name") for model in json_data.get("models")}
                cls.models = list(model_map.keys())
                # Live mappings extend (and may override) the static aliases.
                cls.model_aliases = {**cls.model_aliases, **model_map}

        if not cls.models:
            try:
                if cls._args is None:
                    cls._args = {"headers": DEFAULT_HEADERS, "cookies": {}}
                read_models()
            except ResponseStatusError as f:
                if has_nodriver:
                    get_running_loop(check_nested=True)
                    try:
                        # Acquire fresh headers/cookies via a real browser,
                        # then retry the model listing.
                        args = get_args_from_nodriver(cls.url)
                        cls._args = asyncio.run(args)
                        read_models()
                    except (RuntimeError, FileNotFoundError) as e:
                        cls.models = cls.fallback_models
                        debug.log(f"Nodriver is not available: {type(e).__name__}: {e}")
                else:
                    cls.models = cls.fallback_models
                    debug.log(f"Nodriver is not installed: {type(f).__name__}: {f}")
        return cls.models

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        max_tokens: int = 2048,
        cookies: Cookies = None,
        timeout: int = 300,
        **kwargs
    ) -> AsyncResult:
        """Stream a chat completion from the Cloudflare playground API.

        Args:
            model: Model name or alias; unresolvable names are sent as-is.
            messages: Conversation history to send.
            proxy: Optional proxy URL passed to nodriver.
            max_tokens: Generation limit forwarded to the API.
            cookies: Optional cookies passed to nodriver.
            timeout: Timeout (seconds) for nodriver argument acquisition.

        Yields:
            Text chunks, then ``Usage`` and ``FinishReason`` on completion.

        Raises:
            ResponseStatusError: On an HTTP error; the cached auth file is
                invalidated before re-raising so the next call starts fresh.
        """
        cache_file = cls.get_cache_file()
        if cls._args is None:
            if cache_file.exists():
                # Reuse previously captured headers/cookies.
                with cache_file.open("r") as f:
                    cls._args = json.load(f)
            elif has_nodriver:
                cls._args = await get_args_from_nodriver(cls.url, proxy, timeout, cookies)
            else:
                cls._args = {"headers": DEFAULT_HEADERS, "cookies": {}, "impersonate": "chrome"}
        try:
            model = cls.get_model(model)
        except ModelNotFoundError:
            # Unknown names are passed through unchanged; the API decides.
            pass
        data = {
            "messages": [{
                **message,
                "parts": [{"type": "text", "text": message["content"]}]} for message in render_messages(messages)],
            "lora": None,
            "model": model,
            "max_tokens": max_tokens,
            "stream": True,
            "system_message": "You are a helpful assistant",
            "tools": []
        }
        async with StreamSession(**cls._args) as session:
            async with session.post(
                cls.api_endpoint,
                json=data,
            ) as response:
                cls._args["cookies"] = merge_cookies(cls._args["cookies"], response)
                try:
                    await raise_for_status(response)
                except ResponseStatusError:
                    # Stale auth: drop cached args and file so the next call
                    # re-acquires them, then propagate the error.
                    cls._args = None
                    if cache_file.exists():
                        cache_file.unlink()
                    raise
                # Vercel AI data-stream framing: '0:' = text chunk,
                # 'e:' = final event with usage and finish reason.
                async for line in response.iter_lines():
                    if line.startswith(b'0:'):
                        yield json.loads(line[2:])
                    elif line.startswith(b'e:'):
                        finish = json.loads(line[2:])
                        yield Usage(**finish.get("usage"))
                        yield FinishReason(finish.get("finishReason"))

        # Persist working args for future runs.
        with cache_file.open("w") as f:
            json.dump(cls._args, f)