mirror of
https://github.com/xtekky/gpt4free.git
synced 2025-12-06 02:30:41 -08:00
* feat: introduce AnyProvider & LM Arena, overhaul model/provider logic
- **Provider additions & removals**
- Added `Provider/LMArenaProvider.py` with full async stream implementation and vision model support
- Registered `LMArenaProvider` in `Provider/__init__.py`; removed old `hf_space/LMArenaProvider.py`
- Created `providers/any_provider.py`; registers `AnyProvider` dynamically in `Provider`
- **Provider framework enhancements**
- `providers/base_provider.py`
- Added `video_models` and `audio_models` attributes
- `providers/retry_provider.py`
- Introduced `is_content()` helper; now treats `AudioResponse` as stream content
- **Cloudflare provider refactor**
- `Provider/Cloudflare.py`
- Re‑implemented `get_models()` with `read_models()` helper, `fallback_models`, robust nodriver/curl handling and model‑name cleaning
- **Other provider tweaks**
- `Provider/Copilot.py` – removed `"reasoning"` alias and initial `setOptions` WS message
- `Provider/PollinationsAI.py` & `PollinationsImage.py`
- Converted `audio_models` from list to dict, adjusted usage checks and labels
- `Provider/hf/__init__.py` – applies `model_aliases` remap before dispatch
- `Provider/hf_space/DeepseekAI_JanusPro7b.py` – now merges media before upload
- `needs_auth/Gemini.py` – dropped obsolete Gemini model entries
- `needs_auth/GigaChat.py` – added lowercase `"gigachat"` alias
- **API & client updates**
- Replaced `ProviderUtils` with new `Provider` map usage throughout API and GUI server
- Integrated `AnyProvider` as default fallback in `g4f/client` sync & async flows
- API endpoints now return counts of providers per model and filter by `x_ignored` header
- **GUI improvements**
- Updated JS labels with emoji icons, provider ignore logic, model count display
- **Model registry**
- Renamed base model `"GigaChat:latest"` ➜ `"gigachat"` in `models.py`
- **Miscellaneous**
- Added audio/video flags to GUI provider list
- Tightened error propagation in `retry_provider.raise_exceptions`
* Fix unittests
* fix: handle None conversation when accessing provider-specific data
- Modified `AnyProvider` class in `g4f/providers/any_provider.py`
- Updated logic to check if `conversation` is not None before accessing `provider.__name__` attribute
- Wrapped `getattr(conversation, provider.__name__, None)` block in an additional `if conversation is not None` condition
- Changed `setattr(conversation, provider.__name__, chunk)` to use `chunk.get_dict()` instead of the object directly
- Ensured consistent use of `JsonConversation` when modifying or assigning `conversation` data
* ```
feat: add provider string conversion & update IterListProvider call
- In g4f/client/__init__.py, within both Completions and AsyncCompletions, added a check to convert the provider from a string using convert_to_provider(provider) when applicable.
- In g4f/providers/any_provider.py, removed the second argument (False) from the IterListProvider constructor call in the async for loop.
```
---------
Co-authored-by: hlohaus <983577+hlohaus@users.noreply.github.com>
103 lines
3.9 KiB
Python
from __future__ import annotations
|
|
|
|
import random
|
|
|
|
from ...typing import AsyncResult, Messages, MediaListType
|
|
from ...errors import ResponseError
|
|
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
|
|
|
|
from .BlackForestLabs_Flux1Dev import BlackForestLabs_Flux1Dev
|
|
from .BlackForestLabs_Flux1Schnell import BlackForestLabs_Flux1Schnell
|
|
from .CohereForAI_C4AI_Command import CohereForAI_C4AI_Command
|
|
from .DeepseekAI_JanusPro7b import DeepseekAI_JanusPro7b
|
|
from .Microsoft_Phi_4 import Microsoft_Phi_4
|
|
from .Qwen_QVQ_72B import Qwen_QVQ_72B
|
|
from .Qwen_Qwen_2_5 import Qwen_Qwen_2_5
|
|
from .Qwen_Qwen_2_5M import Qwen_Qwen_2_5M
|
|
from .Qwen_Qwen_2_5_Max import Qwen_Qwen_2_5_Max
|
|
from .Qwen_Qwen_2_72B import Qwen_Qwen_2_72B
|
|
from .StabilityAI_SD35Large import StabilityAI_SD35Large
|
|
from .Voodoohop_Flux1Schnell import Voodoohop_Flux1Schnell
|
|
|
|
class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
    """Aggregating provider that fans out to the Hugging Face Space
    providers imported above.

    Model lists, image/vision model sets and parameter schemas are the
    union of all child providers; generation requests are dispatched to
    the first child that serves the requested model.
    """

    url = "https://huggingface.co/spaces"
    working = True

    default_model = Qwen_Qwen_2_72B.default_model
    default_image_model = BlackForestLabs_Flux1Dev.default_model
    default_vision_model = Qwen_QVQ_72B.default_model

    # Child providers this aggregator dispatches to.
    providers = [
        BlackForestLabs_Flux1Dev,
        BlackForestLabs_Flux1Schnell,
        CohereForAI_C4AI_Command,
        DeepseekAI_JanusPro7b,
        Microsoft_Phi_4,
        Qwen_QVQ_72B,
        Qwen_Qwen_2_5,
        Qwen_Qwen_2_5M,
        Qwen_Qwen_2_5_Max,
        Qwen_Qwen_2_72B,
        StabilityAI_SD35Large,
        Voodoohop_Flux1Schnell,
    ]

    @classmethod
    def get_parameters(cls, **kwargs) -> dict:
        """Return the merged parameter schema of all child providers.

        Later providers in ``cls.providers`` override identically named
        keys contributed by earlier ones.
        """
        parameters = {}
        for provider in cls.providers:
            parameters.update(provider.get_parameters(**kwargs))
        return parameters

    @classmethod
    def get_models(cls, **kwargs) -> list[str]:
        """Collect the union of models (and aliases) of all child providers.

        The sorted, de-duplicated result is cached on ``cls.models``;
        ``cls.image_models`` and ``cls.vision_models`` are populated as a
        side effect of the first call.
        """
        if not cls.models:
            models = []
            image_models = []
            vision_models = []
            for provider in cls.providers:
                models.extend(provider.get_models(**kwargs))
                models.extend(provider.model_aliases.keys())
                image_models.extend(provider.image_models)
                vision_models.extend(provider.vision_models)
            cls.models = sorted(set(models))
            cls.image_models = list(set(image_models))
            cls.vision_models = list(set(vision_models))
        return cls.models

    @classmethod
    async def create_async_generator(
        cls, model: str, messages: Messages, media: MediaListType = None, **kwargs
    ) -> AsyncResult:
        """Stream a response from the first child provider serving *model*.

        Providers whose ``model_aliases`` contain the model are tried
        first, then providers listing it in ``get_models()``.  A
        ``ResponseError`` raised before any chunk was yielded is
        remembered and only re-raised if no provider succeeds at all.
        """
        if not model and media is not None:
            # No explicit model but media attached: default to vision.
            model = cls.default_vision_model
        is_started = False
        # Shuffle a *copy* for load spreading so the shared class-level
        # provider list is never mutated (previously this shuffled
        # ``cls.providers`` in place on every call).
        providers = cls.providers.copy()
        random.shuffle(providers)
        for provider in providers:
            if model in provider.model_aliases:
                async for chunk in provider.create_async_generator(
                    provider.model_aliases[model], messages, media=media, **kwargs
                ):
                    is_started = True
                    yield chunk
                if is_started:
                    return
        error = None
        for provider in providers:
            if model in provider.get_models():
                try:
                    async for chunk in provider.create_async_generator(model, messages, media=media, **kwargs):
                        is_started = True
                        yield chunk
                    if is_started:
                        break
                except ResponseError as e:
                    # Output already streamed: cannot silently retry another
                    # provider, so propagate with the original traceback.
                    if is_started:
                        raise
                    error = e
        if not is_started and error is not None:
            raise error
|
|
|
|
# Tag every child provider so callers can see it belongs to HuggingSpace
# and that it is hosted on a Hugging Face Space.
_parent_name = HuggingSpace.__name__
for _child in HuggingSpace.providers:
    _child.parent = _parent_name
    _child.hf_space = True
|