mirror of
https://github.com/xtekky/gpt4free.git
synced 2025-12-06 02:30:41 -08:00
* feat: introduce AnyProvider & LM Arena, overhaul model/provider logic
- **Provider additions & removals**
- Added `Provider/LMArenaProvider.py` with full async stream implementation and vision model support
- Registered `LMArenaProvider` in `Provider/__init__.py`; removed old `hf_space/LMArenaProvider.py`
- Created `providers/any_provider.py`; registers `AnyProvider` dynamically in `Provider`
- **Provider framework enhancements**
- `providers/base_provider.py`
- Added `video_models` and `audio_models` attributes
- `providers/retry_provider.py`
- Introduced `is_content()` helper; now treats `AudioResponse` as stream content
- **Cloudflare provider refactor**
- `Provider/Cloudflare.py`
- Re‑implemented `get_models()` with `read_models()` helper, `fallback_models`, robust nodriver/curl handling and model‑name cleaning
- **Other provider tweaks**
- `Provider/Copilot.py` – removed `"reasoning"` alias and initial `setOptions` WS message
- `Provider/PollinationsAI.py` & `PollinationsImage.py`
- Converted `audio_models` from list to dict, adjusted usage checks and labels
- `Provider/hf/__init__.py` – applies `model_aliases` remap before dispatch
- `Provider/hf_space/DeepseekAI_JanusPro7b.py` – now merges media before upload
- `needs_auth/Gemini.py` – dropped obsolete Gemini model entries
- `needs_auth/GigaChat.py` – added lowercase `"gigachat"` alias
- **API & client updates**
- Replaced `ProviderUtils` with new `Provider` map usage throughout API and GUI server
- Integrated `AnyProvider` as default fallback in `g4f/client` sync & async flows
- API endpoints now return counts of providers per model and filter by `x_ignored` header
- **GUI improvements**
- Updated JS labels with emoji icons, provider ignore logic, model count display
- **Model registry**
- Renamed base model `"GigaChat:latest"` ➜ `"gigachat"` in `models.py`
- **Miscellaneous**
- Added audio/video flags to GUI provider list
- Tightened error propagation in `retry_provider.raise_exceptions`
* Fix unittests
* fix: handle None conversation when accessing provider-specific data
- Modified `AnyProvider` class in `g4f/providers/any_provider.py`
- Updated logic to check if `conversation` is not None before accessing `provider.__name__` attribute
- Wrapped `getattr(conversation, provider.__name__, None)` block in an additional `if conversation is not None` condition
- Changed `setattr(conversation, provider.__name__, chunk)` to use `chunk.get_dict()` instead of the object directly
- Ensured consistent use of `JsonConversation` when modifying or assigning `conversation` data
* ```
feat: add provider string conversion & update IterListProvider call
- In g4f/client/__init__.py, within both Completions and AsyncCompletions, added a check to convert the provider from a string using convert_to_provider(provider) when applicable.
- In g4f/providers/any_provider.py, removed the second argument (False) from the IterListProvider constructor call in the async for loop.
```
---------
Co-authored-by: hlohaus <983577+hlohaus@users.noreply.github.com>
142 lines
6.9 KiB
Python
142 lines
6.9 KiB
Python
from __future__ import annotations
|
|
|
|
import unittest
|
|
|
|
from g4f.errors import ModelNotFoundError
|
|
from g4f.client import Client, AsyncClient, ChatCompletion, ChatCompletionChunk
|
|
from g4f.client.service import get_model_and_provider
|
|
from g4f.Provider.Copilot import Copilot
|
|
from g4f.models import gpt_4o
|
|
from .mocks import AsyncGeneratorProviderMock, ModelProviderMock, YieldProviderMock
|
|
|
|
# Minimal single-turn conversation reused by every test case below.
DEFAULT_MESSAGES = [{'role': 'user', 'content': 'Hello'}]
|
|
|
|
class AsyncTestPassModel(unittest.IsolatedAsyncioTestCase):
    """Async client tests: mock response passthrough, model echo, token limits,
    streaming, and stop-word handling."""

    async def test_response(self):
        # The async-generator mock yields a single "Mock" message.
        client = AsyncClient(provider=AsyncGeneratorProviderMock)
        result = await client.chat.completions.create(DEFAULT_MESSAGES, "")
        self.assertIsInstance(result, ChatCompletion)
        self.assertEqual("Mock", result.choices[0].message.content)

    async def test_pass_model(self):
        # The model-echo mock returns the requested model name as the content.
        client = AsyncClient(provider=ModelProviderMock)
        result = await client.chat.completions.create(DEFAULT_MESSAGES, "Hello")
        self.assertIsInstance(result, ChatCompletion)
        self.assertEqual("Hello", result.choices[0].message.content)

    async def test_max_tokens(self):
        client = AsyncClient(provider=YieldProviderMock)
        words = ["How ", "are ", "you", "?"]
        messages = [{'role': 'user', 'content': word} for word in words]
        # max_tokens=1 keeps only the first yielded chunk.
        result = await client.chat.completions.create(messages, "Hello", max_tokens=1)
        self.assertIsInstance(result, ChatCompletion)
        self.assertEqual("How ", result.choices[0].message.content)
        # max_tokens=2 keeps the first two chunks.
        result = await client.chat.completions.create(messages, "Hello", max_tokens=2)
        self.assertIsInstance(result, ChatCompletion)
        self.assertEqual("How are ", result.choices[0].message.content)

    async def test_max_stream(self):
        client = AsyncClient(provider=YieldProviderMock)
        messages = [{'role': 'user', 'content': word} for word in ["How ", "are ", "you", "?"]]
        stream = client.chat.completions.create(messages, "Hello", stream=True)
        # Every streamed part must be a chunk; any textual delta must be a str.
        async for part in stream:
            self.assertIsInstance(part, ChatCompletionChunk)
            if part.choices[0].delta.content is not None:
                self.assertIsInstance(part.choices[0].delta.content, str)
        messages = [{'role': 'user', 'content': word} for word in ["You ", "You ", "Other", "?"]]
        stream = client.chat.completions.create(messages, "Hello", stream=True, max_tokens=2)
        collected = [part async for part in stream]
        # Two content chunks plus the final terminating chunk.
        self.assertEqual(len(collected), 3)
        for part in collected:
            if part.choices[0].delta.content is not None:
                self.assertEqual(part.choices[0].delta.content, "You ")

    async def test_stop(self):
        client = AsyncClient(provider=YieldProviderMock)
        messages = [{'role': 'user', 'content': word} for word in ["How ", "are ", "you", "?"]]
        # The stop word "and" never occurs, so the full text comes through.
        result = await client.chat.completions.create(messages, "Hello", stop=["and"])
        self.assertIsInstance(result, ChatCompletion)
        self.assertEqual("How are you?", result.choices[0].message.content)
|
|
|
|
class TestPassModel(unittest.TestCase):
    """Sync client tests mirroring the async suite, plus model/provider
    resolution checks via get_model_and_provider."""

    def test_response(self):
        # The async-generator mock yields a single "Mock" message.
        client = Client(provider=AsyncGeneratorProviderMock)
        result = client.chat.completions.create(DEFAULT_MESSAGES, "")
        self.assertIsInstance(result, ChatCompletion)
        self.assertEqual("Mock", result.choices[0].message.content)

    def test_pass_model(self):
        # The model-echo mock returns the requested model name as the content.
        client = Client(provider=ModelProviderMock)
        result = client.chat.completions.create(DEFAULT_MESSAGES, "Hello")
        self.assertIsInstance(result, ChatCompletion)
        self.assertEqual("Hello", result.choices[0].message.content)

    def test_max_tokens(self):
        client = Client(provider=YieldProviderMock)
        words = ["How ", "are ", "you", "?"]
        messages = [{'role': 'user', 'content': word} for word in words]
        # max_tokens=1 keeps only the first yielded chunk.
        result = client.chat.completions.create(messages, "Hello", max_tokens=1)
        self.assertIsInstance(result, ChatCompletion)
        self.assertEqual("How ", result.choices[0].message.content)
        # max_tokens=2 keeps the first two chunks.
        result = client.chat.completions.create(messages, "Hello", max_tokens=2)
        self.assertIsInstance(result, ChatCompletion)
        self.assertEqual("How are ", result.choices[0].message.content)

    def test_max_stream(self):
        client = Client(provider=YieldProviderMock)
        messages = [{'role': 'user', 'content': word} for word in ["How ", "are ", "you", "?"]]
        stream = client.chat.completions.create(messages, "Hello", stream=True)
        # Every streamed part must be a chunk; any textual delta must be a str.
        for part in stream:
            self.assertIsInstance(part, ChatCompletionChunk)
            if part.choices[0].delta.content is not None:
                self.assertIsInstance(part.choices[0].delta.content, str)
        messages = [{'role': 'user', 'content': word} for word in ["You ", "You ", "Other", "?"]]
        stream = client.chat.completions.create(messages, "Hello", stream=True, max_tokens=2)
        collected = list(stream)
        # Two content chunks plus the final terminating chunk.
        self.assertEqual(len(collected), 3)
        for part in collected:
            if part.choices[0].delta.content is not None:
                self.assertEqual(part.choices[0].delta.content, "You ")

    def test_stop(self):
        client = Client(provider=YieldProviderMock)
        messages = [{'role': 'user', 'content': word} for word in ["How ", "are ", "you", "?"]]
        # The stop word "and" never occurs, so the full text comes through.
        result = client.chat.completions.create(messages, "Hello", stop=["and"])
        self.assertIsInstance(result, ChatCompletion)
        self.assertEqual("How are you?", result.choices[0].message.content)

    def test_model_not_found(self):
        # An unknown model on a default client must raise ModelNotFoundError.
        with self.assertRaises(ModelNotFoundError):
            Client().chat.completions.create(DEFAULT_MESSAGES, "Hello")

    def test_best_provider(self):
        # A known, non-default model resolves to a usable provider.
        requested = "gpt-4o"
        model, provider = get_model_and_provider(requested, None, False)
        self.assertTrue(hasattr(provider, "create_completion"))
        self.assertEqual(model, requested)

    def test_default_model(self):
        # An empty model string resolves to the default and is echoed back.
        requested = ""
        model, provider = get_model_and_provider(requested, None, False)
        self.assertTrue(hasattr(provider, "create_completion"))
        self.assertEqual(model, requested)

    def test_provider_as_model(self):
        # Passing a provider name as the model selects that provider's default model.
        model, provider = get_model_and_provider(Copilot.__name__, None, False)
        self.assertTrue(hasattr(provider, "create_completion"))
        self.assertIsInstance(model, str)
        self.assertEqual(model, Copilot.default_model)

    def test_get_model(self):
        # A registered model object's name resolves to itself.
        model, provider = get_model_and_provider(gpt_4o.name, None, False)
        self.assertTrue(hasattr(provider, "create_completion"))
        self.assertEqual(model, gpt_4o.name)
|
|
|
|
# Allow running this test module directly (e.g. `python -m unittest` or `python file.py`).
if __name__ == '__main__':
    unittest.main()
|