Mirror of https://github.com/xtekky/gpt4free.git, synced 2025-12-15 14:51:19 -08:00
* Update model configurations, provider implementations, and documentation
  - Updated model names and aliases for Qwen QVQ 72B and Qwen 2 72B (@TheFirstNoob)
  - Revised the HuggingSpace class configuration and added default_image_model
  - Added the llama-3.2-70b alias for the Llama 3.2 70B model in AutonomousAI
  - Removed the BlackboxCreateAgent class
  - Added the gpt-4o alias for the Copilot model
  - Moved api_key to a Mhystical class attribute
  - Added a models property with a default_model value for Free2GPT
  - Simplified the Jmuz class implementation
  - Improved image generation and model handling in DeepInfra
  - Standardized default models and removed aliases in Gemini
  - Replaced model aliases with a direct model list in GlhfChat (@TheFirstNoob)
  - Removed the trailing slash from the image generation URL in PollinationsAI (https://github.com/xtekky/gpt4free/issues/2571)
  - Updated llama and qwen model configurations
  - Enhanced provider documentation and model details
* Removed the 'Yqcloud' provider from Default in g4f/models.py due to the error 'ResponseStatusError: Response 429: 文字过长,请删减后重试。' ("The text is too long, please shorten it and try again.")
* Update docs/providers-and-models.md
* refactor(g4f/Provider/DDG.py): Add error handling and rate limiting to the DDG provider
  - Add custom exception classes for rate limits, timeouts, and conversation limits
  - Implement rate limiting with a sleep between requests (0.75s minimum delay); see the sketch after this message
  - Add a model validation method that checks supported models
  - Add proper error handling for API responses using the custom exceptions
  - Improve session cookie handling for conversation persistence
  - Clean up the User-Agent string and remove redundant code
  - Add proper error propagation through the async generator

  Breaking changes:
  - New custom exceptions may require updates to error handling code
  - Rate limiting affects request timing and throughput
  - Model validation is now stricter

  Related:
  - Adds error handling similar to standard API clients
  - Improves the reliability and robustness of chat interactions
* Update g4f/models.py g4f/Provider/PollinationsAI.py
* Update g4f/models.py
* Restored a provider that was not working and had been disabled (g4f/Provider/DeepInfraChat.py)
* Fixed a bug with streaming completions
* Update g4f/Provider/PollinationsAI.py
* Update g4f/Provider/Blackbox.py g4f/Provider/DDG.py
* Added another image generation model, 'ImageGeneration2', to the 'Blackbox' provider
* Update docs/providers-and-models.md
* Update g4f/models.py g4f/Provider/Blackbox.py
* Added a new OIVSCode provider with text models and vision (image upload) support
* Update docs/providers-and-models.md
* docs: add a Conversation Memory class with context handling, requested by @TheFirstNoob
* Simplified the README.md documentation and added new docs/configuration.md documentation
* Updated README.md and docs/configuration.md
* Update README.md
* Update docs/providers-and-models.md g4f/models.py g4f/Provider/PollinationsAI.py
* Added the new model deepseek-r1 to the Blackbox provider (@TheFirstNoob)
* Fixed bugs and updated docs/providers-and-models.md, etc/unittest/client.py, g4f/models.py, and g4f/Provider/.

---------

Co-authored-by: kqlio67 <>
Co-authored-by: H Lohaus <hlohaus@users.noreply.github.com>
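The DDG refactor described above combines client-side rate limiting (a 0.75s minimum delay between requests) with custom exception classes. A minimal sketch of that pattern follows; the names used here (RateLimitError, RequestTimeoutError, ConversationLimitError, ThrottledSession, MIN_DELAY) are illustrative assumptions, not the actual identifiers in g4f/Provider/DDG.py.

import asyncio
import time

# Hypothetical exception names; the real classes in g4f/Provider/DDG.py may differ.
class RateLimitError(Exception):
    """Upstream returned a rate-limit response (e.g. HTTP 429)."""

class RequestTimeoutError(Exception):
    """A request exceeded its time budget."""

class ConversationLimitError(Exception):
    """The provider refused to continue the current conversation."""

class ThrottledSession:
    """Sketch of enforcing a minimum delay between consecutive requests."""
    MIN_DELAY = 0.75  # seconds; the minimum delay named in the commit message

    def __init__(self) -> None:
        self._last_request = 0.0

    async def throttle(self) -> None:
        # Sleep just long enough so that requests are at least MIN_DELAY apart.
        elapsed = time.monotonic() - self._last_request
        if elapsed < self.MIN_DELAY:
            await asyncio.sleep(self.MIN_DELAY - elapsed)
        self._last_request = time.monotonic()

The caller would await throttle() before each outgoing request and translate HTTP error statuses into the corresponding exceptions.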
81 lines
2.6 KiB
Python
from __future__ import annotations

from ..typing import AsyncResult, Messages
from .needs_auth.OpenaiAPI import OpenaiAPI

class Jmuz(OpenaiAPI):
    label = "Jmuz"
    url = "https://discord.gg/Ew6JzjA2NR"
    login_url = None
    api_base = "https://jmuz.me/gpt/api/v2"
    api_key = "prod"

    working = True
    needs_auth = False
    supports_stream = True
    supports_system_message = False

    default_model = "gpt-4o"
    # Map g4f model names to the names expected by the Jmuz backend.
    model_aliases = {
        "qwq-32b": "qwq-32b-preview",
        "gemini-1.5-flash": "gemini-flash",
        "gemini-1.5-pro": "gemini-pro",
        "gemini-2.0-flash-thinking": "gemini-thinking",
        "deepseek-chat": "deepseek-v3",
    }

    @classmethod
    def get_models(cls, **kwargs):
        # Fetch the model list from the backend once and cache it on the class.
        if not cls.models:
            cls.models = super().get_models(api_key=cls.api_key, api_base=cls.api_base)
        return cls.models

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        stream: bool = True,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)
        headers = {
            "Authorization": f"Bearer {cls.api_key}",
            "Content-Type": "application/json",
            "accept": "*/*",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
        }

        started = False
        buffer = ""
        async for chunk in super().create_async_generator(
            model=model,
            messages=messages,
            api_base=cls.api_base,
            api_key=cls.api_key,
            stream=cls.supports_stream,
            headers=headers,
            **kwargs
        ):
            if isinstance(chunk, str):
                buffer += chunk
                # Buffer and drop the promotional "Join for free" line injected into the stream.
                if "Join for free".startswith(buffer) or buffer.startswith("Join for free"):
                    if buffer.endswith("\n"):
                        buffer = ""
                    continue
                # Buffer and drop injected Discord invite links.
                if "https://discord.gg/".startswith(buffer) or "https://discord.gg/" in buffer:
                    if "..." in buffer:
                        buffer = ""
                    continue
                # Buffer and drop the injected "o1-preview" advertisement line.
                if "o1-preview".startswith(buffer) or buffer.startswith("o1-preview"):
                    if "\n" in buffer:
                        buffer = ""
                    continue
                # Trim leading whitespace before the first real content chunk.
                if not started:
                    buffer = buffer.lstrip()
                if buffer:
                    started = True
                    yield buffer
                    buffer = ""
            else:
                yield chunk
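For reference, a minimal sketch of driving this provider's async generator directly is shown below. It assumes the class is importable as g4f.Provider.Jmuz and uses a placeholder prompt; in normal use the provider is reached through g4f's client layer rather than called like this.

import asyncio

from g4f.Provider import Jmuz  # assumes Jmuz is exported from g4f.Provider

async def main() -> None:
    messages = [{"role": "user", "content": "Say hello in one sentence."}]
    # create_async_generator yields text chunks (and possibly non-string objects).
    async for chunk in Jmuz.create_async_generator(model="gpt-4o", messages=messages):
        if isinstance(chunk, str):
            print(chunk, end="", flush=True)
    print()

if __name__ == "__main__":
    asyncio.run(main())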