Mirror of https://github.com/xtekky/gpt4free.git, synced 2025-12-06 02:30:41 -08:00
* Update model configurations, provider implementations, and documentation
  - Updated model names and aliases for Qwen QVQ 72B and Qwen 2 72B (@TheFirstNoob)
  - Revised HuggingSpace class configuration, added default_image_model
  - Added llama-3.2-70b alias for the Llama 3.2 70B model in AutonomousAI
  - Removed the BlackboxCreateAgent class
  - Added gpt-4o alias for the Copilot model
  - Moved api_key to a Mhystical class attribute
  - Added models property with a default_model value for Free2GPT
  - Simplified the Jmuz class implementation
  - Improved image generation and model handling in DeepInfra
  - Standardized default models and removed aliases in Gemini
  - Replaced model aliases with a direct model list in GlhfChat (@TheFirstNoob)
  - Removed the trailing slash from the image generation URL in PollinationsAI (https://github.com/xtekky/gpt4free/issues/2571)
  - Updated llama and qwen model configurations
  - Enhanced provider documentation and model details
* Removed the Yqcloud provider from Default in g4f/models.py due to the error 'ResponseStatusError: Response 429: 文字过长,请删减后重试。' ("Text too long, please shorten it and retry.")
* Update docs/providers-and-models.md
* refactor(g4f/Provider/DDG.py): add error handling and rate limiting to the DDG provider (see the sketch after this list)
  - Add custom exception classes for rate limits, timeouts, and conversation limits
  - Implement rate limiting with a sleep between requests (0.75s minimum delay)
  - Add a model validation method to check supported models
  - Add proper error handling for API responses with custom exceptions
  - Improve session cookie handling for conversation persistence
  - Clean up the User-Agent string and remove redundant code
  - Add proper error propagation through the async generator

  Breaking changes:
  - New custom exceptions may require updates to error-handling code
  - Rate limiting affects request timing and throughput
  - Model validation is now stricter

  Related:
  - Adds error handling similar to standard API clients
  - Improves the reliability and robustness of chat interactions
* Update g4f/models.py g4f/Provider/PollinationsAI.py
* Update g4f/models.py
* Restored the DeepInfraChat provider (g4f/Provider/DeepInfraChat.py), which had been disabled while it was not working
* Fixed a bug with streaming completions
* Update g4f/Provider/PollinationsAI.py
* Update g4f/Provider/Blackbox.py g4f/Provider/DDG.py
* Added another image generation model, 'ImageGeneration2', to the Blackbox provider
* Update docs/providers-and-models.md
* Update g4f/models.py g4f/Provider/Blackbox.py
* Added a new OIVSCode provider with text models and vision (image upload) support
* Update docs/providers-and-models.md
* docs: add a Conversation Memory class with context handling, requested by @TheFirstNoob
* Simplified the README.md documentation and added new docs/configuration.md documentation
* Update README.md docs/configuration.md
* Update README.md
* Update docs/providers-and-models.md g4f/models.py g4f/Provider/PollinationsAI.py
* Added the new deepseek-r1 model to the Blackbox provider (@TheFirstNoob)
* Fixed bugs and updated docs/providers-and-models.md, etc/unittest/client.py, g4f/models.py, and g4f/Provider/.

---------

Co-authored-by: kqlio67 <>
Co-authored-by: H Lohaus <hlohaus@users.noreply.github.com>
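The DDG refactor described above follows a reusable pattern: one typed exception per failure mode, plus a minimum delay enforced between consecutive requests. The sketch below illustrates that pattern; the class and method names are hypothetical (the actual ones in g4f/Provider/DDG.py may differ), and only the 0.75s delay is taken from the commit message.

    import asyncio
    import time

    # Hypothetical exception types, one per failure mode named in the commit.
    class DDGProviderError(Exception): ...
    class DDGRateLimitError(DDGProviderError): ...
    class DDGTimeoutError(DDGProviderError): ...
    class DDGConversationLimitError(DDGProviderError): ...

    class RateLimitedClient:
        """Enforces a minimum delay between consecutive requests."""

        def __init__(self, min_delay: float = 0.75):  # 0.75s per the commit message
            self.min_delay = min_delay
            self._last_request = 0.0

        async def _throttle(self) -> None:
            # Sleep just long enough to keep at least min_delay between requests.
            elapsed = time.monotonic() - self._last_request
            if elapsed < self.min_delay:
                await asyncio.sleep(self.min_delay - elapsed)
            self._last_request = time.monotonic()

        def _validate_model(self, model: str, supported: list[str]) -> str:
            # Stricter model validation: fail fast on unknown model names.
            if model not in supported:
                raise DDGProviderError(f"Model is not supported: {model}")
            return model

Raising typed exceptions (rather than returning error strings from the generator) is what makes the change breaking: callers that previously ignored failures now have to catch them.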
74 lines
2.6 KiB
Python
from __future__ import annotations

import time
from hashlib import sha256

from aiohttp import BaseConnector, ClientSession

from ..errors import RateLimitError
from ..requests import raise_for_status
from ..requests.aiohttp import get_connector
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin

class Free2GPT(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://chat10.free2gpt.xyz"
    working = True
    supports_message_history = True
    default_model = 'mistral-7b'
    models = [default_model]

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        connector: BaseConnector = None,
        **kwargs,
    ) -> AsyncResult:
        # Browser-like request headers expected by the upstream service.
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
            "Accept": "*/*",
            "Accept-Language": "en-US,en;q=0.9",
            "Accept-Encoding": "gzip, deflate, br",
            "Content-Type": "text/plain;charset=UTF-8",
            "Referer": f"{cls.url}/",
            "Origin": cls.url,
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Sec-Ch-Ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
            "Sec-Ch-Ua-Mobile": "?0",
            "Sec-Ch-Ua-Platform": '"Linux"',
            "Cache-Control": "no-cache",
            "Pragma": "no-cache",
            "Priority": "u=1, i",
        }
        async with ClientSession(
            connector=get_connector(connector, proxy), headers=headers
        ) as session:
            # Each request is signed with a millisecond timestamp and a
            # SHA-256 hash of the latest message (see generate_signature below).
            timestamp = int(time.time() * 1e3)
            data = {
                "messages": messages,
                "time": timestamp,
                "pass": None,
                "sign": generate_signature(timestamp, messages[-1]["content"]),
            }
            async with session.post(
                f"{cls.url}/api/generate", json=data, proxy=proxy
            ) as response:
                # The backend reports quota exhaustion as HTTP 500 with a
                # "Quota exceeded" body, so map that case to a RateLimitError.
                if response.status == 500:
                    if "Quota exceeded" in await response.text():
                        raise RateLimitError(
                            f"Response {response.status}: Rate limit reached"
                        )
                await raise_for_status(response)
                # Stream the response body back as decoded text chunks.
                async for chunk in response.content.iter_any():
                    yield chunk.decode(errors="ignore")


def generate_signature(time: int, text: str, secret: str = ""):
    # Hash "<timestamp>:<text>:<secret>" with SHA-256 to produce the
    # request signature expected by the API.
    message = f"{time}:{text}:{secret}"
    return sha256(message.encode()).hexdigest()
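

# Usage sketch (illustrative, not part of the module): one way to drive the
# provider's async generator and print the streamed chunks. Assumes the g4f
# package is installed, so the class is importable as g4f.Provider.Free2GPT.
#
#   import asyncio
#   from g4f.Provider import Free2GPT
#
#   async def main() -> None:
#       messages = [{"role": "user", "content": "Hello!"}]
#       async for chunk in Free2GPT.create_async_generator(
#           model=Free2GPT.default_model, messages=messages
#       ):
#           print(chunk, end="", flush=True)
#
#   asyncio.run(main())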