Mirror of https://github.com/xtekky/gpt4free.git (synced 2025-12-06 10:40:43 -08:00)
* docs(docs/providers-and-models.md): update documentation structure and model listings
* refactor(g4f/debug.py): add type hints and docstrings
* refactor(g4f/tools/run_tools.py): restructure tool handling and improve modularity
* refactor(g4f/providers/response.py): enhance type hints and code documentation
* feat(g4f/models.py): update model providers and add new models
* feat(g4f/Provider/Blackbox.py): add encrypted session handling and model updates
* fix(g4f/Provider/ChatGptEs.py): migrate to curl_cffi for request handling and improve error resilience
* feat(g4f/Provider/DeepInfraChat.py): update default model and add new DeepSeek variants
* feat(g4f/Provider/Free2GPT.py): add Gemini models and streamline headers
* feat(g4f/Provider/FreeGpt.py): add support for Gemini 1.5 Flash model
* feat(g4f/Provider/Liaobots.py): add Claude 3.7 models and update default GPT-4o
* fix(g4f/Provider/PollinationsAI.py): correct model mappings and generation parameters
* feat(g4f/Provider/PollinationsImage.py): add class identifier label
* chore(g4f/Provider/TeachAnything.py): update default model and simplify model handling
* (g4f/Provider/Mhystical.py): remove class implementation
* chore(g4f/Provider/Prodia.py > g4f/Provider/not_working/Prodia.py): mark Prodia provider as non-working
* feat(g4f/Provider/Blackbox.py): add Claude 3.7 Sonnet model alias
* chore(g4f/models.py): update model configurations
* fix(g4f/Provider/ChatGptEs.py): improve request reliability and nonce detection

---------

Co-authored-by: kqlio67 <>
67 lines · 2.3 KiB · Python
from __future__ import annotations

import time
from hashlib import sha256

from aiohttp import BaseConnector, ClientSession

from ..errors import RateLimitError
from ..requests import raise_for_status
from ..requests.aiohttp import get_connector
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin

class Free2GPT(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://chat10.free2gpt.xyz"

    working = True
    supports_message_history = True

    default_model = 'gemini-1.5-pro'
    models = [default_model, 'gemini-1.5-flash']
    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        connector: BaseConnector = None,
        **kwargs,
    ) -> AsyncResult:
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
            "Accept": "*/*",
            "Accept-Language": "en-US,en;q=0.9",
            "Accept-Encoding": "gzip, deflate, br",
            "Content-Type": "text/plain;charset=UTF-8",
            "Referer": f"{cls.url}/",
            "Origin": cls.url,
        }
        async with ClientSession(
            connector=get_connector(connector, proxy), headers=headers
        ) as session:
            # Millisecond timestamp; echoed in the payload and folded into the signature.
            timestamp = int(time.time() * 1e3)
            data = {
                "messages": messages,
                "time": timestamp,
                "pass": None,
                "sign": generate_signature(timestamp, messages[-1]["content"]),
            }
            async with session.post(
                f"{cls.url}/api/generate", json=data, proxy=proxy
            ) as response:
                # The backend reports quota exhaustion as a 500, so inspect the body
                # for the quota message before the generic status check.
                if response.status == 500:
                    if "Quota exceeded" in await response.text():
                        raise RateLimitError(
                            f"Response {response.status}: Rate limit reached"
                        )
                await raise_for_status(response)
                # Stream decoded text chunks to the caller as they arrive.
                async for chunk in response.content.iter_any():
                    yield chunk.decode(errors="ignore")

def generate_signature(time: int, text: str, secret: str = ""):
    # The server verifies sha256("<millisecond timestamp>:<last message>:<secret>").
    message = f"{time}:{text}:{secret}"
    return sha256(message.encode()).hexdigest()
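For reference, a minimal usage sketch follows. It is not part of the file above: it assumes the g4f package is installed, that Free2GPT is exported from g4f.Provider as in this repository, and that the upstream endpoint is reachable; the example prompt is illustrative only.

import asyncio

from g4f.Provider import Free2GPT

async def main():
    messages = [{"role": "user", "content": "Hello, who are you?"}]
    # create_async_generator yields decoded text chunks as they stream in;
    # each request is signed via generate_signature(timestamp, last_message).
    async for chunk in Free2GPT.create_async_generator(
        model=Free2GPT.default_model, messages=messages
    ):
        print(chunk, end="", flush=True)

asyncio.run(main())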