Mirror of https://github.com/xtekky/gpt4free.git
- Deleted multiple deprecated providers including Acytoo, AiAsk, AiService, Aibn, Aivvm, Berlin, ChatAnywhere, ChatgptDuo, CodeLinkAva, Cromicle, DfeHub, EasyChat, FakeGpt, FastGpt, Forefront, GPTalk, GeekGpt, GetGpt, H2o, Hashnode, Myshell, NoowAi, Opchatgpts, OpenAssistant, V50, Vitalentum, VoiGpt, Wewordle, Wuguokai, Ylokh, and Yqcloud, along with the corresponding deprecated/__init__.py
- Renamed LMArenaProvider.py to LMArena.py and merged its functionality with enhancements: updated model lists and aliases, comprehensive model discovery, payload building, and an asynchronous generator for completions (shape sketched below)
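A minimal sketch of that async-generator shape, assuming simplified names (only `create_async_generator` and `model_aliases` come from this changelog; the transport stub stands in for the real HTTP/SSE layer):

```python
from __future__ import annotations

from typing import AsyncIterator


class LMArena:
    # Illustrative alias table; the real provider ships a much larger one.
    model_aliases: dict[str, str] = {"gpt-4o": "chatgpt-4o-latest"}

    @staticmethod
    async def _stream(payload: dict) -> AsyncIterator[str]:
        # Stand-in for the real HTTP/SSE transport, so the sketch runs as-is.
        for token in ("Hello", ", ", "world"):
            yield token

    @classmethod
    async def create_async_generator(
        cls, model: str, messages: list, **kwargs
    ) -> AsyncIterator[str]:
        model = cls.model_aliases.get(model, model)       # alias resolution
        payload = {"model": model, "messages": messages}  # simplified payload building
        async for token in cls._stream(payload):          # yield tokens as they arrive
            yield token
```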
- Removed LMArenaProvider import and added LMArena import in Provider/__init__.py
- Modified Blackbox provider:
  - Removed the generate_session_data method and updated generate_session to use a fixed email
  - Updated the session payload in the send request to call generate_session without an email argument
  - Added an asyncMode flag, set to False, in the session payload
- In DeepInfraChat, removed the model aliases "llama-4-maverick-17b" and "llama-4-scout-17b"
- In PollinationsAI, updated model aliases: replaced "command-r-plus-08-2024" with "command-r-plus"; added "gpt-image" and "grok-3-mini" aliases
- In LambdaChat, added a "llama-3.3-70b" alias mapping to "llama3.3-70b-instruct" (alias convention sketched below)
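For context: these alias tables are plain dicts mapping the public model name to the provider's internal model id. A minimal sketch using the LambdaChat entry above (other entries elided):

```python
# model_aliases maps the public model name (left) to the provider's own id (right).
model_aliases: dict[str, str] = {
    "llama-3.3-70b": "llama3.3-70b-instruct",  # mapping added in this change
}
```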
- In hf_space:
  - Deleted the Qwen_QVQ_72B and Voodoohop_Flux1Schnell providers
  - Updated model_aliases in Qwen_Qwen_2_5_Max, fixing the alias key from "qwen-2-5-max" to "qwen-2.5-max"
  - Changed the model_aliases key in StabilityAI_SD35Large from "stable-diffusion-3.5-large" to "sd-3.5-large"
  - Removed imports of the deleted providers in hf_space/__init__.py and updated defaults accordingly
- In BingCreateImages, changed the import to a relative import from .bing.create_images
- Moved the bing directory into the needs_auth directory and updated imports accordingly
- Changed PuterJS provider:
  - Changed the working flag from True to False
  - Changed the return_conversation default from False to True in create_async_generator
  - Changed error handling from yielding error messages to raising exceptions for authentication and rate limits (pattern sketched below)
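A minimal sketch of that error-handling change, with stand-in exception classes (g4f defines its own error types; the exact names and status checks here are assumptions):

```python
from typing import AsyncIterator


class MissingAuthError(Exception):
    """Stand-in for the project's authentication error type."""


class RateLimitError(Exception):
    """Stand-in for the project's rate-limit error type."""


async def create_async_generator(status: int) -> AsyncIterator[str]:
    # After this change, failures raise instead of being yielded as text,
    # so callers can distinguish provider errors from model output.
    if status == 401:
        raise MissingAuthError("PuterJS: authentication failed")
    if status == 429:
        raise RateLimitError("PuterJS: rate limit exceeded")
    yield "model output..."
```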
- Modified models.py:
  - Added a ModelRegistry class for dynamic registration and lookup of Model instances (sketched just below this block)
  - Modified the Model dataclass to auto-register instances on initialization via ModelRegistry
  - Adjusted imports and removed PuterJS from provider lists and best_provider assignments
  - Replaced many best_provider references to PuterJS with LMArena or IterListProvider, including core models such as gpt-3.5-turbo, gpt-4, gpt-4o, the Llama series, Mistral, Hermes, Microsoft Phi, Gemini, Anthropic Claude, Cohere, Qwen, DeepSeek, and others
  - Fixed aliases and model names (e.g., "qwen-2-5-max" to "qwen-2.5-max")
  - Removed outdated or deprecated model definitions referencing PuterJS
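A hedged sketch of the registry pattern the first two items describe (the registry mechanics follow the changelog; every field beyond `name` is an assumption, and the real models.py dataclass carries more metadata):

```python
from __future__ import annotations

from dataclasses import dataclass
from typing import Optional


class ModelRegistry:
    """Name -> Model index, so lookups need no module-level scanning."""
    _models: dict[str, "Model"] = {}

    @classmethod
    def register(cls, model: "Model") -> None:
        cls._models[model.name] = model

    @classmethod
    def get(cls, name: str) -> Optional["Model"]:
        return cls._models.get(name)


@dataclass
class Model:
    name: str
    base_provider: str = ""  # hypothetical extra field for illustration

    def __post_init__(self) -> None:
        # Auto-registration: constructing a Model makes it discoverable by name.
        ModelRegistry.register(self)


# Usage: defining a model registers it; lookup is then a dict hit.
gpt_4 = Model(name="gpt-4", base_provider="OpenAI")
assert ModelRegistry.get("gpt-4") is gpt_4
```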
- Updated the HarProvider label from "LM Arena" to "LM Arena (Har)"
- Removed deprecated provider imports from Provider/__init__.py and updated the not_working directory imports accordingly
- Functions and files affected: create_async_generator in the Blackbox, LMArena, PollinationsAI, LambdaChat, and PuterJS providers; model aliases and model definitions in models.py; the Provider package __init__.py files; the BingCreateImages import; plus the deletion of numerous deprecated and not_working providers.
71 lines · No EOL · 2.7 KiB · Python
from __future__ import annotations

import json

import requests

from ...typing import Any, CreateResult
from ..base_provider import AbstractProvider


class Equing(AbstractProvider):
    url: str = 'https://next.eqing.tech/'
    working = False  # endpoint is no longer serviced; provider kept for reference
    supports_stream = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = False

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        # Browser-like headers expected by the next.eqing.tech endpoint.
        headers = {
            'authority'         : 'next.eqing.tech',
            'accept'            : 'text/event-stream',
            'accept-language'   : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'cache-control'     : 'no-cache',
            'content-type'      : 'application/json',
            'origin'            : 'https://next.eqing.tech',
            'plugins'           : '0',
            'pragma'            : 'no-cache',
            'referer'           : 'https://next.eqing.tech/',
            'sec-ch-ua'         : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
            'sec-ch-ua-mobile'  : '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest'    : 'empty',
            'sec-fetch-mode'    : 'cors',
            'sec-fetch-site'    : 'same-origin',
            'user-agent'        : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
            'usesearch'         : 'false',
            'x-requested-with'  : 'XMLHttpRequest'
        }

        # OpenAI-compatible request body; sampling parameters fall back to defaults.
        json_data = {
            'messages'          : messages,
            'stream'            : stream,
            'model'             : model,
            'temperature'       : kwargs.get('temperature', 0.5),
            'presence_penalty'  : kwargs.get('presence_penalty', 0),
            'frequency_penalty' : kwargs.get('frequency_penalty', 0),
            'top_p'             : kwargs.get('top_p', 1),
        }

        response = requests.post('https://next.eqing.tech/api/openai/v1/chat/completions',
            headers=headers, json=json_data, stream=stream)
        response.raise_for_status()

        # Non-streaming: the full completion arrives in a single JSON body.
        if not stream:
            yield response.json()["choices"][0]["message"]["content"]
            return

        # Streaming: the endpoint emits OpenAI-style server-sent events.
        # iter_lines() keeps each "data: {...}" event intact, unlike fixed-size
        # iter_content() chunks, which can split a JSON payload mid-object.
        for line in response.iter_lines():
            if line and b'content' in line:
                line_json = json.loads(line.decode('utf-8').split('data: ')[1])
                token = line_json['choices'][0]['delta'].get('content')
                if token:
                    yield token
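For reference, AbstractProvider subclasses like this one are driven as plain generators. A minimal usage sketch (with working = False the endpoint is expected to be dead, so the request itself would fail; the calling convention is what matters here):

```python
for chunk in Equing.create_completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
):
    print(chunk, end="", flush=True)
```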