mirror of https://github.com/xtekky/gpt4free.git (synced 2025-12-06 02:30:41 -08:00)
refactor: update providers list, env vars, and minor fixes
- **example.env**:
  - Added `G4F_API_KEY` and `G4F_PROXY` variables
  - Removed Azure-related API keys and routes block
  - Added `OLLAMA_API_KEY`, `NVIDIA_API_KEY`, and `PUTER_API_KEY`
- **g4f/Provider/EasyChat.py**:
  - Replaced `page.js_dumps` with `page.evaluate` for retrieving `guestId`
- **g4f/Provider/deprecated/LegacyLMArena.py**:
  - Changed `working = True` to `working = False`
- **g4f/Provider/deprecated/har/__init__.py**:
  - Changed `working = True` to `working = False`
- **g4f/providers/any_provider.py**:
  - Updated imports, adding `Custom`, `PollinationsImage`, `OpenaiAccount`, and reordering providers
  - Renamed `PROVIERS_LIST_2` → `PROVIDERS_LIST_2` and `PROVIERS_LIST_3` → `PROVIDERS_LIST_3`
  - Removed `PROVIERS_LIST_1` and replaced its usage with `Provider.__providers__`
  - Changed `cls.audio_models` initialization from `{}` to `[]`
  - Adjusted handling of model mapping with `clean_name` for consistency
  - Updated the GeminiPro check to `if provider == GeminiPro`
  - Changed the final `cls.audio_models` assignment to `[*cls.audio_models]` instead of `list(cls.audio_models.keys())`
  - Adjusted provider fallback in `AnyProvider` to use `PROVIDERS_LIST_2 + PROVIDERS_LIST_3`
- **g4f/requests/__init__.py**:
  - Changed SSE parsing condition from `line.startswith(b"data: ")` to `line.startswith(b"data:")`
  - Updated slice from `line[6:]` to `line[5:]` when extracting `rest`
parent b6bf9ee62c
commit 05f0f55711
6 changed files with 29 additions and 49 deletions
example.env (22 changed lines)

@@ -1,6 +1,9 @@
 # Rename this file to .env and copy it to your cookies directory
 # Update the API_KEY and other variables as needed
 
+G4F_API_KEY=
+G4F_PROXY=
+
 HUUGINGFACE_API_KEY=
 POLLINATIONS_API_KEY=
 GEMINI_API_KEY=
@@ -9,19 +12,6 @@ DEEPINFRA_API_KEY=
 OPENAI_API_KEY=
 GROQ_API_KEY=
 OPENROUTER_API_KEY=
-AZURE_API_KEYS='{
-    "default": "",
-    "flux-1.1-pro": "",
-    "flux.1-kontext-pro": ""
-}'
-AZURE_DEFAULT_MODEL="model-router"
-AZURE_ROUTES='{
-    "model-router": "https://HOST.cognitiveservices.azure.com/openai/deployments/model-router/chat/completions?api-version=2025-01-01-preview",
-    "deepseek-r1": "https://HOST.services.ai.azure.com/models/chat/completions?api-version=2024-05-01-preview",
-    "gpt-4.1": "https://HOST.cognitiveservices.azure.com/openai/deployments/gpt-4.1/chat/completions?api-version=2025-01-01-preview",
-    "gpt-4o-mini-audio-preview": "https://HOST.cognitiveservices.azure.com/openai/deployments/gpt-4o-mini-audio-preview/chat/completions?api-version=2025-01-01-preview",
-    "o4-mini": "https://HOST.cognitiveservices.azure.com/openai/deployments/o4-mini/chat/completions?api-version=2025-01-01-preview",
-    "grok-3": "https://HOST.services.ai.azure.com/models/chat/completions?api-version=2024-05-01-preview",
-    "flux-1.1-pro": "https://HOST.cognitiveservices.azure.com/openai/deployments/FLUX-1.1-pro/images/generations?api-version=2025-04-01-preview",
-    "flux.1-kontext-pro": "https://HOST.services.ai.azure.com/openai/deployments/FLUX.1-Kontext-pro/images/edits?api-version=2025-04-01-preview"
-}'
+OLLAMA_API_KEY=
+NVIDIA_API_KEY=
+PUTER_API_KEY=
g4f/Provider/EasyChat.py

@@ -93,7 +93,7 @@ class EasyChat(OpenaiTemplate, AuthFileMixin):
                 await asyncio.sleep(1)
                 if cls.captchaToken:
                     break
-            cls.guestId = await page.js_dumps('JSON.parse(localStorage.getItem("user-info") || "{}")?.state?.guestId')
+            cls.guestId = await page.evaluate('"" + JSON.parse(localStorage.getItem("user-info") || "{}")?.state?.guestId')
             await asyncio.sleep(3)
             if cache_file.exists():
                 with cache_file.open("r") as f:
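A note on the `js_dumps` → `evaluate` change above: `evaluate` runs a single JavaScript expression and returns its value, and the `"" + …` prefix coerces the result to a string inside the page, so a missing `guestId` comes back as the literal string `"undefined"` rather than a value that may not serialize cleanly. Below is a minimal sketch of the same pattern, assuming nodriver's `start`/`get`/`evaluate` API; the URL and the `read_guest_id` helper are illustrative, not part of the commit.

```python
import nodriver as uc

async def read_guest_id() -> str | None:
    browser = await uc.start()
    page = await browser.get("https://chat.example")  # placeholder URL, not the provider's endpoint
    # '"" + expr' forces string coercion in the page context, so an absent
    # guestId arrives as the string "undefined" instead of failing to serialize.
    guest_id = await page.evaluate(
        '"" + JSON.parse(localStorage.getItem("user-info") || "{}")?.state?.guestId'
    )
    return None if guest_id in (None, "undefined") else guest_id

if __name__ == "__main__":
    uc.loop().run_until_complete(read_guest_id())
```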
g4f/Provider/deprecated/LegacyLMArena.py

@@ -21,7 +21,7 @@ class LegacyLMArena(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://legacy.lmarena.ai"
     api_endpoint = "/queue/join?"
 
-    working = True
+    working = False
 
     default_model = "chatgpt-4o-latest-20250326"
     models = []
g4f/Provider/deprecated/har/__init__.py

@@ -23,7 +23,7 @@ class HarProvider(AsyncAuthedProvider, ProviderModelMixin):
     label = "LMArena (Har)"
     url = "https://legacy.lmarena.ai"
     api_endpoint = "/queue/join?"
-    working = True
+    working = False
     active_by_default = True
     default_model = LegacyLMArena.default_model
 
g4f/providers/any_provider.py

@@ -9,34 +9,23 @@ from ..image import is_data_an_audio
 from ..providers.retry_provider import RotatedProvider
 from ..Provider.needs_auth import OpenaiChat, CopilotAccount
 from ..Provider.hf_space import HuggingSpace
-from ..Provider import Copilot, Cloudflare, Gemini, GeminiPro, Grok, DeepSeekAPI, PerplexityLabs, LambdaChat, PollinationsAI, PuterJS
-from ..Provider import Microsoft_Phi_4_Multimodal, DeepInfraChat, Blackbox, OIVSCodeSer0501, OIVSCodeSer2, TeachAnything, OperaAria, Startnest
-from ..Provider import WeWordle, Yqcloud, Chatai, ImageLabs, LegacyLMArena, LMArenaBeta, Free2GPT
-from ..Provider import EdgeTTS, gTTS, MarkItDown, OpenAIFM
-from ..Provider import HarProvider, HuggingFace, HuggingFaceMedia, Azure, Qwen, EasyChat, GLM, OpenRouterFree
+from ..Provider import Custom, PollinationsImage, OpenaiAccount, Copilot, Cloudflare, Gemini, Grok, PerplexityLabs, LambdaChat, PollinationsAI, PuterJS
+from ..Provider import Microsoft_Phi_4_Multimodal, DeepInfraChat, LMArenaBeta, EdgeTTS, gTTS, MarkItDown, OpenAIFM
+from ..Provider import HuggingFace, HuggingFaceMedia, Azure, Qwen, EasyChat, GLM, OpenRouterFree, GeminiPro
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .. import Provider
 from .. import models
 from .. import debug
 from .any_model_map import audio_models, image_models, vision_models, video_models, model_map, models_count, parents, model_aliases
 
-# Add all model aliases to the model map
-PROVIERS_LIST_1 = [
-    CopilotAccount, OpenaiChat, Cloudflare, PerplexityLabs, Gemini, Grok, DeepSeekAPI, Blackbox, OpenAIFM,
-    OIVSCodeSer2, OIVSCodeSer0501, TeachAnything, WeWordle, Yqcloud, Chatai, Free2GPT, ImageLabs,
-    # Has lazy loading model lists
-    PollinationsAI, HarProvider, LegacyLMArena, LMArenaBeta, LambdaChat, DeepInfraChat,
-    HuggingSpace, HuggingFace, HuggingFaceMedia, GeminiPro, PuterJS, OperaAria, Startnest
-]
-
 # Add providers to existing models on map
-PROVIERS_LIST_2 = [
+PROVIDERS_LIST_2 = [
     OpenaiChat, Copilot, CopilotAccount, PollinationsAI, PerplexityLabs, Gemini, Grok, Azure, Qwen, EasyChat, GLM, OpenRouterFree
 ]
 
 # Add all models to the model map
-PROVIERS_LIST_3 = [
-    HarProvider, LambdaChat, DeepInfraChat, HuggingFace, HuggingFaceMedia, LegacyLMArena, LMArenaBeta,
+PROVIDERS_LIST_3 = [
+    LambdaChat, DeepInfraChat, HuggingFace, HuggingFaceMedia, LMArenaBeta,
     PuterJS, Cloudflare, HuggingSpace
 ]
 
@@ -112,7 +101,7 @@ class AnyModelProviderMixin(ProviderModelMixin):
 
     @classmethod
     def create_model_map(cls):
-        cls.audio_models = {}
+        cls.audio_models = []
         cls.image_models = []
         cls.vision_models = []
         cls.video_models = []
@@ -132,7 +121,7 @@ class AnyModelProviderMixin(ProviderModelMixin):
                 cls.image_models.append(name)
 
         # Process special providers
-        for provider in PROVIERS_LIST_2:
+        for provider in PROVIDERS_LIST_2:
             if not provider.working:
                 continue
             try:
@@ -172,7 +161,7 @@ class AnyModelProviderMixin(ProviderModelMixin):
             if hasattr(provider, 'video_models'):
                 cls.video_models.extend(provider.video_models)
 
-        for provider in PROVIERS_LIST_3:
+        for provider in PROVIDERS_LIST_3:
             if not provider.working:
                 continue
             try:
@@ -205,15 +194,16 @@ class AnyModelProviderMixin(ProviderModelMixin):
-                cls.video_models.extend(provider.video_models)
+                cls.video_models.extend([clean_name(model) for model in provider.video_models])
 
-        for provider in PROVIERS_LIST_1:
-            if provider.working:
+        for provider in Provider.__providers__:
+            if provider.working and hasattr(provider, "get_models") and provider not in [AnyProvider, Custom, PollinationsImage, OpenaiAccount]:
                 for model in provider.get_models():
-                    if model in cls.model_map:
-                        cls.model_map[model].update({provider.__name__: model})
+                    clean = clean_name(model)
+                    if clean in cls.model_map:
+                        cls.model_map[clean].update({provider.__name__: model})
                 for alias, model in provider.model_aliases.items():
                     if alias in cls.model_map:
                         cls.model_map[alias].update({provider.__name__: model})
-                if provider.__name__ == "GeminiPro":
+                if provider == GeminiPro:
                     for model in cls.model_map.keys():
                         if "gemini" in model or "gemma" in model:
                             cls.model_map[alias].update({provider.__name__: model})
 
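With `PROVIERS_LIST_1` gone, the loop above walks every registered provider in `Provider.__providers__` and keys the map by a normalized name. `clean_name` itself is not shown in this diff, so the sketch below uses a stand-in normalizer to illustrate how keying by cleaned names folds provider-specific model IDs onto one map entry; all provider and model names here are hypothetical.

```python
import re

def clean_name(name: str) -> str:
    # Stand-in for g4f's real normalizer (not shown in this diff):
    # lower-case, drop any vendor prefix, strip a date-like suffix.
    name = name.lower().split("/")[-1]
    return re.sub(r"-\d{8}$", "", name)

# Hypothetical provider -> native model ID pairs.
provider_models = {
    "ProviderA": "meta-llama/Llama-3.3-70B-Instruct",
    "ProviderB": "llama-3.3-70b-instruct",
}

model_map: dict[str, dict[str, str]] = {"llama-3.3-70b-instruct": {}}
for provider, model in provider_models.items():
    clean = clean_name(model)
    if clean in model_map:
        # Both providers land on the same entry, each keeping its native ID.
        model_map[clean][provider] = model

print(model_map)
# {'llama-3.3-70b-instruct': {'ProviderA': 'meta-llama/Llama-3.3-70B-Instruct',
#                             'ProviderB': 'llama-3.3-70b-instruct'}}
```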
@@ -230,11 +220,11 @@ class AnyModelProviderMixin(ProviderModelMixin):
 
         cls.video_models.append("video")
         cls.model_map["video"] = {"Video": "video"}
-        cls.audio_models = list(cls.audio_models.keys())
+        cls.audio_models = [*cls.audio_models]
 
         # Create a mapping of parent providers to their children
         cls.parents = {}
-        for provider in PROVIERS_LIST_1:
+        for provider in Provider.__providers__:
             if provider.working and provider.__name__ != provider.get_parent():
                 if provider.get_parent() not in cls.parents:
                     cls.parents[provider.get_parent()] = [provider.__name__]
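Since `create_model_map` now initializes `cls.audio_models` as a list (see the earlier hunk), the old `list(cls.audio_models.keys())` would raise `AttributeError`; `[*cls.audio_models]` is simply a splat-style shallow copy, equivalent to `list(cls.audio_models)`. A two-line illustration with made-up model names:

```python
audio_models = ["openai-audio", "gtts"]  # illustrative names only
copied = [*audio_models]
assert copied == list(audio_models) and copied is not audio_models
```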
@@ -393,7 +383,7 @@ class AnyProvider(AsyncGeneratorProvider, AnyModelProviderMixin):
                     provider.model_aliases[model] = alias
                 providers.append(provider)
         if not providers:
-            for provider in PROVIERS_LIST_1:
+            for provider in PROVIDERS_LIST_2 + PROVIDERS_LIST_3:
                 if model in provider.get_models():
                     providers.append(provider)
                 elif model in provider.model_aliases:
g4f/requests/__init__.py

@@ -226,8 +226,8 @@ async def sse_stream(iter_lines: Iterator[bytes]) -> AsyncIterator[dict]:
     elif hasattr(iter_lines, "iter_lines"):
         iter_lines = iter_lines.iter_lines()
     async for line in iter_lines:
-        if line.startswith(b"data: "):
-            rest = line[6:].strip()
+        if line.startswith(b"data:"):
+            rest = line[5:].strip()
             if not rest:
                 continue
             if rest.startswith(b"[DONE]"):
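The relaxed `data:` check moves the parser closer to the SSE wire format, where the space after the field name is optional: servers may emit `data:{...}` with no space, which the old `b"data: "` prefix test silently dropped. Note that `.strip()` is more aggressive than the spec's "remove one leading space", which is harmless for JSON payloads. A minimal sketch of the same rule; the `parse_sse_data` helper is illustrative, not g4f's API.

```python
import json

def parse_sse_data(line: bytes) -> dict | None:
    """Extract a JSON payload from one SSE line; None for non-data lines."""
    if not line.startswith(b"data:"):
        return None          # comment, event:, id:, retry:, or blank line
    rest = line[5:].strip()  # tolerate "data:" with or without a space
    if not rest or rest.startswith(b"[DONE]"):
        return None
    return json.loads(rest)

# Both spellings decode identically:
assert parse_sse_data(b'data: {"delta": "hi"}') == {"delta": "hi"}
assert parse_sse_data(b'data:{"delta": "hi"}') == {"delta": "hi"}
assert parse_sse_data(b'data: [DONE]') is None
```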