diff --git a/example.env b/example.env index 7ddcffa0..a640ba7c 100644 --- a/example.env +++ b/example.env @@ -1,6 +1,9 @@ # Rename this file to .env and copy it to your cookies directory # Update the API_KEY and other variables as needed +G4F_API_KEY= +G4F_PROXY= + HUUGINGFACE_API_KEY= POLLINATIONS_API_KEY= GEMINI_API_KEY= @@ -9,19 +12,6 @@ DEEPINFRA_API_KEY= OPENAI_API_KEY= GROQ_API_KEY= OPENROUTER_API_KEY= -AZURE_API_KEYS='{ - "default": "", - "flux-1.1-pro": "", - "flux.1-kontext-pro": "" -}' -AZURE_DEFAULT_MODEL="model-router" -AZURE_ROUTES='{ - "model-router": "https://HOST.cognitiveservices.azure.com/openai/deployments/model-router/chat/completions?api-version=2025-01-01-preview", - "deepseek-r1": "https://HOST.services.ai.azure.com/models/chat/completions?api-version=2024-05-01-preview", - "gpt-4.1": "https://HOST.cognitiveservices.azure.com/openai/deployments/gpt-4.1/chat/completions?api-version=2025-01-01-preview", - "gpt-4o-mini-audio-preview": "https://HOST.cognitiveservices.azure.com/openai/deployments/gpt-4o-mini-audio-preview/chat/completions?api-version=2025-01-01-preview", - "o4-mini": "https://HOST.cognitiveservices.azure.com/openai/deployments/o4-mini/chat/completions?api-version=2025-01-01-preview", - "grok-3": "https://HOST.services.ai.azure.com/models/chat/completions?api-version=2024-05-01-preview", - "flux-1.1-pro": "https://HOST.cognitiveservices.azure.com/openai/deployments/FLUX-1.1-pro/images/generations?api-version=2025-04-01-preview", - "flux.1-kontext-pro": "https://HOST.services.ai.azure.com/openai/deployments/FLUX.1-Kontext-pro/images/edits?api-version=2025-04-01-preview" -}' \ No newline at end of file +OLLAMA_API_KEY= +NVIDIA_API_KEY= +PUTER_API_KEY= \ No newline at end of file diff --git a/g4f/Provider/EasyChat.py b/g4f/Provider/EasyChat.py index 56c6e031..e56c0bb8 100644 --- a/g4f/Provider/EasyChat.py +++ b/g4f/Provider/EasyChat.py @@ -93,7 +93,7 @@ class EasyChat(OpenaiTemplate, AuthFileMixin): await asyncio.sleep(1) if 
cls.captchaToken: break - cls.guestId = await page.js_dumps('JSON.parse(localStorage.getItem("user-info") || "{}")?.state?.guestId') + cls.guestId = await page.evaluate('"" + JSON.parse(localStorage.getItem("user-info") || "{}")?.state?.guestId') await asyncio.sleep(3) if cache_file.exists(): with cache_file.open("r") as f: diff --git a/g4f/Provider/deprecated/LegacyLMArena.py b/g4f/Provider/deprecated/LegacyLMArena.py index e2733a5d..e524573f 100644 --- a/g4f/Provider/deprecated/LegacyLMArena.py +++ b/g4f/Provider/deprecated/LegacyLMArena.py @@ -21,7 +21,7 @@ class LegacyLMArena(AsyncGeneratorProvider, ProviderModelMixin): url = "https://legacy.lmarena.ai" api_endpoint = "/queue/join?" - working = True + working = False default_model = "chatgpt-4o-latest-20250326" models = [] diff --git a/g4f/Provider/deprecated/har/__init__.py b/g4f/Provider/deprecated/har/__init__.py index 1898e51d..8e40c8a6 100644 --- a/g4f/Provider/deprecated/har/__init__.py +++ b/g4f/Provider/deprecated/har/__init__.py @@ -23,7 +23,7 @@ class HarProvider(AsyncAuthedProvider, ProviderModelMixin): label = "LMArena (Har)" url = "https://legacy.lmarena.ai" api_endpoint = "/queue/join?" 
- working = True + working = False active_by_default = True default_model = LegacyLMArena.default_model diff --git a/g4f/providers/any_provider.py b/g4f/providers/any_provider.py index a31ad4fc..32f4fdcd 100644 --- a/g4f/providers/any_provider.py +++ b/g4f/providers/any_provider.py @@ -9,34 +9,23 @@ from ..image import is_data_an_audio from ..providers.retry_provider import RotatedProvider from ..Provider.needs_auth import OpenaiChat, CopilotAccount from ..Provider.hf_space import HuggingSpace -from ..Provider import Copilot, Cloudflare, Gemini, GeminiPro, Grok, DeepSeekAPI, PerplexityLabs, LambdaChat, PollinationsAI, PuterJS -from ..Provider import Microsoft_Phi_4_Multimodal, DeepInfraChat, Blackbox, OIVSCodeSer0501, OIVSCodeSer2, TeachAnything, OperaAria, Startnest -from ..Provider import WeWordle, Yqcloud, Chatai, ImageLabs, LegacyLMArena, LMArenaBeta, Free2GPT -from ..Provider import EdgeTTS, gTTS, MarkItDown, OpenAIFM -from ..Provider import HarProvider, HuggingFace, HuggingFaceMedia, Azure, Qwen, EasyChat, GLM, OpenRouterFree +from ..Provider import Custom, PollinationsImage, OpenaiAccount, Copilot, Cloudflare, Gemini, Grok, PerplexityLabs, LambdaChat, PollinationsAI, PuterJS +from ..Provider import Microsoft_Phi_4_Multimodal, DeepInfraChat, LMArenaBeta, EdgeTTS, gTTS, MarkItDown, OpenAIFM +from ..Provider import HuggingFace, HuggingFaceMedia, Azure, Qwen, EasyChat, GLM, OpenRouterFree, GeminiPro from .base_provider import AsyncGeneratorProvider, ProviderModelMixin from .. import Provider from .. import models from .. 
import debug from .any_model_map import audio_models, image_models, vision_models, video_models, model_map, models_count, parents, model_aliases -# Add all model aliases to the model map -PROVIERS_LIST_1 = [ - CopilotAccount, OpenaiChat, Cloudflare, PerplexityLabs, Gemini, Grok, DeepSeekAPI, Blackbox, OpenAIFM, - OIVSCodeSer2, OIVSCodeSer0501, TeachAnything, WeWordle, Yqcloud, Chatai, Free2GPT, ImageLabs, - # Has lazy loading model lists - PollinationsAI, HarProvider, LegacyLMArena, LMArenaBeta, LambdaChat, DeepInfraChat, - HuggingSpace, HuggingFace, HuggingFaceMedia, GeminiPro, PuterJS, OperaAria, Startnest -] - # Add providers to existing models on map -PROVIERS_LIST_2 = [ +PROVIDERS_LIST_2 = [ OpenaiChat, Copilot, CopilotAccount, PollinationsAI, PerplexityLabs, Gemini, Grok, Azure, Qwen, EasyChat, GLM, OpenRouterFree ] # Add all models to the model map -PROVIERS_LIST_3 = [ - HarProvider, LambdaChat, DeepInfraChat, HuggingFace, HuggingFaceMedia, LegacyLMArena, LMArenaBeta, +PROVIDERS_LIST_3 = [ + LambdaChat, DeepInfraChat, HuggingFace, HuggingFaceMedia, LMArenaBeta, PuterJS, Cloudflare, HuggingSpace ] @@ -112,7 +101,7 @@ class AnyModelProviderMixin(ProviderModelMixin): @classmethod def create_model_map(cls): - cls.audio_models = {} + cls.audio_models = [] cls.image_models = [] cls.vision_models = [] cls.video_models = [] @@ -132,7 +121,7 @@ class AnyModelProviderMixin(ProviderModelMixin): cls.image_models.append(name) # Process special providers - for provider in PROVIERS_LIST_2: + for provider in PROVIDERS_LIST_2: if not provider.working: continue try: @@ -172,7 +161,7 @@ class AnyModelProviderMixin(ProviderModelMixin): if hasattr(provider, 'video_models'): cls.video_models.extend(provider.video_models) - for provider in PROVIERS_LIST_3: + for provider in PROVIDERS_LIST_3: if not provider.working: continue try: @@ -205,15 +194,16 @@ class AnyModelProviderMixin(ProviderModelMixin): cls.video_models.extend(provider.video_models) 
cls.video_models.extend([clean_name(model) for model in provider.video_models]) - for provider in PROVIERS_LIST_1: - if provider.working: + for provider in Provider.__providers__: + if provider.working and hasattr(provider, "get_models") and provider not in [AnyProvider, Custom, PollinationsImage, OpenaiAccount]: for model in provider.get_models(): - if model in cls.model_map: - cls.model_map[model].update({provider.__name__: model}) + clean = clean_name(model) + if clean in cls.model_map: + cls.model_map[clean].update({provider.__name__: model}) for alias, model in provider.model_aliases.items(): if alias in cls.model_map: cls.model_map[alias].update({provider.__name__: model}) - if provider.__name__ == "GeminiPro": + if provider == GeminiPro: for model in cls.model_map.keys(): if "gemini" in model or "gemma" in model: - cls.model_map[alias].update({provider.__name__: model}) + cls.model_map[model].update({provider.__name__: model}) @@ -230,11 +220,11 @@ class AnyModelProviderMixin(ProviderModelMixin): cls.video_models.append("video") cls.model_map["video"] = {"Video": "video"} - cls.audio_models = list(cls.audio_models.keys()) + cls.audio_models = [*cls.audio_models] # Create a mapping of parent providers to their children cls.parents = {} - for provider in PROVIERS_LIST_1: + for provider in Provider.__providers__: if provider.working and provider.__name__ != provider.get_parent(): if provider.get_parent() not in cls.parents: cls.parents[provider.get_parent()] = [provider.__name__] @@ -393,7 +383,7 @@ class AnyModelProviderMixin(ProviderModelMixin): provider.model_aliases[model] = alias providers.append(provider) if not providers: - for provider in PROVIERS_LIST_1: + for provider in PROVIDERS_LIST_2 + PROVIDERS_LIST_3: if model in provider.get_models(): providers.append(provider) elif model in provider.model_aliases: diff --git a/g4f/requests/__init__.py b/g4f/requests/__init__.py index 96a9ee88..8fc39872 100644 --- a/g4f/requests/__init__.py +++ b/g4f/requests/__init__.py @@ -226,8 +226,8 @@ async def
sse_stream(iter_lines: Iterator[bytes]) -> AsyncIterator[dict]: elif hasattr(iter_lines, "iter_lines"): iter_lines = iter_lines.iter_lines() async for line in iter_lines: - if line.startswith(b"data: "): - rest = line[6:].strip() + if line.startswith(b"data:"): + rest = line[5:].strip() if not rest: continue if rest.startswith(b"[DONE]"):