diff --git a/etc/tool/create_provider.py b/etc/tool/create_provider.py
index 319676b6..57751937 100644
--- a/etc/tool/create_provider.py
+++ b/etc/tool/create_provider.py
@@ -12,6 +12,7 @@ g4f.debug.logging = True
 def read_code(text):
     if match := re.search(r"```(python|py|)\n(?P<code>[\S\s]+?)\n```", text):
         return match.group("code")
+    return text
 
 def input_command():
     print("Enter/Paste the cURL command. Ctrl-D or Ctrl-Z ( windows ) to save it.")
@@ -119,7 +120,8 @@ And replace "gpt-3.5-turbo" with `model`.
         stream=True,
     ):
         print(chunk, end="", flush=True)
-        response.append(chunk)
+        if not isinstance(chunk, Exception):
+            response.append(str(chunk))
     print()
     response = "".join(response)
diff --git a/g4f/Provider/Cloudflare.py b/g4f/Provider/Cloudflare.py
index 2e0197e4..a06aa72e 100644
--- a/g4f/Provider/Cloudflare.py
+++ b/g4f/Provider/Cloudflare.py
@@ -3,7 +3,7 @@ from __future__ import annotations
 import asyncio
 import json
 
-from ..typing import AsyncResult, Messages, Cookies
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin, get_running_loop
 from ..requests import Session, StreamSession, get_args_from_nodriver, raise_for_status, merge_cookies
 from ..requests import DEFAULT_HEADERS, has_nodriver, has_curl_cffi
diff --git a/g4f/Provider/LMArenaBeta.py b/g4f/Provider/LMArenaBeta.py
new file mode 100644
index 00000000..e97054ea
--- /dev/null
+++ b/g4f/Provider/LMArenaBeta.py
@@ -0,0 +1,128 @@
+from __future__ import annotations
+
+import time
+import uuid
+import json
+import asyncio
+
+from ..typing import AsyncResult, Messages
+from ..requests import StreamSession, get_args_from_nodriver, raise_for_status, merge_cookies
+from ..requests import DEFAULT_HEADERS, has_nodriver
+from ..errors import ModelNotFoundError
+from ..providers.response import FinishReason, Usage, JsonConversation, ImageResponse
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin
+from .helper import get_last_user_message
+from .. import debug
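+
+# Static snapshot of the beta.lmarena.ai model catalog (assumed accurate at the
+# time this provider was written): each entry pairs a "publicName" with the
+# internal model "id" and declares its input/output capabilities.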
+models = [{"id":"7a55108b-b997-4cff-a72f-5aa83beee918","publicName":"gemini-2.0-flash-001","organization":"google","provider":"google","capabilities":{"inputCapabilities":{"text":True,"image":True},"outputCapabilities":{"text":True}}},{"id":"f44e280a-7914-43ca-a25d-ecfcc5d48d09","publicName":"claude-3-5-sonnet-20241022","organization":"anthropic","provider":"anthropic","capabilities":{"inputCapabilities":{"text":True,"image":True},"outputCapabilities":{"text":True}}},{"id":"bd2c8278-af7a-4ec3-84db-0a426c785564","publicName":"grok-3-preview-02-24","organization":"xai","provider":"xaiPrivate","capabilities":{"inputCapabilities":{"text":True},"outputCapabilities":{"text":True}}},{"id":"9513524d-882e-4350-b31e-e4584440c2c8","publicName":"chatgpt-4o-latest-20250326","organization":"openai","provider":"openai","capabilities":{"inputCapabilities":{"text":True,"image":True},"outputCapabilities":{"text":True}}},{"id":"ce2092c1-28d4-4d42-a1e0-6b061dfe0b20","publicName":"gemini-2.5-flash-preview-05-20","organization":"google","provider":"google","capabilities":{"inputCapabilities":{"text":True,"image":True},"outputCapabilities":{"text":True}}},{"id":"49bd7403-c7fd-4d91-9829-90a91906ad6c","publicName":"llama-4-maverick-03-26-experimental","organization":"meta","provider":"meta","capabilities":{"inputCapabilities":{"text":True},"outputCapabilities":{"text":True}}},{"id":"14e9311c-94d2-40c2-8c54-273947e208b0","publicName":"gpt-4.1-2025-04-14","organization":"openai","provider":"openai","capabilities":{"inputCapabilities":{"text":True,"image":True},"outputCapabilities":{"text":True}}},{"id":"885976d3-d178-48f5-a3f4-6e13e0718872","publicName":"qwq-32b","organization":"alibaba","provider":"alibaba","capabilities":{"inputCapabilities":{"text":True},"outputCapabilities":{"text":True}}},{"id":"aba0d185-6e8d-4cec-9933-20bc2ca3112a","publicName":"folsom-exp-v1.5","capabilities":{"inputCapabilities":{"text":True},"outputCapabilities":{"text":True}}},{"id":"69bbf7d4-9f44-447e-a868-abc4f7a31810","publicName":"gemini-2.0-flash-preview-image-generation","organization":"google","provider":"google","capabilities":{"inputCapabilities":{"text":True,"image":True},"outputCapabilities":{"image":{"aspectRatios":["1:1"]}}}},{"id":"c5a11495-081a-4dc6-8d9a-64a4fd6f7bbc","publicName":"claude-3-7-sonnet-20250219","organization":"anthropic","provider":"anthropic","capabilities":{"inputCapabilities":{"text":True,"image":True},"outputCapabilities":{"text":True}}},{"id":"ee116d12-64d6-48a8-88e5-b2d06325cdd2","publicName":"claude-opus-4-20250514","organization":"anthropic","provider":"anthropic","capabilities":{"inputCapabilities":{"text":True,"image":True},"outputCapabilities":{"text":True}}},{"id":"789e245f-eafe-4c72-b563-d135e93988fc","publicName":"gemma-3-27b-it","organization":"google","provider":"google","capabilities":{"inputCapabilities":{"text":True},"outputCapabilities":{"text":True}}},{"id":"3e271620-a2b8-44a1-b0f8-8cffc29f3c0d","publicName":"stephen","capabilities":{"inputCapabilities":{"text":True},"outputCapabilities":{"text":True}}},{"id":"f6fbf06c-532c-4c8a-89c7-f3ddcfb34bd1","publicName":"claude-3-5-haiku-20241022","organization":"anthropic","provider":"anthropic","capabilities":{"inputCapabilities":{"text":True,"image":True},"outputCapabilities":{"text":True}}},{"id":"7699c8d4-0742-42f9-a117-d10e84688dab","publicName":"grok-3-mini-beta","organization":"xai","provider":"xaiPublic","capabilities":{"inputCapabilities":{"text":True},"outputCapabilities":{"text":True}}},{"id":"2c59ec43-86fc-4bcf-b63f-0f76cd06c79d","publicName":"bagel","capabilities":{"inputCapabilities":{"text":True,"image":True},"outputCapabilities":{"image":{"aspectRatios":["1:1"]}}}},{"id":"0f785ba1-efcb-472d-961e-69f7b251c7e3","publicName":"command-a-03-2025","organization":"cohere","provider":"cohere","capabilities":{"inputCapabilities":{"text":True},"outputCapabilities":{"text":True}}},{"id":"30ab90f5-e020-4f83-aff5-f750d2e78769","publicName":"deepseek-r1-0528","organization":"deepseek","provider":"deepseek","capabilities":{"inputCapabilities":{"text":True},"outputCapabilities":{"text":True}}},{"id":"6a5437a7-c786-467b-b701-17b0bc8c8231","publicName":"gpt-4.1-mini-2025-04-14","organization":"openai","provider":"openai","capabilities":{"inputCapabilities":{"text":True,"image":True},"outputCapabilities":{"text":True}}},{"id":"a14546b5-d78d-4cf6-bb61-ab5b8510a9d6","publicName":"amazon.nova-pro-v1:0","organization":"amazon","provider":"amazonBedrock","capabilities":{"inputCapabilities":{"text":True,"image":True},"outputCapabilities":{"text":True}}},{"id":"c680645e-efac-4a81-b0af-da16902b2541","publicName":"o3-mini","organization":"openai","provider":"openai","capabilities":{"inputCapabilities":{"text":True},"outputCapabilities":{"text":True}}},{"id":"be98fcfd-345c-4ae1-9a82-a19123ebf1d2","publicName":"claude-3-7-sonnet-20250219-thinking-32k","organization":"anthropic","provider":"anthropic","capabilities":{"inputCapabilities":{"text":True,"image":True},"outputCapabilities":{"text":True}}},{"id":"cb0f1e24-e8e9-4745-aabc-b926ffde7475","publicName":"o3-2025-04-16","organization":"openai","provider":"openai","capabilities":{"inputCapabilities":{"text":True,"image":True},"outputCapabilities":{"text":True}}},{"id":"f1102bbf-34ca-468f-a9fc-14bcf63f315b","publicName":"o4-mini-2025-04-16","organization":"openai","provider":"openai","capabilities":{"inputCapabilities":{"text":True,"image":True},"outputCapabilities":{"text":True}}},{"id":"7fff29a7-93cc-44ab-b685-482c55ce4fa6","publicName":"gemini-2.5-flash-preview-04-17","organization":"google","provider":"google","capabilities":{"inputCapabilities":{"text":True,"image":True},"outputCapabilities":{"text":True}}},{"id":"27b9f8c6-3ee1-464a-9479-a8b3c2a48fd4","publicName":"mistral-medium-2505","organization":"mistral","provider":"mistral","capabilities":{"inputCapabilities":{"text":True,"image":True},"outputCapabilities":{"text":True}}},{"id":"b5ad3ab7-fc56-4ecd-8921-bd56b55c1159","publicName":"llama-4-maverick-17b-128e-instruct","organization":"meta","provider":"fireworks","capabilities":{"inputCapabilities":{"text":True,"image":True},"outputCapabilities":{"text":True}}},{"id":"51ad1d79-61e2-414c-99e3-faeb64bb6b1b","publicName":"imagen-3.0-generate-002","organization":"google","provider":"googleVertex","capabilities":{"inputCapabilities":{"text":True},"outputCapabilities":{"image":{"aspectRatios":["1:1"]}}}},{"id":"2f5253e4-75be-473c-bcfc-baeb3df0f8ad","publicName":"deepseek-v3-0324","organization":"deepseek","provider":"fireworks","capabilities":{"inputCapabilities":{"text":True},"outputCapabilities":{"text":True}}},{"id":"6e855f13-55d7-4127-8656-9168a9f4dcc0","publicName":"gpt-image-1","organization":"openai","provider":"customOpenai","capabilities":{"inputCapabilities":{"text":True,"image":True},"outputCapabilities":{"image":{"aspectRatios":["1:1"]}}}},{"id":"34ee5a83-8d85-4d8b-b2c1-3b3413e9ed98","publicName":"ideogram-v2","organization":"Ideogram","provider":"replicate","capabilities":{"inputCapabilities":{"text":True},"outputCapabilities":{"image":{"aspectRatios":["1:1"]}}}},{"id":"17e31227-36d7-4a7a-943a-7ebffa3a00eb","publicName":"photon","organization":"luma-ai","provider":"replicate","capabilities":{"inputCapabilities":{"text":True},"outputCapabilities":{"image":{"aspectRatios":["1:1"]}}}},{"id":"ac44dd10-0666-451c-b824-386ccfea7bcc","publicName":"claude-sonnet-4-20250514","organization":"anthropic","provider":"anthropic","capabilities":{"inputCapabilities":{"text":True,"image":True},"outputCapabilities":{"text":True}}},{"id":"68e498cb-a1b3-45fa-ae84-1b746d48652f","publicName":"X-preview","capabilities":{"inputCapabilities":{"text":True},"outputCapabilities":{"text":True}}},{"id":"9a066f6a-7205-4325-8d0b-d81cc4b049c0","publicName":"qwen3-30b-a3b","organization":"alibaba","provider":"alibaba","capabilities":{"inputCapabilities":{"text":True},"outputCapabilities":{"text":True}}},{"id":"fe8003fc-2e5d-4a3f-8f07-c1cff7ba0159","publicName":"qwen-max-2025-01-25","organization":"alibaba","provider":"alibaba","capabilities":{"inputCapabilities":{"text":True},"outputCapabilities":{"text":True}}},{"id":"2595a594-fa54-4299-97cd-2d7380d21c80","publicName":"qwen3-235b-a22b","organization":"alibaba","provider":"alibaba","capabilities":{"inputCapabilities":{"text":True},"outputCapabilities":{"text":True}}},{"id":"dcbd7897-5a37-4a34-93f1-76a24c7bb028","publicName":"llama-3.3-70b-instruct","organization":"meta","provider":"fireworks","capabilities":{"inputCapabilities":{"text":True},"outputCapabilities":{"text":True}}},{"id":"0337ee08-8305-40c0-b820-123ad42b60cf","publicName":"gemini-2.5-pro-preview-05-06","organization":"google","provider":"google","capabilities":{"inputCapabilities":{"text":True,"image":True},"outputCapabilities":{"text":True}}},{"id":"e2d9d353-6dbe-4414-bf87-bd289d523726","publicName":"goldmane","capabilities":{"inputCapabilities":{"text":True,"image":True},"outputCapabilities":{"text":True}}},{"id":"5b5ad048-73b6-4cc2-a27f-2d2c2c2379a7","publicName":"glm-4-air-250414","capabilities":{"inputCapabilities":{"text":True},"outputCapabilities":{"text":True}}},{"id":"bb97bc68-131c-4ea4-a59e-03a6252de0d2","publicName":"dall-e-3","organization":"openai","provider":"openai","capabilities":{"inputCapabilities":{"text":True},"outputCapabilities":{"image":{"aspectRatios":["1:1"]}}}},{"id":"b70ab012-18e7-4d6f-a887-574e05de6c20","publicName":"recraft-v3","organization":"Recraft","provider":"replicate","capabilities":{"inputCapabilities":{"text":True},"outputCapabilities":{"image":{"aspectRatios":["1:1"]}}}},{"id":"3f4930f2-8898-429b-8c46-1969edfe2e19","publicName":"redsword","capabilities":{"inputCapabilities":{"text":True,"image":True},"outputCapabilities":{"text":True}}},{"id":"b5a3aa9e-26ea-47e4-9dee-b348c8eaa132","publicName":"stephen","capabilities":{"inputCapabilities":{"text":True,"image":True},"outputCapabilities":{"text":True}}},{"id":"9e8525b7-fe50-4e50-bf7f-ad1d3d205d3c","publicName":"flux-1.1-pro","organization":"Black Forest Labs","provider":"replicate","capabilities":{"inputCapabilities":{"text":True},"outputCapabilities":{"image":{"aspectRatios":["1:1"]}}}},{"id":"eb5da04f-9b28-406b-bf06-4539158c66ef","publicName":"anonymous-bot-0514","capabilities":{"inputCapabilities":{"text":True},"outputCapabilities":{"image":{"aspectRatios":["1:1"]}}}},{"id":"1a400d9a-f61c-4bc2-89b4-a9b7e77dff12","publicName":"qwen3-235b-a22b-no-thinking","organization":"alibaba","provider":"alibaba","capabilities":{"inputCapabilities":{"text":True},"outputCapabilities":{"text":True}}}]
model["capabilities"]["outputCapabilities"]} +image_models = {model["publicName"]: model["id"] for model in models if "image" in model["capabilities"]["outputCapabilities"]} + +class LMArenaBeta(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin): + label = "LMArena Beta" + url = "https://beta.lmarena.ai" + api_endpoint = "https://beta.lmarena.ai/api/stream/create-evaluation" + working = True + + default_model = list(text_models.keys())[0] + models = list(text_models) + list(image_models) + image_models = list(image_models) + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + conversation: JsonConversation = None, + proxy: str = None, + **kwargs + ) -> AsyncResult: + cache_file = cls.get_cache_file() + if cache_file.exists() and cache_file.stat().st_mtime > time.time() - 60 * 30: + with cache_file.open("r") as f: + args = json.load(f) + elif has_nodriver: + try: + async def callback(page): + while not await page.evaluate('document.cookie.indexOf("arena-auth-prod-v1") >= 0'): + await asyncio.sleep(1) + await asyncio.sleep(5) + args = await get_args_from_nodriver(cls.url, proxy=proxy, callback=callback) + except (RuntimeError, FileNotFoundError) as e: + debug.log(f"Nodriver is not available: {type(e).__name__}: {e}") + args = {"headers": DEFAULT_HEADERS, "cookies": {}, "impersonate": "chrome"} + else: + args = {"headers": DEFAULT_HEADERS, "cookies": {}, "impersonate": "chrome"} + + # Build the JSON payload + is_image_model = model in image_models + if model in image_models: + model = image_models[model] + elif model in text_models: + model = text_models[model] + else: + raise ModelNotFoundError(f"Model '{model}' is not supported by LMArena Beta.") + userMessageId = str(uuid.uuid4()) + modelAMessageId = str(uuid.uuid4()) + evaluationSessionId = str(uuid.uuid4()) + prompt = get_last_user_message(messages) + data = { + "id": evaluationSessionId, + "mode": "direct", + "modelAId": model, + "userMessageId": userMessageId, + "modelAMessageId": modelAMessageId, + "messages": [ + { + "id": userMessageId, + "role": "user", + "content": prompt, + "experimental_attachments": [], + "parentMessageIds": [] if conversation is None else conversation.message_ids, + "participantPosition": "a", + "modelId": None, + "evaluationSessionId": evaluationSessionId, + "status": "pending", + "failureReason": None + }, + { + "id": modelAMessageId, + "role": "assistant", + "content": "", + "experimental_attachments": [], + "parentMessageIds": [userMessageId], + "participantPosition": "a", + "modelId": model, + "evaluationSessionId": evaluationSessionId, + "status": "pending", + "failureReason": None + } + ], + "modality": "image" if is_image_model else "chat" + } + async with StreamSession(**args) as session: + async with session.post( + cls.api_endpoint, + json=data, + proxy=proxy + ) as response: + await raise_for_status(response) + args["cookies"] = merge_cookies(args["cookies"], response) + async for chunk in response.iter_lines(): + line = chunk.decode() + if line.startswith("af:"): + yield JsonConversation(message_ids=[modelAMessageId]) + elif line.startswith("a0:"): + yield json.loads(line[3:]) + elif line.startswith("a2:"): + yield ImageResponse([image.get("image") for image in json.loads(line[3:])], prompt) + elif line.startswith("ad:"): + finish = json.loads(line[3:]) + if "finishReason" in finish: + yield FinishReason(finish["finishReason"]) + if "usage" in finish: + yield Usage(**finish["usage"]) + + # Save the args to cache file + with cache_file.open("w") 
+                async for chunk in response.iter_lines():
+                    line = chunk.decode()
+                    if line.startswith("af:"):
+                        yield JsonConversation(message_ids=[modelAMessageId])
+                    elif line.startswith("a0:"):
+                        yield json.loads(line[3:])
+                    elif line.startswith("a2:"):
+                        yield ImageResponse([image.get("image") for image in json.loads(line[3:])], prompt)
+                    elif line.startswith("ad:"):
+                        finish = json.loads(line[3:])
+                        if "finishReason" in finish:
+                            yield FinishReason(finish["finishReason"])
+                        if "usage" in finish:
+                            yield Usage(**finish["usage"])
+
+        # Save the args to cache file
+        with cache_file.open("w") as f:
+            json.dump(args, f)
\ No newline at end of file
diff --git a/g4f/Provider/PollinationsAI.py b/g4f/Provider/PollinationsAI.py
index ef25d5f2..a5bb4732 100644
--- a/g4f/Provider/PollinationsAI.py
+++ b/g4f/Provider/PollinationsAI.py
@@ -458,7 +458,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
                 yield FinishReason(finish_reason)
             if reasoning:
                 yield Reasoning(status="Done")
-        if "action" in kwargs and "tools" not in data and "response_format" not in data:
+        if kwargs.get("action") == "next":
             data = {
                 "model": model,
                 "messages": messages + FOLLOWUPS_DEVELOPER_MESSAGE,
diff --git a/g4f/Provider/PuterJS.py b/g4f/Provider/PuterJS.py
new file mode 100644
index 00000000..6c199927
--- /dev/null
+++ b/g4f/Provider/PuterJS.py
@@ -0,0 +1,446 @@
+from __future__ import annotations
+
+import random
+import json
+import requests
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages, MediaListType
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..providers.response import FinishReason, Usage, Reasoning, ToolCalls
+from ..tools.media import render_messages
+from ..requests import see_stream, raise_for_status
+from ..errors import ResponseError, ModelNotFoundError, MissingAuthError
+from . import debug
+
+class PuterJS(AsyncGeneratorProvider, ProviderModelMixin):
+    label = "Puter.js"
+    url = "https://docs.puter.com/playground"
+    login_url = "https://github.com/HeyPuter/puter-cli"
+    api_endpoint = "https://api.puter.com/drivers/call"
+    working = True
+    needs_auth = True
+
+    default_model = 'gpt-4o'
+    default_vision_model = default_model
+    openai_models = [default_vision_model,"gpt-4o-mini", "o1", "o1-mini", "o1-pro", "o3", "o3-mini", "o4-mini", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", "gpt-4.5-preview"]
+    claude_models = ["claude-3-7-sonnet-20250219", "claude-3-7-sonnet-latest", "claude-3-5-sonnet-20241022", "claude-3-5-sonnet-latest", "claude-3-5-sonnet-20240620", "claude-3-haiku-20240307"]
+    mistral_models = ["ministral-3b-2410","ministral-3b-latest","ministral-8b-2410","ministral-8b-latest","open-mistral-7b","mistral-tiny","mistral-tiny-2312","open-mixtral-8x7b","mistral-small","mistral-small-2312","open-mixtral-8x22b","open-mixtral-8x22b-2404","mistral-large-2411","mistral-large-latest","pixtral-large-2411","pixtral-large-latest","mistral-large-pixtral-2411","codestral-2501","codestral-latest","codestral-2412","codestral-2411-rc5","pixtral-12b-2409","pixtral-12b","pixtral-12b-latest","mistral-small-2503","mistral-small-latest"]
+    xai_models = ["grok-beta", "grok-vision-beta"]
+    deepseek_models = ["deepseek-chat","deepseek-reasoner"]
+    gemini_models = ["gemini-1.5-flash","gemini-2.0-flash"]
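+    # An alias value may be a single upstream id or a list of candidates;
+    # get_model() below picks a list entry at random for each request.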
"openrouter:meta-llama/llama-3.2-1b-instruct"], + "llama-3.2-3b": ["openrouter:meta-llama/llama-3.2-3b-instruct:free","openrouter:meta-llama/llama-3.2-3b-instruct"], + "llama-3.2-11b": ["openrouter:meta-llama/llama-3.2-11b-vision-instruct:free", "openrouter:meta-llama/llama-3.2-11b-vision-instruct"], + "llama-3.2-90b": "openrouter:meta-llama/llama-3.2-90b-vision-instruct", + "llama-3.3-8b": "openrouter:meta-llama/llama-3.3-8b-instruct:free", + "llama-3.3-70b": ["openrouter:meta-llama/llama-3.3-70b-instruct:free", "openrouter:meta-llama/llama-3.3-70b-instruct"], + "llama-4-maverick": ["openrouter:meta-llama/llama-4-maverick:free", "openrouter:meta-llama/llama-4-maverick"], + "llama-4-scout": ["openrouter:meta-llama/llama-4-scout:free", "openrouter:meta-llama/llama-4-scout"], + + # google (gemini) + "gemini-1.5-flash": ["gemini-1.5-flash", "openrouter:google/gemini-flash-1.5", "gemini-flash-1.5-8b"], + "gemini-1.5-8b-flash": "openrouter:google/gemini-flash-1.5-8b", + "gemini-1.5-pro": "openrouter:google/gemini-pro-1.5", + "gemini-2.0-flash": ["gemini-2.0-flash", "openrouter:google/gemini-2.0-flash-lite-001", "openrouter:google/gemini-2.0-flash-001", "openrouter:google/gemini-2.0-flash-exp:free"], + "gemini-2.5-pro": ["openrouter:google/gemini-2.5-pro-preview", "openrouter:google/gemini-2.5-pro-exp-03-25"], + "gemini-2.5-flash": "openrouter:google/gemini-2.5-flash-preview", + "gemini-2.5-flash-thinking": "openrouter:google/gemini-2.5-flash-preview:thinking", + + # google (gemma) + "gemma-2-9b": ["openrouter:google/gemma-2-9b-it:free","openrouter:google/gemma-2-9b-it"], + "gemma-2-27b": "openrouter:google/gemma-2-27b-it", + "gemma-3-1b": "openrouter:google/gemma-3-1b-it:free", + "gemma-3-4b": ["openrouter:google/gemma-3-4b-it:free", "openrouter:google/gemma-3-4b-it"], + "gemma-3-12b": ["openrouter:google/gemma-3-12b-it:free", "openrouter:google/gemma-3-12b-it"], + "gemma-3-27b": ["openrouter:google/gemma-3-27b-it:free", "openrouter:google/gemma-3-27b-it"], + + # openai (gpt-3.5) + "gpt-3.5-turbo": ["openrouter:openai/gpt-3.5-turbo-0613", "openrouter:openai/gpt-3.5-turbo-1106", "openrouter:openai/gpt-3.5-turbo-0125", "openrouter:openai/gpt-3.5-turbo", "openrouter:openai/gpt-3.5-turbo-instruct", "openrouter:openai/gpt-3.5-turbo-16k"], + + # openai (gpt-4) + "gpt-4": ["openrouter:openai/gpt-4-1106-preview", "openrouter:openai/gpt-4-32k", "openrouter:openai/gpt-4-32k-0314", "openrouter:openai/gpt-4", "openrouter:openai/gpt-4-0314",], + "gpt-4-turbo": ["openrouter:openai/gpt-4-turbo", "openrouter:openai/gpt-4-turbo-preview"], + + # openai (gpt-4o) + "gpt-4o": ["gpt-4o", "openrouter:openai/gpt-4o-2024-08-06", "openrouter:openai/gpt-4o-2024-11-20", "openrouter:openai/chatgpt-4o-latest", "openrouter:openai/gpt-4o", "openrouter:openai/gpt-4o:extended", "openrouter:openai/gpt-4o-2024-05-13",], + "gpt-4o-search": "openrouter:openai/gpt-4o-search-preview", + "gpt-4o-mini": ["gpt-4o-mini", "openrouter:openai/gpt-4o-mini", "openrouter:openai/gpt-4o-mini-2024-07-18"], + "gpt-4o-mini-search": "openrouter:openai/gpt-4o-mini-search-preview", + + # openai (o1) + "o1": ["o1", "openrouter:openai/o1", "openrouter:openai/o1-preview", "openrouter:openai/o1-preview-2024-09-12"], + "o1-mini": ["o1-mini", "openrouter:openai/o1-mini", "openrouter:openai/o1-mini-2024-09-12"], + "o1-pro": ["o1-pro", "openrouter:openai/o1-pro"], + + # openai (o3) + "o3": ["o3", "openrouter:openai/o3"], + "o3-mini": ["o3-mini", "openrouter:openai/o3-mini", "openrouter:openai/o3-mini-high"], + "o3-mini-high": "openrouter:openai/o3-mini-high", 
+ + # openai (o4) + "o4-mini": ["o4-mini", "openrouter:openai/o4-mini"], + "o4-mini-high": "openrouter:openai/o4-mini-high", + + # openai (gpt-4.1) + "gpt-4.1": ["gpt-4.1", "openrouter:openai/gpt-4.1"], + "gpt-4.1-mini": ["gpt-4.1-mini", "openrouter:openai/gpt-4.1-mini"], + "gpt-4.1-nano": ["gpt-4.1-nano", "openrouter:openai/gpt-4.1-nano"], + + # openai (gpt-4.5) + "gpt-4.5": ["gpt-4.5-preview", "openrouter:openai/gpt-4.5-preview"], + + # mistralai + "mistral-large": ["openrouter:mistralai/mistral-large", "openrouter:mistralai/mistral-large-2411", "openrouter:mistralai/mistral-large-2407", "openrouter:mistralai/pixtral-large-2411"], + "mistral-medium": ["openrouter:mistralai/mistral-medium", "openrouter:mistralai/mistral-medium-3"], + "mistral-small": ["mistral-small", "mistral-small-2312", "mistral-small-2503","mistral-small-latest", "openrouter:mistralai/mistral-small", "openrouter:mistralai/mistral-small-3.1-24b-instruct:free", "openrouter:mistralai/mistral-small-3.1-24b-instruct", "openrouter:mistralai/mistral-small-24b-instruct-2501:free", "openrouter:mistralai/mistral-small-24b-instruct-2501"], + "mistral-tiny": ["mistral-tiny", "mistral-tiny-2312", "openrouter:mistralai/mistral-tiny"], + "mistral-7b": ["open-mistral-7b", "openrouter:mistralai/mistral-7b-instruct", "openrouter:mistralai/mistral-7b-instruct:free", "openrouter:mistralai/mistral-7b-instruct-v0.1", "openrouter:mistralai/mistral-7b-instruct-v0.2", "openrouter:mistralai/mistral-7b-instruct-v0.3",], + "mixtral-8x7b": ["open-mixtral-8x7b", "openrouter:mistralai/mixtral-8x7b-instruct"], + "mixtral-8x22b": ["open-mixtral-8x22b", "open-mixtral-8x22b-2404", "openrouter:mistralai/mixtral-8x7b-instruct", "openrouter:mistralai/mixtral-8x22b-instruct"], + "ministral-8b": ["ministral-8b-2410", "ministral-8b-latest", "openrouter:mistral/ministral-8b", "openrouter:mistralai/ministral-8b"], + "mistral-nemo": ["openrouter:mistralai/mistral-nemo:free", "openrouter:mistralai/mistral-nemo"], + "ministral-3b": ["ministral-3b-2410", "ministral-3b-latest", "openrouter:mistralai/ministral-3b"], + "mistral-saba": "openrouter:mistralai/mistral-saba", + "codestral": ["codestral-2501","codestral-latest","codestral-2412","codestral-2411-rc5", "openrouter:mistralai/codestral-2501", "openrouter:mistralai/codestral-mamba"], + "pixtral-12b": ["pixtral-12b-2409","pixtral-12b","pixtral-12b-latest", "openrouter:mistralai/pixtral-12b"], + + # nousresearch + "hermes-2-dpo": "openrouter:nousresearch/nous-hermes-2-mixtral-8x7b-dpo", + "hermes-2-pro": "openrouter:nousresearch/hermes-2-pro-llama-3-8b", + "hermes-3-70b": "openrouter:nousresearch/hermes-3-llama-3.1-70b", + "hermes-3-405b": "openrouter:nousresearch/hermes-3-llama-3.1-405b", + "deephermes-3-8b": "openrouter:nousresearch/deephermes-3-llama-3-8b-preview:free", + "deephermes-3-24b": "openrouter:nousresearch/deephermes-3-mistral-24b-preview:free", + + # microsoft + "phi-3-mini": "openrouter:microsoft/phi-3-mini-128k-instruct", + "phi-3-medium": "openrouter:microsoft/phi-3-medium-128k-instruct", + "phi-3.5-mini": "openrouter:microsoft/phi-3.5-mini-128k-instruct", + "phi-4": "openrouter:microsoft/phi-4", + "phi-4-multimodal": "openrouter:microsoft/phi-4-multimodal-instruct", + "phi-4-reasoning": "openrouter:microsoft/phi-4-reasoning:free", + "phi-4-reasoning-plus": ["openrouter:microsoft/phi-4-reasoning-plus:free", "openrouter:microsoft/phi-4-reasoning-plus"], + + "wizardlm-2-8x22b": "openrouter:microsoft/wizardlm-2-8x22b", + + "mai-ds-r1": "openrouter:microsoft/mai-ds-r1:free", + + # anthropic + 
"claude-3.7-sonnet": ["claude-3-7-sonnet-20250219", "claude-3-7-sonnet-latest", "openrouter:anthropic/claude-3.7-sonnet", "openrouter:anthropic/claude-3.7-sonnet:beta",], + "claude-3.7-sonnet-thinking": "openrouter:anthropic/claude-3.7-sonnet:thinking", + "claude-3.5-haiku": ["openrouter:anthropic/claude-3.5-haiku:beta", "openrouter:anthropic/claude-3.5-haiku", "openrouter:anthropic/claude-3.5-haiku-20241022:beta", "openrouter:anthropic/claude-3.5-haiku-20241022"], + "claude-3.5-sonnet": ["claude-3-5-sonnet-20241022", "claude-3-5-sonnet-latest", "claude-3-5-sonnet-20240620", "openrouter:anthropic/claude-3.5-sonnet-20240620:beta", "openrouter:anthropic/claude-3.5-sonnet-20240620", "openrouter:anthropic/claude-3.5-sonnet:beta", "openrouter:anthropic/claude-3.5-sonnet",], + "claude-3-haiku": ["claude-3-haiku-20240307", "openrouter:anthropic/claude-3-haiku:beta", "openrouter:anthropic/claude-3-haiku"], + "claude-3-opus": ["openrouter:anthropic/claude-3-opus:beta", "openrouter:anthropic/claude-3-opus"], + "claude-3-sonnet": ["openrouter:anthropic/claude-3-sonnet:beta", "openrouter:anthropic/claude-3-sonnet"], + "claude-2.1": ["openrouter:anthropic/claude-2.1:beta", "openrouter:anthropic/claude-2.1"], + "claude-2": ["openrouter:anthropic/claude-2:beta", "openrouter:anthropic/claude-2",], + "claude-2.0": ["openrouter:anthropic/claude-2.0:beta", "openrouter:anthropic/claude-2.0"], + + # rekaai + "reka-flash": "openrouter:rekaai/reka-flash-3:free", + + # cohere + "command-r7b": "openrouter:cohere/command-r7b-12-2024", + "command-r-plus": ["openrouter:cohere/command-r-plus-08-2024", "openrouter:cohere/command-r-plus", "openrouter:cohere/command-r-plus-04-2024"], + "command": "openrouter:cohere/command", + "command-r": ["openrouter:cohere/command-r-08-2024", "openrouter:cohere/command-r", "openrouter:cohere/command-r-03-2024"], + "command-a": "openrouter:cohere/command-a", + + # qwen + "qwq-32b": ["openrouter:qwen/qwq-32b-preview", "openrouter:qwen/qwq-32b:free", "openrouter:qwen/qwq-32b"], + "qwen-vl-plus": "openrouter:qwen/qwen-vl-plus", + "qwen-vl-max": "openrouter:qwen/qwen-vl-max", + "qwen-turbo": "openrouter:qwen/qwen-turbo", + "qwen-2.5-vl-72b": ["openrouter:qwen/qwen2.5-vl-72b-instruct:free", "openrouter:qwen/qwen2.5-vl-72b-instruct"], + "qwen-plus": "openrouter:qwen/qwen-plus", + "qwen-max": "openrouter:qwen/qwen-max", + "qwen-2.5-coder-32b": ["openrouter:qwen/qwen-2.5-coder-32b-instruct:free", "openrouter:qwen/qwen-2.5-coder-32b-instruct"], + "qwen-2.5-7b": ["openrouter:qwen/qwen-2.5-7b-instruct:free", "openrouter:qwen/qwen-2.5-7b-instruct"], + "qwen-2.5-72b": ["openrouter:qwen/qwen-2.5-72b-instruct:free", "openrouter:qwen/qwen-2.5-72b-instruct"], + "qwen-2.5-vl-7b": ["openrouter:qwen/qwen-2.5-vl-7b-instruct:free", "openrouter:qwen/qwen-2.5-vl-7b-instruct"], + "qwen-2-72b": "openrouter:qwen/qwen-2-72b-instruct", + "qwen-3-0.6b": "openrouter:qwen/qwen3-0.6b-04-28:free", + "qwen-3-1.7b": "openrouter:qwen/qwen3-1.7b:free", + "qwen-3-4b": "openrouter:qwen/qwen3-4b:free", + "qwen-3-30b": ["openrouter:qwen/qwen3-30b-a3b:free", "openrouter:qwen/qwen3-30b-a3b"], + "qwen-3-8b": ["openrouter:qwen/qwen3-8b:free", "openrouter:qwen/qwen3-8b"], + "qwen-3-14b": ["openrouter:qwen/qwen3-14b:free", "openrouter:qwen/qwen3-14b"], + "qwen-3-32b": ["openrouter:qwen/qwen3-32b:free", "openrouter:qwen/qwen3-32b"], + "qwen-3-235b": ["openrouter:qwen/qwen3-235b-a22b:free", "openrouter:qwen/qwen3-235b-a22b"], + "qwen-2.5-coder-7b": "openrouter:qwen/qwen2.5-coder-7b-instruct", + "qwen-2.5-vl-3b": 
"openrouter:qwen/qwen2.5-vl-3b-instruct:free", + "qwen-2.5-vl-32b": ["openrouter:qwen/qwen2.5-vl-32b-instruct:free", "openrouter:qwen/qwen2.5-vl-32b-instruct"], + + # deepseek + "deepseek-prover-v2": ["openrouter:deepseek/deepseek-prover-v2:free", "openrouter:deepseek/deepseek-prover-v2"], + "deepseek-v3": "openrouter:deepseek/deepseek-v3-base:free", + "deepseek-v3-0324": ["deepseek-chat", "openrouter:deepseek/deepseek-chat-v3-0324:free", "openrouter:deepseek/deepseek-chat-v3-0324"], + "deepseek-r1-zero": "openrouter:deepseek/deepseek-r1-zero:free", + "deepseek-r1-distill-llama-8b": "openrouter:deepseek/deepseek-r1-distill-llama-8b", + "deepseek-r1-distill-qwen-1.5b": "openrouter:deepseek/deepseek-r1-distill-qwen-1.5b", + "deepseek-r1-distill-qwen-32b": ["openrouter:deepseek/deepseek-r1-distill-qwen-32b:free", "openrouter:deepseek/deepseek-r1-distill-qwen-32b"], + "deepseek-r1-distill-qwen-14b": ["openrouter:deepseek/deepseek-r1-distill-qwen-14b:free","openrouter:deepseek/deepseek-r1-distill-qwen-14b"], + "deepseek-r1-distill-llama-70b": ["openrouter:deepseek/deepseek-r1-distill-llama-70b:free", "openrouter:deepseek/deepseek-r1-distill-llama-70b"], + "deepseek-r1": ["deepseek-reasoner", "openrouter:deepseek/deepseek-r1:free", "openrouter:deepseek/deepseek-r1"], + "deepseek-chat": ["deepseek-chat", "openrouter:deepseek/deepseek-chat:free", "openrouter:deepseek/deepseek-chat"], + "deepseek-coder": ["openrouter:deepseek/deepseek-coder"], + + # inflection + "inflection-3-productivity": "openrouter:inflection/inflection-3-productivity", + "inflection-3-pi": "openrouter:inflection/inflection-3-pi", + + # x-ai + "grok-3-mini": "openrouter:x-ai/grok-3-mini-beta", + "grok-3-beta": "openrouter:x-ai/grok-3-beta", + "grok-2": ["openrouter:x-ai/grok-2-vision-1212", "openrouter:x-ai/grok-2-1212"], + "grok": ["openrouter:x-ai/grok-vision-beta", "openrouter:x-ai/grok-2-vision-1212", "openrouter:x-ai/grok-2-1212", "grok-beta","grok-vision-beta", "openrouter:x-ai/grok-beta", "openrouter:x-ai/grok-3-beta", "openrouter:x-ai/grok-3-mini-beta"], + "grok-beta": ["grok-beta","grok-vision-beta", "openrouter:x-ai/grok-beta", "openrouter:x-ai/grok-3-beta"], + + # perplexity + "sonar-reasoning-pro": "openrouter:perplexity/sonar-reasoning-pro", + "sonar-pro": "openrouter:perplexity/sonar-pro", + "sonar-deep-research": "openrouter:perplexity/sonar-deep-research", + "r1-1776": "openrouter:perplexity/r1-1776", + "sonar-reasoning": "openrouter:perplexity/sonar-reasoning", + "sonar": "openrouter:perplexity/sonar", + "llama-3.1-sonar-small-online": "openrouter:perplexity/llama-3.1-sonar-small-128k-online", + "llama-3.1-sonar-large-online": "openrouter:perplexity/llama-3.1-sonar-large-128k-online", + + # nvidia + "nemotron-49b": ["openrouter:nvidia/llama-3.3-nemotron-super-49b-v1:free", "openrouter:nvidia/llama-3.3-nemotron-super-49b-v1"], + "nemotron-70b": "openrouter:nvidia/llama-3.1-nemotron-70b-instruct", + "nemotron-253b": "openrouter:nvidia/llama-3.1-nemotron-ultra-253b-v1:free", + + # thudm + "glm-4": ["openrouter:thudm/glm-4-32b:free", "openrouter:thudm/glm-4-32b", "openrouter:thudm/glm-4-9b:free",], + "glm-4-32b": ["openrouter:thudm/glm-4-32b:free", "openrouter:thudm/glm-4-32b"], + "glm-z1-32b": ["openrouter:thudm/glm-z1-32b:free", "openrouter:thudm/glm-z1-32b"], + "glm-4-9b": "openrouter:thudm/glm-4-9b:free", + "glm-z1-9b": "openrouter:thudm/glm-z1-9b:free", + "glm-z1-rumination-32b": "openrouter:thudm/glm-z1-rumination-32b", + + # minimax + "minimax": "openrouter:minimax/minimax-01", + + # cognitivecomputations + 
"dolphin-3.0-r1-24b": "openrouter:cognitivecomputations/dolphin3.0-r1-mistral-24b:free", + "dolphin-3.0-24b": "openrouter:cognitivecomputations/dolphin3.0-mistral-24b:free", + "dolphin-8x22b": "openrouter:cognitivecomputations/dolphin-mixtral-8x22b", + + # agentica-org + "deepcoder-14b": "openrouter:agentica-org/deepcoder-14b-preview:free", + + # moonshotai + "kimi-vl-thinking": "openrouter:moonshotai/kimi-vl-a3b-thinking:free", + "moonlight-16b": "openrouter:moonshotai/moonlight-16b-a3b-instruct:free", + + # featherless + "qwerky-72b": "openrouter:featherless/qwerky-72b:free", + + # liquid + "lfm-7b": "openrouter:liquid/lfm-7b", + "lfm-3b": "openrouter:liquid/lfm-3b", + "lfm-40b": "openrouter:liquid/lfm-40b", + +} + @classmethod + def get_models(cls) -> list[str]: + if not cls.models: + try: + url = "https://api.puter.com/puterai/chat/models/" + cls.models = requests.get(url).json().get("models", []) + except Exception as e: + debug.log(f"PuterJS: Failed to fetch models from API: {e}") + cls.models = list(cls.model_aliases.keys()) + cls.vision_models = [] + for model in cls.models: + for tag in ["vision", "multimodal", "gpt", "o1", "o3", "o4"]: + if tag in model: + cls.vision_models.append(model) + return cls.models + + @staticmethod + def get_driver_for_model(model: str) -> str: + """Determine the appropriate driver based on the model name.""" + if model in PuterJS.openai_models: + return "openai-completion" + elif model in PuterJS.claude_models: + return "claude" + elif model in PuterJS.mistral_models: + return "mistral" + elif model in PuterJS.xai_models: + return "xai" + elif model in PuterJS.deepseek_models: + return "deepseek" + elif model in PuterJS.gemini_models: + return "gemini" + elif "openrouter:" in model: + return "openrouter" + else: + # Default to OpenAI for unknown models + return "openai-completion" + + @classmethod + def get_model(cls, model: str) -> str: + """Get the internal model name from the user-provided model name.""" + + if not model: + return cls.default_model + + # Check if the model exists directly in our models list + if model in cls.models: + return model + + # Check if there's an alias for this model + if model in cls.model_aliases: + alias = cls.model_aliases[model] + # If the alias is a list, randomly select one of the options + if isinstance(alias, list): + selected_model = random.choice(alias) + debug.log(f"PuterJS: Selected model '{selected_model}' from alias '{model}'") + return selected_model + debug.log(f"PuterJS: Using model '{alias}' for alias '{model}'") + return alias + + raise ModelNotFoundError(f"Model {model} not found") + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + stream: bool = True, + api_key: str = None, + media: MediaListType = None, + **kwargs + ) -> AsyncResult: + if not api_key: + raise MissingAuthError("API key is required for Puter.js API") + + # Check if we need to use a vision model + if not model and media is not None and len(media) > 0: + model = cls.default_vision_model + + # Check for image URLs in messages + if not model: + for msg in messages: + if msg["role"] == "user": + content = msg.get("content", "") + if isinstance(content, list): + for item in content: + if item.get("type") == "image_url": + model = cls.default_vision_model + break + + # Get the model name from the user-provided model + try: + model = cls.get_model(model) + except ModelNotFoundError: + pass + + async with ClientSession() as session: + headers = { + "authorization": f"Bearer 
{api_key}", + "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36", + "content-type": "application/json;charset=UTF-8", + # Set appropriate accept header based on stream mode + "accept": "text/event-stream" if stream else "*/*", + "origin": "http://docs.puter.com", + "sec-fetch-site": "cross-site", + "sec-fetch-mode": "cors", + "sec-fetch-dest": "empty", + "referer": "http://docs.puter.com/", + "accept-encoding": "gzip", + "accept-language": "en-US,en;q=0.9" + } + + # Determine the appropriate driver based on the model + driver = cls.get_driver_for_model(model) + + json_data = { + "interface": "puter-chat-completion", + "driver": driver, + "test_mode": False, + "method": "complete", + "args": { + "messages": list(render_messages(messages, media)), + "model": model, + "stream": stream, + **kwargs + } + } + async with session.post( + cls.api_endpoint, + headers=headers, + json=json_data, + proxy=proxy + ) as response: + await raise_for_status(response) + mime_type = response.headers.get("content-type", "") + if mime_type.startswith("text/plain"): + yield await response.text() + return + elif mime_type.startswith("text/event-stream"): + reasoning = False + async for result in see_stream(response.content): + if "error" in result: + raise ResponseError(result["error"].get("message", result["error"])) + if result.get("usage") is not None: + yield Usage(**result["usage"]) + choices = result.get("choices", [{}]) + choice = choices.pop() if choices else {} + content = choice.get("delta", {}).get("content") + if content: + yield content + tool_calls = choice.get("delta", {}).get("tool_calls") + if tool_calls: + yield ToolCalls(choice["delta"]["tool_calls"]) + reasoning_content = choice.get("delta", {}).get("reasoning_content") + if reasoning_content: + reasoning = True + yield Reasoning(reasoning_content) + finish_reason = choice.get("finish_reason") + if finish_reason: + yield FinishReason(finish_reason) + if reasoning: + yield Reasoning(status="Done") + elif mime_type.startswith("application/json"): + result = await response.json() + if "choices" in result: + choice = result["choices"][0] + message = choice.get("message", {}) + content = message.get("content", "") + if content: + yield content + if "tool_calls" in message: + yield ToolCalls(message["tool_calls"]) + else: + raise ResponseError(result) + if result.get("usage") is not None: + yield Usage(**result["usage"]) + finish_reason = choice.get("finish_reason") + if finish_reason: + yield FinishReason(finish_reason) + elif mime_type.startswith("application/x-ndjson"): + async for line in response.content: + data = json.loads(line) + if data.get("type") == "text": + yield data.get("text", "") + else: + raise ResponseError(f"Unexpected content type: {mime_type}") \ No newline at end of file diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py index b73b5b68..fdebd17e 100644 --- a/g4f/Provider/__init__.py +++ b/g4f/Provider/__init__.py @@ -10,7 +10,7 @@ try: except ImportError as e: debug.error("Deprecated providers not loaded:", e) from .needs_auth import * -from .template import OpenaiTemplate, BackendApi, Puter +from .template import OpenaiTemplate, BackendApi from .hf import HuggingFace, HuggingChat, HuggingFaceAPI, HuggingFaceInference, HuggingFaceMedia from .har import HarProvider try: @@ -53,6 +53,7 @@ from .GizAI import GizAI from .ImageLabs import ImageLabs from .LambdaChat import LambdaChat from .LegacyLMArena import LegacyLMArena +from 
+                if mime_type.startswith("text/plain"):
+                    yield await response.text()
+                    return
+                elif mime_type.startswith("text/event-stream"):
+                    reasoning = False
+                    async for result in see_stream(response.content):
+                        if "error" in result:
+                            raise ResponseError(result["error"].get("message", result["error"]))
+                        if result.get("usage") is not None:
+                            yield Usage(**result["usage"])
+                        choices = result.get("choices", [{}])
+                        choice = choices.pop() if choices else {}
+                        content = choice.get("delta", {}).get("content")
+                        if content:
+                            yield content
+                        tool_calls = choice.get("delta", {}).get("tool_calls")
+                        if tool_calls:
+                            yield ToolCalls(choice["delta"]["tool_calls"])
+                        reasoning_content = choice.get("delta", {}).get("reasoning_content")
+                        if reasoning_content:
+                            reasoning = True
+                            yield Reasoning(reasoning_content)
+                        finish_reason = choice.get("finish_reason")
+                        if finish_reason:
+                            yield FinishReason(finish_reason)
+                    if reasoning:
+                        yield Reasoning(status="Done")
+                elif mime_type.startswith("application/json"):
+                    result = await response.json()
+                    if "choices" in result:
+                        choice = result["choices"][0]
+                        message = choice.get("message", {})
+                        content = message.get("content", "")
+                        if content:
+                            yield content
+                        if "tool_calls" in message:
+                            yield ToolCalls(message["tool_calls"])
+                    else:
+                        raise ResponseError(result)
+                    if result.get("usage") is not None:
+                        yield Usage(**result["usage"])
+                    finish_reason = choice.get("finish_reason")
+                    if finish_reason:
+                        yield FinishReason(finish_reason)
+                elif mime_type.startswith("application/x-ndjson"):
+                    async for line in response.content:
+                        data = json.loads(line)
+                        if data.get("type") == "text":
+                            yield data.get("text", "")
+                else:
+                    raise ResponseError(f"Unexpected content type: {mime_type}")
\ No newline at end of file
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index b73b5b68..fdebd17e 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -10,7 +10,7 @@ try:
 except ImportError as e:
     debug.error("Deprecated providers not loaded:", e)
 from .needs_auth import *
-from .template import OpenaiTemplate, BackendApi, Puter
+from .template import OpenaiTemplate, BackendApi
 from .hf import HuggingFace, HuggingChat, HuggingFaceAPI, HuggingFaceInference, HuggingFaceMedia
 from .har import HarProvider
 try:
@@ -53,6 +53,7 @@ from .GizAI import GizAI
 from .ImageLabs import ImageLabs
 from .LambdaChat import LambdaChat
 from .LegacyLMArena import LegacyLMArena
+from .LMArenaBeta import LMArenaBeta
 from .OIVSCodeSer2 import OIVSCodeSer2
 from .OIVSCodeSer5 import OIVSCodeSer5
 from .OIVSCodeSer0501 import OIVSCodeSer0501
@@ -61,6 +62,7 @@ from .Pi import Pi
 from .Pizzagpt import Pizzagpt
 from .PollinationsAI import PollinationsAI
 from .PollinationsImage import PollinationsImage
+from .PuterJS import PuterJS
 from .TeachAnything import TeachAnything
 from .You import You
 from .Websim import Websim
diff --git a/g4f/Provider/har/__init__.py b/g4f/Provider/har/__init__.py
index 69d18da0..d320e486 100644
--- a/g4f/Provider/har/__init__.py
+++ b/g4f/Provider/har/__init__.py
@@ -15,7 +15,7 @@ from ..helper import get_last_user_message
 from ..openai.har_file import get_headers
 
 class HarProvider(AsyncGeneratorProvider, ProviderModelMixin):
-    label = "Legacy LM Arena (Har)"
+    label = "LMArena (Har)"
     url = "https://legacy.lmarena.ai"
     api_endpoint = "/queue/join?"
     working = True
diff --git a/g4f/Provider/hf_space/Qwen_Qwen_3.py b/g4f/Provider/hf_space/Qwen_Qwen_3.py
index 1b291a4e..f42ab534 100644
--- a/g4f/Provider/hf_space/Qwen_Qwen_3.py
+++ b/g4f/Provider/hf_space/Qwen_Qwen_3.py
@@ -126,7 +126,7 @@ class Qwen_Qwen_3(AsyncGeneratorProvider, ProviderModelMixin):
                         status=update[2].get('options', {}).get('title'))
                     is_thinking = True
                 elif update[2].get('type') == 'text':
-                    yield Reasoning(update[2].get('content'))
+                    yield update[2].get('content')
                     is_thinking = False
                 elif isinstance(update, list) and isinstance(update[1], list) and len(update[1]) > 4:
diff --git a/g4f/Provider/needs_auth/Gemini.py b/g4f/Provider/needs_auth/Gemini.py
index 6aaddcc5..7ef763e6 100644
--- a/g4f/Provider/needs_auth/Gemini.py
+++ b/g4f/Provider/needs_auth/Gemini.py
@@ -235,7 +235,7 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
         image_prompt = response_part = None
         last_content = ""
         youtube_ids = []
-        async for line in response.content:
+        for line in (await response.text()).split("\n"):
             try:
                 try:
                     line = json.loads(line)
diff --git a/g4f/Provider/not_working/PuterJS.py b/g4f/Provider/not_working/PuterJS.py
deleted file mode 100644
index 298502e3..00000000
--- a/g4f/Provider/not_working/PuterJS.py
+++ /dev/null
@@ -1,1232 +0,0 @@
-from __future__ import annotations
-
-import json
-import time
-import re
-import random
-import json
-import uuid
-import asyncio
-from typing import Optional, Dict, Any
-from aiohttp import ClientSession
-
-from ...typing import AsyncResult, Messages, MediaListType
-from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ...providers.response import FinishReason, JsonConversation
-from ...image import to_data_uri
-from ...errors import RateLimitError, ModelNotFoundError
-from ... import debug
-
-class AuthData:
-    """
-    Stores authentication data for a specific model.
- """ - def __init__(self): - self.auth_token: Optional[str] = None - self.app_token: Optional[str] = None - self.created_at: float = time.time() - self.tokens_valid: bool = False - self.rate_limited_until: float = 0 - - def is_valid(self, expiration_time: int) -> bool: - """Check if the tokens are still valid based on expiration time.""" - return (self.auth_token and self.app_token and - self.tokens_valid and - time.time() - self.created_at < expiration_time) - - def invalidate(self): - """Mark tokens as invalid.""" - self.tokens_valid = False - - def set_rate_limit(self, seconds: int = 60): - """Set rate limit expiry time.""" - self.rate_limited_until = time.time() + seconds - - def is_rate_limited(self) -> bool: - """Check if currently rate limited.""" - return time.time() < self.rate_limited_until - - -class Conversation(JsonConversation): - """ - Stores conversation state and authentication tokens for PuterJS provider. - Maintains separate authentication data for different models. - """ - message_history: Messages = [] - - def __init__(self, model: str): - self.model = model - self.message_history = [] - # Authentication tokens by specific model - self._auth_data: Dict[str, AuthData] = {} - - def get_auth_for_model(self, model: str, provider: PuterJS) -> AuthData: - """Get the authentication data for a specific model.""" - # Create auth data for this model if it doesn't exist - if model not in self._auth_data: - self._auth_data[model] = AuthData() - - return self._auth_data[model] - - # Override get_dict to exclude auth_data - def get_dict(self) -> Dict[str, Any]: - """Return a dictionary representation for JSON serialization.""" - return { - "model": self.model, - "message_history": self.message_history - } - - # Override __getstate__ for pickle serialization - def __getstate__(self) -> Dict[str, Any]: - """Support for pickle serialization.""" - # Include auth_data for pickle serialization - state = self.__dict__.copy() - return state - - # Override __setstate__ for pickle deserialization - def __setstate__(self, state: Dict[str, Any]) -> None: - """Support for pickle deserialization.""" - self.__dict__.update(state) - - -class PuterJS(AsyncGeneratorProvider, ProviderModelMixin): - label = "Puter.js" - url = "https://docs.puter.com/playground" - api_endpoint = "https://api.puter.com/drivers/call" - - working = False - needs_auth = False - supports_stream = True - supports_system_message = True - supports_message_history = True - - default_model = 'gpt-4o' - default_vision_model = default_model - # https://api.puter.com/puterai/chat/models/ - openai_models = [default_vision_model,"gpt-4o-mini", "o1", "o1-mini", "o1-pro", "o3", "o3-mini", "o4-mini", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", "gpt-4.5-preview"] - claude_models = ["claude-3-7-sonnet-20250219", "claude-3-7-sonnet-latest", "claude-3-5-sonnet-20241022", "claude-3-5-sonnet-latest", "claude-3-5-sonnet-20240620", "claude-3-haiku-20240307"] - mistral_models = ["ministral-3b-2410","ministral-3b-latest","ministral-8b-2410","ministral-8b-latest","open-mistral-7b","mistral-tiny","mistral-tiny-2312","open-mixtral-8x7b","mistral-small","mistral-small-2312","open-mixtral-8x22b","open-mixtral-8x22b-2404","mistral-large-2411","mistral-large-latest","pixtral-large-2411","pixtral-large-latest","mistral-large-pixtral-2411","codestral-2501","codestral-latest","codestral-2412","codestral-2411-rc5","pixtral-12b-2409","pixtral-12b","pixtral-12b-latest","mistral-small-2503","mistral-small-latest"] - xai_models = ["grok-beta", 
"grok-vision-beta"] - deepseek_models = ["deepseek-chat","deepseek-reasoner"] - gemini_models = ["gemini-1.5-flash","gemini-2.0-flash"] - openrouter_models = ["openrouter:meta-llama/llama-3.3-8b-instruct:free","openrouter:nousresearch/deephermes-3-mistral-24b-preview:free","openrouter:mistralai/mistral-medium-3","openrouter:google/gemini-2.5-pro-preview","openrouter:arcee-ai/caller-large","openrouter:arcee-ai/spotlight","openrouter:arcee-ai/maestro-reasoning","openrouter:arcee-ai/virtuoso-large","openrouter:arcee-ai/coder-large","openrouter:arcee-ai/virtuoso-medium-v2","openrouter:arcee-ai/arcee-blitz","openrouter:microsoft/phi-4-reasoning-plus:free","openrouter:microsoft/phi-4-reasoning-plus","openrouter:microsoft/phi-4-reasoning:free","openrouter:qwen/qwen3-0.6b-04-28:free","openrouter:inception/mercury-coder-small-beta","openrouter:qwen/qwen3-1.7b:free","openrouter:qwen/qwen3-4b:free","openrouter:opengvlab/internvl3-14b:free","openrouter:opengvlab/internvl3-2b:free","openrouter:deepseek/deepseek-prover-v2:free","openrouter:deepseek/deepseek-prover-v2","openrouter:meta-llama/llama-guard-4-12b","openrouter:qwen/qwen3-30b-a3b:free","openrouter:qwen/qwen3-30b-a3b","openrouter:qwen/qwen3-8b:free","openrouter:qwen/qwen3-8b","openrouter:qwen/qwen3-14b:free","openrouter:qwen/qwen3-14b","openrouter:qwen/qwen3-32b:free","openrouter:qwen/qwen3-32b","openrouter:qwen/qwen3-235b-a22b:free","openrouter:qwen/qwen3-235b-a22b","openrouter:tngtech/deepseek-r1t-chimera:free","openrouter:thudm/glm-z1-rumination-32b","openrouter:thudm/glm-z1-9b:free","openrouter:thudm/glm-4-9b:free","openrouter:microsoft/mai-ds-r1:free","openrouter:thudm/glm-z1-32b:free","openrouter:thudm/glm-z1-32b","openrouter:thudm/glm-4-32b:free","openrouter:thudm/glm-4-32b","openrouter:google/gemini-2.5-flash-preview","openrouter:google/gemini-2.5-flash-preview:thinking","openrouter:openai/o4-mini-high","openrouter:openai/o3","openrouter:openai/o4-mini","openrouter:shisa-ai/shisa-v2-llama3.3-70b:free","openrouter:qwen/qwen2.5-coder-7b-instruct","openrouter:openai/gpt-4.1","openrouter:openai/gpt-4.1-mini","openrouter:openai/gpt-4.1-nano","openrouter:eleutherai/llemma_7b","openrouter:alfredpros/codellama-7b-instruct-solidity","openrouter:arliai/qwq-32b-arliai-rpr-v1:free","openrouter:agentica-org/deepcoder-14b-preview:free","openrouter:moonshotai/kimi-vl-a3b-thinking:free","openrouter:x-ai/grok-3-mini-beta","openrouter:x-ai/grok-3-beta","openrouter:nvidia/llama-3.3-nemotron-super-49b-v1:free","openrouter:nvidia/llama-3.3-nemotron-super-49b-v1","openrouter:nvidia/llama-3.1-nemotron-ultra-253b-v1:free","openrouter:meta-llama/llama-4-maverick:free","openrouter:meta-llama/llama-4-maverick","openrouter:meta-llama/llama-4-scout:free","openrouter:meta-llama/llama-4-scout","openrouter:all-hands/openhands-lm-32b-v0.1","openrouter:mistral/ministral-8b","openrouter:deepseek/deepseek-v3-base:free","openrouter:scb10x/llama3.1-typhoon2-8b-instruct","openrouter:scb10x/llama3.1-typhoon2-70b-instruct","openrouter:bytedance-research/ui-tars-72b:free","openrouter:qwen/qwen2.5-vl-3b-instruct:free","openrouter:google/gemini-2.5-pro-exp-03-25","openrouter:qwen/qwen2.5-vl-32b-instruct:free","openrouter:qwen/qwen2.5-vl-32b-instruct","openrouter:deepseek/deepseek-chat-v3-0324:free","openrouter:deepseek/deepseek-chat-v3-0324","openrouter:featherless/qwerky-72b:free","openrouter:openai/o1-pro","openrouter:mistralai/mistral-small-3.1-24b-instruct:free","openrouter:mistralai/mistral-small-3.1-24b-instruct","openrouter:open-r1/olympiccoder-32b:free","openrouter:google/
gemma-3-1b-it:free","openrouter:google/gemma-3-4b-it:free","openrouter:google/gemma-3-4b-it","openrouter:ai21/jamba-1.6-large","openrouter:ai21/jamba-1.6-mini","openrouter:google/gemma-3-12b-it:free","openrouter:google/gemma-3-12b-it","openrouter:cohere/command-a","openrouter:openai/gpt-4o-mini-search-preview","openrouter:openai/gpt-4o-search-preview","openrouter:rekaai/reka-flash-3:free","openrouter:google/gemma-3-27b-it:free","openrouter:google/gemma-3-27b-it","openrouter:thedrummer/anubis-pro-105b-v1","openrouter:thedrummer/skyfall-36b-v2","openrouter:microsoft/phi-4-multimodal-instruct","openrouter:perplexity/sonar-reasoning-pro","openrouter:perplexity/sonar-pro","openrouter:perplexity/sonar-deep-research","openrouter:deepseek/deepseek-r1-zero:free","openrouter:qwen/qwq-32b:free","openrouter:qwen/qwq-32b","openrouter:moonshotai/moonlight-16b-a3b-instruct:free","openrouter:nousresearch/deephermes-3-llama-3-8b-preview:free","openrouter:openai/gpt-4.5-preview","openrouter:google/gemini-2.0-flash-lite-001","openrouter:anthropic/claude-3.7-sonnet","openrouter:anthropic/claude-3.7-sonnet:thinking","openrouter:anthropic/claude-3.7-sonnet:beta","openrouter:perplexity/r1-1776","openrouter:mistralai/mistral-saba","openrouter:cognitivecomputations/dolphin3.0-r1-mistral-24b:free","openrouter:cognitivecomputations/dolphin3.0-mistral-24b:free","openrouter:meta-llama/llama-guard-3-8b","openrouter:openai/o3-mini-high","openrouter:deepseek/deepseek-r1-distill-llama-8b","openrouter:google/gemini-2.0-flash-001","openrouter:qwen/qwen-vl-plus","openrouter:aion-labs/aion-1.0","openrouter:aion-labs/aion-1.0-mini","openrouter:aion-labs/aion-rp-llama-3.1-8b","openrouter:qwen/qwen-vl-max","openrouter:qwen/qwen-turbo","openrouter:qwen/qwen2.5-vl-72b-instruct:free","openrouter:qwen/qwen2.5-vl-72b-instruct","openrouter:qwen/qwen-plus","openrouter:qwen/qwen-max","openrouter:openai/o3-mini","openrouter:deepseek/deepseek-r1-distill-qwen-1.5b","openrouter:mistralai/mistral-small-24b-instruct-2501:free","openrouter:mistralai/mistral-small-24b-instruct-2501","openrouter:deepseek/deepseek-r1-distill-qwen-32b:free","openrouter:deepseek/deepseek-r1-distill-qwen-32b","openrouter:deepseek/deepseek-r1-distill-qwen-14b:free","openrouter:deepseek/deepseek-r1-distill-qwen-14b","openrouter:perplexity/sonar-reasoning","openrouter:perplexity/sonar","openrouter:liquid/lfm-7b","openrouter:liquid/lfm-3b","openrouter:deepseek/deepseek-r1-distill-llama-70b:free","openrouter:deepseek/deepseek-r1-distill-llama-70b","openrouter:deepseek/deepseek-r1:free","openrouter:deepseek/deepseek-r1","openrouter:minimax/minimax-01","openrouter:mistralai/codestral-2501","openrouter:microsoft/phi-4","openrouter:deepseek/deepseek-chat:free","openrouter:deepseek/deepseek-chat","openrouter:sao10k/l3.3-euryale-70b","openrouter:openai/o1","openrouter:eva-unit-01/eva-llama-3.33-70b","openrouter:x-ai/grok-2-vision-1212","openrouter:x-ai/grok-2-1212","openrouter:cohere/command-r7b-12-2024","openrouter:google/gemini-2.0-flash-exp:free","openrouter:meta-llama/llama-3.3-70b-instruct:free","openrouter:meta-llama/llama-3.3-70b-instruct","openrouter:amazon/nova-lite-v1","openrouter:amazon/nova-micro-v1","openrouter:amazon/nova-pro-v1","openrouter:qwen/qwq-32b-preview","openrouter:eva-unit-01/eva-qwen-2.5-72b","openrouter:openai/gpt-4o-2024-11-20","openrouter:mistralai/mistral-large-2411","openrouter:mistralai/mistral-large-2407","openrouter:mistralai/pixtral-large-2411","openrouter:x-ai/grok-vision-beta","openrouter:infermatic/mn-inferor-12b","openrouter:qwen/qwen-2.5-
coder-32b-instruct:free","openrouter:qwen/qwen-2.5-coder-32b-instruct","openrouter:raifle/sorcererlm-8x22b","openrouter:eva-unit-01/eva-qwen-2.5-32b","openrouter:thedrummer/unslopnemo-12b","openrouter:anthropic/claude-3.5-haiku:beta","openrouter:anthropic/claude-3.5-haiku","openrouter:anthropic/claude-3.5-haiku-20241022:beta","openrouter:anthropic/claude-3.5-haiku-20241022","openrouter:neversleep/llama-3.1-lumimaid-70b","openrouter:anthracite-org/magnum-v4-72b","openrouter:anthropic/claude-3.5-sonnet:beta","openrouter:anthropic/claude-3.5-sonnet","openrouter:x-ai/grok-beta","openrouter:mistralai/ministral-8b","openrouter:mistralai/ministral-3b","openrouter:qwen/qwen-2.5-7b-instruct:free","openrouter:qwen/qwen-2.5-7b-instruct","openrouter:nvidia/llama-3.1-nemotron-70b-instruct","openrouter:inflection/inflection-3-productivity","openrouter:inflection/inflection-3-pi","openrouter:google/gemini-flash-1.5-8b","openrouter:thedrummer/rocinante-12b","openrouter:anthracite-org/magnum-v2-72b","openrouter:liquid/lfm-40b","openrouter:meta-llama/llama-3.2-3b-instruct:free","openrouter:meta-llama/llama-3.2-3b-instruct","openrouter:meta-llama/llama-3.2-1b-instruct:free","openrouter:meta-llama/llama-3.2-1b-instruct","openrouter:meta-llama/llama-3.2-90b-vision-instruct","openrouter:meta-llama/llama-3.2-11b-vision-instruct:free","openrouter:meta-llama/llama-3.2-11b-vision-instruct","openrouter:qwen/qwen-2.5-72b-instruct:free","openrouter:qwen/qwen-2.5-72b-instruct","openrouter:neversleep/llama-3.1-lumimaid-8b","openrouter:openai/o1-preview","openrouter:openai/o1-preview-2024-09-12","openrouter:openai/o1-mini","openrouter:openai/o1-mini-2024-09-12","openrouter:mistralai/pixtral-12b","openrouter:cohere/command-r-plus-08-2024","openrouter:cohere/command-r-08-2024","openrouter:qwen/qwen-2.5-vl-7b-instruct:free","openrouter:qwen/qwen-2.5-vl-7b-instruct","openrouter:sao10k/l3.1-euryale-70b","openrouter:microsoft/phi-3.5-mini-128k-instruct","openrouter:nousresearch/hermes-3-llama-3.1-70b","openrouter:nousresearch/hermes-3-llama-3.1-405b","openrouter:openai/chatgpt-4o-latest","openrouter:sao10k/l3-lunaris-8b","openrouter:aetherwiing/mn-starcannon-12b","openrouter:openai/gpt-4o-2024-08-06","openrouter:meta-llama/llama-3.1-405b:free","openrouter:meta-llama/llama-3.1-405b","openrouter:nothingiisreal/mn-celeste-12b","openrouter:perplexity/llama-3.1-sonar-small-128k-online","openrouter:perplexity/llama-3.1-sonar-large-128k-online","openrouter:meta-llama/llama-3.1-8b-instruct:free","openrouter:meta-llama/llama-3.1-8b-instruct","openrouter:meta-llama/llama-3.1-405b-instruct","openrouter:meta-llama/llama-3.1-70b-instruct","openrouter:mistralai/codestral-mamba","openrouter:mistralai/mistral-nemo:free","openrouter:mistralai/mistral-nemo","openrouter:openai/gpt-4o-mini","openrouter:openai/gpt-4o-mini-2024-07-18","openrouter:google/gemma-2-27b-it","openrouter:alpindale/magnum-72b","openrouter:google/gemma-2-9b-it:free","openrouter:google/gemma-2-9b-it","openrouter:01-ai/yi-large","openrouter:ai21/jamba-instruct","openrouter:anthropic/claude-3.5-sonnet-20240620:beta","openrouter:anthropic/claude-3.5-sonnet-20240620","openrouter:sao10k/l3-euryale-70b","openrouter:cognitivecomputations/dolphin-mixtral-8x22b","openrouter:qwen/qwen-2-72b-instruct","openrouter:mistralai/mistral-7b-instruct:free","openrouter:mistralai/mistral-7b-instruct","openrouter:nousresearch/hermes-2-pro-llama-3-8b","openrouter:mistralai/mistral-7b-instruct-v0.3","openrouter:microsoft/phi-3-mini-128k-instruct","openrouter:microsoft/phi-3-medium-128k-instruct","op
enrouter:neversleep/llama-3-lumimaid-70b","openrouter:deepseek/deepseek-coder","openrouter:google/gemini-flash-1.5","openrouter:openai/gpt-4o","openrouter:openai/gpt-4o:extended","openrouter:meta-llama/llama-guard-2-8b","openrouter:openai/gpt-4o-2024-05-13","openrouter:allenai/olmo-7b-instruct","openrouter:neversleep/llama-3-lumimaid-8b:extended","openrouter:neversleep/llama-3-lumimaid-8b","openrouter:sao10k/fimbulvetr-11b-v2","openrouter:meta-llama/llama-3-8b-instruct","openrouter:meta-llama/llama-3-70b-instruct","openrouter:mistralai/mixtral-8x22b-instruct","openrouter:microsoft/wizardlm-2-8x22b","openrouter:google/gemini-pro-1.5","openrouter:openai/gpt-4-turbo","openrouter:cohere/command-r-plus","openrouter:cohere/command-r-plus-04-2024","openrouter:sophosympatheia/midnight-rose-70b","openrouter:cohere/command","openrouter:cohere/command-r","openrouter:anthropic/claude-3-haiku:beta","openrouter:anthropic/claude-3-haiku","openrouter:anthropic/claude-3-opus:beta","openrouter:anthropic/claude-3-opus","openrouter:anthropic/claude-3-sonnet:beta","openrouter:anthropic/claude-3-sonnet","openrouter:cohere/command-r-03-2024","openrouter:mistralai/mistral-large","openrouter:openai/gpt-3.5-turbo-0613","openrouter:openai/gpt-4-turbo-preview","openrouter:nousresearch/nous-hermes-2-mixtral-8x7b-dpo","openrouter:mistralai/mistral-medium","openrouter:mistralai/mistral-small","openrouter:mistralai/mistral-tiny","openrouter:mistralai/mistral-7b-instruct-v0.2","openrouter:mistralai/mixtral-8x7b-instruct","openrouter:neversleep/noromaid-20b","openrouter:anthropic/claude-2.1:beta","openrouter:anthropic/claude-2.1","openrouter:anthropic/claude-2:beta","openrouter:anthropic/claude-2","openrouter:undi95/toppy-m-7b","openrouter:alpindale/goliath-120b","openrouter:openrouter/auto","openrouter:openai/gpt-3.5-turbo-1106","openrouter:openai/gpt-4-1106-preview","openrouter:jondurbin/airoboros-l2-70b","openrouter:openai/gpt-3.5-turbo-instruct","openrouter:mistralai/mistral-7b-instruct-v0.1","openrouter:pygmalionai/mythalion-13b","openrouter:openai/gpt-3.5-turbo-16k","openrouter:openai/gpt-4-32k","openrouter:openai/gpt-4-32k-0314","openrouter:mancer/weaver","openrouter:anthropic/claude-2.0:beta","openrouter:anthropic/claude-2.0","openrouter:undi95/remm-slerp-l2-13b","openrouter:gryphe/mythomax-l2-13b","openrouter:meta-llama/llama-2-70b-chat","openrouter:openai/gpt-3.5-turbo","openrouter:openai/gpt-3.5-turbo-0125","openrouter:openai/gpt-4","openrouter:openai/gpt-4-0314"]
-
-    vision_models = [*openai_models, *claude_models, *mistral_models, *xai_models, *deepseek_models, *gemini_models, *openrouter_models]
-
-    models = vision_models
-
-    model_aliases = {
-        ### mistral_models ###
-        "mixtral-8x22b": ["open-mixtral-8x22b", "open-mixtral-8x22b-2404"],
-        "pixtral-large": ["pixtral-large-2411","pixtral-large-latest", "mistral-large-pixtral-2411"],
-
-        ### openrouter_models ###
-        # llama
-        "llama-2-70b": "openrouter:meta-llama/llama-2-70b-chat",
-        "llama-3-8b": "openrouter:meta-llama/llama-3-8b-instruct",
-        "llama-3-70b": "openrouter:meta-llama/llama-3-70b-instruct",
-        "llama-3.1-8b": ["openrouter:meta-llama/llama-3.1-8b-instruct:free", "openrouter:meta-llama/llama-3.1-8b-instruct"],
-        "llama-3.1-70b": "openrouter:meta-llama/llama-3.1-70b-instruct",
-        "llama-3.1-405b": ["openrouter:meta-llama/llama-3.1-405b:free", "openrouter:meta-llama/llama-3.1-405b", "openrouter:meta-llama/llama-3.1-405b-instruct"],
-        "llama-3.2-1b": ["openrouter:meta-llama/llama-3.2-1b-instruct:free", "openrouter:meta-llama/llama-3.2-1b-instruct"],
-        "llama-3.2-3b": ["openrouter:meta-llama/llama-3.2-3b-instruct:free","openrouter:meta-llama/llama-3.2-3b-instruct"],
-        "llama-3.2-11b": ["openrouter:meta-llama/llama-3.2-11b-vision-instruct:free", "openrouter:meta-llama/llama-3.2-11b-vision-instruct"],
-        "llama-3.2-90b": "openrouter:meta-llama/llama-3.2-90b-vision-instruct",
-        "llama-3.3-8b": "openrouter:meta-llama/llama-3.3-8b-instruct:free",
-        "llama-3.3-70b": ["openrouter:meta-llama/llama-3.3-70b-instruct:free", "openrouter:meta-llama/llama-3.3-70b-instruct"],
-        "llama-4-maverick": ["openrouter:meta-llama/llama-4-maverick:free", "openrouter:meta-llama/llama-4-maverick"],
-        "llama-4-scout": ["openrouter:meta-llama/llama-4-scout:free", "openrouter:meta-llama/llama-4-scout"],
-        #"": "openrouter:meta-llama/llama-guard-3-8b",
-        #"": "openrouter:meta-llama/llama-guard-2-8b",
-        #"": "openrouter:meta-llama/llama-guard-4-12b",
-
-        # google (gemini)
-        "gemini-1.5-flash": ["gemini-1.5-flash", "openrouter:google/gemini-flash-1.5", "gemini-flash-1.5-8b"],
-        "gemini-1.5-8b-flash": "openrouter:google/gemini-flash-1.5-8b",
-        "gemini-1.5-pro": "openrouter:google/gemini-pro-1.5",
-        "gemini-2.0-flash": ["gemini-2.0-flash", "openrouter:google/gemini-2.0-flash-lite-001", "openrouter:google/gemini-2.0-flash-001", "openrouter:google/gemini-2.0-flash-exp:free"],
-        "gemini-2.5-pro": ["openrouter:google/gemini-2.5-pro-preview", "openrouter:google/gemini-2.5-pro-exp-03-25"],
-        "gemini-2.5-flash": "openrouter:google/gemini-2.5-flash-preview",
-        "gemini-2.5-flash-thinking": "openrouter:google/gemini-2.5-flash-preview:thinking",
-
-        # google (gemma)
-        "gemma-2-9b": ["openrouter:google/gemma-2-9b-it:free","openrouter:google/gemma-2-9b-it"],
-        "gemma-2-27b": "openrouter:google/gemma-2-27b-it",
-        "gemma-3-1b": "openrouter:google/gemma-3-1b-it:free",
-        "gemma-3-4b": ["openrouter:google/gemma-3-4b-it:free", "openrouter:google/gemma-3-4b-it"],
-        "gemma-3-12b": ["openrouter:google/gemma-3-12b-it:free", "openrouter:google/gemma-3-12b-it"],
-        "gemma-3-27b": ["openrouter:google/gemma-3-27b-it:free", "openrouter:google/gemma-3-27b-it"],
-
-        # openai (gpt-3.5)
-        "gpt-3.5-turbo": ["openrouter:openai/gpt-3.5-turbo-0613", "openrouter:openai/gpt-3.5-turbo-1106", "openrouter:openai/gpt-3.5-turbo-0125", "openrouter:openai/gpt-3.5-turbo", "openrouter:openai/gpt-3.5-turbo-instruct", "openrouter:openai/gpt-3.5-turbo-16k"],
-
-        # openai (gpt-4)
-        "gpt-4": ["openrouter:openai/gpt-4-1106-preview", "openrouter:openai/gpt-4-32k", "openrouter:openai/gpt-4-32k-0314", "openrouter:openai/gpt-4", "openrouter:openai/gpt-4-0314",],
-        "gpt-4-turbo": ["openrouter:openai/gpt-4-turbo", "openrouter:openai/gpt-4-turbo-preview"],
-
-        # openai (gpt-4o)
-        "gpt-4o": ["gpt-4o", "openrouter:openai/gpt-4o-2024-08-06", "openrouter:openai/gpt-4o-2024-11-20", "openrouter:openai/chatgpt-4o-latest", "openrouter:openai/gpt-4o", "openrouter:openai/gpt-4o:extended", "openrouter:openai/gpt-4o-2024-05-13",],
-        "gpt-4o-search": "openrouter:openai/gpt-4o-search-preview",
-        "gpt-4o-mini": ["gpt-4o-mini", "openrouter:openai/gpt-4o-mini", "openrouter:openai/gpt-4o-mini-2024-07-18"],
-        "gpt-4o-mini-search": "openrouter:openai/gpt-4o-mini-search-preview",
-
-        # openai (o1)
-        "o1": ["o1", "openrouter:openai/o1", "openrouter:openai/o1-preview", "openrouter:openai/o1-preview-2024-09-12"],
-        "o1-mini": ["o1-mini", "openrouter:openai/o1-mini", "openrouter:openai/o1-mini-2024-09-12"],
-        "o1-pro": ["o1-pro", "openrouter:openai/o1-pro"],
-
-        # openai (o3)
-        "o3": ["o3", "openrouter:openai/o3"],
-        "o3-mini": ["o3-mini", "openrouter:openai/o3-mini", "openrouter:openai/o3-mini-high"],
-        "o3-mini-high": "openrouter:openai/o3-mini-high",
-
-        # openai (o4)
-        "o4-mini": ["o4-mini", "openrouter:openai/o4-mini"],
-        "o4-mini-high": "openrouter:openai/o4-mini-high",
-
-        # openai (gpt-4.1)
-        "gpt-4.1": ["gpt-4.1", "openrouter:openai/gpt-4.1"],
-        "gpt-4.1-mini": ["gpt-4.1-mini", "openrouter:openai/gpt-4.1-mini"],
-        "gpt-4.1-nano": ["gpt-4.1-nano", "openrouter:openai/gpt-4.1-nano"],
-
-        # openai (gpt-4.5)
-        "gpt-4.5": ["gpt-4.5-preview", "openrouter:openai/gpt-4.5-preview"],
-
-        # mistralai
-        "mistral-large": ["openrouter:mistralai/mistral-large", "openrouter:mistralai/mistral-large-2411", "openrouter:mistralai/mistral-large-2407", "openrouter:mistralai/pixtral-large-2411"],
-        "mistral-medium": ["openrouter:mistralai/mistral-medium", "openrouter:mistralai/mistral-medium-3"],
-        "mistral-small": ["mistral-small", "mistral-small-2312", "mistral-small-2503","mistral-small-latest", "openrouter:mistralai/mistral-small", "openrouter:mistralai/mistral-small-3.1-24b-instruct:free", "openrouter:mistralai/mistral-small-3.1-24b-instruct", "openrouter:mistralai/mistral-small-24b-instruct-2501:free", "openrouter:mistralai/mistral-small-24b-instruct-2501"],
-        "mistral-tiny": ["mistral-tiny", "mistral-tiny-2312", "openrouter:mistralai/mistral-tiny"],
-        "mistral-7b": ["open-mistral-7b", "openrouter:mistralai/mistral-7b-instruct", "openrouter:mistralai/mistral-7b-instruct:free", "openrouter:mistralai/mistral-7b-instruct-v0.1", "openrouter:mistralai/mistral-7b-instruct-v0.2", "openrouter:mistralai/mistral-7b-instruct-v0.3",],
-        "mixtral-8x7b": ["open-mixtral-8x7b", "openrouter:mistralai/mixtral-8x7b-instruct"],
-        "mixtral-8x22b": ["open-mixtral-8x22b", "open-mixtral-8x22b-2404", "openrouter:mistralai/mixtral-8x7b-instruct", "openrouter:mistralai/mixtral-8x22b-instruct"],
-        "ministral-8b": ["ministral-8b-2410", "ministral-8b-latest", "openrouter:mistral/ministral-8b", "openrouter:mistralai/ministral-8b"],
-        "mistral-nemo": ["openrouter:mistralai/mistral-nemo:free", "openrouter:mistralai/mistral-nemo"],
-        "ministral-3b": ["ministral-3b-2410", "ministral-3b-latest", "openrouter:mistralai/ministral-3b"],
-        "mistral-saba": "openrouter:mistralai/mistral-saba",
-        "codestral": ["codestral-2501","codestral-latest","codestral-2412","codestral-2411-rc5", "openrouter:mistralai/codestral-2501", "openrouter:mistralai/codestral-mamba"],
-        "pixtral-12b": ["pixtral-12b-2409","pixtral-12b","pixtral-12b-latest", "openrouter:mistralai/pixtral-12b"],
-
-        # nousresearch
-        "hermes-2-dpo": "openrouter:nousresearch/nous-hermes-2-mixtral-8x7b-dpo",
-        "hermes-2-pro": "openrouter:nousresearch/hermes-2-pro-llama-3-8b",
-        "hermes-3-70b": "openrouter:nousresearch/hermes-3-llama-3.1-70b",
-        "hermes-3-405b": "openrouter:nousresearch/hermes-3-llama-3.1-405b",
-        "deephermes-3-8b": "openrouter:nousresearch/deephermes-3-llama-3-8b-preview:free",
-        "deephermes-3-24b": "openrouter:nousresearch/deephermes-3-mistral-24b-preview:free",
-
-        # microsoft
-        "phi-3-mini": "openrouter:microsoft/phi-3-mini-128k-instruct",
-        "phi-3-medium": "openrouter:microsoft/phi-3-medium-128k-instruct",
-        "phi-3.5-mini": "openrouter:microsoft/phi-3.5-mini-128k-instruct",
-        "phi-4": "openrouter:microsoft/phi-4",
-        "phi-4-multimodal": "openrouter:microsoft/phi-4-multimodal-instruct",
-        "phi-4-reasoning": "openrouter:microsoft/phi-4-reasoning:free",
-        "phi-4-reasoning-plus": ["openrouter:microsoft/phi-4-reasoning-plus:free", "openrouter:microsoft/phi-4-reasoning-plus"],
-
-        "wizardlm-2-8x22b": "openrouter:microsoft/wizardlm-2-8x22b",
-
-        "mai-ds-r1": "openrouter:microsoft/mai-ds-r1:free",
-
-        # anthropic
-        "claude-3.7-sonnet": ["claude-3-7-sonnet-20250219", "claude-3-7-sonnet-latest", "openrouter:anthropic/claude-3.7-sonnet", "openrouter:anthropic/claude-3.7-sonnet:beta",],
-        "claude-3.7-sonnet-thinking": "openrouter:anthropic/claude-3.7-sonnet:thinking",
-        "claude-3.5-haiku": ["openrouter:anthropic/claude-3.5-haiku:beta", "openrouter:anthropic/claude-3.5-haiku", "openrouter:anthropic/claude-3.5-haiku-20241022:beta", "openrouter:anthropic/claude-3.5-haiku-20241022"],
-        "claude-3.5-sonnet": ["claude-3-5-sonnet-20241022", "claude-3-5-sonnet-latest", "claude-3-5-sonnet-20240620", "openrouter:anthropic/claude-3.5-sonnet-20240620:beta", "openrouter:anthropic/claude-3.5-sonnet-20240620", "openrouter:anthropic/claude-3.5-sonnet:beta", "openrouter:anthropic/claude-3.5-sonnet",],
-        "claude-3-haiku": ["claude-3-haiku-20240307", "openrouter:anthropic/claude-3-haiku:beta", "openrouter:anthropic/claude-3-haiku"],
-        "claude-3-opus": ["openrouter:anthropic/claude-3-opus:beta", "openrouter:anthropic/claude-3-opus"],
-        "claude-3-sonnet": ["openrouter:anthropic/claude-3-sonnet:beta", "openrouter:anthropic/claude-3-sonnet"],
-        "claude-2.1": ["openrouter:anthropic/claude-2.1:beta", "openrouter:anthropic/claude-2.1"],
-        "claude-2": ["openrouter:anthropic/claude-2:beta", "openrouter:anthropic/claude-2",],
-        "claude-2.0": ["openrouter:anthropic/claude-2.0:beta", "openrouter:anthropic/claude-2.0"],
-
-        # rekaai
-        "reka-flash": "openrouter:rekaai/reka-flash-3:free",
-
-        # cohere
-        "command-r7b": "openrouter:cohere/command-r7b-12-2024",
-        "command-r-plus": ["openrouter:cohere/command-r-plus-08-2024", "openrouter:cohere/command-r-plus", "openrouter:cohere/command-r-plus-04-2024"],
-        "command": "openrouter:cohere/command",
-        "command-r": ["openrouter:cohere/command-r-08-2024", "openrouter:cohere/command-r", "openrouter:cohere/command-r-03-2024"],
-        "command-a": "openrouter:cohere/command-a",
-
-        # qwen
-        "qwq-32b": ["openrouter:qwen/qwq-32b-preview", "openrouter:qwen/qwq-32b:free", "openrouter:qwen/qwq-32b"],
-        "qwen-vl-plus": "openrouter:qwen/qwen-vl-plus",
-        "qwen-vl-max": "openrouter:qwen/qwen-vl-max",
-        "qwen-turbo": "openrouter:qwen/qwen-turbo",
-        "qwen-2.5-vl-72b": ["openrouter:qwen/qwen2.5-vl-72b-instruct:free", "openrouter:qwen/qwen2.5-vl-72b-instruct"],
-        "qwen-plus": "openrouter:qwen/qwen-plus",
-        "qwen-max": "openrouter:qwen/qwen-max",
-        "qwen-2.5-coder-32b": ["openrouter:qwen/qwen-2.5-coder-32b-instruct:free", "openrouter:qwen/qwen-2.5-coder-32b-instruct"],
-        "qwen-2.5-7b": ["openrouter:qwen/qwen-2.5-7b-instruct:free", "openrouter:qwen/qwen-2.5-7b-instruct"],
-        "qwen-2.5-72b": ["openrouter:qwen/qwen-2.5-72b-instruct:free", "openrouter:qwen/qwen-2.5-72b-instruct"],
-        "qwen-2.5-vl-7b": ["openrouter:qwen/qwen-2.5-vl-7b-instruct:free", "openrouter:qwen/qwen-2.5-vl-7b-instruct"],
-        "qwen-2-72b": "openrouter:qwen/qwen-2-72b-instruct",
-        "qwen-3-0.6b": "openrouter:qwen/qwen3-0.6b-04-28:free",
-        "qwen-3-1.7b": "openrouter:qwen/qwen3-1.7b:free",
-        "qwen-3-4b": "openrouter:qwen/qwen3-4b:free",
-        "qwen-3-30b": ["openrouter:qwen/qwen3-30b-a3b:free", "openrouter:qwen/qwen3-30b-a3b"],
-        "qwen-3-8b": ["openrouter:qwen/qwen3-8b:free", "openrouter:qwen/qwen3-8b"],
-        "qwen-3-14b": ["openrouter:qwen/qwen3-14b:free", "openrouter:qwen/qwen3-14b"],
-        "qwen-3-32b": ["openrouter:qwen/qwen3-32b:free", "openrouter:qwen/qwen3-32b"],
-        "qwen-3-235b": ["openrouter:qwen/qwen3-235b-a22b:free", "openrouter:qwen/qwen3-235b-a22b"],
-        "qwen-2.5-coder-7b": "openrouter:qwen/qwen2.5-coder-7b-instruct",
-        "qwen-2.5-vl-3b": "openrouter:qwen/qwen2.5-vl-3b-instruct:free",
-        "qwen-2.5-vl-32b": ["openrouter:qwen/qwen2.5-vl-32b-instruct:free", "openrouter:qwen/qwen2.5-vl-32b-instruct"],
-
-        # deepseek
-        "deepseek-prover-v2": ["openrouter:deepseek/deepseek-prover-v2:free", "openrouter:deepseek/deepseek-prover-v2"],
-        "deepseek-v3": "openrouter:deepseek/deepseek-v3-base:free",
-        "deepseek-v3-0324": ["deepseek-chat", "openrouter:deepseek/deepseek-chat-v3-0324:free", "openrouter:deepseek/deepseek-chat-v3-0324"],
-        "deepseek-r1-zero": "openrouter:deepseek/deepseek-r1-zero:free",
-        "deepseek-r1-distill-llama-8b": "openrouter:deepseek/deepseek-r1-distill-llama-8b",
-        "deepseek-r1-distill-qwen-1.5b": "openrouter:deepseek/deepseek-r1-distill-qwen-1.5b",
-        "deepseek-r1-distill-qwen-32b": ["openrouter:deepseek/deepseek-r1-distill-qwen-32b:free", "openrouter:deepseek/deepseek-r1-distill-qwen-32b"],
-        "deepseek-r1-distill-qwen-14b": ["openrouter:deepseek/deepseek-r1-distill-qwen-14b:free","openrouter:deepseek/deepseek-r1-distill-qwen-14b"],
-        "deepseek-r1-distill-llama-70b": ["openrouter:deepseek/deepseek-r1-distill-llama-70b:free", "openrouter:deepseek/deepseek-r1-distill-llama-70b"],
-        "deepseek-r1": ["deepseek-reasoner", "openrouter:deepseek/deepseek-r1:free", "openrouter:deepseek/deepseek-r1"],
-        "deepseek-chat": ["deepseek-chat", "openrouter:deepseek/deepseek-chat:free", "openrouter:deepseek/deepseek-chat"],
-        "deepseek-coder": ["openrouter:deepseek/deepseek-coder"],
-
-        # inflection
-        "inflection-3-productivity": "openrouter:inflection/inflection-3-productivity",
-        "inflection-3-pi": "openrouter:inflection/inflection-3-pi",
-
-        # x-ai
-        "grok-3-mini": "openrouter:x-ai/grok-3-mini-beta",
-        "grok-3-beta": "openrouter:x-ai/grok-3-beta",
-        "grok-2": ["openrouter:x-ai/grok-2-vision-1212", "openrouter:x-ai/grok-2-1212"],
-        "grok": ["openrouter:x-ai/grok-vision-beta", "openrouter:x-ai/grok-2-vision-1212", "openrouter:x-ai/grok-2-1212", "grok-beta","grok-vision-beta", "openrouter:x-ai/grok-beta", "openrouter:x-ai/grok-3-beta", "openrouter:x-ai/grok-3-mini-beta"],
-        "grok-beta": ["grok-beta","grok-vision-beta", "openrouter:x-ai/grok-beta", "openrouter:x-ai/grok-3-beta"],
-
-        # perplexity
-        "sonar-reasoning-pro": "openrouter:perplexity/sonar-reasoning-pro",
-        "sonar-pro": "openrouter:perplexity/sonar-pro",
-        "sonar-deep-research": "openrouter:perplexity/sonar-deep-research",
-        "r1-1776": "openrouter:perplexity/r1-1776",
-        "sonar-reasoning": "openrouter:perplexity/sonar-reasoning",
-        "sonar": "openrouter:perplexity/sonar",
-        "llama-3.1-sonar-small-online": "openrouter:perplexity/llama-3.1-sonar-small-128k-online",
-        "llama-3.1-sonar-large-online": "openrouter:perplexity/llama-3.1-sonar-large-128k-online",
-
-        # nvidia
-        "nemotron-49b": ["openrouter:nvidia/llama-3.3-nemotron-super-49b-v1:free", "openrouter:nvidia/llama-3.3-nemotron-super-49b-v1"],
-        "nemotron-70b": "openrouter:nvidia/llama-3.1-nemotron-70b-instruct",
-        "nemotron-253b": "openrouter:nvidia/llama-3.1-nemotron-ultra-253b-v1:free",
-
-        # thudm
-        "glm-4": ["openrouter:thudm/glm-4-32b:free", "openrouter:thudm/glm-4-32b", "openrouter:thudm/glm-4-9b:free",],
-        "glm-4-32b": ["openrouter:thudm/glm-4-32b:free", "openrouter:thudm/glm-4-32b"],
-        "glm-z1-32b": ["openrouter:thudm/glm-z1-32b:free", "openrouter:thudm/glm-z1-32b"],
-        "glm-4-9b": "openrouter:thudm/glm-4-9b:free",
-        "glm-z1-9b": "openrouter:thudm/glm-z1-9b:free",
-        "glm-z1-rumination-32b": "openrouter:thudm/glm-z1-rumination-32b",
-
-        # minimax
-        "minimax": "openrouter:minimax/minimax-01",
-
-        # cognitivecomputations
-        "dolphin-3.0-r1-24b": "openrouter:cognitivecomputations/dolphin3.0-r1-mistral-24b:free",
-        "dolphin-3.0-24b": "openrouter:cognitivecomputations/dolphin3.0-mistral-24b:free",
-        "dolphin-8x22b": "openrouter:cognitivecomputations/dolphin-mixtral-8x22b",
-
-        # agentica-org
-        "deepcoder-14b": "openrouter:agentica-org/deepcoder-14b-preview:free",
-
-        # moonshotai
-        "kimi-vl-thinking": "openrouter:moonshotai/kimi-vl-a3b-thinking:free",
-        "moonlight-16b": "openrouter:moonshotai/moonlight-16b-a3b-instruct:free",
-
-        # featherless
-        "qwerky-72b": "openrouter:featherless/qwerky-72b:free",
-
-        # liquid
-        "lfm-7b": "openrouter:liquid/lfm-7b",
-        "lfm-3b": "openrouter:liquid/lfm-3b",
-        "lfm-40b": "openrouter:liquid/lfm-40b",
-
-        # arcee-ai
-        #"": "openrouter:arcee-ai/caller-large",
-        #"": "openrouter:arcee-ai/spotlight",
-        #"": "openrouter:arcee-ai/maestro-reasoning",
-        #"": "openrouter:arcee-ai/virtuoso-large",
-        #"": "openrouter:arcee-ai/coder-large",
-        #"": "openrouter:arcee-ai/virtuoso-medium-v2",
-        #"": "openrouter:arcee-ai/arcee-blitz",
-
-        # inception
-        #"": "openrouter:inception/mercury-coder-small-beta",
-
-        # opengvlab
-        #"": "openrouter:opengvlab/internvl3-14b:free",
-        #"": "openrouter:opengvlab/internvl3-2b:free",
-
-        # tngtech
-        #"": "openrouter:tngtech/deepseek-r1t-chimera:free",
-
-        # shisa-ai
-        #"": "openrouter:shisa-ai/shisa-v2-llama3.3-70b:free",
-
-        # eleutherai
-        #"": "openrouter:eleutherai/llemma_7b",
-
-        # shisa-ai
-        #"": "openrouter:alfredpros/codellama-7b-instruct-solidity",
-
-        # arliai
-        #"": "openrouter:arliai/qwq-32b-arliai-rpr-v1:free",
-
-        # all-hands
-        #"": "openrouter:all-hands/openhands-lm-32b-v0.1",
-
-        # scb10x
-        #"": "openrouter:scb10x/llama3.1-typhoon2-8b-instruct",
-        #"": "openrouter:scb10x/llama3.1-typhoon2-70b-instruct",
-
-        # bytedance-research
-        #"": "openrouter:bytedance-research/ui-tars-72b:free",
-
-        # open-r1
-        #"": "openrouter:open-r1/olympiccoder-32b:free",
-
-        # ai21
-        #"": "openrouter:ai21/jamba-1.6-large",
-        #"": "openrouter:ai21/jamba-1.6-mini",
-        #"": "openrouter:ai21/jamba-instruct",
-
-        # thedrummer
-        #"": "openrouter:thedrummer/anubis-pro-105b-v1",
-        #"": "openrouter:thedrummer/skyfall-36b-v2",
-        #"": "openrouter:thedrummer/unslopnemo-12b",
-        #"": "openrouter:thedrummer/rocinante-12b",
-
-        # aion-labs
-        #"": "openrouter:aion-labs/aion-1.0",
-        #"": "openrouter:aion-labs/aion-1.0-mini",
-        #"": "openrouter:aion-labs/aion-rp-llama-3.1-8b",
-
-        # sao10k
-        #"": "openrouter:sao10k/l3.3-euryale-70b",
-        #"": "openrouter:sao10k/l3.1-euryale-70b",
-        #"": "openrouter:sao10k/l3-lunaris-8b",
-        #"": "openrouter:sao10k/l3-euryale-70b",
-        #"": "openrouter:sao10k/fimbulvetr-11b-v2",
-
-        # eva-unit-01
-        #"": "openrouter:eva-unit-01/eva-llama-3.33-70b",
-        #"": "openrouter:eva-unit-01/eva-qwen-2.5-72b",
-        #"": "openrouter:eva-unit-01/eva-qwen-2.5-32b",
-
-        # amazon
-        #"": "openrouter:amazon/nova-lite-v1",
-        #"": "openrouter:amazon/nova-micro-v1",
-        #"": "openrouter:amazon/nova-pro-v1",
-
-        # infermatic
-        #"": "openrouter:infermatic/mn-inferor-12b",
-
-        # raifle
-        #"": "openrouter:raifle/sorcererlm-8x22b",
-
-        # neversleep
-        #"": "openrouter:neversleep/llama-3.1-lumimaid-70b",
-        #"": "openrouter:neversleep/llama-3.1-lumimaid-8b",
-        #"": "openrouter:neversleep/llama-3-lumimaid-70b",
-        #"": "openrouter:neversleep/llama-3-lumimaid-8b:extended",
-        #"": "openrouter:neversleep/llama-3-lumimaid-8b",
-        #"": "openrouter:neversleep/noromaid-20b",
-
-        # anthracite-org
-        #"": "openrouter:anthracite-org/magnum-v2-72b",
-        #"": "openrouter:anthracite-org/magnum-v4-72b",
-
-        # aetherwiing
-        #"": "openrouter:aetherwiing/mn-starcannon-12b",
-
-        # nothingiisreal
-        #"": "openrouter:nothingiisreal/mn-celeste-12b",
-
-        # alpindale
-        #"": "openrouter:alpindale/magnum-72b",
-        #"": "openrouter:alpindale/goliath-120b",
-
-        # 01-ai
-        #"": "openrouter:01-ai/yi-large",
-
-        # allenai
-        #"": "openrouter:allenai/olmo-7b-instruct",
-
-        # sophosympatheia
-        #"": "openrouter:sophosympatheia/midnight-rose-70b",
-
-        # undi95
-        #"": "openrouter:undi95/toppy-m-7b",
-
-        # jondurbin
-        #"": "openrouter:jondurbin/airoboros-l2-70b",
-
-        # pygmalionai
-        #"": "openrouter:pygmalionai/mythalion-13b",
-
-        # mancer
-        #"": "openrouter:mancer/weaver",
-
-        # undi95
-        #"": "openrouter:undi95/remm-slerp-l2-13b",
-
-        # gryphe
-        #"": "openrouter:gryphe/mythomax-l2-13b",
-    }
-
-    # Token expiration time in seconds (30 minutes)
-    TOKEN_EXPIRATION = 30 * 60
-
-    # Rate limit handling
-    MAX_RETRIES = 3
-    RETRY_DELAY = 5 # seconds
-    RATE_LIMIT_DELAY = 60 # seconds
-
-    # Make classes available at the class level for easier access
-    Conversation = Conversation
-    AuthData = AuthData
-
-    # Class-level auth data cache to reduce signup requests
-    _shared_auth_data = {}
-
-    @staticmethod
-    def get_driver_for_model(model: str) -> str:
-        """Determine the appropriate driver based on the model name."""
-        if model in PuterJS.openai_models:
-            return "openai-completion"
-        elif model in PuterJS.claude_models:
-            return "claude"
-        elif model in PuterJS.mistral_models:
-            return "mistral"
-        elif model in PuterJS.xai_models:
-            return "xai"
-        elif model in PuterJS.deepseek_models:
-            return "deepseek"
-        elif model in PuterJS.gemini_models:
-            return "gemini"
-        elif model in PuterJS.openrouter_models:
-            return "openrouter"
-        else:
-            # Default to OpenAI for unknown models
-            return "openai-completion"
-
-    @staticmethod
-    def format_messages_with_images(messages: Messages, media: MediaListType = None) -> Messages:
-        """
-        Format messages to include image data in the proper format for vision models.
-
-        Args:
-            messages: List of message dictionaries
-            media: List of tuples containing (image_data, image_name)
-
-        Returns:
-            Formatted messages with image content
-        """
-        if not media:
-            return messages
-
-        # Create a copy of messages to avoid modifying the original
-        formatted_messages = messages.copy()
-
-        # Find the last user message to add images to
-        for i in range(len(formatted_messages) - 1, -1, -1):
-            if formatted_messages[i]["role"] == "user":
-                user_msg = formatted_messages[i]
-
-                # Convert to content list format if it's a string
-                if isinstance(user_msg["content"], str):
-                    text_content = user_msg["content"]
-                    user_msg["content"] = [{"type": "text", "text": text_content}]
-                elif not isinstance(user_msg["content"], list):
-                    # Initialize as empty list if not already a list or string
-                    user_msg["content"] = []
-
-                # Add image content
-                for image_data, image_name in media:
-                    if isinstance(image_data, str) and (image_data.startswith("http://") or image_data.startswith("https://")):
-                        # Direct URL
-                        user_msg["content"].append({
-                            "type": "image_url",
-                            "image_url": {"url": image_data}
-                        })
-                    else:
-                        # Convert to data URI
-                        image_uri = to_data_uri(image_data, image_name)
-                        user_msg["content"].append({
-                            "type": "image_url",
-                            "image_url": {"url": image_uri}
-                        })
-
-                formatted_messages[i] = user_msg
-                break
-
-        return formatted_messages
-
-    @classmethod
-    async def _create_temporary_account(cls, session: ClientSession, proxy: str = None) -> Dict[str, str]:
-        """
-        Create a temporary account with retry logic and rate limit handling.
-
-        Args:
-            session: The aiohttp ClientSession
-            proxy: Optional proxy URL
-
-        Returns:
-            Dict containing auth_token
-
-        Raises:
-            RateLimitError: If rate limited after retries
-        """
-        signup_headers = {
-            "Content-Type": "application/json",
-            "host": "puter.com", # Kept the previous fix for Host header
-            "connection": "keep-alive",
-            "sec-ch-ua-platform": "macOS",
-            "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36",
-            "sec-ch-ua": '"Google Chrome";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
-            "sec-ch-ua-mobile": "?0",
-            "accept": "*/*",
-            "origin": "https://puter.com",
-            "sec-fetch-site": "same-site",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-dest": "empty",
-            "referer": "https://puter.com/",
-            "accept-encoding": "gzip",
-            "accept-language": "en-US,en;q=0.9"
-        }
-
-        signup_data = {
-            "is_temp": True,
-            "client_id": str(uuid.uuid4()) # Changed to generate a standard UUID
-        }
-
-        for attempt in range(cls.MAX_RETRIES):
-            try:
-                async with session.post(
-                    "https://puter.com/signup",
-                    headers=signup_headers,
-                    json=signup_data,
-                    proxy=proxy,
-                    timeout=30
-                ) as signup_response:
-                    if signup_response.status == 429:
-                        # Rate limited, wait and retry
-                        retry_after = int(signup_response.headers.get('Retry-After', cls.RATE_LIMIT_DELAY))
-                        if attempt < cls.MAX_RETRIES - 1:
-                            await asyncio.sleep(retry_after)
-                            continue
-                        else:
-                            # from ..errors import RateLimitError (ensure this import path is correct for your project)
-                            raise Exception(f"Rate limited by Puter.js API. Try again after {retry_after} seconds.") # Placeholder if RateLimitError not accessible
-
-                    if signup_response.status != 200:
-                        error_text = await signup_response.text()
-                        if attempt < cls.MAX_RETRIES - 1:
-                            # Wait before retrying
-                            await asyncio.sleep(cls.RETRY_DELAY * (attempt + 1))
-                            continue
-                        else:
-                            raise Exception(f"Failed to create temporary account. Status: {signup_response.status}, Details: {error_text}")
-
-                    try:
-                        return await signup_response.json()
-                    except Exception as e:
-                        if attempt < cls.MAX_RETRIES - 1:
-                            await asyncio.sleep(cls.RETRY_DELAY)
-                            continue
-                        else:
-                            raise Exception(f"Failed to parse signup response as JSON: {e}")
-
-            except Exception as e:
-                if attempt < cls.MAX_RETRIES - 1:
-                    # Exponential backoff
-                    await asyncio.sleep(cls.RETRY_DELAY * (2 ** attempt))
-                    continue
-                else:
-                    raise e
-
-        # Should not reach here, but just in case
-        raise Exception("Failed to create temporary account after multiple retries")
-
-    @classmethod
-    async def _get_app_token(cls, session: ClientSession, auth_token: str, proxy: str = None) -> Dict[str, str]:
-        """
-        Get app token with retry logic and rate limit handling.
-
-        Args:
-            session: The aiohttp ClientSession
-            auth_token: The auth token from signup
-            proxy: Optional proxy URL
-
-        Returns:
-            Dict containing app_token
-
-        Raises:
-            RateLimitError: If rate limited after retries
-        """
-        app_token_headers = {
-            "host": "api.puter.com",
-            "connection": "keep-alive",
-            "authorization": f"Bearer {auth_token}",
-            "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36",
-            "accept": "*/*",
-            "origin": "https://puter.com",
-            "sec-fetch-site": "same-site",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-dest": "empty",
-            "referer": "https://puter.com/",
-            "accept-encoding": "gzip",
-            "accept-language": "en-US,en;q=0.9",
-            "content-type": "application/json"
-        }
-
-        # Randomize origin slightly to avoid detection
-        origins = ["http://docs.puter.com", "https://docs.puter.com", "https://puter.com"]
-        app_token_data = {"origin": random.choice(origins)}
-
-        for attempt in range(cls.MAX_RETRIES):
-            try:
-                async with session.post(
-                    "https://api.puter.com/auth/get-user-app-token",
-                    headers=app_token_headers,
-                    json=app_token_data,
-                    proxy=proxy,
-                    timeout=30
-                ) as app_token_response:
-                    if app_token_response.status == 429:
-                        # Rate limited, wait and retry
-                        retry_after = int(app_token_response.headers.get('Retry-After', cls.RATE_LIMIT_DELAY))
-                        if attempt < cls.MAX_RETRIES - 1:
-                            await asyncio.sleep(retry_after)
-                            continue
-                        else:
-                            raise RateLimitError(f"Rate limited by Puter.js API. Try again after {retry_after} seconds.")
-
-                    if app_token_response.status != 200:
-                        error_text = await app_token_response.text()
-                        if attempt < cls.MAX_RETRIES - 1:
-                            # Wait before retrying
-                            await asyncio.sleep(cls.RETRY_DELAY * (attempt + 1))
-                            continue
-                        else:
-                            raise Exception(f"Failed to get app token. Status: {app_token_response.status}, Details: {error_text}")
-
-                    try:
-                        return await app_token_response.json()
-                    except Exception as e:
-                        if attempt < cls.MAX_RETRIES - 1:
-                            await asyncio.sleep(cls.RETRY_DELAY)
-                            continue
-                        else:
-                            raise Exception(f"Failed to parse app token response as JSON: {e}")
-
-            except Exception as e:
-                if attempt < cls.MAX_RETRIES - 1:
-                    # Exponential backoff
-                    await asyncio.sleep(cls.RETRY_DELAY * (2 ** attempt))
-                    continue
-                else:
-                    raise e
-
-        # Should not reach here, but just in case
-        raise Exception("Failed to get app token after multiple retries")
-
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        """Get the internal model name from the user-provided model name."""
-
-        if not model:
-            return cls.default_model
-
-        # Check if the model exists directly in our models list
-        if model in cls.models:
-            return model
-
-        # Check if there's an alias for this model
-        if model in cls.model_aliases:
-            alias = cls.model_aliases[model]
-            # If the alias is a list, randomly select one of the options
-            if isinstance(alias, list):
-                selected_model = random.choice(alias)
-                debug.log(f"PuterJS: Selected model '{selected_model}' from alias '{model}'")
-                return selected_model
-            debug.log(f"PuterJS: Using model '{alias}' for alias '{model}'")
-            return alias
-
-        raise ModelNotFoundError(f"Model {model} not found")
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        stream: bool = True,
-        conversation: Optional[JsonConversation] = None,
-        return_conversation: bool = True,
-        media: MediaListType = None, # Add media parameter for images
-        **kwargs
-    ) -> AsyncResult:
-        model = cls.get_model(model)
-
-        # Check if we need to use a vision model
-        has_images = False
-        if media is not None and len(media) > 0:
-            has_images = True
-            # If images are present and model doesn't support vision, switch to default vision model
-            if model not in cls.vision_models:
-                model = cls.default_vision_model
-
-        # Check for image URLs in messages
-        if not has_images:
-            for msg in messages:
-                if msg["role"] == "user":
-                    content = msg.get("content", "")
-                    if isinstance(content, list):
-                        for item in content:
-                            if item.get("type") == "image_url":
-                                has_images = True
-                                if model not in cls.vision_models:
-                                    model = cls.default_vision_model
-                                break
-                    elif isinstance(content, str):
-                        # Check for URLs in the text
-                        urls = re.findall(r'https?://\S+\.(jpg|jpeg|png|gif|webp)', content, re.IGNORECASE)
-                        if urls:
-                            has_images = True
-                            if model not in cls.vision_models:
-                                model = cls.default_vision_model
-                            break
-
-        # Check if the conversation is of the correct type
-        if conversation is not None and not isinstance(conversation, cls.Conversation):
-            # Convert generic JsonConversation to our specific Conversation class
-            new_conversation = cls.Conversation(model)
-            new_conversation.message_history = conversation.message_history.copy() if hasattr(conversation, 'message_history') else messages.copy()
-            conversation = new_conversation
-
-        # Initialize or update conversation
-        if conversation is None:
-            conversation = cls.Conversation(model)
-            # Format messages with images if needed
-            if has_images and media:
-                conversation.message_history = cls.format_messages_with_images(messages, media)
-            else:
-                conversation.message_history = messages.copy()
-        else:
-            # Update message history with new messages
-            if has_images and media:
-                formatted_messages = cls.format_messages_with_images(messages, media)
-                for msg in formatted_messages:
-                    if msg not in conversation.message_history:
-                        conversation.message_history.append(msg)
-            else:
-                for msg in messages:
-                    if msg not in conversation.message_history:
-                        conversation.message_history.append(msg)
-
-        # Get the authentication data for this specific model
-        auth_data = conversation.get_auth_for_model(model, cls)
-
-        # Check if we can use shared auth data
-        if model in cls._shared_auth_data and cls._shared_auth_data[model].is_valid(cls.TOKEN_EXPIRATION):
-            # Copy shared auth data to conversation
-            shared_auth = cls._shared_auth_data[model]
-            auth_data.auth_token = shared_auth.auth_token
-            auth_data.app_token = shared_auth.app_token
-            auth_data.created_at = shared_auth.created_at
-            auth_data.tokens_valid = shared_auth.tokens_valid
-
-        # Check if rate limited
-        if auth_data.is_rate_limited():
-            wait_time = auth_data.rate_limited_until - time.time()
-            if wait_time > 0:
-                raise RateLimitError(f"Rate limited. Please try again in {int(wait_time)} seconds.")
-
-        async with ClientSession() as session:
-            # Step 1: Create a temporary account (if needed)
-            if not auth_data.is_valid(cls.TOKEN_EXPIRATION):
-                try:
-                    # Try to authenticate
-                    signup_data = await cls._create_temporary_account(session, proxy)
-                    auth_data.auth_token = signup_data.get("token")
-
-                    if not auth_data.auth_token:
-                        raise RuntimeError(f"Error: No auth token in response for model {model}")
-
-                    # Get app token
-                    app_token_data = await cls._get_app_token(session, auth_data.auth_token, proxy)
-                    auth_data.app_token = app_token_data.get("token")
-
-                    if not auth_data.app_token:
-                        raise RuntimeError(f"Error: No app token in response for model {model}")
-
-                    # Mark tokens as valid
-                    auth_data.created_at = time.time()
-                    auth_data.tokens_valid = True
-
-                    # Update shared auth data
-                    cls._shared_auth_data[model] = auth_data
-
-                except RateLimitError as e:
-                    # Set rate limit and inform user
-                    auth_data.set_rate_limit(cls.RATE_LIMIT_DELAY)
-                    raise e
-                except Exception as e:
-                    raise RuntimeError(f"Error during authentication for model {model}: {str(e)}")
-
-            # Step 3: Make the chat request with proper image handling
-            try:
-                chat_headers = {
-                    "host": "api.puter.com",
-                    "connection": "keep-alive",
-                    "authorization": f"Bearer {auth_data.app_token}",
-                    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36",
-                    "content-type": "application/json;charset=UTF-8",
-                    # Set appropriate accept header based on stream mode
-                    "accept": "text/event-stream" if stream else "*/*",
-                    "origin": "http://docs.puter.com",
-                    "sec-fetch-site": "cross-site",
-                    "sec-fetch-mode": "cors",
-                    "sec-fetch-dest": "empty",
-                    "referer": "http://docs.puter.com/",
-                    "accept-encoding": "gzip",
-                    "accept-language": "en-US,en;q=0.9"
-                }
-
-                # Determine the appropriate driver based on the model
-                driver = cls.get_driver_for_model(model)
-
-                # Prepare messages for the API request
-                processed_messages = conversation.message_history
-
-                # Special handling for direct image URLs in the HTML example
-                if has_images and not any(isinstance(msg.get("content"), list) for msg in processed_messages):
-                    # This handles the case where an image URL is passed directly to puter.ai.chat()
-                    # as in the HTML example: puter.ai.chat("What do you see?", "https://assets.puter.site/doge.jpeg")
-                    for i, msg in enumerate(processed_messages):
-                        if msg["role"] == "user":
-                            # Check if there are image URLs in the media parameter
-                            if media and len(media) > 0:
-                                # Format with the media parameter
-                                processed_messages = cls.format_messages_with_images([msg], media)
-                            else:
-                                # Check for URLs in the text content
-                                content = msg.get("content", "")
-                                if isinstance(content, str):
-                                    urls = re.findall(r'https?://\S+\.(jpg|jpeg|png|gif|webp)', content, re.IGNORECASE)
-                                    if urls:
-                                        # Extract URLs from the text
-                                        text_parts = []
-                                        image_urls = []
-
-                                        # Simple URL extraction
-                                        words = content.split()
-                                        for word in words:
-                                            if re.match(r'https?://\S+\.(jpg|jpeg|png|gif|webp)', word, re.IGNORECASE):
-                                                image_urls.append(word)
-                                            else:
-                                                text_parts.append(word)
-
-                                        # Create formatted message with text and images
-                                        formatted_content = []
-                                        if text_parts:
-                                            formatted_content.append({
-                                                "type": "text",
-                                                "text": " ".join(text_parts)
-                                            })
-
-                                        for url in image_urls:
-                                            formatted_content.append({
-                                                "type": "image_url",
-                                                "image_url": {"url": url}
-                                            })
-
-                                        processed_messages[i]["content"] = formatted_content
-                            break
-
-                chat_data = {
-                    "interface": "puter-chat-completion",
-                    "driver": driver,
-                    "test_mode": False,
-                    "method": "complete",
-                    "args": {
-                        "messages": processed_messages,
-                        "model": model,
-                        "stream": stream,
-                        "max_tokens": kwargs.get("max_tokens", 4096)
-                    }
-                }
-
-                # Add any additional parameters from kwargs
-                for key, value in kwargs.items():
-                    if key not in ["messages", "model", "stream", "max_tokens"]:
-                        chat_data["args"][key] = value
-
-                # Try the chat request with retries
-                for attempt in range(cls.MAX_RETRIES):
-                    try:
-                        async with session.post(
-                            cls.api_endpoint,
-                            headers=chat_headers,
-                            json=chat_data,
-                            proxy=proxy,
-                            timeout=120 # Longer timeout for vision requests
-                        ) as response:
-                            if response.status == 429:
-                                # Rate limited, set rate limit and inform user
-                                retry_after = int(response.headers.get('Retry-After', cls.RATE_LIMIT_DELAY))
-                                auth_data.set_rate_limit(retry_after)
-                                if attempt < cls.MAX_RETRIES - 1:
-                                    await asyncio.sleep(min(retry_after, 10)) # Wait but cap at 10 seconds for retries
-                                    continue
-                                else:
-                                    raise RateLimitError(f"Rate limited by Puter.js API. Try again after {retry_after} seconds.")
-
-                            # Handle authentication errors
-                            if response.status in [401, 403]:
-                                error_text = await response.text()
-                                auth_data.invalidate() # Mark tokens as invalid
-                                if attempt < cls.MAX_RETRIES - 1:
-                                    # Try to get new tokens
-                                    signup_data = await cls._create_temporary_account(session, proxy)
-                                    auth_data.auth_token = signup_data.get("token")
-
-                                    app_token_data = await cls._get_app_token(session, auth_data.auth_token, proxy)
-                                    auth_data.app_token = app_token_data.get("token")
-
-                                    auth_data.created_at = time.time()
-                                    auth_data.tokens_valid = True
-
-                                    # Update shared auth data
-                                    cls._shared_auth_data[model] = auth_data
-
-                                    # Retry with new token
-                                    continue
-                                else:
-                                    raise Exception(f"Authentication failed after {cls.MAX_RETRIES} attempts: {error_text}")
-
-                            if response.status != 200:
-                                error_text = await response.text()
-                                if attempt < cls.MAX_RETRIES - 1:
-                                    # Wait before retrying
-                                    await asyncio.sleep(cls.RETRY_DELAY * (attempt + 1))
-                                    continue
-                                else:
-                                    raise Exception(f"Chat request failed. Status: {response.status}, Details: {error_text}")
-
-                            # Process successful response
-                            if stream:
-                                # Process the streaming response
-                                full_response = ""
-                                buffer = ""
-
-                                # Use iter_any() to process chunks as they arrive
-                                async for chunk in response.content.iter_any():
-                                    if chunk:
-                                        try:
-                                            chunk_text = chunk.decode('utf-8')
-                                            buffer += chunk_text
-
-                                            # Process complete lines in the buffer
-                                            lines = buffer.split('\n')
-                                            # Keep the last potentially incomplete line in the buffer
-                                            buffer = lines.pop() if lines else ""
-
-                                            for line in lines:
-                                                line = line.strip()
-                                                if not line:
-                                                    continue
-
-                                                # Handle different streaming formats
-                                                if line.startswith("data: "):
-                                                    line = line[6:] # Remove "data: " prefix
-
-                                                    # Skip "[DONE]" marker
-                                                    if line == "[DONE]":
-                                                        continue
-
-                                                    try:
-                                                        data = json.loads(line)
-
-                                                        # OpenAI format
-                                                        if "choices" in data:
-                                                            choice = data["choices"][0]
-                                                            delta = choice.get("delta", {})
-                                                            content = delta.get("content", "")
-                                                            if content:
-                                                                full_response += content
-                                                                yield content
-                                                        # Puter.js format
-                                                        elif "type" in data and data["type"] == "text":
-                                                            content = data.get("text", "")
-                                                            if content:
-                                                                full_response += content
-                                                                yield content
-                                                    except json.JSONDecodeError:
-                                                        # Not valid JSON, might be a plain text response
-                                                        if line and line != "[DONE]":
-                                                            full_response += line
-                                                            yield line
-                                                else:
-                                                    # Try to parse as JSON directly
-                                                    try:
-                                                        data = json.loads(line)
-                                                        if "type" in data and data["type"] == "text":
-                                                            content = data.get("text", "")
-                                                            if content:
-                                                                full_response += content
-                                                                yield content
-                                                    except json.JSONDecodeError:
-                                                        # Not valid JSON, might be a plain text response
-                                                        if line and line != "[DONE]":
-                                                            full_response += line
-                                                            yield line
-                                        except UnicodeDecodeError:
-                                            continue
-
-                                # Process any remaining data in the buffer
-                                if buffer:
-                                    if buffer.startswith("data: "):
-                                        buffer = buffer[6:] # Remove "data: " prefix
-
-                                    if buffer != "[DONE]":
-                                        try:
-                                            data = json.loads(buffer)
-                                            if "choices" in data:
-                                                choice = data["choices"][0]
-                                                delta = choice.get("delta", {})
-                                                content = delta.get("content", "")
-                                                if content:
-                                                    full_response += content
-                                                    yield content
-                                            elif "type" in data and data["type"] == "text":
-                                                content = data.get("text", "")
-                                                if content:
-                                                    full_response += content
-                                                    yield content
-                                        except json.JSONDecodeError:
-                                            # Not valid JSON, might be a plain text response
-                                            if buffer and buffer != "[DONE]":
-                                                full_response += buffer
-                                                yield buffer
-
-                                # Add the assistant's response to the conversation history
-                                if full_response:
-                                    conversation.message_history.append({
-                                        "role": "assistant",
-                                        "content": full_response
-                                    })
-
-                                # Return the conversation object if requested
-                                if return_conversation:
-                                    yield conversation
-
-                                # Signal completion
-                                yield FinishReason("stop")
-                            else:
-                                # Process non-streaming response
-                                try:
-                                    response_json = await response.json()
-                                except Exception as e:
-                                    error_text = await response.text()
-                                    if attempt < cls.MAX_RETRIES - 1:
-                                        await asyncio.sleep(cls.RETRY_DELAY)
-                                        continue
-                                    else:
-                                        raise Exception(f"Failed to parse chat response as JSON: {error_text}")
-
-                                if response_json.get("success") is True:
-                                    # Extract the content from the response
-                                    content = response_json.get("result", {}).get("message", {}).get("content", "")
-
-                                    # Add the assistant's response to the conversation history
-                                    if content:
-                                        conversation.message_history.append({
-                                            "role": "assistant",
-                                            "content": content
-                                        })
-
-                                    yield content
-
-                                    # Return the conversation object if requested
-                                    if return_conversation:
-                                        yield conversation
-
-                                    # Signal completion
-                                    yield FinishReason("stop")
-                                else:
-                                    # Handle error in response
-                                    error_msg = response_json.get("error", {}).get("message", "Unknown error")
-
-                                    # Check for rate limiting or auth errors in the error message
-                                    if "rate" in error_msg.lower() or "limit" in error_msg.lower():
-                                        auth_data.set_rate_limit()
-                                        if attempt < cls.MAX_RETRIES - 1:
-                                            await asyncio.sleep(cls.RETRY_DELAY)
-                                            continue
-                                        else:
-                                            raise RateLimitError(f"Rate limited: {error_msg}")
-
-                                    if "auth" in error_msg.lower() or "token" in error_msg.lower():
-                                        auth_data.invalidate()
-                                        if attempt < cls.MAX_RETRIES - 1:
-                                            # Try to get new tokens
-                                            signup_data = await cls._create_temporary_account(session, proxy)
-                                            auth_data.auth_token = signup_data.get("token")
-
-                                            app_token_data = await cls._get_app_token(session, auth_data.auth_token, proxy)
-                                            auth_data.app_token = app_token_data.get("token")
-
-                                            auth_data.created_at = time.time()
-                                            auth_data.tokens_valid = True
-
-                                            # Update headers with new token
-                                            chat_headers["authorization"] = f"Bearer {auth_data.app_token}"
-
-                                            # Update shared auth data
-                                            cls._shared_auth_data[model] = auth_data
-
-                                            # Retry with new token
-                                            continue
-
-                                    # For other errors, retry or raise
-                                    if attempt < cls.MAX_RETRIES - 1:
-                                        await asyncio.sleep(cls.RETRY_DELAY)
-                                        continue
-                                    else:
-                                        yield f"Error: {error_msg}"
-
-                        # If we get here, we've successfully processed the response
-                        break
-
-                    except RateLimitError as e:
-                        # Set rate limit and inform user
-                        auth_data.set_rate_limit(cls.RATE_LIMIT_DELAY)
-                        if attempt < cls.MAX_RETRIES - 1:
-                            await asyncio.sleep(min(cls.RATE_LIMIT_DELAY, 10)) # Wait but cap at 10 seconds for retries
-                            continue
-                        else:
-                            raise RuntimeError(str(e))
-
-                    except Exception as e:
-                        # For network errors or other exceptions
-                        if "token" in str(e).lower() or "auth" in str(e).lower():
-                            auth_data.invalidate()
-
-                        if attempt < cls.MAX_RETRIES - 1:
-                            await asyncio.sleep(cls.RETRY_DELAY * (attempt + 1))
-                            continue
-                        else:
-                            raise RuntimeError(str(e))
-
-            except Exception as e:
-                # If any error occurs outside the retry loop
-                if "token" in str(e).lower() or "auth" in str(e).lower():
-                    auth_data.invalidate()
-                raise RuntimeError(str(e))
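Note: the deleted PuterJS provider above resolved model aliases before every request. An alias maps either to one canonical id or to a list of interchangeable ids, from which one is picked at random to spread requests across equivalent upstream models. A minimal, self-contained sketch of that pattern (the two aliases below are copied from the deleted table; `resolve_alias` is an illustrative name, not part of g4f):

    import random

    model_aliases = {
        "llama-3-70b": "openrouter:meta-llama/llama-3-70b-instruct",
        "gpt-4o-mini": ["gpt-4o-mini", "openrouter:openai/gpt-4o-mini", "openrouter:openai/gpt-4o-mini-2024-07-18"],
    }

    def resolve_alias(model: str) -> str:
        # A list alias means "any of these ids is acceptable"; pick one at
        # random. A plain string alias maps straight through. Unknown names
        # fall back to themselves here (the real method raised instead).
        alias = model_aliases.get(model, model)
        return random.choice(alias) if isinstance(alias, list) else alias

    print(resolve_alias("gpt-4o-mini"))  # one of the three candidate ids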
FinishReason("stop") - else: - # Handle error in response - error_msg = response_json.get("error", {}).get("message", "Unknown error") - - # Check for rate limiting or auth errors in the error message - if "rate" in error_msg.lower() or "limit" in error_msg.lower(): - auth_data.set_rate_limit() - if attempt < cls.MAX_RETRIES - 1: - await asyncio.sleep(cls.RETRY_DELAY) - continue - else: - raise RateLimitError(f"Rate limited: {error_msg}") - - if "auth" in error_msg.lower() or "token" in error_msg.lower(): - auth_data.invalidate() - if attempt < cls.MAX_RETRIES - 1: - # Try to get new tokens - signup_data = await cls._create_temporary_account(session, proxy) - auth_data.auth_token = signup_data.get("token") - - app_token_data = await cls._get_app_token(session, auth_data.auth_token, proxy) - auth_data.app_token = app_token_data.get("token") - - auth_data.created_at = time.time() - auth_data.tokens_valid = True - - # Update headers with new token - chat_headers["authorization"] = f"Bearer {auth_data.app_token}" - - # Update shared auth data - cls._shared_auth_data[model] = auth_data - - # Retry with new token - continue - - # For other errors, retry or raise - if attempt < cls.MAX_RETRIES - 1: - await asyncio.sleep(cls.RETRY_DELAY) - continue - else: - yield f"Error: {error_msg}" - - # If we get here, we've successfully processed the response - break - - except RateLimitError as e: - # Set rate limit and inform user - auth_data.set_rate_limit(cls.RATE_LIMIT_DELAY) - if attempt < cls.MAX_RETRIES - 1: - await asyncio.sleep(min(cls.RATE_LIMIT_DELAY, 10)) # Wait but cap at 10 seconds for retries - continue - else: - raise RuntimeError(str(e)) - - except Exception as e: - # For network errors or other exceptions - if "token" in str(e).lower() or "auth" in str(e).lower(): - auth_data.invalidate() - - if attempt < cls.MAX_RETRIES - 1: - await asyncio.sleep(cls.RETRY_DELAY * (attempt + 1)) - continue - else: - raise RuntimeError(str(e)) - - except Exception as e: - # If any error occurs outside the retry loop - if "token" in str(e).lower() or "auth" in str(e).lower(): - auth_data.invalidate() - raise RuntimeError(str(e)) diff --git a/g4f/Provider/not_working/__init__.py b/g4f/Provider/not_working/__init__.py index 2b51f39d..d1a659f6 100644 --- a/g4f/Provider/not_working/__init__.py +++ b/g4f/Provider/not_working/__init__.py @@ -25,7 +25,6 @@ from .MagickPen import MagickPen from .Phind import Phind from .Poe import Poe from .Prodia import Prodia -from .PuterJS import PuterJS from .Raycast import Raycast from .RubiksAI import RubiksAI from .Theb import Theb diff --git a/g4f/Provider/openai/models.py b/g4f/Provider/openai/models.py index 99858a8d..9d581222 100644 --- a/g4f/Provider/openai/models.py +++ b/g4f/Provider/openai/models.py @@ -1,6 +1,6 @@ default_model = "auto" default_image_model = "dall-e-3" image_models = [default_image_model] -text_models = [default_model, "gpt-4", "gpt-4.1", "gpt-4.5", "gpt-4o", "gpt-4o-mini", "o1", "o1-preview", "o1-mini", "o3-mini", "o3-mini-high", "o4-mini", "o4-mini-high"] +text_models = [default_model, "gpt-4", "gpt-4.1", "gpt-4.5", "gpt-4o", "gpt-4o-mini", "o1", "o1-mini", "o3-mini", "o3-mini-high", "o4-mini", "o4-mini-high"] vision_models = text_models models = text_models + image_models \ No newline at end of file diff --git a/g4f/Provider/template/Puter.py b/g4f/Provider/template/Puter.py deleted file mode 100644 index cb16d12e..00000000 --- a/g4f/Provider/template/Puter.py +++ /dev/null @@ -1,72 +0,0 @@ -from __future__ import annotations - -from ...typing 
diff --git a/g4f/Provider/template/Puter.py b/g4f/Provider/template/Puter.py
deleted file mode 100644
index cb16d12e..00000000
--- a/g4f/Provider/template/Puter.py
+++ /dev/null
@@ -1,72 +0,0 @@
-from __future__ import annotations
-
-from ...typing import Messages, AsyncResult
-from ...providers.base_provider import AsyncGeneratorProvider, ProviderModelMixin
-
-class Puter(AsyncGeneratorProvider, ProviderModelMixin):
-    label = "Puter.js AI (live)"
-    working = True
-    models = [
-        {"group": "ChatGPT", "models": [
-            "gpt-4o-mini",
-            "gpt-4o",
-            "gpt-4.1",
-            "gpt-4.1-mini",
-            "gpt-4.1-nano",
-            "gpt-4.5-preview"
-        ]},
-        {"group": "O Models", "models": [
-            "o1",
-            "o1-mini",
-            "o1-pro",
-            "o3",
-            "o3-mini",
-            "o4-mini"
-        ]},
-        {"group": "Anthropic Claude", "models": [
-            "claude-3-7-sonnet",
-            "claude-3-5-sonnet"
-        ]},
-        {"group": "Deepseek", "models": [
-            "deepseek-chat",
-            "deepseek-reasoner"
-        ]},
-        {"group": "Google Gemini", "models": [
-            "gemini-2.0-flash",
-            "gemini-1.5-flash"
-        ]},
-        {"group": "Meta Llama", "models": [
-            "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
-            "meta-llama/Meta-Llama--70B-Instruct-Turbo",
-            "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo"
-        ]},
-        {"group": "Other Models", "models": [
-            "mistral-large-latest",
-            "pixtral-large-latest",
-            "codestral-latest",
-            "google/gemma-2-27b-it",
-            "grok-beta"
-        ]}
-    ]
-
-    @classmethod
-    def get_grouped_models(cls) -> dict[str, list[str]]:
-        return cls.models
-
-    def get_models(cls) -> list[str]:
-        models = []
-        for model in cls.models:
-            if "models" in model:
-                models.extend(model["models"])
-            else:
-                models.append(model)
-        return models
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        **kwargs
-    ) -> AsyncResult:
-        raise NotImplementedError()
\ No newline at end of file
diff --git a/g4f/Provider/template/__init__.py b/g4f/Provider/template/__init__.py
index 124c0b08..758bc5b4 100644
--- a/g4f/Provider/template/__init__.py
+++ b/g4f/Provider/template/__init__.py
@@ -1,3 +1,2 @@
 from .BackendApi import BackendApi
-from .OpenaiTemplate import OpenaiTemplate
-from .Puter import Puter
\ No newline at end of file
+from .OpenaiTemplate import OpenaiTemplate
\ No newline at end of file
diff --git a/g4f/gui/server/api.py b/g4f/gui/server/api.py
index 849ee2ba..4c453d11 100644
--- a/g4f/gui/server/api.py
+++ b/g4f/gui/server/api.py
@@ -46,10 +46,10 @@ class Api:
                 "label": model.split(":")[-1] if provider.__name__ == "AnyProvider" else model,
                 "default": model == provider.default_model,
                 "vision": model in provider.vision_models,
-                "audio": model in provider.audio_models,
+                "audio": False if provider.audio_models is None else model in provider.audio_models,
                 "video": model in provider.video_models,
                 "image": model in provider.image_models,
-                "count": provider.models_count.get(model),
+                "count": False if provider.models_count is None else provider.models_count.get(model),
             }
         if provider in Provider.__map__:
             provider = Provider.__map__[provider]
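Note: the two guards in the api.py hunk above exist because a membership test against `None` raises rather than returning `False`. A small sketch of the failure mode and the guarded form (the `audio_models` value is a stand-in for the provider attribute):

    audio_models = None  # a provider that defines no audio models

    model = "gpt-4o"
    # "model in audio_models" would raise:
    # TypeError: argument of type 'NoneType' is not iterable
    is_audio = False if audio_models is None else model in audio_models
    print(is_audio)  # False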
diff --git a/g4f/gui/server/backend_api.py b/g4f/gui/server/backend_api.py
index b8fa27ff..105ac1e4 100644
--- a/g4f/gui/server/backend_api.py
+++ b/g4f/gui/server/backend_api.py
@@ -408,7 +408,6 @@ class Backend_Api(Api):
                     mime_type = secure_filename(mime_type)
                     if safe_search[0] in mime_type:
                         self.match_files[search][file] = self.match_files[search].get(file, 0) + 1
-                        break
                     for tag in safe_search:
                         if tag in file.lower():
                             self.match_files[search][file] = self.match_files[search].get(file, 0) + 1
diff --git a/g4f/providers/response.py b/g4f/providers/response.py
index 63451efb..f8c56096 100644
--- a/g4f/providers/response.py
+++ b/g4f/providers/response.py
@@ -163,7 +163,19 @@ class ToolCalls(HiddenResponse):
         return self.list

 class Usage(JsonMixin, HiddenResponse):
-    pass
+    def __init__(
+        self,
+        promptTokens: int = None,
+        completionTokens: int = None,
+        **kwargs
+    ):
+        if promptTokens is not None:
+            kwargs["prompt_tokens"] = promptTokens
+        if completionTokens is not None:
+            kwargs["completion_tokens"] = completionTokens
+        if "total_tokens" not in kwargs and "prompt_tokens" in kwargs and "completion_tokens" in kwargs:
+            kwargs["total_tokens"] = kwargs["prompt_tokens"] + kwargs["completion_tokens"]
+        return super().__init__(**kwargs)

 class AuthResult(JsonMixin, HiddenResponse):
     pass
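Note: the reworked `Usage.__init__` above accepts the camelCase token counts some upstream APIs emit and normalizes them to the OpenAI-style snake_case keys, deriving `total_tokens` when it is absent. A standalone sketch of that normalization (re-implemented here for illustration; `normalize_usage` is not a g4f function):

    def normalize_usage(promptTokens: int = None, completionTokens: int = None, **kwargs) -> dict:
        # Map camelCase inputs onto the snake_case keys consumers expect.
        if promptTokens is not None:
            kwargs["prompt_tokens"] = promptTokens
        if completionTokens is not None:
            kwargs["completion_tokens"] = completionTokens
        # Derive the total only when both parts are present and no total was given.
        if "total_tokens" not in kwargs and "prompt_tokens" in kwargs and "completion_tokens" in kwargs:
            kwargs["total_tokens"] = kwargs["prompt_tokens"] + kwargs["completion_tokens"]
        return kwargs

    assert normalize_usage(promptTokens=12, completionTokens=30) == {
        "prompt_tokens": 12, "completion_tokens": 30, "total_tokens": 42,
    }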