feat: Add LMArenaBeta provider and update existing providers

- Introduced a new provider class `LMArenaBeta` in `g4f/Provider/LMArenaBeta.py` supporting both text and image models.
- Updated `g4f/Provider/Cloudflare.py` to remove an unused import of `Cookies`.
- Modified `g4f/Provider/PollinationsAI.py` to send the follow-up request only when the `action` keyword argument is explicitly `"next"`.
- Added a new provider `PuterJS` in `g4f/Provider/PuterJS.py` with model aliasing, per-family driver selection, and API-key authentication.
- Removed the old `PuterJS` implementation from `g4f/Provider/not_working/PuterJS.py`.
- Updated `g4f/Provider/__init__.py` to include the new `LMArenaBeta` and `PuterJS` providers.
- Changed the label of `HarProvider` in `g4f/Provider/har/__init__.py` to "LMArena (Har)".
- Adjusted the model list in `g4f/Provider/openai/models.py`, removing `o1-preview` from `text_models`.
- Updated `g4f/providers/response.py` so the `Usage` class constructor normalizes camelCase token fields and computes `total_tokens` when it is not supplied.
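For orientation, a minimal sketch (not part of the commit) of how the new provider could be driven through the g4f client API; the API key and model here are placeholders:

```python
import g4f
from g4f.client import Client

# Hypothetical call against the new provider; PuterJS raises MissingAuthError
# without a valid Puter API key.
client = Client(provider=g4f.Provider.PuterJS)
response = client.chat.completions.create(
    model="gpt-4o",  # PuterJS.default_model
    messages=[{"role": "user", "content": "Hello"}],
    api_key="<puter-api-key>",  # placeholder
)
print(response.choices[0].message.content)
```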
Author: hlohaus
Date:   2025-05-29 03:42:17 +02:00
Parent: cb840e3cdf
Commit: c6fead0313
17 changed files with 602 additions and 1319 deletions


@@ -12,6 +12,7 @@ g4f.debug.logging = True
 def read_code(text):
     if match := re.search(r"```(python|py|)\n(?P<code>[\S\s]+?)\n```", text):
         return match.group("code")
+    return text
 
 def input_command():
     print("Enter/Paste the cURL command. Ctrl-D or Ctrl-Z ( windows ) to save it.")
@@ -119,7 +120,8 @@ And replace "gpt-3.5-turbo" with `model`.
         stream=True,
     ):
         print(chunk, end="", flush=True)
-        response.append(chunk)
+        if not isinstance(chunk, Exception):
+            response.append(str(chunk))
     print()
     response = "".join(response)

g4f/Provider/Cloudflare.py

@@ -3,7 +3,7 @@ from __future__ import annotations
 import asyncio
 import json
 
-from ..typing import AsyncResult, Messages, Cookies
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin, get_running_loop
 from ..requests import Session, StreamSession, get_args_from_nodriver, raise_for_status, merge_cookies
 from ..requests import DEFAULT_HEADERS, has_nodriver, has_curl_cffi

g4f/Provider/LMArenaBeta.py (new file, 128 lines)

File diff suppressed because one or more lines are too long

g4f/Provider/PollinationsAI.py

@@ -458,7 +458,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
                     yield FinishReason(finish_reason)
             if reasoning:
                 yield Reasoning(status="Done")
-        if "action" in kwargs and "tools" not in data and "response_format" not in data:
+        if kwargs.get("action") == "next":
             data = {
                 "model": model,
                 "messages": messages + FOLLOWUPS_DEVELOPER_MESSAGE,

g4f/Provider/PuterJS.py (new file, 446 lines)

@@ -0,0 +1,446 @@
from __future__ import annotations
import random
import json
import requests
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages, MediaListType
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..providers.response import FinishReason, Usage, Reasoning, ToolCalls
from ..tools.media import render_messages
from ..requests import see_stream, raise_for_status
from ..errors import ResponseError, ModelNotFoundError, MissingAuthError
from . import debug
class PuterJS(AsyncGeneratorProvider, ProviderModelMixin):
label = "Puter.js"
url = "https://docs.puter.com/playground"
login_url = "https://github.com/HeyPuter/puter-cli"
api_endpoint = "https://api.puter.com/drivers/call"
working = True
needs_auth = True
default_model = 'gpt-4o'
default_vision_model = default_model
openai_models = [default_vision_model,"gpt-4o-mini", "o1", "o1-mini", "o1-pro", "o3", "o3-mini", "o4-mini", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", "gpt-4.5-preview"]
claude_models = ["claude-3-7-sonnet-20250219", "claude-3-7-sonnet-latest", "claude-3-5-sonnet-20241022", "claude-3-5-sonnet-latest", "claude-3-5-sonnet-20240620", "claude-3-haiku-20240307"]
mistral_models = ["ministral-3b-2410","ministral-3b-latest","ministral-8b-2410","ministral-8b-latest","open-mistral-7b","mistral-tiny","mistral-tiny-2312","open-mixtral-8x7b","mistral-small","mistral-small-2312","open-mixtral-8x22b","open-mixtral-8x22b-2404","mistral-large-2411","mistral-large-latest","pixtral-large-2411","pixtral-large-latest","mistral-large-pixtral-2411","codestral-2501","codestral-latest","codestral-2412","codestral-2411-rc5","pixtral-12b-2409","pixtral-12b","pixtral-12b-latest","mistral-small-2503","mistral-small-latest"]
xai_models = ["grok-beta", "grok-vision-beta"]
deepseek_models = ["deepseek-chat","deepseek-reasoner"]
gemini_models = ["gemini-1.5-flash","gemini-2.0-flash"]
model_aliases = {
### mistral_models ###
"mixtral-8x22b": ["open-mixtral-8x22b", "open-mixtral-8x22b-2404"],
"pixtral-large": ["pixtral-large-2411","pixtral-large-latest", "mistral-large-pixtral-2411"],
### openrouter_models ###
# llama
"llama-2-70b": "openrouter:meta-llama/llama-2-70b-chat",
"llama-3-8b": "openrouter:meta-llama/llama-3-8b-instruct",
"llama-3-70b": "openrouter:meta-llama/llama-3-70b-instruct",
"llama-3.1-8b": ["openrouter:meta-llama/llama-3.1-8b-instruct:free", "openrouter:meta-llama/llama-3.1-8b-instruct"],
"llama-3.1-70b": "openrouter:meta-llama/llama-3.1-70b-instruct",
"llama-3.1-405b": ["openrouter:meta-llama/llama-3.1-405b:free", "openrouter:meta-llama/llama-3.1-405b", "openrouter:meta-llama/llama-3.1-405b-instruct"],
"llama-3.2-1b": ["openrouter:meta-llama/llama-3.2-1b-instruct:free", "openrouter:meta-llama/llama-3.2-1b-instruct"],
"llama-3.2-3b": ["openrouter:meta-llama/llama-3.2-3b-instruct:free","openrouter:meta-llama/llama-3.2-3b-instruct"],
"llama-3.2-11b": ["openrouter:meta-llama/llama-3.2-11b-vision-instruct:free", "openrouter:meta-llama/llama-3.2-11b-vision-instruct"],
"llama-3.2-90b": "openrouter:meta-llama/llama-3.2-90b-vision-instruct",
"llama-3.3-8b": "openrouter:meta-llama/llama-3.3-8b-instruct:free",
"llama-3.3-70b": ["openrouter:meta-llama/llama-3.3-70b-instruct:free", "openrouter:meta-llama/llama-3.3-70b-instruct"],
"llama-4-maverick": ["openrouter:meta-llama/llama-4-maverick:free", "openrouter:meta-llama/llama-4-maverick"],
"llama-4-scout": ["openrouter:meta-llama/llama-4-scout:free", "openrouter:meta-llama/llama-4-scout"],
# google (gemini)
"gemini-1.5-flash": ["gemini-1.5-flash", "openrouter:google/gemini-flash-1.5", "gemini-flash-1.5-8b"],
"gemini-1.5-8b-flash": "openrouter:google/gemini-flash-1.5-8b",
"gemini-1.5-pro": "openrouter:google/gemini-pro-1.5",
"gemini-2.0-flash": ["gemini-2.0-flash", "openrouter:google/gemini-2.0-flash-lite-001", "openrouter:google/gemini-2.0-flash-001", "openrouter:google/gemini-2.0-flash-exp:free"],
"gemini-2.5-pro": ["openrouter:google/gemini-2.5-pro-preview", "openrouter:google/gemini-2.5-pro-exp-03-25"],
"gemini-2.5-flash": "openrouter:google/gemini-2.5-flash-preview",
"gemini-2.5-flash-thinking": "openrouter:google/gemini-2.5-flash-preview:thinking",
# google (gemma)
"gemma-2-9b": ["openrouter:google/gemma-2-9b-it:free","openrouter:google/gemma-2-9b-it"],
"gemma-2-27b": "openrouter:google/gemma-2-27b-it",
"gemma-3-1b": "openrouter:google/gemma-3-1b-it:free",
"gemma-3-4b": ["openrouter:google/gemma-3-4b-it:free", "openrouter:google/gemma-3-4b-it"],
"gemma-3-12b": ["openrouter:google/gemma-3-12b-it:free", "openrouter:google/gemma-3-12b-it"],
"gemma-3-27b": ["openrouter:google/gemma-3-27b-it:free", "openrouter:google/gemma-3-27b-it"],
# openai (gpt-3.5)
"gpt-3.5-turbo": ["openrouter:openai/gpt-3.5-turbo-0613", "openrouter:openai/gpt-3.5-turbo-1106", "openrouter:openai/gpt-3.5-turbo-0125", "openrouter:openai/gpt-3.5-turbo", "openrouter:openai/gpt-3.5-turbo-instruct", "openrouter:openai/gpt-3.5-turbo-16k"],
# openai (gpt-4)
"gpt-4": ["openrouter:openai/gpt-4-1106-preview", "openrouter:openai/gpt-4-32k", "openrouter:openai/gpt-4-32k-0314", "openrouter:openai/gpt-4", "openrouter:openai/gpt-4-0314",],
"gpt-4-turbo": ["openrouter:openai/gpt-4-turbo", "openrouter:openai/gpt-4-turbo-preview"],
# openai (gpt-4o)
"gpt-4o": ["gpt-4o", "openrouter:openai/gpt-4o-2024-08-06", "openrouter:openai/gpt-4o-2024-11-20", "openrouter:openai/chatgpt-4o-latest", "openrouter:openai/gpt-4o", "openrouter:openai/gpt-4o:extended", "openrouter:openai/gpt-4o-2024-05-13",],
"gpt-4o-search": "openrouter:openai/gpt-4o-search-preview",
"gpt-4o-mini": ["gpt-4o-mini", "openrouter:openai/gpt-4o-mini", "openrouter:openai/gpt-4o-mini-2024-07-18"],
"gpt-4o-mini-search": "openrouter:openai/gpt-4o-mini-search-preview",
# openai (o1)
"o1": ["o1", "openrouter:openai/o1", "openrouter:openai/o1-preview", "openrouter:openai/o1-preview-2024-09-12"],
"o1-mini": ["o1-mini", "openrouter:openai/o1-mini", "openrouter:openai/o1-mini-2024-09-12"],
"o1-pro": ["o1-pro", "openrouter:openai/o1-pro"],
# openai (o3)
"o3": ["o3", "openrouter:openai/o3"],
"o3-mini": ["o3-mini", "openrouter:openai/o3-mini", "openrouter:openai/o3-mini-high"],
"o3-mini-high": "openrouter:openai/o3-mini-high",
# openai (o4)
"o4-mini": ["o4-mini", "openrouter:openai/o4-mini"],
"o4-mini-high": "openrouter:openai/o4-mini-high",
# openai (gpt-4.1)
"gpt-4.1": ["gpt-4.1", "openrouter:openai/gpt-4.1"],
"gpt-4.1-mini": ["gpt-4.1-mini", "openrouter:openai/gpt-4.1-mini"],
"gpt-4.1-nano": ["gpt-4.1-nano", "openrouter:openai/gpt-4.1-nano"],
# openai (gpt-4.5)
"gpt-4.5": ["gpt-4.5-preview", "openrouter:openai/gpt-4.5-preview"],
# mistralai
"mistral-large": ["openrouter:mistralai/mistral-large", "openrouter:mistralai/mistral-large-2411", "openrouter:mistralai/mistral-large-2407", "openrouter:mistralai/pixtral-large-2411"],
"mistral-medium": ["openrouter:mistralai/mistral-medium", "openrouter:mistralai/mistral-medium-3"],
"mistral-small": ["mistral-small", "mistral-small-2312", "mistral-small-2503","mistral-small-latest", "openrouter:mistralai/mistral-small", "openrouter:mistralai/mistral-small-3.1-24b-instruct:free", "openrouter:mistralai/mistral-small-3.1-24b-instruct", "openrouter:mistralai/mistral-small-24b-instruct-2501:free", "openrouter:mistralai/mistral-small-24b-instruct-2501"],
"mistral-tiny": ["mistral-tiny", "mistral-tiny-2312", "openrouter:mistralai/mistral-tiny"],
"mistral-7b": ["open-mistral-7b", "openrouter:mistralai/mistral-7b-instruct", "openrouter:mistralai/mistral-7b-instruct:free", "openrouter:mistralai/mistral-7b-instruct-v0.1", "openrouter:mistralai/mistral-7b-instruct-v0.2", "openrouter:mistralai/mistral-7b-instruct-v0.3",],
"mixtral-8x7b": ["open-mixtral-8x7b", "openrouter:mistralai/mixtral-8x7b-instruct"],
"mixtral-8x22b": ["open-mixtral-8x22b", "open-mixtral-8x22b-2404", "openrouter:mistralai/mixtral-8x7b-instruct", "openrouter:mistralai/mixtral-8x22b-instruct"],
"ministral-8b": ["ministral-8b-2410", "ministral-8b-latest", "openrouter:mistral/ministral-8b", "openrouter:mistralai/ministral-8b"],
"mistral-nemo": ["openrouter:mistralai/mistral-nemo:free", "openrouter:mistralai/mistral-nemo"],
"ministral-3b": ["ministral-3b-2410", "ministral-3b-latest", "openrouter:mistralai/ministral-3b"],
"mistral-saba": "openrouter:mistralai/mistral-saba",
"codestral": ["codestral-2501","codestral-latest","codestral-2412","codestral-2411-rc5", "openrouter:mistralai/codestral-2501", "openrouter:mistralai/codestral-mamba"],
"pixtral-12b": ["pixtral-12b-2409","pixtral-12b","pixtral-12b-latest", "openrouter:mistralai/pixtral-12b"],
# nousresearch
"hermes-2-dpo": "openrouter:nousresearch/nous-hermes-2-mixtral-8x7b-dpo",
"hermes-2-pro": "openrouter:nousresearch/hermes-2-pro-llama-3-8b",
"hermes-3-70b": "openrouter:nousresearch/hermes-3-llama-3.1-70b",
"hermes-3-405b": "openrouter:nousresearch/hermes-3-llama-3.1-405b",
"deephermes-3-8b": "openrouter:nousresearch/deephermes-3-llama-3-8b-preview:free",
"deephermes-3-24b": "openrouter:nousresearch/deephermes-3-mistral-24b-preview:free",
# microsoft
"phi-3-mini": "openrouter:microsoft/phi-3-mini-128k-instruct",
"phi-3-medium": "openrouter:microsoft/phi-3-medium-128k-instruct",
"phi-3.5-mini": "openrouter:microsoft/phi-3.5-mini-128k-instruct",
"phi-4": "openrouter:microsoft/phi-4",
"phi-4-multimodal": "openrouter:microsoft/phi-4-multimodal-instruct",
"phi-4-reasoning": "openrouter:microsoft/phi-4-reasoning:free",
"phi-4-reasoning-plus": ["openrouter:microsoft/phi-4-reasoning-plus:free", "openrouter:microsoft/phi-4-reasoning-plus"],
"wizardlm-2-8x22b": "openrouter:microsoft/wizardlm-2-8x22b",
"mai-ds-r1": "openrouter:microsoft/mai-ds-r1:free",
# anthropic
"claude-3.7-sonnet": ["claude-3-7-sonnet-20250219", "claude-3-7-sonnet-latest", "openrouter:anthropic/claude-3.7-sonnet", "openrouter:anthropic/claude-3.7-sonnet:beta",],
"claude-3.7-sonnet-thinking": "openrouter:anthropic/claude-3.7-sonnet:thinking",
"claude-3.5-haiku": ["openrouter:anthropic/claude-3.5-haiku:beta", "openrouter:anthropic/claude-3.5-haiku", "openrouter:anthropic/claude-3.5-haiku-20241022:beta", "openrouter:anthropic/claude-3.5-haiku-20241022"],
"claude-3.5-sonnet": ["claude-3-5-sonnet-20241022", "claude-3-5-sonnet-latest", "claude-3-5-sonnet-20240620", "openrouter:anthropic/claude-3.5-sonnet-20240620:beta", "openrouter:anthropic/claude-3.5-sonnet-20240620", "openrouter:anthropic/claude-3.5-sonnet:beta", "openrouter:anthropic/claude-3.5-sonnet",],
"claude-3-haiku": ["claude-3-haiku-20240307", "openrouter:anthropic/claude-3-haiku:beta", "openrouter:anthropic/claude-3-haiku"],
"claude-3-opus": ["openrouter:anthropic/claude-3-opus:beta", "openrouter:anthropic/claude-3-opus"],
"claude-3-sonnet": ["openrouter:anthropic/claude-3-sonnet:beta", "openrouter:anthropic/claude-3-sonnet"],
"claude-2.1": ["openrouter:anthropic/claude-2.1:beta", "openrouter:anthropic/claude-2.1"],
"claude-2": ["openrouter:anthropic/claude-2:beta", "openrouter:anthropic/claude-2",],
"claude-2.0": ["openrouter:anthropic/claude-2.0:beta", "openrouter:anthropic/claude-2.0"],
# rekaai
"reka-flash": "openrouter:rekaai/reka-flash-3:free",
# cohere
"command-r7b": "openrouter:cohere/command-r7b-12-2024",
"command-r-plus": ["openrouter:cohere/command-r-plus-08-2024", "openrouter:cohere/command-r-plus", "openrouter:cohere/command-r-plus-04-2024"],
"command": "openrouter:cohere/command",
"command-r": ["openrouter:cohere/command-r-08-2024", "openrouter:cohere/command-r", "openrouter:cohere/command-r-03-2024"],
"command-a": "openrouter:cohere/command-a",
# qwen
"qwq-32b": ["openrouter:qwen/qwq-32b-preview", "openrouter:qwen/qwq-32b:free", "openrouter:qwen/qwq-32b"],
"qwen-vl-plus": "openrouter:qwen/qwen-vl-plus",
"qwen-vl-max": "openrouter:qwen/qwen-vl-max",
"qwen-turbo": "openrouter:qwen/qwen-turbo",
"qwen-2.5-vl-72b": ["openrouter:qwen/qwen2.5-vl-72b-instruct:free", "openrouter:qwen/qwen2.5-vl-72b-instruct"],
"qwen-plus": "openrouter:qwen/qwen-plus",
"qwen-max": "openrouter:qwen/qwen-max",
"qwen-2.5-coder-32b": ["openrouter:qwen/qwen-2.5-coder-32b-instruct:free", "openrouter:qwen/qwen-2.5-coder-32b-instruct"],
"qwen-2.5-7b": ["openrouter:qwen/qwen-2.5-7b-instruct:free", "openrouter:qwen/qwen-2.5-7b-instruct"],
"qwen-2.5-72b": ["openrouter:qwen/qwen-2.5-72b-instruct:free", "openrouter:qwen/qwen-2.5-72b-instruct"],
"qwen-2.5-vl-7b": ["openrouter:qwen/qwen-2.5-vl-7b-instruct:free", "openrouter:qwen/qwen-2.5-vl-7b-instruct"],
"qwen-2-72b": "openrouter:qwen/qwen-2-72b-instruct",
"qwen-3-0.6b": "openrouter:qwen/qwen3-0.6b-04-28:free",
"qwen-3-1.7b": "openrouter:qwen/qwen3-1.7b:free",
"qwen-3-4b": "openrouter:qwen/qwen3-4b:free",
"qwen-3-30b": ["openrouter:qwen/qwen3-30b-a3b:free", "openrouter:qwen/qwen3-30b-a3b"],
"qwen-3-8b": ["openrouter:qwen/qwen3-8b:free", "openrouter:qwen/qwen3-8b"],
"qwen-3-14b": ["openrouter:qwen/qwen3-14b:free", "openrouter:qwen/qwen3-14b"],
"qwen-3-32b": ["openrouter:qwen/qwen3-32b:free", "openrouter:qwen/qwen3-32b"],
"qwen-3-235b": ["openrouter:qwen/qwen3-235b-a22b:free", "openrouter:qwen/qwen3-235b-a22b"],
"qwen-2.5-coder-7b": "openrouter:qwen/qwen2.5-coder-7b-instruct",
"qwen-2.5-vl-3b": "openrouter:qwen/qwen2.5-vl-3b-instruct:free",
"qwen-2.5-vl-32b": ["openrouter:qwen/qwen2.5-vl-32b-instruct:free", "openrouter:qwen/qwen2.5-vl-32b-instruct"],
# deepseek
"deepseek-prover-v2": ["openrouter:deepseek/deepseek-prover-v2:free", "openrouter:deepseek/deepseek-prover-v2"],
"deepseek-v3": "openrouter:deepseek/deepseek-v3-base:free",
"deepseek-v3-0324": ["deepseek-chat", "openrouter:deepseek/deepseek-chat-v3-0324:free", "openrouter:deepseek/deepseek-chat-v3-0324"],
"deepseek-r1-zero": "openrouter:deepseek/deepseek-r1-zero:free",
"deepseek-r1-distill-llama-8b": "openrouter:deepseek/deepseek-r1-distill-llama-8b",
"deepseek-r1-distill-qwen-1.5b": "openrouter:deepseek/deepseek-r1-distill-qwen-1.5b",
"deepseek-r1-distill-qwen-32b": ["openrouter:deepseek/deepseek-r1-distill-qwen-32b:free", "openrouter:deepseek/deepseek-r1-distill-qwen-32b"],
"deepseek-r1-distill-qwen-14b": ["openrouter:deepseek/deepseek-r1-distill-qwen-14b:free","openrouter:deepseek/deepseek-r1-distill-qwen-14b"],
"deepseek-r1-distill-llama-70b": ["openrouter:deepseek/deepseek-r1-distill-llama-70b:free", "openrouter:deepseek/deepseek-r1-distill-llama-70b"],
"deepseek-r1": ["deepseek-reasoner", "openrouter:deepseek/deepseek-r1:free", "openrouter:deepseek/deepseek-r1"],
"deepseek-chat": ["deepseek-chat", "openrouter:deepseek/deepseek-chat:free", "openrouter:deepseek/deepseek-chat"],
"deepseek-coder": ["openrouter:deepseek/deepseek-coder"],
# inflection
"inflection-3-productivity": "openrouter:inflection/inflection-3-productivity",
"inflection-3-pi": "openrouter:inflection/inflection-3-pi",
# x-ai
"grok-3-mini": "openrouter:x-ai/grok-3-mini-beta",
"grok-3-beta": "openrouter:x-ai/grok-3-beta",
"grok-2": ["openrouter:x-ai/grok-2-vision-1212", "openrouter:x-ai/grok-2-1212"],
"grok": ["openrouter:x-ai/grok-vision-beta", "openrouter:x-ai/grok-2-vision-1212", "openrouter:x-ai/grok-2-1212", "grok-beta","grok-vision-beta", "openrouter:x-ai/grok-beta", "openrouter:x-ai/grok-3-beta", "openrouter:x-ai/grok-3-mini-beta"],
"grok-beta": ["grok-beta","grok-vision-beta", "openrouter:x-ai/grok-beta", "openrouter:x-ai/grok-3-beta"],
# perplexity
"sonar-reasoning-pro": "openrouter:perplexity/sonar-reasoning-pro",
"sonar-pro": "openrouter:perplexity/sonar-pro",
"sonar-deep-research": "openrouter:perplexity/sonar-deep-research",
"r1-1776": "openrouter:perplexity/r1-1776",
"sonar-reasoning": "openrouter:perplexity/sonar-reasoning",
"sonar": "openrouter:perplexity/sonar",
"llama-3.1-sonar-small-online": "openrouter:perplexity/llama-3.1-sonar-small-128k-online",
"llama-3.1-sonar-large-online": "openrouter:perplexity/llama-3.1-sonar-large-128k-online",
# nvidia
"nemotron-49b": ["openrouter:nvidia/llama-3.3-nemotron-super-49b-v1:free", "openrouter:nvidia/llama-3.3-nemotron-super-49b-v1"],
"nemotron-70b": "openrouter:nvidia/llama-3.1-nemotron-70b-instruct",
"nemotron-253b": "openrouter:nvidia/llama-3.1-nemotron-ultra-253b-v1:free",
# thudm
"glm-4": ["openrouter:thudm/glm-4-32b:free", "openrouter:thudm/glm-4-32b", "openrouter:thudm/glm-4-9b:free",],
"glm-4-32b": ["openrouter:thudm/glm-4-32b:free", "openrouter:thudm/glm-4-32b"],
"glm-z1-32b": ["openrouter:thudm/glm-z1-32b:free", "openrouter:thudm/glm-z1-32b"],
"glm-4-9b": "openrouter:thudm/glm-4-9b:free",
"glm-z1-9b": "openrouter:thudm/glm-z1-9b:free",
"glm-z1-rumination-32b": "openrouter:thudm/glm-z1-rumination-32b",
# minimax
"minimax": "openrouter:minimax/minimax-01",
# cognitivecomputations
"dolphin-3.0-r1-24b": "openrouter:cognitivecomputations/dolphin3.0-r1-mistral-24b:free",
"dolphin-3.0-24b": "openrouter:cognitivecomputations/dolphin3.0-mistral-24b:free",
"dolphin-8x22b": "openrouter:cognitivecomputations/dolphin-mixtral-8x22b",
# agentica-org
"deepcoder-14b": "openrouter:agentica-org/deepcoder-14b-preview:free",
# moonshotai
"kimi-vl-thinking": "openrouter:moonshotai/kimi-vl-a3b-thinking:free",
"moonlight-16b": "openrouter:moonshotai/moonlight-16b-a3b-instruct:free",
# featherless
"qwerky-72b": "openrouter:featherless/qwerky-72b:free",
# liquid
"lfm-7b": "openrouter:liquid/lfm-7b",
"lfm-3b": "openrouter:liquid/lfm-3b",
"lfm-40b": "openrouter:liquid/lfm-40b",
}
@classmethod
def get_models(cls) -> list[str]:
if not cls.models:
try:
url = "https://api.puter.com/puterai/chat/models/"
cls.models = requests.get(url).json().get("models", [])
except Exception as e:
debug.log(f"PuterJS: Failed to fetch models from API: {e}")
cls.models = list(cls.model_aliases.keys())
cls.vision_models = []
for model in cls.models:
for tag in ["vision", "multimodal", "gpt", "o1", "o3", "o4"]:
if tag in model:
cls.vision_models.append(model)
return cls.models
@staticmethod
def get_driver_for_model(model: str) -> str:
"""Determine the appropriate driver based on the model name."""
if model in PuterJS.openai_models:
return "openai-completion"
elif model in PuterJS.claude_models:
return "claude"
elif model in PuterJS.mistral_models:
return "mistral"
elif model in PuterJS.xai_models:
return "xai"
elif model in PuterJS.deepseek_models:
return "deepseek"
elif model in PuterJS.gemini_models:
return "gemini"
elif "openrouter:" in model:
return "openrouter"
else:
# Default to OpenAI for unknown models
return "openai-completion"
@classmethod
def get_model(cls, model: str) -> str:
"""Get the internal model name from the user-provided model name."""
if not model:
return cls.default_model
# Check if the model exists directly in our models list
if model in cls.models:
return model
# Check if there's an alias for this model
if model in cls.model_aliases:
alias = cls.model_aliases[model]
# If the alias is a list, randomly select one of the options
if isinstance(alias, list):
selected_model = random.choice(alias)
debug.log(f"PuterJS: Selected model '{selected_model}' from alias '{model}'")
return selected_model
debug.log(f"PuterJS: Using model '{alias}' for alias '{model}'")
return alias
raise ModelNotFoundError(f"Model {model} not found")
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
stream: bool = True,
api_key: str = None,
media: MediaListType = None,
**kwargs
) -> AsyncResult:
if not api_key:
raise MissingAuthError("API key is required for Puter.js API")
# Check if we need to use a vision model
if not model and media is not None and len(media) > 0:
model = cls.default_vision_model
# Check for image URLs in messages
if not model:
for msg in messages:
if msg["role"] == "user":
content = msg.get("content", "")
if isinstance(content, list):
for item in content:
if item.get("type") == "image_url":
model = cls.default_vision_model
break
# Get the model name from the user-provided model
try:
model = cls.get_model(model)
except ModelNotFoundError:
pass
async with ClientSession() as session:
headers = {
"authorization": f"Bearer {api_key}",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36",
"content-type": "application/json;charset=UTF-8",
# Set appropriate accept header based on stream mode
"accept": "text/event-stream" if stream else "*/*",
"origin": "http://docs.puter.com",
"sec-fetch-site": "cross-site",
"sec-fetch-mode": "cors",
"sec-fetch-dest": "empty",
"referer": "http://docs.puter.com/",
"accept-encoding": "gzip",
"accept-language": "en-US,en;q=0.9"
}
# Determine the appropriate driver based on the model
driver = cls.get_driver_for_model(model)
json_data = {
"interface": "puter-chat-completion",
"driver": driver,
"test_mode": False,
"method": "complete",
"args": {
"messages": list(render_messages(messages, media)),
"model": model,
"stream": stream,
**kwargs
}
}
async with session.post(
cls.api_endpoint,
headers=headers,
json=json_data,
proxy=proxy
) as response:
await raise_for_status(response)
mime_type = response.headers.get("content-type", "")
if mime_type.startswith("text/plain"):
yield await response.text()
return
elif mime_type.startswith("text/event-stream"):
reasoning = False
async for result in see_stream(response.content):
if "error" in result:
raise ResponseError(result["error"].get("message", result["error"]))
if result.get("usage") is not None:
yield Usage(**result["usage"])
choices = result.get("choices", [{}])
choice = choices.pop() if choices else {}
content = choice.get("delta", {}).get("content")
if content:
yield content
tool_calls = choice.get("delta", {}).get("tool_calls")
if tool_calls:
yield ToolCalls(choice["delta"]["tool_calls"])
reasoning_content = choice.get("delta", {}).get("reasoning_content")
if reasoning_content:
reasoning = True
yield Reasoning(reasoning_content)
finish_reason = choice.get("finish_reason")
if finish_reason:
yield FinishReason(finish_reason)
if reasoning:
yield Reasoning(status="Done")
elif mime_type.startswith("application/json"):
result = await response.json()
if "choices" in result:
choice = result["choices"][0]
message = choice.get("message", {})
content = message.get("content", "")
if content:
yield content
if "tool_calls" in message:
yield ToolCalls(message["tool_calls"])
else:
raise ResponseError(result)
if result.get("usage") is not None:
yield Usage(**result["usage"])
finish_reason = choice.get("finish_reason")
if finish_reason:
yield FinishReason(finish_reason)
elif mime_type.startswith("application/x-ndjson"):
async for line in response.content:
data = json.loads(line)
if data.get("type") == "text":
yield data.get("text", "")
else:
raise ResponseError(f"Unexpected content type: {mime_type}")

g4f/Provider/__init__.py

@@ -10,7 +10,7 @@ try:
 except ImportError as e:
     debug.error("Deprecated providers not loaded:", e)
 from .needs_auth import *
-from .template import OpenaiTemplate, BackendApi, Puter
+from .template import OpenaiTemplate, BackendApi
 from .hf import HuggingFace, HuggingChat, HuggingFaceAPI, HuggingFaceInference, HuggingFaceMedia
 from .har import HarProvider
 try:
@@ -53,6 +53,7 @@ from .GizAI import GizAI
 from .ImageLabs import ImageLabs
 from .LambdaChat import LambdaChat
 from .LegacyLMArena import LegacyLMArena
+from .LMArenaBeta import LMArenaBeta
 from .OIVSCodeSer2 import OIVSCodeSer2
 from .OIVSCodeSer5 import OIVSCodeSer5
 from .OIVSCodeSer0501 import OIVSCodeSer0501
@@ -61,6 +62,7 @@ from .Pi import Pi
 from .Pizzagpt import Pizzagpt
 from .PollinationsAI import PollinationsAI
 from .PollinationsImage import PollinationsImage
+from .PuterJS import PuterJS
 from .TeachAnything import TeachAnything
 from .You import You
 from .Websim import Websim

g4f/Provider/har/__init__.py

@@ -15,7 +15,7 @@ from ..helper import get_last_user_message
 from ..openai.har_file import get_headers
 
 class HarProvider(AsyncGeneratorProvider, ProviderModelMixin):
-    label = "Legacy LM Arena (Har)"
+    label = "LMArena (Har)"
     url = "https://legacy.lmarena.ai"
     api_endpoint = "/queue/join?"
     working = True


@@ -126,7 +126,7 @@ class Qwen_Qwen_3(AsyncGeneratorProvider, ProviderModelMixin):
                         status=update[2].get('options', {}).get('title'))
                     is_thinking = True
                 elif update[2].get('type') == 'text':
-                    yield Reasoning(update[2].get('content'))
+                    yield update[2].get('content')
                     is_thinking = False
             elif isinstance(update, list) and isinstance(update[1], list) and len(
                     update[1]) > 4:
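After this fix, `'text'` updates stream as plain content and only thinking-phase updates are wrapped in `Reasoning`, so a consumer can route the two separately. A hypothetical consumer-side sketch:

```python
from g4f.providers.response import Reasoning

async def print_stream(stream):
    # Hypothetical consumer: thinking output and answer text now arrive
    # as distinct types and can be handled separately.
    async for chunk in stream:
        if isinstance(chunk, Reasoning):
            print(f"[thinking] {chunk}")
        else:
            print(chunk, end="")
```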


@@ -235,7 +235,7 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
         image_prompt = response_part = None
         last_content = ""
         youtube_ids = []
-        async for line in response.content:
+        for line in (await response.text()).split("\n"):
             try:
                 try:
                     line = json.loads(line)
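Reading the whole body once and splitting on newlines guarantees each `json.loads` call sees a complete line; iterating `response.content` can yield partial chunks that fail to parse, which is presumably the motivation here. The new pattern in isolation:

```python
import json

async def parse_lines(response):
    # Sketch: parse an aiohttp response body line by line; a whole-body read
    # avoids JSON lines torn across stream chunks.
    for line in (await response.text()).split("\n"):
        try:
            yield json.loads(line)
        except json.JSONDecodeError:
            continue  # skip non-JSON framing lines
```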

File diff suppressed because one or more lines are too long

g4f/Provider/not_working/__init__.py

@@ -25,7 +25,6 @@ from .MagickPen import MagickPen
 from .Phind import Phind
 from .Poe import Poe
 from .Prodia import Prodia
-from .PuterJS import PuterJS
 from .Raycast import Raycast
 from .RubiksAI import RubiksAI
 from .Theb import Theb

g4f/Provider/openai/models.py

@@ -1,6 +1,6 @@
 default_model = "auto"
 default_image_model = "dall-e-3"
 image_models = [default_image_model]
-text_models = [default_model, "gpt-4", "gpt-4.1", "gpt-4.5", "gpt-4o", "gpt-4o-mini", "o1", "o1-preview", "o1-mini", "o3-mini", "o3-mini-high", "o4-mini", "o4-mini-high"]
+text_models = [default_model, "gpt-4", "gpt-4.1", "gpt-4.5", "gpt-4o", "gpt-4o-mini", "o1", "o1-mini", "o3-mini", "o3-mini-high", "o4-mini", "o4-mini-high"]
 vision_models = text_models
 models = text_models + image_models

g4f/Provider/template/Puter.py (deleted file)

@@ -1,72 +0,0 @@
from __future__ import annotations
from ...typing import Messages, AsyncResult
from ...providers.base_provider import AsyncGeneratorProvider, ProviderModelMixin
class Puter(AsyncGeneratorProvider, ProviderModelMixin):
label = "Puter.js AI (live)"
working = True
models = [
{"group": "ChatGPT", "models": [
"gpt-4o-mini",
"gpt-4o",
"gpt-4.1",
"gpt-4.1-mini",
"gpt-4.1-nano",
"gpt-4.5-preview"
]},
{"group": "O Models", "models": [
"o1",
"o1-mini",
"o1-pro",
"o3",
"o3-mini",
"o4-mini"
]},
{"group": "Anthropic Claude", "models": [
"claude-3-7-sonnet",
"claude-3-5-sonnet"
]},
{"group": "Deepseek", "models": [
"deepseek-chat",
"deepseek-reasoner"
]},
{"group": "Google Gemini", "models": [
"gemini-2.0-flash",
"gemini-1.5-flash"
]},
{"group": "Meta Llama", "models": [
"meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
"meta-llama/Meta-Llama--70B-Instruct-Turbo",
"meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo"
]},
{"group": "Other Models", "models": [
"mistral-large-latest",
"pixtral-large-latest",
"codestral-latest",
"google/gemma-2-27b-it",
"grok-beta"
]}
]
@classmethod
def get_grouped_models(cls) -> dict[str, list[str]]:
return cls.models
def get_models(cls) -> list[str]:
models = []
for model in cls.models:
if "models" in model:
models.extend(model["models"])
else:
models.append(model)
return models
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
**kwargs
) -> AsyncResult:
raise NotImplementedError()

g4f/Provider/template/__init__.py

@@ -1,3 +1,2 @@
 from .BackendApi import BackendApi
-from .OpenaiTemplate import OpenaiTemplate
-from .Puter import Puter
+from .OpenaiTemplate import OpenaiTemplate


@@ -46,10 +46,10 @@ class Api:
                 "label": model.split(":")[-1] if provider.__name__ == "AnyProvider" else model,
                 "default": model == provider.default_model,
                 "vision": model in provider.vision_models,
-                "audio": model in provider.audio_models,
+                "audio": False if provider.audio_models is None else model in provider.audio_models,
                 "video": model in provider.video_models,
                 "image": model in provider.image_models,
-                "count": provider.models_count.get(model),
+                "count": False if provider.models_count is None else provider.models_count.get(model),
             }
             if provider in Provider.__map__:
                 provider = Provider.__map__[provider]
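Both changed fields apply the same None-safe pattern: test the optional attribute before doing a membership check or `.get()`, so providers that define neither `audio_models` nor `models_count` no longer raise. The pattern in isolation, with a hypothetical provider:

```python
class DummyProvider:  # hypothetical provider without audio support
    audio_models = None
    models_count = None

model = "gpt-4o"
# `model in None` raises TypeError and `None.get(...)` raises AttributeError,
# so both lookups are guarded:
audio = False if DummyProvider.audio_models is None else model in DummyProvider.audio_models
count = False if DummyProvider.models_count is None else DummyProvider.models_count.get(model)
print(audio, count)  # False False
```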


@@ -408,7 +408,6 @@ class Backend_Api(Api):
                 mime_type = secure_filename(mime_type)
                 if safe_search[0] in mime_type:
                     self.match_files[search][file] = self.match_files[search].get(file, 0) + 1
-                    break
                 for tag in safe_search:
                     if tag in file.lower():
                         self.match_files[search][file] = self.match_files[search].get(file, 0) + 1
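With the `break` gone, the tag loop runs even after a mime-type hit, so a file matching both criteria is now counted twice, which weights it higher in the match ranking (presumably the intent). A small reproduction:

```python
match_files = {"search": {}}
file, mime_type, safe_search = "audio_intro.mp3", "audio/mpeg", ["audio"]

if safe_search[0] in mime_type:
    match_files["search"][file] = match_files["search"].get(file, 0) + 1
for tag in safe_search:  # without the break, the tag check always runs too
    if tag in file.lower():
        match_files["search"][file] = match_files["search"].get(file, 0) + 1

print(match_files["search"][file])  # 2: counted once per matching criterion
```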

g4f/providers/response.py

@@ -163,7 +163,19 @@ class ToolCalls(HiddenResponse):
         return self.list
 
 class Usage(JsonMixin, HiddenResponse):
-    pass
+    def __init__(
+        self,
+        promptTokens: int = None,
+        completionTokens: int = None,
+        **kwargs
+    ):
+        if promptTokens is not None:
+            kwargs["prompt_tokens"] = promptTokens
+        if completionTokens is not None:
+            kwargs["completion_tokens"] = completionTokens
+        if "total_tokens" not in kwargs and "prompt_tokens" in kwargs and "completion_tokens" in kwargs:
+            kwargs["total_tokens"] = kwargs["prompt_tokens"] + kwargs["completion_tokens"]
+        return super().__init__(**kwargs)
 
 class AuthResult(JsonMixin, HiddenResponse):
     pass
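The new constructor normalizes the camelCase token fields that some APIs (such as Puter's) return and derives `total_tokens` when absent. Assuming `JsonMixin` exposes keyword arguments as attributes, the behavior would be:

```python
from g4f.providers.response import Usage

u = Usage(promptTokens=12, completionTokens=30)
print(u.prompt_tokens, u.completion_tokens, u.total_tokens)  # 12 30 42

# An explicitly supplied total is left untouched:
u2 = Usage(prompt_tokens=1, completion_tokens=2, total_tokens=99)
print(u2.total_tokens)  # 99
```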