feat: enhance provider support and add PuterJS provider (#2999)

* feat: enhance provider support and add PuterJS provider

- Add new PuterJS provider with extensive model support and authentication handling
- Add three new OIVSCode providers (OIVSCodeSer2, OIVSCodeSer5, OIVSCodeSer0501)
- Fix Blackbox provider with improved model handling and session generation
- Update model aliases across multiple providers for consistency
- Mark DDG provider as not working
- Move TypeGPT to not_working directory
- Fix model name formatting in DeepInfraChat and other providers (qwen3 → qwen-3)
- Add get_model method to LambdaChat and other providers for better model alias handling (a condensed sketch of the shared pattern follows this list)
- Add ModelNotFoundError import to providers that need it
- Update model definitions in models.py with new providers and aliases
- Fix client/stubs.py to allow arbitrary types in ChatCompletionMessage
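
For reference, the alias-resolution pattern these providers now share looks roughly like the sketch below. The class and model names here are illustrative stand-ins, not code from this commit; the real implementations land in Blackbox, LambdaChat and CohereForAI_C4AI_Command further down.

import random

class ModelNotFoundError(Exception):
    """Stand-in for g4f.errors.ModelNotFoundError."""

class ExampleProvider:
    default_model = "model-a"
    models = ["model-a", "model-b"]
    # An alias may map to one internal name or to a list of interchangeable candidates.
    model_aliases = {"model-a-mini": "model-a", "model-b-latest": ["model-b", "model-b-0501"]}

    @classmethod
    def get_model(cls, model: str) -> str:
        if not model:
            return cls.default_model
        if model in cls.models:
            return model
        if model in cls.model_aliases:
            alias = cls.model_aliases[model]
            # A list alias is resolved by picking one candidate at random.
            return random.choice(alias) if isinstance(alias, list) else alias
        raise ModelNotFoundError(f"Model {model} not found")

print(ExampleProvider.get_model("model-b-latest"))  # prints "model-b" or "model-b-0501"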

* Fix conflicts in g4f/Provider/needs_auth/Grok.py

* fix: update Blackbox provider default settings

- Changed  parameter to use only the passed value without fallback to 1024
- Set  to  instead of  in request payload

* feat: add WeWordle provider with gpt-4 support

- Created new WeWordle.py provider file implementing AsyncGeneratorProvider (a usage sketch follows this list)
- Added WeWordle class with API endpoint at wewordle.org/gptapi/v1/web/turbo
- Set provider properties: working=True, needs_auth=False, supports_stream=True
- Configured default_model as 'gpt-4' with retry mechanism for API requests
- Implemented URL sanitization logic to handle malformed URLs
- Added response parsing for different JSON response formats
- Added WeWordle to Provider/__init__.py imports
- Added WeWordle to default model providers list in models.py
- Added WeWordle to gpt_4 best_provider list in models.py
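
A minimal usage sketch for the new provider, assuming this branch is installed and importable as g4f (network access required; not part of the commit itself):

import asyncio
from g4f.Provider import WeWordle

async def main() -> None:
    messages = [{"role": "user", "content": "Say hello in one short sentence."}]
    # create_async_generator yields decoded text chunks, or raw JSON chunks if parsing fails.
    async for chunk in WeWordle.create_async_generator("gpt-4", messages):
        print(chunk, end="")

asyncio.run(main())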

* feat: add DocsBot provider with GPT-4o support

- Added new DocsBot.py provider file implementing AsyncGeneratorProvider and ProviderModelMixin
- Created Conversation class extending JsonConversation to track conversation state
- Implemented create_async_generator method with support for:
  - Streaming and non-streaming responses
  - System messages
  - Message history
  - Image handling via data URIs (see the encoding sketch after this list)
  - Conversation tracking
- Added DocsBot to Provider/__init__.py imports
- Added DocsBot to default and default_vision model providers in models.py
- Added DocsBot as a provider for gpt_4o model in models.py
- Set default_model and vision support to 'gpt-4o'
- Implemented API endpoint communication with docsbot.ai
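
The data-URI handling mentioned above boils down to base64-encoding the image and prefixing its MIME type. A generic stdlib sketch follows; the file name is hypothetical, and the provider itself uses g4f's own to_data_uri helper for this:

import base64
import mimetypes

def image_to_data_uri(path: str) -> str:
    """Encode a local image file as a data URI suitable for embedding in a request payload."""
    mime = mimetypes.guess_type(path)[0] or "application/octet-stream"
    with open(path, "rb") as f:
        encoded = base64.b64encode(f.read()).decode()
    return f"data:{mime};base64,{encoded}"

# e.g. image_to_data_uri("photo.jpg") -> "data:image/jpeg;base64,/9j/4AAQ..."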

* feat: add OpenAIFM provider and update audio model references

- Added new OpenAIFM provider in g4f/Provider/audio/OpenAIFM.py for text-to-speech functionality (a usage sketch follows this list)
- Updated PollinationsAI.py to rename "gpt-4o-audio" to "gpt-4o-mini-audio"
- Added OpenAIFM to audio provider imports in g4f/Provider/audio/__init__.py
- Modified save_response_media() in g4f/image/copy_images.py to handle source_url separately from media_url
- Added new gpt_4o_mini_tts AudioModel in g4f/models.py with OpenAIFM as best provider
- Updated ModelUtils dictionary in models.py to include both gpt_4o_mini_audio and gpt_4o_mini_tts
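
Sketch of calling the new text-to-speech provider directly, assuming this branch is importable as g4f; the audio dict keys ("voice", "instructions") match the OpenAIFM code added below:

import asyncio
from g4f.Provider.audio import OpenAIFM

async def main() -> None:
    messages = [{"role": "user", "content": "Welcome aboard, and thanks for flying with us today."}]
    generator = OpenAIFM.create_async_generator(
        "gpt-4o-mini-tts",
        messages,
        audio={"voice": "coral", "instructions": OpenAIFM.friendly},
    )
    # save_response_media stores the audio file and yields an AudioResponse pointing at /media/...
    async for item in generator:
        print(item)

asyncio.run(main())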

* fix: improve PuterJS provider and add Gemini to best providers

- Changed client_id generation in PuterJS from time-based to UUID format (see the identifier sketch after this list)
- Fixed duplicate json import in PuterJS.py
- Added uuid module import in PuterJS.py
- Changed host header from "api.puter.com" to "puter.com"
- Modified error handling to use Exception instead of RateLimitError
- Added Gemini to best_provider list for gemini-2.5-flash model
- Added Gemini to best_provider list for gemini-2.5-pro model
- Fixed missing newline at end of Gemini.py file
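
The client_id change in isolation; the time-based format shown here is only illustrative, not PuterJS's actual old format:

import time
import uuid

time_based_id = f"client_{int(time.time() * 1000)}"  # clock-derived IDs can collide across parallel requests
uuid_based_id = str(uuid.uuid4())                     # UUID4 IDs are effectively collision-free
print(time_based_id, uuid_based_id)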

---------

Co-authored-by: kqlio67 <kqlio67.noreply.github.com>
Author: kqlio67, committed by GitHub, 2025-05-19 09:44:15 +00:00
Parent: 8943850c62
Commit: f4cd4890d3
26 changed files with 2778 additions and 270 deletions


@@ -10,7 +10,7 @@ Usage:
     python -m etc.tool.commit [options]
 Options:
-    --model MODEL      Specify the AI model to use (default: claude-3.7-sonnet)
+    --model MODEL      Specify the AI model to use
     --edit             Edit the generated commit message before committing
     --no-commit        Generate message only without committing
     --list-models      List available AI models and exit


@@ -17,7 +17,7 @@ from ..image import to_data_uri
 from .helper import render_messages
 from ..providers.response import JsonConversation
 from ..tools.media import merge_media
-from ..errors import RateLimitError
+from ..errors import RateLimitError, ModelNotFoundError
 from .. import debug
 
 class Conversation(JsonConversation):
@@ -251,7 +251,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         "gemma-3-1b": "Gemma 3 1B",
         "gemma-3-27b": "Gemma 3 27B",
         "gemma-3-4b": "Gemma 3 4B",
-        "kimi-vl-a3b-thinking": "Kimi VL A3B Thinking",
+        "kimi-vl-thinking": "Kimi VL A3B Thinking",
         "llama-3.1-8b": "Llama 3.1 8B Instruct",
         "nemotron-253b": "Llama 3.1 Nemotron Ultra 253B v1",
         "llama-3.2-11b": "Llama 3.2 11B Vision Instruct",
@@ -263,8 +263,8 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         "llama-4-scout": "Llama 4 Scout",
         "mistral-7b": "Mistral 7B Instruct",
         "mistral-nemo": "Mistral Nemo",
-        "mistral-small-24b": "Mistral Small 3",
-        "mistral-small-24b": "Mistral-Small-24B-Instruct-2501",
+        "mistral-small": ["Mistral Small 3", "Mistral-Small-24B-Instruct-2501", "Mistral Small 3.1 24B"],
+        "mistral-small-24b": ["Mistral Small 3", "Mistral-Small-24B-Instruct-2501"],
         "mistral-small-3.1-24b": "Mistral Small 3.1 24B",
         "molmo-7b": "Molmo 7B D",
         "moonlight-16b": "Moonlight 16B A3B Instruct",
@@ -284,6 +284,30 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         "deepseek-r1-distill-qwen-14b": "R1 Distill Qwen 14B",
         "deepseek-r1-distill-qwen-32b": "R1 Distill Qwen 32B",
     }
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        """Get the internal model name from the user-provided model name."""
+        if not model:
+            return cls.default_model
+
+        # Check if the model exists directly in our models list
+        if model in cls.models:
+            return model
+
+        # Check if there's an alias for this model
+        if model in cls.model_aliases:
+            alias = cls.model_aliases[model]
+            # If the alias is a list, randomly select one of the options
+            if isinstance(alias, list):
+                selected_model = random.choice(alias)
+                debug.log(f"Blackbox: Selected model '{selected_model}' from alias '{model}'")
+                return selected_model
+            debug.log(f"Blackbox: Using model '{alias}' for alias '{model}'")
+            return alias
+
+        raise ModelNotFoundError(f"Model {model} not found")
 
     @classmethod
     def generate_session(cls, email: str, id_length: int = 21, days_ahead: int = 365) -> dict:
@@ -382,6 +406,25 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         chars = string.ascii_letters + string.digits
         return ''.join(random.choice(chars) for _ in range(length))
+
+    @classmethod
+    def generate_session_data(cls) -> dict:
+        """
+        Generate a complete session data object with random email and proper format.
+
+        Returns:
+            dict: A complete session data object ready to be used in API requests
+        """
+        # Generate random email
+        chars = string.ascii_lowercase + string.digits
+        random_team = ''.join(random.choice(chars) for _ in range(8))
+        request_email = f"{random_team}@blackbox.ai"
+
+        # Generate session with the email
+        session_data = cls.generate_session(request_email)
+        debug.log(f"Blackbox: Using generated session with email {request_email}")
+
+        return session_data
 
     @classmethod
     async def create_async_generator(
         cls,
@@ -437,15 +480,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
             "fileText": "",
             "title": ""
         }
-
-        # Generate a new email for each request instead of using the one stored in conversation
-        chars = string.ascii_lowercase + string.digits
-        random_team = ''.join(random.choice(chars) for _ in range(8))
-        request_email = f"{random_team}@blackbox.ai"
-
-        # Generate a session with the new email
-        session_data = cls.generate_session(request_email)
-        debug.log(f"Blackbox: Using generated session with email {request_email}")
 
         data = {
             "messages": current_messages,
@@ -484,7 +518,12 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                 "occupation": "",
                 "traits": []
             },
-            "session": session_data,
+            "webSearchModeOption": {
+                "autoMode": False,
+                "webMode": False,
+                "offlineMode": False
+            },
+            "session": cls.generate_session_data(),
             "isPremium": True,
             "subscriptionCache": {
                 "expiryTimestamp": None,
@@ -495,6 +534,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
             },
             "beastMode": False,
             "reasoningMode": False,
+            "workspaceId": "",
             "webSearchMode": False
         }


@@ -26,7 +26,7 @@ class DDG(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://duckduckgo.com"
     api_endpoint = "https://duckduckgo.com/duckchat/v1/chat"
     status_url = "https://duckduckgo.com/duckchat/v1/status"
-    working = True
+    working = False
     needs_auth = False
     supports_stream = True
     supports_system_message = True
@@ -38,6 +38,7 @@ class DDG(AsyncGeneratorProvider, ProviderModelMixin):
         "gpt-4o": default_model,
         "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
         "claude-3-haiku": "claude-3-haiku-20240307",
+        "mistral-small": "mistralai/Mistral-Small-24B-Instruct-2501",
         "mistral-small-24b": "mistralai/Mistral-Small-24B-Instruct-2501",
     }
     models = [default_model, "o3-mini"] + list(model_aliases.keys())


@@ -45,10 +45,11 @@ class DeepInfraChat(OpenaiTemplate):
     ] + vision_models
     model_aliases = {
         "deepseek-prover-v2-671b": "deepseek-ai/DeepSeek-Prover-V2-671B",
-        "qwen3-235b": "Qwen/Qwen3-235B-A22B",
-        "qwen3-30b": "Qwen/Qwen3-30B-A3B",
-        "qwen3-32b": "Qwen/Qwen3-32B",
-        "qwen3-14b": "Qwen/Qwen3-14B",
+        "deepseek-prover-v2": "deepseek-ai/DeepSeek-Prover-V2-671B",
+        "qwen-3-235b": "Qwen/Qwen3-235B-A22B",
+        "qwen-3-30b": "Qwen/Qwen3-30B-A3B",
+        "qwen-3-32b": "Qwen/Qwen3-32B",
+        "qwen-3-14b": "Qwen/Qwen3-14B",
         "llama-4-maverick": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
         "llama-4-maverick-17b": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
         "llama-4-scout": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
@@ -65,6 +66,7 @@ class DeepInfraChat(OpenaiTemplate):
         "llama-3.2-90b": "meta-llama/Llama-3.2-90B-Vision-Instruct",
         "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct",
         "deepseek-v3": default_model,
+        "mistral-small": "mistralai/Mistral-Small-24B-Instruct-2501",
         "mixtral-small-24b": "mistralai/Mistral-Small-24B-Instruct-2501",
         "deepseek-r1-turbo": "deepseek-ai/DeepSeek-R1-Turbo",
         "deepseek-r1": "deepseek-ai/DeepSeek-R1",

g4f/Provider/DocsBot.py (new file, 190 lines): diff suppressed because one or more lines are too long.


@@ -10,6 +10,8 @@ from ..requests import raise_for_status
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import format_prompt, get_last_user_message
 from ..providers.response import JsonConversation, TitleGeneration, Reasoning, FinishReason
+from ..errors import ModelNotFoundError
+from .. import debug
 
 class LambdaChat(AsyncGeneratorProvider, ProviderModelMixin):
     label = "Lambda Chat"
@@ -32,12 +34,35 @@ class LambdaChat(AsyncGeneratorProvider, ProviderModelMixin):
     ]
     model_aliases = {
         "deepseek-v3": default_model,
-        "hermes-3": "hermes-3-llama-3.1-405b-fp8",
-        "hermes-3-405b": "hermes3-405b-fp8-128k",
+        "hermes-3-405b": ["hermes3-405b-fp8-128k", "hermes-3-llama-3.1-405b-fp8"],
         "nemotron-70b": "llama3.1-nemotron-70b-instruct",
         "qwen-2.5-coder-32b": "qwen25-coder-32b-instruct"
     }
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        """Get the internal model name from the user-provided model name."""
+        if not model:
+            return cls.default_model
+
+        # Check if the model exists directly in our models list
+        if model in cls.models:
+            return model
+
+        # Check if there's an alias for this model
+        if model in cls.model_aliases:
+            alias = cls.model_aliases[model]
+            # If the alias is a list, randomly select one of the options
+            if isinstance(alias, list):
+                selected_model = random.choice(alias)
+                debug.log(f"PuterJS: Selected model '{selected_model}' from alias '{model}'")
+                return selected_model
+            debug.log(f"PuterJS: Using model '{alias}' for alias '{model}'")
+            return alias
+
+        raise ModelNotFoundError(f"Model {model} not found")
 
     @classmethod
     async def create_async_generator(
         cls, model: str, messages: Messages,


@@ -303,7 +303,7 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
         # xAI
         "grok-3-reason": "grok-3-reason",
         "o3-mini": "o3-mini-2025-01-31",
-        "qwen3-235b": "qwen3-235b-a22b",
+        "qwen-3-235b": "qwen3-235b-a22b",
     }
     _auth_code = None


@@ -0,0 +1,39 @@
from __future__ import annotations

import secrets
import string

from .template import OpenaiTemplate

class OIVSCodeSer0501(OpenaiTemplate):
    label = "OI VSCode Server 0501"
    url = "https://oi-vscode-server-0501.onrender.com"
    api_base = "https://oi-vscode-server-0501.onrender.com/v1"
    api_endpoint = "https://oi-vscode-server-0501.onrender.com/v1/chat/completions"

    working = True
    needs_auth = False
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = "gpt-4.1-mini"
    default_vision_model = default_model
    vision_models = [default_vision_model]
    models = vision_models

    @classmethod
    def get_headers(cls, stream: bool, api_key: str = None, headers: dict = None) -> dict:
        # Generate a random user ID similar to the JavaScript code
        userid = ''.join(secrets.choice(string.ascii_letters + string.digits) for _ in range(21))

        return {
            "Accept": "text/event-stream" if stream else "application/json",
            "Content-Type": "application/json",
            "userid": userid,
            **(
                {"Authorization": f"Bearer {api_key}"}
                if api_key else {}
            ),
            **({} if headers is None else headers)
        }


@@ -0,0 +1,39 @@
from __future__ import annotations

import secrets
import string

from .template import OpenaiTemplate

class OIVSCodeSer2(OpenaiTemplate):
    label = "OI VSCode Server 2"
    url = "https://oi-vscode-server-2.onrender.com"
    api_base = "https://oi-vscode-server-2.onrender.com/v1"
    api_endpoint = "https://oi-vscode-server-2.onrender.com/v1/chat/completions"

    working = True
    needs_auth = False
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = "gpt-4o-mini"
    default_vision_model = default_model
    vision_models = [default_vision_model]
    models = vision_models

    @classmethod
    def get_headers(cls, stream: bool, api_key: str = None, headers: dict = None) -> dict:
        # Generate a random user ID similar to the JavaScript code
        userid = ''.join(secrets.choice(string.ascii_letters + string.digits) for _ in range(21))

        return {
            "Accept": "text/event-stream" if stream else "application/json",
            "Content-Type": "application/json",
            "userid": userid,
            **(
                {"Authorization": f"Bearer {api_key}"}
                if api_key else {}
            ),
            **({} if headers is None else headers)
        }


@@ -0,0 +1,39 @@
from __future__ import annotations

import secrets
import string

from .template import OpenaiTemplate

class OIVSCodeSer5(OpenaiTemplate):
    label = "OI VSCode Server 5"
    url = "https://oi-vscode-server-5.onrender.com"
    api_base = "https://oi-vscode-server-5.onrender.com/v1"
    api_endpoint = "https://oi-vscode-server-5.onrender.com/v1/chat/completions"

    working = True
    needs_auth = False
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = "gpt-4.1-mini"
    default_vision_model = default_model
    vision_models = [default_vision_model]
    models = vision_models

    @classmethod
    def get_headers(cls, stream: bool, api_key: str = None, headers: dict = None) -> dict:
        # Generate a random user ID similar to the JavaScript code
        userid = ''.join(secrets.choice(string.ascii_letters + string.digits) for _ in range(21))

        return {
            "Accept": "text/event-stream" if stream else "application/json",
            "Content-Type": "application/json",
            "userid": userid,
            **(
                {"Authorization": f"Bearer {api_key}"}
                if api_key else {}
            ),
            **({} if headers is None else headers)
        }


@@ -125,7 +125,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
         #"bidara": "bidara", # Personas
 
         ### Audio Models ###
-        "gpt-4o-audio": "openai-audio",
+        "gpt-4o-mini-audio": "openai-audio",
 
         ### Image Models ###
         "sdxl-turbo": "turbo",

g4f/Provider/PuterJS.py (new file, 1242 lines): diff suppressed because one or more lines are too long.

g4f/Provider/WeWordle.py (new file, 157 lines):

@@ -0,0 +1,157 @@
from __future__ import annotations

import json
import asyncio
import re
from typing import Union
from aiohttp import ClientSession, ClientResponse, ClientResponseError, ClientConnectorError

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt

class WeWordle(AsyncGeneratorProvider, ProviderModelMixin):
    label = "WeWordle"
    url = "https://chat-gpt.com"
    api_endpoint = "https://wewordle.org/gptapi/v1/web/turbo"

    working = True
    needs_auth = False
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = 'gpt-4'
    models = [default_model]

    MAX_RETRIES = 3
    INITIAL_RETRY_DELAY_SECONDS = 5
    MAX_RETRY_DELAY_SECONDS = 60
    POST_REQUEST_DELAY_SECONDS = 1

    @staticmethod
    async def iter_any(response: ClientResponse):
        if response.headers.get("Transfer-Encoding") == "chunked" or \
           response.headers.get("Content-Type") == "text/event-stream":
            async for chunk in response.content:
                if chunk:
                    yield chunk.decode()
        else:
            content = await response.text()
            yield content

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)

        raw_url = cls.api_endpoint
        request_url = raw_url

        markdown_link_match = re.search(r'\]\((https?://[^\)]+)\)', raw_url)
        if markdown_link_match:
            actual_url = markdown_link_match.group(1)
            request_url = actual_url
        elif not (raw_url.startswith("http://") or raw_url.startswith("https://")):
            if "%5B" in raw_url and "%5D" in raw_url and "%28" in raw_url and "%29" in raw_url:
                try:
                    import urllib.parse
                    decoded_url_outer = urllib.parse.unquote(raw_url)
                    markdown_link_match_decoded = re.search(r'\]\((https?://[^\)]+)\)', decoded_url_outer)
                    if markdown_link_match_decoded:
                        actual_url = markdown_link_match_decoded.group(1)
                        request_url = actual_url
                    else:
                        raise ValueError(f"Invalid API endpoint URL format: {raw_url}")
                except Exception as e:
                    raise ValueError(f"Invalid API endpoint URL format: {raw_url}")
            elif not (raw_url.startswith("http://") or raw_url.startswith("https://")):
                raise ValueError(f"Invalid API endpoint URL format: {raw_url}")

        headers = {
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "cache-control": "no-cache",
            "content-type": "application/json",
            "dnt": "1",
            "origin": "https://chat-gpt.com",
            "pragma": "no-cache",
            "priority": "u=1, i",
            "referer": "https://chat-gpt.com/",
            "sec-ch-ua": "\"Not.A/Brand\";v=\"99\", \"Chromium\";v=\"136\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Linux\"",
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "cross-site",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36"
        }

        if isinstance(messages, list) and all(isinstance(m, dict) and "role" in m and "content" in m for m in messages):
            data_payload = {"messages": messages, "model": model, **kwargs}
        else:
            data_payload = {
                "messages": messages,
                "model": model,
                **kwargs
            }

        retries = 0
        current_delay = cls.INITIAL_RETRY_DELAY_SECONDS

        async with ClientSession(headers=headers) as session:
            while retries <= cls.MAX_RETRIES:
                try:
                    async with session.post(request_url, json=data_payload, proxy=proxy) as response:
                        if response.status == 429:
                            pass
                        response.raise_for_status()

                        async for chunk in cls.iter_any(response):
                            try:
                                json_data = json.loads(chunk)
                                if isinstance(json_data, dict):
                                    if "message" in json_data and isinstance(json_data["message"], dict) and "content" in json_data["message"]:
                                        yield json_data["message"]["content"]
                                    elif "choices" in json_data and isinstance(json_data["choices"], list) and \
                                         json_data["choices"] and isinstance(json_data["choices"][0], dict) and \
                                         "message" in json_data["choices"][0] and isinstance(json_data["choices"][0]["message"], dict) and \
                                         "content" in json_data["choices"][0]["message"]:
                                        yield json_data["choices"][0]["message"]["content"]
                                    elif "limit" in json_data and json_data["limit"] == 0:
                                        if "error" in json_data and isinstance(json_data["error"], dict) and "message" in json_data["error"]:
                                            raise ValueError(f"API error: {json_data['error']['message']}")
                                        else:
                                            yield chunk
                                else:
                                    yield chunk
                            except json.JSONDecodeError:
                                yield chunk

                        await asyncio.sleep(cls.POST_REQUEST_DELAY_SECONDS)
                        return
                except ClientResponseError as e:
                    if e.status == 429:
                        await asyncio.sleep(current_delay)
                        retries += 1
                        current_delay = min(current_delay * 2, cls.MAX_RETRY_DELAY_SECONDS)
                        if retries > cls.MAX_RETRIES:
                            raise
                    else:
                        raise
                except ClientConnectorError as e:
                    await asyncio.sleep(current_delay)
                    retries += 1
                    current_delay = min(current_delay * 2, cls.MAX_RETRY_DELAY_SECONDS)
                    if retries > cls.MAX_RETRIES:
                        raise
                except Exception as e:
                    raise

        raise Exception(f"Failed to get response from {request_url} after multiple retries")


@@ -43,6 +43,7 @@ from .Cloudflare import Cloudflare
 from .Copilot import Copilot
 from .DDG import DDG
 from .DeepInfraChat import DeepInfraChat
+from .DocsBot import DocsBot
 from .DuckDuckGo import DuckDuckGo
 from .Dynaspark import Dynaspark
 from .Free2GPT import Free2GPT
@@ -52,15 +53,19 @@ from .ImageLabs import ImageLabs
 from .LambdaChat import LambdaChat
 from .Liaobots import Liaobots
 from .LMArenaProvider import LMArenaProvider
+from .OIVSCodeSer2 import OIVSCodeSer2
+from .OIVSCodeSer5 import OIVSCodeSer5
+from .OIVSCodeSer0501 import OIVSCodeSer0501
 from .PerplexityLabs import PerplexityLabs
 from .Pi import Pi
 from .Pizzagpt import Pizzagpt
+from .PuterJS import PuterJS
 from .PollinationsAI import PollinationsAI
 from .PollinationsImage import PollinationsImage
 from .TeachAnything import TeachAnything
-from .TypeGPT import TypeGPT
 from .You import You
 from .Websim import Websim
+from .WeWordle import WeWordle
 from .Yqcloud import Yqcloud
 
 import sys


@@ -0,0 +1,149 @@
from __future__ import annotations

try:
    has_openaifm = True
except ImportError:
    has_openaifm = False

from aiohttp import ClientSession
from urllib.parse import urlencode
import json

from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import get_last_message
from ...image.copy_images import save_response_media

class OpenAIFM(AsyncGeneratorProvider, ProviderModelMixin):
    label = "OpenAI.fm"
    url = "https://www.openai.fm"
    api_endpoint = "https://www.openai.fm/api/generate"

    working = has_openaifm

    default_model = 'gpt-4o-mini-tts'
    default_audio_model = default_model
    default_voice = 'coral'
    voices = ['alloy', 'ash', 'ballad', default_voice, 'echo', 'fable', 'onyx', 'nova', 'sage', 'shimmer', 'verse']
    audio_models = {default_audio_model: voices}
    models = [default_audio_model]

    friendly = """Affect/personality: A cheerful guide
Tone: Friendly, clear, and reassuring, creating a calm atmosphere and making the listener feel confident and comfortable.
Pronunciation: Clear, articulate, and steady, ensuring each instruction is easily understood while maintaining a natural, conversational flow.
Pause: Brief, purposeful pauses after key instructions (e.g., "cross the street" and "turn right") to allow time for the listener to process the information and follow along.
Emotion: Warm and supportive, conveying empathy and care, ensuring the listener feels guided and safe throughout the journey."""

    patient_teacher = """Accent/Affect: Warm, refined, and gently instructive, reminiscent of a friendly art instructor.
Tone: Calm, encouraging, and articulate, clearly describing each step with patience.
Pacing: Slow and deliberate, pausing often to allow the listener to follow instructions comfortably.
Emotion: Cheerful, supportive, and pleasantly enthusiastic; convey genuine enjoyment and appreciation of art.
Pronunciation: Clearly articulate artistic terminology (e.g., "brushstrokes," "landscape," "palette") with gentle emphasis.
Personality Affect: Friendly and approachable with a hint of sophistication; speak confidently and reassuringly, guiding users through each painting step patiently and warmly."""

    noir_detective = """Affect: a mysterious noir detective
Tone: Cool, detached, but subtly reassuring - like they've seen it all and know how to handle a missing package like it's just another case.
Delivery: Slow and deliberate, with dramatic pauses to build suspense, as if every detail matters in this investigation.
Emotion: A mix of world-weariness and quiet determination, with just a hint of dry humor to keep things from getting too grim.
Punctuation: Short, punchy sentences with ellipses and dashes to create rhythm and tension, mimicking the inner monologue of a detective piecing together clues."""

    cowboy = """Voice: Warm, relaxed, and friendly, with a steady cowboy drawl that feels approachable.
Punctuation: Light and natural, with gentle pauses that create a conversational rhythm without feeling rushed.
Delivery: Smooth and easygoing, with a laid-back pace that reassures the listener while keeping things clear.
Phrasing: Simple, direct, and folksy, using casual, familiar language to make technical support feel more personable.
Tone: Lighthearted and welcoming, with a calm confidence that puts the caller at ease."""

    calm = """Voice Affect: Calm, composed, and reassuring; project quiet authority and confidence.
Tone: Sincere, empathetic, and gently authoritative - express genuine apology while conveying competence.
Pacing: Steady and moderate; unhurried enough to communicate care, yet efficient enough to demonstrate professionalism.
Emotion: Genuine empathy and understanding; speak with warmth, especially during apologies ("I'm very sorry for any disruption...").
Pronunciation: Clear and precise, emphasizing key reassurances ("smoothly," "quickly," "promptly") to reinforce confidence.
Pauses: Brief pauses after offering assistance or requesting details, highlighting willingness to listen and support."""

    scientific_style = """Voice: Authoritative and precise, with a measured, academic tone.
Tone: Formal and analytical, maintaining objectivity while conveying complex information.
Pacing: Moderate and deliberate, allowing time for complex concepts to be processed.
Pronunciation: Precise articulation of technical terms and scientific vocabulary.
Pauses: Strategic pauses after introducing new concepts to allow for comprehension.
Emotion: Restrained enthusiasm for discoveries and findings, conveying intellectual curiosity."""

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        prompt: str = None,
        audio: dict = {},
        **kwargs
    ) -> AsyncResult:
        # Retrieve parameters from the audio dictionary
        voice = audio.get("voice", kwargs.get("voice", cls.default_voice))
        instructions = audio.get("instructions", kwargs.get("instructions", cls.friendly))

        headers = {
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "cache-control": "no-cache",
            "pragma": "no-cache",
            "sec-fetch-dest": "audio",
            "sec-fetch-mode": "no-cors",
            "sec-fetch-site": "same-origin",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36",
            "referer": cls.url
        }

        # Using prompts or formatting messages
        text = get_last_message(messages, prompt)

        params = {
            "input": text,
            "prompt": instructions,
            "voice": voice
        }

        async with ClientSession(headers=headers) as session:
            # Print the full URL with parameters
            full_url = f"{cls.api_endpoint}?{urlencode(params)}"

            async with session.get(
                cls.api_endpoint,
                params=params,
                proxy=proxy
            ) as response:
                response.raise_for_status()
                async for chunk in save_response_media(response, text, [model, voice]):
                    yield chunk

@@ -1,3 +1,4 @@
 from .EdgeTTS import EdgeTTS
 from .gTTS import gTTS
 from .MarkItDown import MarkItDown
+from .OpenAIFM import OpenAIFM


@@ -8,6 +8,9 @@ from ...requests import raise_for_status
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..helper import format_prompt, get_last_user_message
 from ...providers.response import JsonConversation, TitleGeneration
+from ...errors import ModelNotFoundError
+from ... import debug
 
 class CohereForAI_C4AI_Command(AsyncGeneratorProvider, ProviderModelMixin):
     label = "CohereForAI C4AI Command"
@@ -26,12 +29,37 @@ class CohereForAI_C4AI_Command(AsyncGeneratorProvider, ProviderModelMixin):
         "command-r7b-12-2024",
         "command-r7b-arabic-02-2025",
     ]
+    model_aliases = {
+        "command-a": default_model,
+        "command-r-plus": ["command-r-plus-08-2024", "command-r-plus"],
+        "command-r": "command-r-08-2024",
+        "command-r7b": ["command-r7b-12-2024", "command-r7b-arabic-02-2025"],
+    }
 
     @classmethod
-    def get_model(cls, model: str, **kwargs) -> str:
-        if model in cls.model_aliases.values():
+    def get_model(cls, model: str) -> str:
+        """Get the internal model name from the user-provided model name."""
+        if not model:
+            return cls.default_model
+
+        # Check if the model exists directly in our models list
+        if model in cls.models:
             return model
-        return super().get_model(model, **kwargs)
+
+        # Check if there's an alias for this model
+        if model in cls.model_aliases:
+            alias = cls.model_aliases[model]
+            # If the alias is a list, randomly select one of the options
+            if isinstance(alias, list):
+                selected_model = random.choice(alias)
+                debug.log(f"PuterJS: Selected model '{selected_model}' from alias '{model}'")
+                return selected_model
+            debug.log(f"PuterJS: Using model '{alias}' for alias '{model}'")
+            return alias
+
+        raise ModelNotFoundError(f"Model {model} not found")
 
     @classmethod
     async def create_async_generator(


@@ -34,8 +34,13 @@ class Qwen_Qwen_3(AsyncGeneratorProvider, ProviderModelMixin):
         "qwen3-0.6b",
     }
     model_aliases = {
-        "qwen3-235b": default_model,
-        "qwen3-30b": "qwen3-30b-a3b"
+        "qwen-3-235b": default_model,
+        "qwen-3-30b": "qwen3-30b-a3b",
+        "qwen-3-32b": "qwen3-32b",
+        "qwen-3-14b": "qwen3-14b",
+        "qwen-3-4b": "qwen3-4b",
+        "qwen-3-1.7b": "qwen3-1.7b",
+        "qwen-3-0.6b": "qwen3-0.6b"
     }
 
     @classmethod


@@ -25,7 +25,7 @@ class HailuoAI(AsyncAuthedProvider, ProviderModelMixin):
     working = True
     use_nodriver = True
     supports_stream = True
-    default_model = "MiniMax"
+    default_model = "minimax"
 
     @classmethod
     async def on_auth_async(cls, proxy: str = None, **kwargs) -> AsyncIterator:


@@ -489,4 +489,4 @@ async def rotate_1psidts(url, cookies: dict, proxy: str | None = None) -> str:
         "domain": GOOGLE_COOKIE_DOMAIN,
     } for k, v in cookies.items()]))
     if new_1psidts:
-        return new_1psidts
\ No newline at end of file
+        return new_1psidts


@@ -150,4 +150,4 @@ class Grok(AsyncAuthedProvider, ProviderModelMixin):
                 except json.JSONDecodeError:
                     continue
             # if conversation_id is not None:
-            #     yield Conversation(conversation_id)
\ No newline at end of file
+            #     yield Conversation(conversation_id)


@@ -2,14 +2,15 @@ from __future__ import annotations
 
 import requests
 
-from .template import OpenaiTemplate
-from .. import debug
+from ..template import OpenaiTemplate
+from ...errors import ModelNotFoundError
+from ... import debug
 
 class TypeGPT(OpenaiTemplate):
     label = "TypeGpt"
     url = "https://chat.typegpt.net"
     api_base = "https://chat.typegpt.net/api/openai/v1"
-    working = True
+    working = False
     headers = {
         "accept": "application/json, text/event-stream",
         "accept-language": "de,en-US;q=0.9,en;q=0.8",
@@ -43,4 +44,4 @@ class TypeGPT(OpenaiTemplate):
         except Exception as e:
             cls.models = cls.fallback_models
             debug.log(f"Error fetching models: {e}")
-        return cls.models
\ No newline at end of file
+        return cls.models


@@ -29,4 +29,5 @@ from .ReplicateHome import ReplicateHome
 from .RobocodersAPI import RobocodersAPI
 from .RubiksAI import RubiksAI
 from .Theb import Theb
+from .TypeGPT import TypeGPT
 from .Upstage import Upstage


@@ -135,7 +135,9 @@ class ChatCompletionMessage(BaseModel):
     content: str
     reasoning_content: Optional[str] = None
     tool_calls: list[ToolCallModel] = None
+
+    model_config = {"arbitrary_types_allowed": True}
 
     @classmethod
     def model_construct(cls, content: str, reasoning_content: list[Reasoning] = None, tool_calls: list = None):
         return super().model_construct(role="assistant", content=content, **filter_none(tool_calls=tool_calls, reasoning_content=reasoning_content))
@@ -314,4 +316,4 @@ class ImagesResponse(BaseModel):
             model=model,
             provider=provider,
             created=created
-        )
\ No newline at end of file
+        )


@@ -66,15 +66,19 @@ async def save_response_media(response: StreamResponse, prompt: str, tags: list[
         with open(target_path, 'wb') as f:
             async for chunk in response.iter_content() if hasattr(response, "iter_content") else response.content.iter_any():
                 f.write(chunk)
 
+        # Base URL without request parameters
         media_url = f"/media/{filename}"
-        if response.method == "GET":
-            media_url = f"{media_url}?url={str(response.url)}"
+
+        # Save the original URL in the metadata, but not in the file path itself
+        source_url = str(response.url) if response.method == "GET" else None
+
         if content_type.startswith("audio/"):
-            yield AudioResponse(media_url, text=prompt)
+            yield AudioResponse(media_url, text=prompt, source_url=source_url)
         elif content_type.startswith("video/"):
-            yield VideoResponse(media_url, prompt)
+            yield VideoResponse(media_url, prompt, source_url=source_url)
         else:
-            yield ImageResponse(media_url, prompt)
+            yield ImageResponse(media_url, prompt, source_url=source_url)
 
 def get_filename(tags: list[str], alt: str, extension: str, image: str) -> str:
     return "".join((

File diff suppressed because it is too large.