refactor: remove deprecated providers, rename LMArenaProvider, update LMArena and models

- Deleted multiple deprecated providers including Acytoo, AiAsk, AiService, Aibn, Aivvm, Berlin, ChatAnywhere, ChatgptDuo, CodeLinkAva, Cromicle, DfeHub, EasyChat, FakeGpt, FastGpt, Forefront, GPTalk, GeekGpt, GetGpt, H2o, Hashnode, Myshell, NoowAi, Opchatgpts, OpenAssistant, V50, Vitalentum, VoiGpt, Wewordle, Wuguokai, Ylokh, Yqcloud, and corresponding deprecated/__init__.py
- Renamed LMArenaProvider.py to LMArena.py and incorporated its functionality with enhancements, including updated model lists and aliases, comprehensive model discovery, payload building, and an asynchronous generator for completions (see the usage sketch after this list)
- Removed LMArenaProvider import and added LMArena import in Provider/__init__.py
- Modified the Blackbox provider:
  - Removed the generate_session_data method and updated generate_session to use a fixed email
  - Updated the session payload in the send request to call generate_session without an email argument
  - Added an asyncMode flag set to False in the session payload
- In DeepInfraChat, removed the model aliases "llama-4-maverick-17b" and "llama-4-scout-17b"
- In PollinationsAI, updated model aliases: replaced "command-r-plus-08-2024" with "command-r-plus"; added "gpt-image" and "grok-3-mini" aliases
- In LambdaChat, added "llama-3.3-70b" mapping to "llama3.3-70b-instruct-fp8"
- In hf_space:
  - Deleted the Qwen_QVQ_72B and Voodoohop_Flux1Schnell providers
  - Updated model_aliases in Qwen_Qwen_2_5_Max to fix the alias key from "qwen-2-5-max" to "qwen-2.5-max"
  - Changed the model alias in StabilityAI_SD35Large from "stable-diffusion-3.5-large" to "sd-3.5-large"
  - Removed imports of the deleted providers in hf_space/__init__.py and updated defaults accordingly
- In BingCreateImages, changed the import to a relative import from .bing.create_images
- Moved the bing directory into the needs_auth directory and updated imports accordingly
- Changed the PuterJS provider:
  - Changed the working flag from True to False
  - Changed the return_conversation default from False to True in create_async_generator
  - Changed yielded error messages to raised exceptions for authentication and rate limits
- Modified models.py:
  - Added a ModelRegistry class for dynamic registration and lookup of Model instances
  - Modified the Model dataclass to auto-register instances on initialization via ModelRegistry (see the registry sketch after this list)
  - Adjusted imports and removed PuterJS from the provider lists and best_provider assignments
  - Replaced many references to PuterJS as best_provider with LMArena or IterListProvider, including core models such as gpt-3.5-turbo, gpt-4, gpt-4o, the Llama series, Mistral, Hermes, Microsoft Phi, Gemini, Anthropic Claude, Cohere, Qwen, DeepSeek, and others
  - Fixed aliases and model names (e.g., "qwen-2-5-max" to "qwen-2.5-max")
  - Removed outdated or deprecated model definitions referencing PuterJS
- Updated the HarProvider label from "LM Arena" to "LM Arena (Har)"
- Removed deprecated provider imports from Provider/__init__.py and updated the not_working directory imports accordingly
- Functions impacted: create_async_generator in the Blackbox, LMArena, PollinationsAI, LambdaChat, and PuterJS providers; model aliases and model definitions in models.py; the Provider package __init__.py files; the BingCreateImages import; plus deletions of numerous deprecated and not_working providers.
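
Rough usage sketch for the renamed LMArena provider (illustrative only: it assumes the public g4f package layout and example parameter values, not code from this commit):

import asyncio
from g4f.Provider import LMArena

async def main():
    messages = [{"role": "user", "content": "Hello, which model are you?"}]
    # create_async_generator streams text chunks; with return_conversation=True it may
    # also yield a JsonConversation object and a FinishReason, so filter for strings.
    async for chunk in LMArena.create_async_generator(
        model="claude-3.7-sonnet",  # resolved through model_aliases to a concrete arena model
        messages=messages,
        max_tokens=512,
        temperature=0.7,
    ):
        if isinstance(chunk, str):
            print(chunk, end="", flush=True)

asyncio.run(main())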
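
Registry sketch for the models.py changes (hypothetical names and signatures for illustration; the actual ModelRegistry in this commit may differ):

from dataclasses import dataclass

class ModelRegistry:
    # Class-level lookup table of every Model instance created so far.
    _models: dict = {}

    @classmethod
    def register(cls, model: "Model") -> None:
        cls._models[model.name] = model

    @classmethod
    def get(cls, name: str):
        return cls._models.get(name)

@dataclass
class Model:
    name: str
    base_provider: str = ""
    best_provider: object = None  # e.g. LMArena or an IterListProvider over several providers

    def __post_init__(self):
        # Auto-register on initialization so the model can be looked up by name later.
        ModelRegistry.register(self)

# Constructing a Model is enough to make it discoverable by name.
gpt_4 = Model(name="gpt-4", base_provider="OpenAI")
assert ModelRegistry.get("gpt-4") is gpt_4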
Author: kqlio67
Date: 2025-05-25 21:38:12 +03:00
Parent: f65617ee1e
Commit: 4f2bf3048b
66 changed files with 1157 additions and 4588 deletions


@ -310,18 +310,18 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
raise ModelNotFoundError(f"Model {model} not found")
@classmethod
def generate_session(cls, email: str, id_length: int = 21, days_ahead: int = 365) -> dict:
def generate_session(cls, id_length: int = 21, days_ahead: int = 365) -> dict:
"""
Generate a dynamic session with proper ID and expiry format using a specific email.
Generate a dynamic session with proper ID and expiry format using a fixed email.
Args:
email: The email to use for this session
id_length: Length of the numeric ID (default: 21)
days_ahead: Number of days ahead for expiry (default: 365)
Returns:
dict: A session dictionary with user information and expiry
"""
# Generate numeric ID
numeric_id = ''.join(random.choice('0123456789') for _ in range(id_length))
@ -337,7 +337,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
return {
"user": {
"name": "BLACKBOX AI",
"email": email,
"email": "blackboxapp@blackboxai.tech",
"image": image_url,
"id": numeric_id
},
@ -406,25 +406,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
chars = string.ascii_letters + string.digits
return ''.join(random.choice(chars) for _ in range(length))
@classmethod
def generate_session_data(cls) -> dict:
"""
Generate a complete session data object with random email and proper format.
Returns:
dict: A complete session data object ready to be used in API requests
"""
# Generate random email
chars = string.ascii_lowercase + string.digits
random_team = ''.join(random.choice(chars) for _ in range(8))
request_email = f"{random_team}@blackbox.ai"
# Generate session with the email
session_data = cls.generate_session(request_email)
debug.log(f"Blackbox: Using generated session with email {request_email}")
return session_data
@classmethod
async def create_async_generator(
cls,
@ -523,7 +504,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
"webMode": False,
"offlineMode": False
},
"session": cls.generate_session_data(),
"session": cls.generate_session(),
"isPremium": True,
"subscriptionCache": {
"expiryTimestamp": None,
@ -535,6 +516,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
"beastMode": False,
"reasoningMode": False,
"workspaceId": "",
"asyncMode": False,
"webSearchMode": False
}


@ -51,9 +51,7 @@ class DeepInfraChat(OpenaiTemplate):
"qwen-3-32b": "Qwen/Qwen3-32B",
"qwen-3-14b": "Qwen/Qwen3-14B",
"llama-4-maverick": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
"llama-4-maverick-17b": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
"llama-4-scout": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
"llama-4-scout-17b": "meta-llama/Llama-4-Scout-17B-16E-Instruct",
"phi-4-reasoning-plus": "microsoft/phi-4-reasoning-plus",
#"": "meta-llama/Llama-Guard-4-12B",
"qwq-32b": "Qwen/QwQ-32B",

g4f/Provider/LMArena.py Normal file

@ -0,0 +1,579 @@
from __future__ import annotations
import json
import uuid
import sys
import random
from ..typing import AsyncResult, Messages, MediaListType
from ..requests import StreamSession, StreamResponse, FormData, raise_for_status
from ..providers.response import JsonConversation, FinishReason
from ..tools.media import merge_media
from ..image import to_bytes, is_accepted_format
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_last_user_message
from ..errors import ModelNotFoundError
from .. import debug
class LMArena(AsyncGeneratorProvider, ProviderModelMixin):
label = "LM Arena"
url = "https://lmarena.ai"
api_endpoint = "/queue/join?"
working = True
default_model = "chatgpt-4o-latest-20250326"
models = []
# Models from HAR data (manually added)
har_models = [
"chatgpt-4o-latest-20250326", "gemini-2.5-pro-preview-05-06", "o3-2025-04-16",
"o4-mini-2025-04-16", "qwen3-235b-a22b", "mistral-medium-2505",
"gemini-2.5-flash-preview-04-17", "gpt-4.1-2025-04-14",
"llama-4-maverick-03-26-experimental", "grok-3-preview-02-24",
"claude-3-7-sonnet-20250219", "claude-3-7-sonnet-20250219-thinking-32k",
"deepseek-v3-0324", "llama-4-maverick-17b-128e-instruct",
"llama-4-scout-17b-16e-instruct", "gpt-4.1-mini-2025-04-14",
"gpt-4.1-nano-2025-04-14"
]
# Models from JS data (manually added)
js_models = [
"gemini-2.0-flash-001", "gemini-2.0-flash-lite-preview-02-05",
"gemma-3-27b-it", "gemma-3-12b-it", "gemma-3-4b-it",
"deepseek-r1", "claude-3-5-sonnet-20241022", "o3-mini"
]
# Updated vision models list from JS data
vision_models = [
"gemini-2.5-pro-preview-05-06", "o3-2025-04-16", "o4-mini-2025-04-16",
"mistral-medium-2505", "gemini-2.5-flash-preview-04-17", "gpt-4.1-2025-04-14",
"claude-3-7-sonnet-20250219", "claude-3-7-sonnet-20250219-thinking-32k",
"llama-4-maverick-17b-128e-instruct", "llama-4-scout-17b-16e-instruct",
"gpt-4.1-mini-2025-04-14", "gpt-4.1-nano-2025-04-14", "gemini-2.0-flash-001",
"gemini-2.0-flash-lite-preview-02-05", "gemma-3-27b-it", "claude-3-5-sonnet-20241022",
"gpt-4o-mini-2024-07-18", "gpt-4o-2024-11-20", "gpt-4o-2024-08-06",
"gpt-4o-2024-05-13", "mistral-small-3.1-24b-instruct-2503",
"claude-3-5-sonnet-20240620", "amazon-nova-pro-v1.0", "amazon-nova-lite-v1.0",
"qwen2.5-vl-32b-instruct", "qwen2.5-vl-72b-instruct", "gemini-1.5-pro-002",
"gemini-1.5-flash-002", "gemini-1.5-flash-8b-001", "gemini-1.5-pro-001",
"gemini-1.5-flash-001", "pixtral-large-2411", "step-1o-vision-32k-highres",
"claude-3-haiku-20240307", "claude-3-sonnet-20240229", "claude-3-opus-20240229",
"qwen-vl-max-1119", "qwen-vl-max-0809", "reka-core-20240904",
"reka-flash-20240904", "c4ai-aya-vision-32b", "pixtral-12b-2409"
]
model_aliases = {
# Existing aliases
"claude-3.7-sonnet": "claude-3-7-sonnet-20250219",
"claude-3.7-sonnet-thinking": "claude-3-7-sonnet-20250219-thinking-32k",
"gpt-4o": ["chatgpt-4o-latest-20250326", "chatgpt-4o-latest-20250129", "chatgpt-4o-latest-20241120", "chatgpt-4o-latest-20240903", "chatgpt-4o-latest-20240808", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", "gpt-4o-2024-11-20"],
"grok-3": ["early-grok-3", "grok-3-preview-02-24",],
"gemini-2.0-flash-thinking": ["gemini-2.0-flash-thinking-exp-01-21", "gemini-2.0-flash-thinking-exp-1219",],
"gemini-2.0-pro": "gemini-2.0-pro-exp-02-05",
#"gemini-exp": ["gemini-exp-1206", "gemini-exp-1121", "gemini-exp-1114",],
#"deepseek-r1": "deepseek-r1",
"gemini-2.0-flash": ["gemini-2.0-flash-001", "gemini-2.0-flash-exp", "gemini-2.0-flash-lite-preview-02-05",],
"o1": ["o1-2024-12-17", "o1-preview",],
"qwen-2.5-max": "qwen2.5-max",
"o3-mini": ["o3-mini-high", "o3-mini",],
"o3": "o3-2025-04-16",
"o4-mini": "o4-mini-2025-04-16",
"deepseek-v3": ["deepseek-v3", "deepseek-v3-0324"],
#"deepseek-v3-0324": "deepseek-v3-0324",
"qwen-plus": ["qwen-plus-0125", "qwen-plus-0828", "qwen-plus-0125-exp"],
"glm-4-plus": ["glm-4-plus-0111", "glm-4-plus",],
#"step-2": ["step-2-16k-exp-202412", "step-2-16k-202502"],
#"step-1o": "step-1o-vision-32k-highres",
#"o1-mini": "o1-mini",
"gemini-1.5-pro": ["gemini-1.5-pro-002", "gemini-1.5-pro-exp-0827", "gemini-1.5-pro-exp-0801", "gemini-1.5-pro-001", "gemini-1.5-pro-api-0409-preview",],
"grok-2": "grok-2-2024-08-13",
#"yi": "yi-lightning",
"claude-3.5-sonnet": ["claude-3-5-sonnet-20241022", "claude-3-5-sonnet-20240620"],
"qwen-2.5-plus": "qwen2.5-plus-1127",
"deepseek-v2.5": ["deepseek-v2.5-1210", "deepseek-v2.5",],
#"athene-v2": "athene-v2-chat",
"gpt-4o-mini": "gpt-4o-mini-2024-07-18",
"gemini-1.5-flash": ["gemini-1.5-flash-002", "gemini-1.5-flash-exp-0827", "gemini-1.5-flash-001", "gemini-1.5-flash-8b-001", "gemini-1.5-flash-8b-exp-0827"],
"llama-3.1-405b": ["llama-3.1-405b-instruct-bf16", "llama-3.1-405b-instruct-fp8",],
"nemotron-70b": "llama-3.1-nemotron-70b-instruct",
#"gemini-advanced": "gemini-advanced-0514",
"grok-2-mini": "grok-2-mini-2024-08-13",
#"yi-lite": "yi-lightning-lite",
"qwen-max": ["qwen-max-0919", "qwen-max-0428", "qwen-max-2025-01-25"],
"qwen-2.5-72b": "qwen2.5-72b-instruct",
"qwen-2.5-vl-32b": "qwen2.5-vl-32b-instruct",
"qwen-2.5-vl-72b": "qwen2.5-vl-72b-instruct",
"gpt-4-turbo": "gpt-4-turbo-2024-04-09",
"llama-3.3-70b": "llama-3.3-70b-instruct",
"nemotron-49b": "llama-3.3-nemotron-49b-super-v1",
"mistral-large": ["mistral-large-2407", "mistral-large-2411", "mistral-large-2402",],
"mistral-medium": ["mistral-medium", "mistral-medium-2505",],
"pixtral-large": "pixtral-large-2411",
#"pixtral-12b": "pixtral-12b-2409",
#"athene-70b": "athene-70b-0725",
"gpt-4": ["gpt-4-1106-preview", "gpt-4-0125-preview", "gpt-4-0314", "gpt-4-0613",],
"gpt-4.1": "gpt-4.1-2025-04-14",
"gpt-4.1-mini": "gpt-4.1-mini-2025-04-14",
"gpt-4.1-nano": "gpt-4.1-nano-2025-04-14",
"llama-3.1-70b": "llama-3.1-70b-instruct",
"nemotron-253b": "llama-3.1-nemotron-ultra-253b-v1",
"claude-3-opus": "claude-3-opus-20240229",
#"amazon-nova-pro-v1.0": "amazon-nova-pro-v1.0",
"tulu-3-70b": "llama-3.1-tulu-3-70b",
#"yi-large-preview": "yi-large-preview",
"claude-3.5-haiku": "claude-3-5-haiku-20241022",
"reka-core": ["reka-core-20240904", "reka-core-20240722", "reka-core-20240501",],
"gemma-2-27b": "gemma-2-27b-it",
"gemma-3-27b": "gemma-3-27b-it",
"gemma-3-12b": "gemma-3-12b-it",
"gemma-3-4b": "gemma-3-4b-it",
"deepseek-v2": "deepseek-v2-api-0628",
"qwen-2.5-coder-32b": "qwen2.5-coder-32b-instruct",
#"amazon-nova-lite-v1.0": "amazon-nova-lite-v1.0",
"gemma-2-9b": ["gemma-2-9b-it-simpo", "gemma-2-9b-it",],
"command-r-plus": ["command-r-plus-08-2024", "command-r-plus",],
"command-a": "command-a-03-2025",
"deepseek-coder-v2": ["deepseek-coder-v2-0724", "deepseek-coder-v2",],
#"yi-large": "yi-large",
"nemotron-51b": "llama-3.1-nemotron-51b-instruct",
"mistral-small-24b": "mistral-small-24b-instruct-2501",
"mistral-small-3.1-24b": "mistral-small-3.1-24b-instruct-2503",
#"c4ai-aya-expanse-32b": "c4ai-aya-expanse-32b",
#"c4ai-aya-vision-32b": "c4ai-aya-vision-32b",
"nemotron-4-340b": ["nemotron-4-340b-instruct", "nemotron-4-340b"],
#"bard-jan-24-gemini-pro": "bard-jan-24-gemini-pro",
"glm-4": ["glm-4-0520", "glm-4-0116",],
"llama-3-70b": "llama-3-70b-instruct",
"llama-4-maverick": ["llama-4-maverick-03-26-experimental", "llama-4-maverick-17b-128e-instruct"],
"llama-4-scout": "llama-4-scout-17b-16e-instruct",
"reka-flash": ["reka-flash-20240904", "reka-flash-20240722", "reka-flash-preview-20240611", "reka-flash-21b-20240226-online", "reka-flash-21b-20240226",],
"phi-4": "phi-4",
"claude-3-sonnet": "claude-3-sonnet-20240229",
#"amazon-nova-micro-v1.0": "amazon-nova-micro-v1.0",
#"hunyuan-standard-256k": "hunyuan-standard-256k",
#"hunyuan-turbos": ["hunyuan-turbos-20250416", "hunyuan-turbos-20250226"],
"qwen-2-72b": "qwen2-72b-instruct",
"qwen-3-235b": "qwen3-235b-a22b",
"qwen-3-30b": "qwen3-30b-a3b",
"qwen-3-32b": "qwen3-32b",
"tulu-3-8b": "llama-3.1-tulu-3-8b",
#"ministral-8b": "ministral-8b-2410",
#"c4ai-aya-expanse-8b": "c4ai-aya-expanse-8b",
"command-r": ["command-r-08-2024", "command-r",],
"codestral": "codestral-2405",
"claude-3-haiku": "claude-3-haiku-20240307",
#"jamba-1.5-mini": "jamba-1.5-mini",
"llama-3.1-8b": "llama-3.1-8b-instruct",
"qwen-1.5-110b": "qwen1.5-110b-chat",
#"yi-1.5-34b-chat": "yi-1.5-34b-chat",
"qwq-32b": "qwq-32b-preview",
#"p2l-router-7b": "p2l-router-7b",
"llama-3-8b": "llama-3-8b-instruct",
#"internlm2_5-20b-chat": "internlm2_5-20b-chat",
#"olmo-2-32b": "olmo-2-0325-32b-instruct",
#"claude-1": "claude-1",
"qwen-1.5-72b": "qwen1.5-72b-chat",
"gemma-2-2b": "gemma-2-2b-it",
#"jamba-1.5-large": "jamba-1.5-large",
#"eureka-chatbot": "eureka-chatbot",
"qwen-vl-max": ["qwen-vl-max-1119", "qwen-vl-max-0809"],
"gemini-2.5-pro": "gemini-2.5-pro-preview-05-06",
"gemini-2.5-flash": "gemini-2.5-flash-preview-04-17",
"mixtral-8x22b": "mixtral-8x22b-instruct-v0.1",
#"claude-2": ["claude-2.0", "claude-2.1"],
#"gemini-pro-dev": "gemini-pro-dev-api",
#"zephyr-orpo-141b": "zephyr-orpo-141b-A35b-v0.1",
"qwen-1.5-32b": "qwen1.5-32b-chat",
"qwen-1.5-14b": "qwen1.5-14b-chat",
"qwen-1.5-7b": "qwen1.5-7b-chat",
"qwen-1.5-4b": "qwen1.5-4b-chat",
"mistral-next": "mistral-next",
"phi-3-medium": "phi-3-medium-4k-instruct",
"phi-3-small": "phi-3-small-8k-instruct",
"phi-3-mini": ["phi-3-mini-4k-instruct-june-2024", "phi-3-mini-4k-instruct", "phi-3-mini-128k-instruct"],
"tulu-2-70b": "tulu-2-dpo-70b",
"llama-2-70b": ["llama-2-70b-chat", "llama2-70b-steerlm-chat"],
"llama-2-13b": "llama-2-13b-chat",
"llama-2-7b": "llama-2-7b-chat",
#"openchat-3.5": ["openchat-3.5-0106", "openchat-3.5"],
#"vicuna-33b": "vicuna-33b",
#"vicuna-13b": "vicuna-13b",
#"vicuna-7b": "vicuna-7b",
#"snowflake-arctic": "snowflake-arctic-instruct",
#"starling-lm-7b": ["starling-lm-7b-beta", "starling-lm-7b-alpha"],
"hermes-2-dpo": "nous-hermes-2-mixtral-8x7b-dpo",
#"pplx-70b-online": "pplx-70b-online",
"pplx-7b-online":"pplx-7b-online",
"deepseek-67b": "deepseek-llm-67b-chat",
"openhermes-2.5-7b": "openhermes-2.5-mistral-7b",
"mistral-7b": ["mistral-7b-instruct-v0.2", "mistral-7b-instruct"],
#"dolphin-2.2.1-7b": "dolphin-2.2.1-mistral-7b",
#"solar-10.7b": "solar-10.7b-instruct-v1.0",
#"wizardlm-70b": "wizardlm-70b",
#"wizardlm-13b": "wizardlm-13b",
"llama-3.2-3b": "llama-3.2-3b-instruct",
"llama-3.2-1b": "llama-3.2-1b-instruct",
#"zephyr-7b": ["zephyr-7b-beta", "zephyr-7b-alpha"],
#"smollm-2-1.7b": "smollm2-1.7b-instruct",
#"mpt-30b": "mpt-30b-chat",
#"mpt-7b": "mpt-7b-chat",
"codellama-34b": "codellama-34b-instruct",
"codellama-70b": "codellama-70b-instruct",
"qwen-14b": "qwen-14b-chat",
#"falcon-180b": "falcon-180b-chat",
#"guanaco-33b": "guanaco-33b",
#"stripedhyena-nous-7b": "stripedhyena-nous-7b",
#"olmo-7b": "olmo-7b-instruct",
#"palm-2": "palm-2",
"gpt-3.5-turbo": ["gpt-3.5-turbo-0613", "gpt-3.5-turbo-0314", "gpt-3.5-turbo-0125", "gpt-3.5-turbo-1106"],
#"glm-3-6b": "chatglm3-6b",
#"glm-2-6b": "chatglm2-6b",
#"glm-6b": "chatglm-6b",
#"gpt4all-13b": "gpt4all-13b-snoozy",
"mixtral-8x7b": "mixtral-8x7b-instruct-v0.1",
#"yi-34b": "yi-34b-chat",
#"claude": "claude-instant-1",
#"gemini-pro": "gemini-pro",
"dbrx-instruct": "dbrx-instruct-preview",
#"rwkv-4-14b": "RWKV-4-Raven-14B",
#"alpaca-13b": "alpaca-13b",
#"oasst-pythia-12b": "oasst-pythia-12b",
#"fastchat-t5-3b": "fastchat-t5-3b",
#"stablelm-7b": "stablelm-tuned-alpha-7b",
#"dolly-v2-12b": "dolly-v2-12b",
#"llama-13b": "llama-13b",
}
@classmethod
def get_models(cls):
"""Get models with improved fallback sources"""
try:
# Try to fetch models from Google Storage first
url = "https://storage.googleapis.com/public-arena-no-cors/p2l-explorer/data/overall/arena.json"
import requests
data = requests.get(url, timeout=5).json()
leaderboard_models = [model[0] for model in data["leaderboard"]]
# Combine models from all sources and remove duplicates
all_models = list(set(leaderboard_models + cls.har_models + cls.js_models))
if all_models:
# Ensure default model is at index 0
if cls.default_model in all_models:
all_models.remove(cls.default_model)
all_models.insert(0, cls.default_model)
cls.models = all_models
return cls.models
except Exception as e:
# Log the error and fall back to alternative sources
print(f"Failed to fetch models from Google Storage: {str(e)}", file=sys.stderr)
# First fallback: Use combined har_models and js_models
combined_models = list(set(cls.har_models + cls.js_models))
if combined_models:
if cls.default_model in combined_models:
combined_models.remove(cls.default_model)
combined_models.insert(0, cls.default_model)
cls.models = combined_models
return cls.models
# Second fallback: Use vision_models
if not cls.models:
models = cls.vision_models.copy()
if cls.default_model not in models:
models.insert(0, cls.default_model)
cls.models = models
return cls.models
@classmethod
def get_model(cls, model: str) -> str:
"""Get the internal model name from the user-provided model name."""
if not model:
return cls.default_model
# Check if the model exists directly in our models list
if model in cls.models:
return model
# Check if there's an alias for this model
if model in cls.model_aliases:
alias = cls.model_aliases[model]
# If the alias is a list, randomly select one of the options
if isinstance(alias, list):
selected_model = random.choice(alias)
debug.log(f"LMArena: Selected model '{selected_model}' from alias '{model}'")
return selected_model
debug.log(f"LMArena: Using model '{alias}' for alias '{model}'")
return alias
raise ModelNotFoundError(f"Model {model} not found")
@classmethod
def _build_payloads(cls, model_id: str, session_hash: str, text: str, files: list, max_tokens: int, temperature: float, top_p: float):
"""Build payloads for new conversations"""
first_payload = {
"data": [
None,
model_id,
{"text": text, "files": files},
{
"text_models": [model_id],
"all_text_models": [model_id],
"vision_models": [],
"all_vision_models": [],
"image_gen_models": [],
"all_image_gen_models": [],
"search_models": [],
"all_search_models": [],
"models": [model_id],
"all_models": [model_id],
"arena_type": "text-arena"
}
],
"event_data": None,
"fn_index": 119,
"trigger_id": 159,
"session_hash": session_hash
}
second_payload = {
"data": [],
"event_data": None,
"fn_index": 120,
"trigger_id": 159,
"session_hash": session_hash
}
third_payload = {
"data": [None, temperature, top_p, max_tokens],
"event_data": None,
"fn_index": 121,
"trigger_id": 159,
"session_hash": session_hash
}
return first_payload, second_payload, third_payload
@classmethod
def _build_continuation_payloads(cls, model_id: str, session_hash: str, text: str, max_tokens: int, temperature: float, top_p: float):
"""Renamed from _build_second_payloads for clarity"""
first_payload = {
"data":[None,model_id,text,{
"text_models":[model_id],
"all_text_models":[model_id],
"vision_models":[],
"image_gen_models":[],
"all_image_gen_models":[],
"search_models":[],
"all_search_models":[],
"models":[model_id],
"all_models":[model_id],
"arena_type":"text-arena"}],
"event_data": None,
"fn_index": 122,
"trigger_id": 157,
"session_hash": session_hash
}
second_payload = {
"data": [],
"event_data": None,
"fn_index": 123,
"trigger_id": 157,
"session_hash": session_hash
}
third_payload = {
"data": [None, temperature, top_p, max_tokens],
"event_data": None,
"fn_index": 124,
"trigger_id": 157,
"session_hash": session_hash
}
return first_payload, second_payload, third_payload
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
media: MediaListType = None,
max_tokens: int = 2048,
temperature: float = 0.7,
top_p: float = 1,
conversation: JsonConversation = None,
return_conversation: bool = True,
**kwargs
) -> AsyncResult:
async def read_response(response: StreamResponse):
returned_data = ""
async for line in response.iter_lines():
if not line.startswith(b"data: "):
continue
try:
json_data = json.loads(line[6:])
# Process data based on message type
if json_data.get("msg") == "process_generating":
output_data = json_data.get("output", {}).get("data", [])
if len(output_data) > 1 and output_data[1]:
# Extract content from various response formats
data = output_data[1]
content = None
if isinstance(data, list):
if data and data[0] == "replace" and len(data) > 2:
content = data[2]
elif data and isinstance(data[0], list) and len(data[0]) > 2:
content = data[0][2]
if content:
# Clean up content
if content.endswith(""):
content = content[:-2]
if content == '<span class="cursor"></span> ' or content == 'update':
continue
if content.startswith(returned_data):
content = content[len(returned_data):]
if content:
returned_data += content
yield content
# Process completed messages may also contain content
elif json_data.get("msg") == "process_completed":
output_data = json_data.get("output", {}).get("data", [])
if len(output_data) > 1 and isinstance(output_data[1], list):
for item in output_data[1]:
if isinstance(item, list) and len(item) > 1:
content = item[1]
if content and content != returned_data and content != '<span class="cursor"></span> ':
if content.endswith(""):
content = content[:-2]
new_content = content
if content.startswith(returned_data):
new_content = content[len(returned_data):]
if new_content:
returned_data = content
yield new_content
except Exception as e:
print(f"Error parsing response: {str(e)}", file=sys.stderr)
continue
if model in cls.model_aliases:
model = cls.model_aliases[model]
prompt = get_last_user_message(messages)
async with StreamSession(impersonate="chrome") as session:
# Handle new conversation
if conversation is None:
conversation = JsonConversation(session_hash=str(uuid.uuid4()).replace("-", ""))
media_objects = []
# Process media if present
if media:
media = list(merge_media(media, messages))
if media:
data = FormData()
for i in range(len(media)):
media[i] = (to_bytes(media[i][0]), media[i][1])
for image, image_name in media:
data.add_field(f"files", image, filename=image_name)
# Upload media files
async with session.post(f"{cls.url}/upload", params={"upload_id": conversation.session_hash}, data=data) as response:
await raise_for_status(response)
image_files = await response.json()
# Format media objects for API request
media_objects = [{
"path": image_file,
"url": f"{cls.url}/file={image_file}",
"orig_name": media[i][1],
"size": len(media[i][0]),
"mime_type": is_accepted_format(media[i][0]),
"meta": {
"_type": "gradio.FileData"
}
} for i, image_file in enumerate(image_files)]
# Build payloads for new conversation
first_payload, second_payload, third_payload = cls._build_payloads(
model, conversation.session_hash, prompt, media_objects,
max_tokens, temperature, top_p
)
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
}
# Send the three required requests
async with session.post(f"{cls.url}{cls.api_endpoint}", json=first_payload, proxy=proxy, headers=headers) as response:
await raise_for_status(response)
async with session.post(f"{cls.url}{cls.api_endpoint}", json=second_payload, proxy=proxy, headers=headers) as response:
await raise_for_status(response)
async with session.post(f"{cls.url}{cls.api_endpoint}", json=third_payload, proxy=proxy, headers=headers) as response:
await raise_for_status(response)
# Stream the response
stream_url = f"{cls.url}/queue/data?session_hash={conversation.session_hash}"
async with session.get(stream_url, headers={"Accept": "text/event-stream"}, proxy=proxy) as response:
await raise_for_status(response)
count = 0
async for chunk in read_response(response):
count += 1
yield chunk
if count == 0:
raise RuntimeError("No response from server.")
# Return conversation object for future interactions
if return_conversation:
yield conversation
# Yield finish reason if we hit token limit
if count >= max_tokens:
yield FinishReason("length")
# Handle continuation of existing conversation
else:
# Build payloads for conversation continuation
first_payload, second_payload, third_payload = cls._build_continuation_payloads(
model, conversation.session_hash, prompt, max_tokens, temperature, top_p
)
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
}
# Send the three required requests
async with session.post(f"{cls.url}{cls.api_endpoint}", json=first_payload, proxy=proxy, headers=headers) as response:
await raise_for_status(response)
async with session.post(f"{cls.url}{cls.api_endpoint}", json=second_payload, proxy=proxy, headers=headers) as response:
await raise_for_status(response)
async with session.post(f"{cls.url}{cls.api_endpoint}", json=third_payload, proxy=proxy, headers=headers) as response:
await raise_for_status(response)
# Stream the response
stream_url = f"{cls.url}/queue/data?session_hash={conversation.session_hash}"
async with session.get(stream_url, headers={"Accept": "text/event-stream"}, proxy=proxy) as response:
await raise_for_status(response)
count = 0
async for chunk in read_response(response):
count += 1
yield chunk
if count == 0:
raise RuntimeError("No response from server.")
# Yield finish reason if we hit token limit
if count >= max_tokens:
yield FinishReason("length")


@ -1,371 +0,0 @@
from __future__ import annotations
import json
import uuid
import requests
from ..typing import AsyncResult, Messages, MediaListType
from ..requests import StreamSession, FormData, raise_for_status
from ..providers.response import FinishReason, JsonConversation
from ..tools.media import merge_media
from ..image import to_bytes, is_accepted_format
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin
from .helper import get_last_user_message
from .. import debug
class LMArenaProvider(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
label = "LM Arena"
url = "https://lmarena.ai"
api_endpoint = "/queue/join?"
working = False
default_model = "gpt-4o"
model_aliases = {default_model: "chatgpt-4o-latest-20250326"}
models = [
default_model,
"o3-2025-04-16",
"o4-mini-2025-04-16",
"gpt-4.1-2025-04-14",
"gemini-2.5-pro-exp-03-25",
"llama-4-maverick-03-26-experimental",
"grok-3-preview-02-24",
"claude-3-7-sonnet-20250219",
"claude-3-7-sonnet-20250219-thinking-32k",
"deepseek-v3-0324",
"llama-4-maverick-17b-128e-instruct",
"gpt-4.1-mini-2025-04-14",
"gpt-4.1-nano-2025-04-14",
"gemini-2.0-flash-thinking-exp-01-21",
"gemini-2.0-flash-001",
"gemini-2.0-flash-lite-preview-02-05",
"gemma-3-27b-it",
"gemma-3-12b-it",
"gemma-3-4b-it",
"deepseek-r1",
"claude-3-5-sonnet-20241022",
"o3-mini",
"llama-3.3-70b-instruct",
"gpt-4o-mini-2024-07-18",
"gpt-4o-2024-11-20",
"gpt-4o-2024-08-06",
"gpt-4o-2024-05-13",
"command-a-03-2025",
"qwq-32b",
"p2l-router-7b",
"claude-3-5-haiku-20241022",
"claude-3-5-sonnet-20240620",
"doubao-1.5-pro-32k-250115",
"doubao-1.5-vision-pro-32k-250115",
"mistral-small-24b-instruct-2501",
"phi-4",
"amazon-nova-pro-v1.0",
"amazon-nova-lite-v1.0",
"amazon-nova-micro-v1.0",
"cobalt-exp-beta-v3",
"cobalt-exp-beta-v4",
"qwen-max-2025-01-25",
"qwen-plus-0125-exp",
"qwen2.5-vl-32b-instruct",
"qwen2.5-vl-72b-instruct",
"gemini-1.5-pro-002",
"gemini-1.5-flash-002",
"gemini-1.5-flash-8b-001",
"gemini-1.5-pro-001",
"gemini-1.5-flash-001",
"llama-3.1-405b-instruct-bf16",
"llama-3.3-nemotron-49b-super-v1",
"llama-3.1-nemotron-ultra-253b-v1",
"llama-3.1-nemotron-70b-instruct",
"llama-3.1-70b-instruct",
"llama-3.1-8b-instruct",
"hunyuan-standard-2025-02-10",
"hunyuan-large-2025-02-10",
"hunyuan-standard-vision-2024-12-31",
"hunyuan-turbo-0110",
"hunyuan-turbos-20250226",
"mistral-large-2411",
"pixtral-large-2411",
"mistral-large-2407",
"llama-3.1-nemotron-51b-instruct",
"granite-3.1-8b-instruct",
"granite-3.1-2b-instruct",
"step-2-16k-exp-202412",
"step-2-16k-202502",
"step-1o-vision-32k-highres",
"yi-lightning",
"glm-4-plus",
"glm-4-plus-0111",
"jamba-1.5-large",
"jamba-1.5-mini",
"gemma-2-27b-it",
"gemma-2-9b-it",
"gemma-2-2b-it",
"eureka-chatbot",
"claude-3-haiku-20240307",
"claude-3-sonnet-20240229",
"claude-3-opus-20240229",
"nemotron-4-340b",
"llama-3-70b-instruct",
"llama-3-8b-instruct",
"qwen2.5-plus-1127",
"qwen2.5-coder-32b-instruct",
"qwen2.5-72b-instruct",
"qwen-max-0919",
"qwen-vl-max-1119",
"qwen-vl-max-0809",
"llama-3.1-tulu-3-70b",
"olmo-2-0325-32b-instruct",
"gpt-3.5-turbo-0125",
"reka-core-20240904",
"reka-flash-20240904",
"c4ai-aya-expanse-32b",
"c4ai-aya-expanse-8b",
"c4ai-aya-vision-32b",
"command-r-plus-08-2024",
"command-r-08-2024",
"codestral-2405",
"mixtral-8x22b-instruct-v0.1",
"mixtral-8x7b-instruct-v0.1",
"pixtral-12b-2409",
"ministral-8b-2410"
]
vision_models = [
"o3-2025-04-16",
"o4-mini-2025-04-16",
"gpt-4.1-2025-04-14",
"gemini-2.5-pro-exp-03-25",
"claude-3-7-sonnet-20250219",
"claude-3-7-sonnet-20250219-thinking-32k",
"llama-4-maverick-17b-128e-instruct",
"gpt-4.1-mini-2025-04-14",
"gpt-4.1-nano-2025-04-14",
"gemini-2.0-flash-thinking-exp-01-21",
"gemini-2.0-flash-001",
"gemini-2.0-flash-lite-preview-02-05",
"claude-3-5-sonnet-20241022",
"gpt-4o-mini-2024-07-18",
"gpt-4o-2024-11-20",
"gpt-4o-2024-08-06",
"gpt-4o-2024-05-13",
"claude-3-5-sonnet-20240620",
"doubao-1.5-vision-pro-32k-250115",
"amazon-nova-pro-v1.0",
"amazon-nova-lite-v1.0",
"qwen2.5-vl-32b-instruct",
"qwen2.5-vl-72b-instruct",
"gemini-1.5-pro-002",
"gemini-1.5-flash-002",
"gemini-1.5-flash-8b-001",
"gemini-1.5-pro-001",
"gemini-1.5-flash-001",
"hunyuan-standard-vision-2024-12-31",
"pixtral-large-2411",
"step-1o-vision-32k-highres",
"claude-3-haiku-20240307",
"claude-3-sonnet-20240229",
"claude-3-opus-20240229",
"qwen-vl-max-1119",
"qwen-vl-max-0809",
"reka-core-20240904",
"reka-flash-20240904",
"c4ai-aya-vision-32b",
"pixtral-12b-2409"
]
_args: dict = None
@classmethod
def get_models(cls) -> list[str]:
if not cls.models:
url = "https://storage.googleapis.com/public-arena-no-cors/p2l-explorer/data/overall/arena.json"
data = requests.get(url).json()
cls.models = [model[0] for model in data["leaderboard"]]
return cls.models
@classmethod
def _build_payloads(cls, model_id: str, session_hash: str, text: str, files: list, max_tokens: int, temperature: float, top_p: float):
first_payload = {
"data": [
None,
model_id,
{"text": text, "files": files},
{
"text_models": [model_id],
"all_text_models": [model_id],
"vision_models": [],
"all_vision_models": [],
"image_gen_models": [],
"all_image_gen_models": [],
"search_models": [],
"all_search_models": [],
"models": [model_id],
"all_models": [model_id],
"arena_type": "text-arena"
}
],
"event_data": None,
"fn_index": 117,
"trigger_id": 159,
"session_hash": session_hash
}
second_payload = {
"data": [],
"event_data": None,
"fn_index": 118,
"trigger_id": 159,
"session_hash": session_hash
}
third_payload = {
"data": [None, temperature, top_p, max_tokens],
"event_data": None,
"fn_index": 119,
"trigger_id": 159,
"session_hash": session_hash
}
return first_payload, second_payload, third_payload
@classmethod
def _build_second_payloads(cls, model_id: str, session_hash: str, text: str, max_tokens: int, temperature: float, top_p: float):
first_payload = {
"data":[None,model_id,text,{
"text_models":[model_id],
"all_text_models":[model_id],
"vision_models":[],
"image_gen_models":[],
"all_image_gen_models":[],
"search_models":[],
"all_search_models":[],
"models":[model_id],
"all_models":[model_id],
"arena_type":"text-arena"}],
"event_data": None,
"fn_index": 120,
"trigger_id": 157,
"session_hash": session_hash
}
second_payload = {
"data": [],
"event_data": None,
"fn_index": 121,
"trigger_id": 157,
"session_hash": session_hash
}
third_payload = {
"data": [None, temperature, top_p, max_tokens],
"event_data": None,
"fn_index": 122,
"trigger_id": 157,
"session_hash": session_hash
}
return first_payload, second_payload, third_payload
@classmethod
async def create_async_generator(
cls, model: str, messages: Messages,
media: MediaListType = None,
conversation: JsonConversation = None,
return_conversation: bool = True,
max_tokens: int = 2048,
temperature: float = 0.7,
top_p: float = 1,
proxy: str = None,
**kwargs
) -> AsyncResult:
if not model:
model = cls.default_model
if model in cls.model_aliases:
model = cls.model_aliases[model]
prompt = get_last_user_message(messages)
new_conversation = False
if conversation is None:
conversation = JsonConversation(session_hash=str(uuid.uuid4()).replace("-", ""))
new_conversation = True
async with StreamSession(impersonate="chrome") as session:
if new_conversation:
media = list(merge_media(media, messages))
if media:
data = FormData()
for i in range(len(media)):
media[i] = (to_bytes(media[i][0]), media[i][1])
for image, image_name in media:
data.add_field(f"files", image, filename=image_name)
async with session.post(f"{cls.url}/upload", params={"upload_id": conversation.session_hash}, data=data) as response:
await raise_for_status(response)
image_files = await response.json()
media = [{
"path": image_file,
"url": f"{cls.url}/file={image_file}",
"orig_name": media[i][1],
"size": len(media[i][0]),
"mime_type": is_accepted_format(media[i][0]),
"meta": {
"_type": "gradio.FileData"
}
} for i, image_file in enumerate(image_files)]
first_payload, second_payload, third_payload = cls._build_payloads(model, conversation.session_hash, prompt, media, max_tokens, temperature, top_p)
else:
first_payload, second_payload, third_payload = cls._build_second_payloads(model, conversation.session_hash, prompt, max_tokens, temperature, top_p)
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
}
# POST 1
async with session.post(f"{cls.url}{cls.api_endpoint}", json=first_payload, proxy=proxy, headers=headers) as response:
await raise_for_status(response)
# POST 2
async with session.post(f"{cls.url}{cls.api_endpoint}", json=second_payload, proxy=proxy, headers=headers) as response:
await raise_for_status(response)
# POST 3
async with session.post(f"{cls.url}{cls.api_endpoint}", json=third_payload, proxy=proxy, headers=headers) as response:
await raise_for_status(response)
# Long stream GET
async def sse_stream():
stream_url = f"{cls.url}/queue/data?session_hash={conversation.session_hash}"
async with session.get(stream_url, headers={"Accept": "text/event-stream"}, proxy=proxy) as response:
await raise_for_status(response)
text_position = 0
count = 0
async for line in response.iter_lines():
if line.startswith(b"data: "):
try:
msg = json.loads(line[6:])
except Exception as e:
raise RuntimeError(f"Failed to decode JSON from stream: {line}", e)
if msg.get("msg") == "process_generating":
data = msg["output"]["data"][1]
if data:
data = data[0]
if len(data) > 2:
if isinstance(data[2], list):
data[2] = data[2][-1]
content = data[2][text_position:]
if content.endswith(""):
content = content[:-2]
if content:
count += 1
yield count, content
text_position += len(content)
elif msg.get("msg") == "close_stream":
break
elif msg.get("msg") not in ("process_completed", "process_starts", "estimation"):
debug.log(f"Unexpected message: {msg}")
count = 0
async for count, chunk in sse_stream():
yield chunk
if count == 0:
raise RuntimeError("No response from server.")
if return_conversation:
yield conversation
if count == max_tokens:
yield FinishReason("length")


@ -37,6 +37,7 @@ class LambdaChat(AsyncGeneratorProvider, ProviderModelMixin):
"deepseek-v3": default_model,
"hermes-3-405b": ["hermes3-405b-fp8-128k", "hermes-3-llama-3.1-405b-fp8"],
"nemotron-70b": "llama3.1-nemotron-70b-instruct",
"llama-3.3-70b": "llama3.3-70b-instruct-fp8",
"qwen-2.5-coder-32b": "qwen25-coder-32b-instruct"
}


@ -96,13 +96,12 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
"gpt-4.1": "openai-large",
"o4-mini": "openai-reasoning",
"gpt-4.1-mini": "openai",
"command-r-plus-08-2024": "command-r",
"command-r-plus": "command-r",
"gemini-2.5-flash": "gemini",
"gemini-2.0-flash-thinking": "gemini-thinking",
"qwen-2.5-coder-32b": "qwen-coder",
"llama-3.3-70b": "llama",
"llama-4-scout": "llamascout",
"llama-4-scout-17b": "llamascout",
"mistral-small-3.1-24b": "mistral",
"deepseek-r1": "deepseek-reasoning-large",
"deepseek-r1-distill-llama-70b": "deepseek-reasoning-large",
@ -123,12 +122,14 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
"deepseek-v3": "deepseek",
"deepseek-v3-0324": "deepseek",
#"bidara": "bidara", # Personas
"grok-3-mini": "grok",
### Audio Models ###
"gpt-4o-mini-audio": "openai-audio",
### Image Models ###
"sdxl-turbo": "turbo",
"gpt-image": "gptimage",
}
@classmethod


@ -51,15 +51,13 @@ from .FreeGpt import FreeGpt
from .GizAI import GizAI
from .ImageLabs import ImageLabs
from .LambdaChat import LambdaChat
from .Liaobots import Liaobots
from .LMArenaProvider import LMArenaProvider
from .LMArena import LMArena
from .OIVSCodeSer2 import OIVSCodeSer2
from .OIVSCodeSer5 import OIVSCodeSer5
from .OIVSCodeSer0501 import OIVSCodeSer0501
from .PerplexityLabs import PerplexityLabs
from .Pi import Pi
from .Pizzagpt import Pizzagpt
from .PuterJS import PuterJS
from .PollinationsAI import PollinationsAI
from .PollinationsImage import PollinationsImage
from .TeachAnything import TeachAnything


@ -1,51 +0,0 @@
from __future__ import annotations
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
class Acytoo(AsyncGeneratorProvider):
url = 'https://chat.acytoo.com'
working = False
supports_message_history = True
supports_gpt_35_turbo = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
async with ClientSession(
headers=_create_header()
) as session:
async with session.post(
f'{cls.url}/api/completions',
proxy=proxy,
json=_create_payload(messages, **kwargs)
) as response:
response.raise_for_status()
async for stream in response.content.iter_any():
if stream:
yield stream.decode()
def _create_header():
return {
'accept': '*/*',
'content-type': 'application/json',
}
def _create_payload(messages: Messages, temperature: float = 0.5, **kwargs):
return {
'key' : '',
'model' : 'gpt-3.5-turbo',
'messages' : messages,
'temperature' : temperature,
'password' : ''
}


@ -1,46 +0,0 @@
from __future__ import annotations
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
class AiAsk(AsyncGeneratorProvider):
url = "https://e.aiask.me"
supports_message_history = True
supports_gpt_35_turbo = True
working = False
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"accept": "application/json, text/plain, */*",
"origin": cls.url,
"referer": f"{cls.url}/chat",
}
async with ClientSession(headers=headers) as session:
data = {
"continuous": True,
"id": "fRMSQtuHl91A4De9cCvKD",
"list": messages,
"models": "0",
"prompt": "",
"temperature": kwargs.get("temperature", 0.5),
"title": "",
}
buffer = ""
rate_limit = "您的免费额度不够使用这个模型啦,请点击右上角登录继续使用!"
async with session.post(f"{cls.url}/v1/chat/gpt/", json=data, proxy=proxy) as response:
response.raise_for_status()
async for chunk in response.content.iter_any():
buffer += chunk.decode()
if not rate_limit.startswith(buffer):
yield buffer
buffer = ""
elif buffer == rate_limit:
raise RuntimeError("Rate limit reached")


@ -1,39 +0,0 @@
from __future__ import annotations
import requests
from ...typing import Any, CreateResult, Messages
from ..base_provider import AbstractProvider
class AiService(AbstractProvider):
url = "https://aiservice.vercel.app/"
working = False
supports_gpt_35_turbo = True
@staticmethod
def create_completion(
model: str,
messages: Messages,
stream: bool,
**kwargs: Any,
) -> CreateResult:
base = (
"\n".join(
f"{message['role']}: {message['content']}" for message in messages
)
+ "\nassistant: "
)
headers = {
"accept": "*/*",
"content-type": "text/plain;charset=UTF-8",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"Referer": "https://aiservice.vercel.app/chat",
}
data = {"input": base}
url = "https://aiservice.vercel.app/api/chat/answer"
response = requests.post(url, headers=headers, json=data)
response.raise_for_status()
yield response.json()["data"]


@ -1,46 +0,0 @@
from __future__ import annotations
import time
import hashlib
from ...typing import AsyncResult, Messages
from ...requests import StreamSession
from ..base_provider import AsyncGeneratorProvider
class Aibn(AsyncGeneratorProvider):
url = "https://aibn.cc"
working = False
supports_message_history = True
supports_gpt_35_turbo = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
timeout: int = 120,
**kwargs
) -> AsyncResult:
async with StreamSession(
impersonate="chrome107",
proxies={"https": proxy},
timeout=timeout
) as session:
timestamp = int(time.time())
data = {
"messages": messages,
"pass": None,
"sign": generate_signature(timestamp, messages[-1]["content"]),
"time": timestamp
}
async with session.post(f"{cls.url}/api/generate", json=data) as response:
response.raise_for_status()
async for chunk in response.iter_content():
yield chunk.decode()
def generate_signature(timestamp: int, message: str, secret: str = "undefined"):
data = f"{timestamp}:{message}:{secret}"
return hashlib.sha256(data.encode()).hexdigest()


@ -1,73 +0,0 @@
from __future__ import annotations
import requests
import json
from ..base_provider import AbstractProvider
from ...typing import CreateResult, Messages
# to recreate this easily, send a post request to https://chat.aivvm.com/api/models
models = {
'gpt-3.5-turbo': {'id': 'gpt-3.5-turbo', 'name': 'GPT-3.5'},
'gpt-3.5-turbo-0613': {'id': 'gpt-3.5-turbo-0613', 'name': 'GPT-3.5-0613'},
'gpt-3.5-turbo-16k': {'id': 'gpt-3.5-turbo-16k', 'name': 'GPT-3.5-16K'},
'gpt-3.5-turbo-16k-0613': {'id': 'gpt-3.5-turbo-16k-0613', 'name': 'GPT-3.5-16K-0613'},
'gpt-4': {'id': 'gpt-4', 'name': 'GPT-4'},
'gpt-4-0613': {'id': 'gpt-4-0613', 'name': 'GPT-4-0613'},
'gpt-4-32k': {'id': 'gpt-4-32k', 'name': 'GPT-4-32K'},
'gpt-4-32k-0613': {'id': 'gpt-4-32k-0613', 'name': 'GPT-4-32K-0613'},
}
class Aivvm(AbstractProvider):
url = 'https://chat.aivvm.com'
supports_stream = True
working = False
supports_gpt_35_turbo = True
supports_gpt_4 = True
@classmethod
def create_completion(cls,
model: str,
messages: Messages,
stream: bool,
**kwargs
) -> CreateResult:
if not model:
model = "gpt-3.5-turbo"
elif model not in models:
raise ValueError(f"Model is not supported: {model}")
json_data = {
"model" : models[model],
"messages" : messages,
"key" : "",
"prompt" : kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."),
"temperature" : kwargs.get("temperature", 0.7)
}
data = json.dumps(json_data)
headers = {
"accept" : "text/event-stream",
"accept-language" : "en-US,en;q=0.9",
"content-type" : "application/json",
"content-length" : str(len(data)),
"sec-ch-ua" : "\"Chrome\";v=\"117\", \"Not;A=Brand\";v=\"8\", \"Chromium\";v=\"117\"",
"sec-ch-ua-mobile" : "?0",
"sec-ch-ua-platform": "\"Windows\"",
"sec-fetch-dest" : "empty",
"sec-fetch-mode" : "cors",
"sec-fetch-site" : "same-origin",
"sec-gpc" : "1",
"referrer" : "https://chat.aivvm.com/",
"user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
}
response = requests.post("https://chat.aivvm.com/api/chat", headers=headers, data=data, stream=True)
response.raise_for_status()
for chunk in response.iter_content(chunk_size=4096):
try:
yield chunk.decode("utf-8")
except UnicodeDecodeError:
yield chunk.decode("unicode-escape")


@ -1,78 +0,0 @@
from __future__ import annotations
import secrets
import uuid
import json
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
from ..helper import format_prompt
class Berlin(AsyncGeneratorProvider):
url = "https://ai.berlin4h.top"
working = False
supports_gpt_35_turbo = True
_token = None
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
if not model:
model = "gpt-3.5-turbo"
headers = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
"Accept": "*/*",
"Accept-Language": "de,en-US;q=0.7,en;q=0.3",
"Accept-Encoding": "gzip, deflate, br",
"Referer": f"{cls.url}/",
"Content-Type": "application/json",
"Origin": cls.url,
"Alt-Used": "ai.berlin4h.top",
"Connection": "keep-alive",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"Pragma": "no-cache",
"Cache-Control": "no-cache",
"TE": "trailers",
}
async with ClientSession(headers=headers) as session:
if not cls._token:
data = {
"account": '免费使用GPT3.5模型@163.com',
"password": '659e945c2d004686bad1a75b708c962f'
}
async with session.post(f"{cls.url}/api/login", json=data, proxy=proxy) as response:
response.raise_for_status()
cls._token = (await response.json())["data"]["token"]
headers = {
"token": cls._token
}
prompt = format_prompt(messages)
data = {
"prompt": prompt,
"parentMessageId": str(uuid.uuid4()),
"options": {
"model": model,
"temperature": 0,
"presence_penalty": 0,
"frequency_penalty": 0,
"max_tokens": 1888,
**kwargs
},
}
async with session.post(f"{cls.url}/api/chat/completions", json=data, proxy=proxy, headers=headers) as response:
response.raise_for_status()
async for chunk in response.content:
if chunk.strip():
try:
yield json.loads(chunk)["content"]
except:
raise RuntimeError(f"Response: {chunk.decode()}")


@ -1,54 +0,0 @@
from __future__ import annotations
from aiohttp import ClientSession, ClientTimeout
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
class ChatAnywhere(AsyncGeneratorProvider):
url = "https://chatanywhere.cn"
supports_gpt_35_turbo = True
supports_message_history = True
working = False
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
timeout: int = 120,
temperature: float = 0.5,
**kwargs
) -> AsyncResult:
headers = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
"Accept": "application/json, text/plain, */*",
"Accept-Language": "de,en-US;q=0.7,en;q=0.3",
"Accept-Encoding": "gzip, deflate, br",
"Content-Type": "application/json",
"Referer": f"{cls.url}/",
"Origin": cls.url,
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"Authorization": "",
"Connection": "keep-alive",
"TE": "trailers"
}
async with ClientSession(headers=headers, timeout=ClientTimeout(timeout)) as session:
data = {
"list": messages,
"id": "s1_qYuOLXjI3rEpc7WHfQ",
"title": messages[-1]["content"],
"prompt": "",
"temperature": temperature,
"models": "61490748",
"continuous": True
}
async with session.post(f"{cls.url}/v1/chat/gpt/", json=data, proxy=proxy) as response:
response.raise_for_status()
async for chunk in response.content.iter_any():
if chunk:
yield chunk.decode()


@ -1,47 +0,0 @@
from __future__ import annotations
from ...typing import Messages
from ...requests import StreamSession
from ..base_provider import AsyncProvider, format_prompt
class ChatgptDuo(AsyncProvider):
url = "https://chatgptduo.com"
supports_gpt_35_turbo = True
working = False
@classmethod
async def create_async(
cls,
model: str,
messages: Messages,
proxy: str = None,
timeout: int = 120,
**kwargs
) -> str:
async with StreamSession(
impersonate="chrome107",
proxies={"https": proxy},
timeout=timeout
) as session:
prompt = format_prompt(messages),
data = {
"prompt": prompt,
"search": prompt,
"purpose": "ask",
}
response = await session.post(f"{cls.url}/", data=data)
response.raise_for_status()
data = response.json()
cls._sources = [{
"title": source["title"],
"url": source["link"],
"snippet": source["snippet"]
} for source in data["results"]]
return data["answer"]
@classmethod
def get_sources(cls):
return cls._sources


@ -1,52 +0,0 @@
from __future__ import annotations
from aiohttp import ClientSession
import json
from ...typing import AsyncGenerator
from ..base_provider import AsyncGeneratorProvider
class CodeLinkAva(AsyncGeneratorProvider):
url = "https://ava-ai-ef611.web.app"
supports_gpt_35_turbo = True
working = False
@classmethod
async def create_async_generator(
cls,
model: str,
messages: list[dict[str, str]],
**kwargs
) -> AsyncGenerator:
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept": "*/*",
"Accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
"Origin": cls.url,
"Referer": f"{cls.url}/",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
}
async with ClientSession(
headers=headers
) as session:
data = {
"messages": messages,
"temperature": 0.6,
"stream": True,
**kwargs
}
async with session.post("https://ava-alpha-api.codelink.io/api/chat", json=data) as response:
response.raise_for_status()
async for line in response.content:
line = line.decode()
if line.startswith("data: "):
if line.startswith("data: [DONE]"):
break
line = json.loads(line[6:-1])
content = line["choices"][0]["delta"].get("content")
if content:
yield content


@ -1,50 +0,0 @@
from __future__ import annotations
from aiohttp import ClientSession
from hashlib import sha256
from ...typing import AsyncResult, Messages, Dict
from ..base_provider import AsyncGeneratorProvider
from ..helper import format_prompt
class Cromicle(AsyncGeneratorProvider):
url: str = 'https://cromicle.top'
working: bool = False
supports_gpt_35_turbo: bool = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
async with ClientSession(
headers=_create_header()
) as session:
async with session.post(
f'{cls.url}/chat',
proxy=proxy,
json=_create_payload(format_prompt(messages))
) as response:
response.raise_for_status()
async for stream in response.content.iter_any():
if stream:
yield stream.decode()
def _create_header() -> Dict[str, str]:
return {
'accept': '*/*',
'content-type': 'application/json',
}
def _create_payload(message: str) -> Dict[str, str]:
return {
'message': message,
'token': 'abc',
'hash': sha256('abc'.encode() + message.encode()).hexdigest()
}


@ -1,62 +0,0 @@
from __future__ import annotations
import json
import re
import time
import requests
from ...typing import Any, CreateResult
from ..base_provider import AbstractProvider
class DfeHub(AbstractProvider):
url = "https://chat.dfehub.com/"
supports_stream = True
supports_gpt_35_turbo = True
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
headers = {
"authority" : "chat.dfehub.com",
"accept" : "*/*",
"accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
"content-type" : "application/json",
"origin" : "https://chat.dfehub.com",
"referer" : "https://chat.dfehub.com/",
"sec-ch-ua" : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
"sec-ch-ua-mobile" : "?0",
"sec-ch-ua-platform": '"macOS"',
"sec-fetch-dest" : "empty",
"sec-fetch-mode" : "cors",
"sec-fetch-site" : "same-origin",
"user-agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
"x-requested-with" : "XMLHttpRequest",
}
json_data = {
"messages" : messages,
"model" : "gpt-3.5-turbo",
"temperature" : kwargs.get("temperature", 0.5),
"presence_penalty" : kwargs.get("presence_penalty", 0),
"frequency_penalty" : kwargs.get("frequency_penalty", 0),
"top_p" : kwargs.get("top_p", 1),
"stream" : True
}
response = requests.post("https://chat.dfehub.com/api/openai/v1/chat/completions",
headers=headers, json=json_data, timeout=3)
for chunk in response.iter_lines():
if b"detail" in chunk:
delay = re.findall(r"\d+\.\d+", chunk.decode())
delay = float(delay[-1])
time.sleep(delay)
yield from DfeHub.create_completion(model, messages, stream, **kwargs)
if b"content" in chunk:
data = json.loads(chunk.decode().split("data: ")[1])
yield (data["choices"][0]["delta"]["content"])


@ -1,89 +0,0 @@
from __future__ import annotations
import json
import random
import requests
from ...typing import Any, CreateResult
from ..base_provider import AbstractProvider
class EasyChat(AbstractProvider):
url: str = "https://free.easychat.work"
supports_stream = True
supports_gpt_35_turbo = True
working = False
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
active_servers = [
"https://chat10.fastgpt.me",
"https://chat9.fastgpt.me",
"https://chat1.fastgpt.me",
"https://chat2.fastgpt.me",
"https://chat3.fastgpt.me",
"https://chat4.fastgpt.me",
"https://gxos1h1ddt.fastgpt.me"
]
server = active_servers[kwargs.get("active_server", random.randint(0, 5))]
headers = {
"authority" : f"{server}".replace("https://", ""),
"accept" : "text/event-stream",
"accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3,fa=0.2",
"content-type" : "application/json",
"origin" : f"{server}",
"referer" : f"{server}/",
"x-requested-with" : "XMLHttpRequest",
'plugins' : '0',
'sec-ch-ua' : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
'sec-ch-ua-mobile' : '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest' : 'empty',
'sec-fetch-mode' : 'cors',
'sec-fetch-site' : 'same-origin',
'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
'usesearch' : 'false',
'x-requested-with' : 'XMLHttpRequest'
}
json_data = {
"messages" : messages,
"stream" : stream,
"model" : model,
"temperature" : kwargs.get("temperature", 0.5),
"presence_penalty" : kwargs.get("presence_penalty", 0),
"frequency_penalty" : kwargs.get("frequency_penalty", 0),
"top_p" : kwargs.get("top_p", 1)
}
session = requests.Session()
# init cookies from server
session.get(f"{server}/")
response = session.post(f"{server}/api/openai/v1/chat/completions",
headers=headers, json=json_data, stream=stream)
if response.status_code != 200:
raise Exception(f"Error {response.status_code} from server : {response.reason}")
if not stream:
json_data = response.json()
if "choices" in json_data:
yield json_data["choices"][0]["message"]["content"]
else:
raise Exception("No response from server")
else:
for chunk in response.iter_lines():
if b"content" in chunk:
splitData = chunk.decode().split("data:")
if len(splitData) > 1:
yield json.loads(splitData[1])["choices"][0]["delta"]["content"]

@ -1,91 +0,0 @@
from __future__ import annotations
import uuid, time, random, json
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
from ..helper import format_prompt, get_random_string
class FakeGpt(AsyncGeneratorProvider):
url = "https://chat-shared2.zhile.io"
supports_gpt_35_turbo = True
working = False
_access_token = None
_cookie_jar = None
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"Accept-Language": "en-US",
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36",
"Referer": "https://chat-shared2.zhile.io/?v=2",
"sec-ch-ua": '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
"sec-ch-ua-platform": '"Linux"',
"sec-ch-ua-mobile": "?0",
}
async with ClientSession(headers=headers, cookie_jar=cls._cookie_jar) as session:
if not cls._access_token:
async with session.get(f"{cls.url}/api/loads", params={"t": int(time.time())}, proxy=proxy) as response:
response.raise_for_status()
list = (await response.json())["loads"]
token_ids = [t["token_id"] for t in list]
data = {
"token_key": random.choice(token_ids),
"session_password": get_random_string()
}
async with session.post(f"{cls.url}/auth/login", data=data, proxy=proxy) as response:
response.raise_for_status()
async with session.get(f"{cls.url}/api/auth/session", proxy=proxy) as response:
response.raise_for_status()
cls._access_token = (await response.json())["accessToken"]
cls._cookie_jar = session.cookie_jar
headers = {
"Content-Type": "application/json",
"Accept": "text/event-stream",
"X-Authorization": f"Bearer {cls._access_token}",
}
prompt = format_prompt(messages)
data = {
"action": "next",
"messages": [
{
"id": str(uuid.uuid4()),
"author": {"role": "user"},
"content": {"content_type": "text", "parts": [prompt]},
"metadata": {},
}
],
"parent_message_id": str(uuid.uuid4()),
"model": "text-davinci-002-render-sha",
"plugin_ids": [],
"timezone_offset_min": -120,
"suggestions": [],
"history_and_training_disabled": True,
"arkose_token": "",
"force_paragen": False,
}
last_message = ""
async with session.post(f"{cls.url}/api/conversation", json=data, headers=headers, proxy=proxy) as response:
async for line in response.content:
if line.startswith(b"data: "):
line = line[6:]
if line == b"[DONE]":
break
try:
line = json.loads(line)
if line["message"]["metadata"]["message_type"] == "next":
new_message = line["message"]["content"]["parts"][0]
yield new_message[len(last_message):]
last_message = new_message
except:
continue
if not last_message:
raise RuntimeError("No valid response")

@ -1,76 +0,0 @@
from __future__ import annotations
import json
import random
import requests
from ...typing import Any, CreateResult
from ..base_provider import AbstractProvider
class FastGpt(AbstractProvider):
url: str = 'https://chat9.fastgpt.me/'
working = False
needs_auth = False
supports_stream = True
supports_gpt_35_turbo = True
supports_gpt_4 = False
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
headers = {
'authority' : 'chat9.fastgpt.me',
'accept' : 'text/event-stream',
'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'cache-control' : 'no-cache',
'content-type' : 'application/json',
'origin' : 'https://chat9.fastgpt.me',
'plugins' : '0',
'pragma' : 'no-cache',
'referer' : 'https://chat9.fastgpt.me/',
'sec-ch-ua' : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
'sec-ch-ua-mobile' : '?0',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-dest' : 'empty',
'sec-fetch-mode' : 'cors',
'sec-fetch-site' : 'same-origin',
'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
'usesearch' : 'false',
'x-requested-with' : 'XMLHttpRequest',
}
json_data = {
'messages' : messages,
'stream' : stream,
'model' : model,
'temperature' : kwargs.get('temperature', 0.5),
'presence_penalty' : kwargs.get('presence_penalty', 0),
'frequency_penalty' : kwargs.get('frequency_penalty', 0),
'top_p' : kwargs.get('top_p', 1),
}
subdomain = random.choice([
'jdaen979ew',
'chat9'
])
response = requests.post(f'https://{subdomain}.fastgpt.me/api/openai/v1/chat/completions',
headers=headers, json=json_data, stream=stream)
for line in response.iter_lines():
if line:
try:
if b'content' in line:
line_json = json.loads(line.decode('utf-8').split('data: ')[1])
token = line_json['choices'][0]['delta'].get(
'content'
)
if token:
yield token
except:
continue

@ -1,40 +0,0 @@
from __future__ import annotations
import json
import requests
from ...typing import Any, CreateResult
from ..base_provider import AbstractProvider
class Forefront(AbstractProvider):
url = "https://forefront.com"
supports_stream = True
supports_gpt_35_turbo = True
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
json_data = {
"text" : messages[-1]["content"],
"action" : "noauth",
"id" : "",
"parentId" : "",
"workspaceId" : "",
"messagePersona": "607e41fe-95be-497e-8e97-010a59b2e2c0",
"model" : "gpt-4",
"messages" : messages[:-1] if len(messages) > 1 else [],
"internetMode" : "auto",
}
response = requests.post("https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat",
json=json_data, stream=True)
response.raise_for_status()
for token in response.iter_lines():
if b"delta" in token:
yield json.loads(token.decode().split("data: ")[1])["delta"]

@ -1,87 +0,0 @@
from __future__ import annotations
import secrets, time, json
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
from ..helper import format_prompt
class GPTalk(AsyncGeneratorProvider):
url = "https://gptalk.net"
working = False
supports_gpt_35_turbo = True
_auth = None
used_times = 0
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
if not model:
model = "gpt-3.5-turbo"
timestamp = int(time.time())
headers = {
'authority': 'gptalk.net',
'accept': '*/*',
'accept-language': 'de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US;q=0.6,nl;q=0.5,zh-CN;q=0.4,zh-TW;q=0.3,zh;q=0.2',
'content-type': 'application/json',
'origin': 'https://gptalk.net',
'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Linux"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
'x-auth-appid': '2229',
'x-auth-openid': '',
'x-auth-platform': '',
'x-auth-timestamp': f"{timestamp}",
}
async with ClientSession(headers=headers) as session:
if not cls._auth or cls._auth["expires_at"] < timestamp or cls.used_times == 5:
data = {
"fingerprint": secrets.token_hex(16).zfill(32),
"platform": "fingerprint"
}
async with session.post(f"{cls.url}/api/chatgpt/user/login", json=data, proxy=proxy) as response:
response.raise_for_status()
cls._auth = (await response.json())["data"]
cls.used_times = 0
data = {
"content": format_prompt(messages),
"accept": "stream",
"from": 1,
"model": model,
"is_mobile": 0,
"user_agent": headers["user-agent"],
"is_open_ctx": 0,
"prompt": "",
"roid": 111,
"temperature": 0,
"ctx_msg_count": 3,
"created_at": timestamp
}
headers = {
'authorization': f'Bearer {cls._auth["token"]}',
}
async with session.post(f"{cls.url}/api/chatgpt/chatapi/text", json=data, headers=headers, proxy=proxy) as response:
response.raise_for_status()
token = (await response.json())["data"]["token"]
cls.used_times += 1
last_message = ""
async with session.get(f"{cls.url}/api/chatgpt/chatapi/stream", params={"token": token}, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
if line.startswith(b"data: "):
if line.startswith(b"data: [DONE]"):
break
message = json.loads(line[6:-1])["content"]
yield message[len(last_message):]
last_message = message

@ -1,73 +0,0 @@
from __future__ import annotations
import requests, json
from ..base_provider import AbstractProvider
from ...typing import CreateResult, Messages
from json import dumps
class GeekGpt(AbstractProvider):
url = 'https://chat.geekgpt.org'
working = False
supports_message_history = True
supports_stream = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
@classmethod
def create_completion(
cls,
model: str,
messages: Messages,
stream: bool,
**kwargs
) -> CreateResult:
if not model:
model = "gpt-3.5-turbo"
json_data = {
'messages': messages,
'model': model,
'temperature': kwargs.get('temperature', 0.9),
'presence_penalty': kwargs.get('presence_penalty', 0),
'top_p': kwargs.get('top_p', 1),
'frequency_penalty': kwargs.get('frequency_penalty', 0),
'stream': True
}
data = dumps(json_data, separators=(',', ':'))
headers = {
'authority': 'ai.fakeopen.com',
'accept': '*/*',
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'authorization': 'Bearer pk-this-is-a-real-free-pool-token-for-everyone',
'content-type': 'application/json',
'origin': 'https://chat.geekgpt.org',
'referer': 'https://chat.geekgpt.org/',
'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'cross-site',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
}
response = requests.post("https://ai.fakeopen.com/v1/chat/completions",
headers=headers, data=data, stream=True)
response.raise_for_status()
for chunk in response.iter_lines():
if b'content' in chunk:
json_data = chunk.decode().replace("data: ", "")
if json_data == "[DONE]":
break
try:
content = json.loads(json_data)["choices"][0]["delta"].get("content")
except Exception as e:
raise RuntimeError(f'error | {e} :', json_data)
if content:
yield content

@ -1,77 +0,0 @@
from __future__ import annotations
import json
import os
import uuid
import requests
# try:
# from Crypto.Cipher import AES
# except ImportError:
# from Cryptodome.Cipher import AES
from ...typing import Any, CreateResult
from ..base_provider import AbstractProvider
class GetGpt(AbstractProvider):
url = 'https://chat.getgpt.world/'
supports_stream = True
working = False
supports_gpt_35_turbo = True
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
headers = {
'Content-Type' : 'application/json',
'Referer' : 'https://chat.getgpt.world/',
'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
}
data = json.dumps(
{
'messages' : messages,
'frequency_penalty' : kwargs.get('frequency_penalty', 0),
'max_tokens' : kwargs.get('max_tokens', 4000),
'model' : 'gpt-3.5-turbo',
'presence_penalty' : kwargs.get('presence_penalty', 0),
'temperature' : kwargs.get('temperature', 1),
'top_p' : kwargs.get('top_p', 1),
'stream' : True,
'uuid' : str(uuid.uuid4())
}
)
res = requests.post('https://chat.getgpt.world/api/chat/stream',
headers=headers, json={'signature': _encrypt(data)}, stream=True)
res.raise_for_status()
for line in res.iter_lines():
if b'content' in line:
line_json = json.loads(line.decode('utf-8').split('data: ')[1])
yield (line_json['choices'][0]['delta']['content'])
def _encrypt(e: str):
# t = os.urandom(8).hex().encode('utf-8')
# n = os.urandom(8).hex().encode('utf-8')
# r = e.encode('utf-8')
# cipher = AES.new(t, AES.MODE_CBC, n)
# ciphertext = cipher.encrypt(_pad_data(r))
# return ciphertext.hex() + t.decode('utf-8') + n.decode('utf-8')
return
def _pad_data(data: bytes) -> bytes:
# block_size = AES.block_size
# padding_size = block_size - len(data) % block_size
# padding = bytes([padding_size] * padding_size)
# return data + padding
return
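For reference, the commented-out helpers above originally implemented AES-CBC encryption of the request payload before it was posted as the "signature" field. A minimal runnable sketch of that logic, assuming the pycryptodome package that the disabled Crypto.Cipher import pointed at, is:

import os
from Crypto.Cipher import AES  # pycryptodome; assumed from the disabled imports above

def _pad_data(data: bytes) -> bytes:
    # PKCS#7-style padding up to the 16-byte AES block size
    padding_size = AES.block_size - len(data) % AES.block_size
    return data + bytes([padding_size] * padding_size)

def _encrypt(e: str) -> str:
    # 16-byte hex key (t) and IV (n), both appended to the hex ciphertext,
    # mirroring the commented-out implementation above
    t = os.urandom(8).hex().encode("utf-8")
    n = os.urandom(8).hex().encode("utf-8")
    cipher = AES.new(t, AES.MODE_CBC, n)
    ciphertext = cipher.encrypt(_pad_data(e.encode("utf-8")))
    return ciphertext.hex() + t.decode("utf-8") + n.decode("utf-8")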

@ -1,89 +0,0 @@
from __future__ import annotations
import json
import uuid
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, format_prompt
class H2o(AsyncGeneratorProvider):
url = "https://gpt-gm.h2o.ai"
model = "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1"
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
model = model if model else cls.model
headers = {"Referer": f"{cls.url}/"}
async with ClientSession(
headers=headers
) as session:
data = {
"ethicsModalAccepted": "true",
"shareConversationsWithModelAuthors": "true",
"ethicsModalAcceptedAt": "",
"activeModel": model,
"searchEnabled": "true",
}
async with session.post(
f"{cls.url}/settings",
proxy=proxy,
data=data
) as response:
response.raise_for_status()
async with session.post(
f"{cls.url}/conversation",
proxy=proxy,
json={"model": model},
) as response:
response.raise_for_status()
conversationId = (await response.json())["conversationId"]
data = {
"inputs": format_prompt(messages),
"parameters": {
"temperature": 0.4,
"truncate": 2048,
"max_new_tokens": 1024,
"do_sample": True,
"repetition_penalty": 1.2,
"return_full_text": False,
**kwargs
},
"stream": True,
"options": {
"id": str(uuid.uuid4()),
"response_id": str(uuid.uuid4()),
"is_retry": False,
"use_cache": False,
"web_search_id": "",
},
}
async with session.post(
f"{cls.url}/conversation/{conversationId}",
proxy=proxy,
json=data
) as response:
start = "data:"
async for line in response.content:
line = line.decode("utf-8")
if line and line.startswith(start):
line = json.loads(line[len(start):-1])
if not line["token"]["special"]:
yield line["token"]["text"]
async with session.delete(
f"{cls.url}/conversation/{conversationId}",
proxy=proxy,
) as response:
response.raise_for_status()

@ -1,80 +0,0 @@
from __future__ import annotations
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
from ..helper import get_random_hex
class SearchTypes():
quick = "quick"
code = "code"
websearch = "websearch"
class Hashnode(AsyncGeneratorProvider):
url = "https://hashnode.com"
working = False
supports_message_history = True
supports_gpt_35_turbo = True
_sources = []
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
search_type: str = SearchTypes.websearch,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
"Accept": "*/*",
"Accept-Language": "de,en-US;q=0.7,en;q=0.3",
"Accept-Encoding": "gzip, deflate, br",
"Referer": f"{cls.url}/rix",
"Content-Type": "application/json",
"Origin": cls.url,
"Connection": "keep-alive",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"Pragma": "no-cache",
"Cache-Control": "no-cache",
"TE": "trailers",
}
async with ClientSession(headers=headers) as session:
prompt = messages[-1]["content"]
cls._sources = []
if search_type == "websearch":
async with session.post(
f"{cls.url}/api/ai/rix/search",
json={"prompt": prompt},
proxy=proxy,
) as response:
response.raise_for_status()
cls._sources = (await response.json())["result"]
data = {
"chatId": get_random_hex(),
"history": messages,
"prompt": prompt,
"searchType": search_type,
"urlToScan": None,
"searchResults": cls._sources,
}
async with session.post(
f"{cls.url}/api/ai/rix/completion",
json=data,
proxy=proxy,
) as response:
response.raise_for_status()
async for chunk in response.content.iter_any():
if chunk:
yield chunk.decode()
@classmethod
def get_sources(cls) -> list:
return [{
"title": source["name"],
"url": source["url"]
} for source in cls._sources]

@ -1,165 +0,0 @@
# not using WS anymore
from __future__ import annotations
import json, uuid, hashlib, time, random
from aiohttp import ClientSession
from aiohttp.http import WSMsgType
import asyncio
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, format_prompt
models = {
"samantha": "1e3be7fe89e94a809408b1154a2ee3e1",
"gpt-3.5-turbo": "8077335db7cd47e29f7de486612cc7fd",
"gpt-4": "01c8de4fbfc548df903712b0922a4e01",
}
class Myshell(AsyncGeneratorProvider):
url = "https://app.myshell.ai/chat"
working = False
supports_gpt_35_turbo = True
supports_gpt_4 = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
timeout: int = 90,
**kwargs
) -> AsyncResult:
if not model:
bot_id = models["samantha"]
elif model in models:
bot_id = models[model]
else:
raise ValueError(f"Model are not supported: {model}")
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36'
visitor_id = generate_visitor_id(user_agent)
async with ClientSession(
headers={'User-Agent': user_agent}
) as session:
async with session.ws_connect(
"wss://api.myshell.ai/ws/?EIO=4&transport=websocket",
autoping=False,
timeout=timeout,
proxy=proxy
) as wss:
# Send and receive hello message
await wss.receive_str()
message = json.dumps({"token": None, "visitorId": visitor_id})
await wss.send_str(f"40/chat,{message}")
await wss.receive_str()
# Fix "need_verify_captcha" issue
await asyncio.sleep(5)
# Create chat message
text = format_prompt(messages)
chat_data = json.dumps(["text_chat",{
"reqId": str(uuid.uuid4()),
"botUid": bot_id,
"sourceFrom": "myshellWebsite",
"text": text,
**generate_signature(text)
}])
# Send chat message
chat_start = "42/chat,"
chat_message = f"{chat_start}{chat_data}"
await wss.send_str(chat_message)
# Receive messages
async for message in wss:
if message.type != WSMsgType.TEXT:
continue
# Ping back
if message.data == "2":
await wss.send_str("3")
continue
# Is not chat message
if not message.data.startswith(chat_start):
continue
data_type, data = json.loads(message.data[len(chat_start):])
if data_type == "text_stream":
if data["data"]["text"]:
yield data["data"]["text"]
elif data["data"]["isFinal"]:
break
elif data_type in ("message_replied", "need_verify_captcha"):
raise RuntimeError(f"Received unexpected message: {data_type}")
def generate_timestamp() -> str:
return str(
int(
str(int(time.time() * 1000))[:-1]
+ str(
sum(
2 * int(digit)
if idx % 2 == 0
else 3 * int(digit)
for idx, digit in enumerate(str(int(time.time() * 1000))[:-1])
)
% 10
)
)
)
def generate_signature(text: str):
timestamp = generate_timestamp()
version = 'v1.0.0'
secret = '8@VXGK3kKHr!u2gA'
data = f"{version}#{text}#{timestamp}#{secret}"
signature = hashlib.md5(data.encode()).hexdigest()
signature = signature[::-1]
return {
"signature": signature,
"timestamp": timestamp,
"version": version
}
def xor_hash(B: str):
r = []
i = 0
def o(e, t):
o_val = 0
for i in range(len(t)):
o_val |= r[i] << (8 * i)
return e ^ o_val
for e in range(len(B)):
t = ord(B[e])
r.insert(0, 255 & t)
if len(r) >= 4:
i = o(i, r)
r = []
if len(r) > 0:
i = o(i, r)
return hex(i)[2:]
def performance() -> str:
t = int(time.time() * 1000)
e = 0
while t == int(time.time() * 1000):
e += 1
return hex(t)[2:] + hex(e)[2:]
def generate_visitor_id(user_agent: str) -> str:
f = performance()
r = hex(int(random.random() * (16**16)))[2:-2]
d = xor_hash(user_agent)
e = hex(1080 * 1920)[2:]
return f"{f}-{r}-{d}-{e}-{f}"

@ -1,66 +0,0 @@
from __future__ import annotations
import json
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import get_random_string
class NoowAi(AsyncGeneratorProvider):
url = "https://noowai.com"
supports_message_history = True
supports_gpt_35_turbo = True
working = False
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
"Accept": "*/*",
"Accept-Language": "de,en-US;q=0.7,en;q=0.3",
"Accept-Encoding": "gzip, deflate, br",
"Referer": f"{cls.url}/",
"Content-Type": "application/json",
"Origin": cls.url,
"Alt-Used": "noowai.com",
"Connection": "keep-alive",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"Pragma": "no-cache",
"Cache-Control": "no-cache",
"TE": "trailers"
}
async with ClientSession(headers=headers) as session:
data = {
"botId": "default",
"customId": "d49bc3670c3d858458576d75c8ea0f5d",
"session": "N/A",
"chatId": get_random_string(),
"contextId": 25,
"messages": messages,
"newMessage": messages[-1]["content"],
"stream": True
}
async with session.post(f"{cls.url}/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
if line.startswith(b"data: "):
try:
line = json.loads(line[6:])
assert "type" in line
except:
raise RuntimeError(f"Broken line: {line.decode()}")
if line["type"] == "live":
yield line["data"]
elif line["type"] == "end":
break
elif line["type"] == "error":
raise RuntimeError(line["data"])

@ -1,59 +0,0 @@
from __future__ import annotations
import random, string, json
from aiohttp import ClientSession
from ...typing import Messages, AsyncResult
from ..base_provider import AsyncGeneratorProvider
from ..helper import get_random_string
class Opchatgpts(AsyncGeneratorProvider):
url = "https://opchatgpts.net"
working = False
supports_message_history = True
supports_gpt_35_turbo = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None, **kwargs) -> AsyncResult:
headers = {
"User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept" : "*/*",
"Accept-Language" : "de,en-US;q=0.7,en;q=0.3",
"Origin" : cls.url,
"Alt-Used" : "opchatgpts.net",
"Referer" : f"{cls.url}/chatgpt-free-use/",
"Sec-Fetch-Dest" : "empty",
"Sec-Fetch-Mode" : "cors",
"Sec-Fetch-Site" : "same-origin",
}
async with ClientSession(
headers=headers
) as session:
data = {
"botId": "default",
"chatId": get_random_string(),
"contextId": 28,
"customId": None,
"messages": messages,
"newMessage": messages[-1]["content"],
"session": "N/A",
"stream": True
}
async with session.post(f"{cls.url}/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
if line.startswith(b"data: "):
try:
line = json.loads(line[6:])
assert "type" in line
except:
raise RuntimeError(f"Broken line: {line.decode()}")
if line["type"] == "live":
yield line["data"]
elif line["type"] == "end":
break

@ -1,88 +0,0 @@
from __future__ import annotations
import json
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
from ..helper import format_prompt, get_cookies
class OpenAssistant(AsyncGeneratorProvider):
url = "https://open-assistant.io/chat"
needs_auth = True
working = False
model = "OA_SFT_Llama_30B_6"
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
cookies: dict = None,
**kwargs
) -> AsyncResult:
if not cookies:
cookies = get_cookies("open-assistant.io")
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
}
async with ClientSession(
cookies=cookies,
headers=headers
) as session:
async with session.post("https://open-assistant.io/api/chat", proxy=proxy) as response:
chat_id = (await response.json())["id"]
data = {
"chat_id": chat_id,
"content": f"<s>[INST]\n{format_prompt(messages)}\n[/INST]",
"parent_id": None
}
async with session.post("https://open-assistant.io/api/chat/prompter_message", proxy=proxy, json=data) as response:
parent_id = (await response.json())["id"]
data = {
"chat_id": chat_id,
"parent_id": parent_id,
"model_config_name": model if model else cls.model,
"sampling_parameters":{
"top_k": 50,
"top_p": None,
"typical_p": None,
"temperature": 0.35,
"repetition_penalty": 1.1111111111111112,
"max_new_tokens": 1024,
**kwargs
},
"plugins":[]
}
async with session.post("https://open-assistant.io/api/chat/assistant_message", proxy=proxy, json=data) as response:
data = await response.json()
if "id" in data:
message_id = data["id"]
elif "message" in data:
raise RuntimeError(data["message"])
else:
response.raise_for_status()
params = {
'chat_id': chat_id,
'message_id': message_id,
}
async with session.post("https://open-assistant.io/api/chat/events", proxy=proxy, params=params) as response:
start = "data: "
async for line in response.content:
line = line.decode("utf-8")
if line and line.startswith(start):
line = json.loads(line[len(start):])
if line["event_type"] == "token":
yield line["text"]
params = {
'chat_id': chat_id,
}
async with session.delete("https://open-assistant.io/api/chat", proxy=proxy, params=params) as response:
response.raise_for_status()

@ -1,61 +0,0 @@
from __future__ import annotations
import uuid
import requests
from ...typing import Any, CreateResult
from ..base_provider import AbstractProvider
class V50(AbstractProvider):
url = 'https://p5.v50.ltd'
supports_gpt_35_turbo = True
supports_stream = False
needs_auth = False
working = False
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
conversation = (
"\n".join(
f"{message['role']}: {message['content']}" for message in messages
)
+ "\nassistant: "
)
payload = {
"prompt" : conversation,
"options" : {},
"systemMessage" : ".",
"temperature" : kwargs.get("temperature", 0.4),
"top_p" : kwargs.get("top_p", 0.4),
"model" : model,
"user" : str(uuid.uuid4())
}
headers = {
'authority' : 'p5.v50.ltd',
'accept' : 'application/json, text/plain, */*',
'accept-language' : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
'content-type' : 'application/json',
'origin' : 'https://p5.v50.ltd',
'referer' : 'https://p5.v50.ltd/',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest' : 'empty',
'sec-fetch-mode' : 'cors',
'sec-fetch-site' : 'same-origin',
'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36'
}
response = requests.post(
"https://p5.v50.ltd/api/chat-process",
json=payload,
headers=headers,
proxies=kwargs.get('proxy', {}),
)
if "https://fk1.v50.ltd" not in response.text:
yield response.text

@ -1,55 +0,0 @@
from __future__ import annotations
import json
from aiohttp import ClientSession
from ..base_provider import AsyncGeneratorProvider
from ...typing import AsyncResult, Messages
class Vitalentum(AsyncGeneratorProvider):
url = "https://app.vitalentum.io"
supports_gpt_35_turbo = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept": "text/event-stream",
"Accept-language": "de,en-US;q=0.7,en;q=0.3",
"Origin": cls.url,
"Referer": f"{cls.url}/",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
}
conversation = json.dumps({"history": [{
"speaker": "human" if message["role"] == "user" else "bot",
"text": message["content"],
} for message in messages]})
data = {
"conversation": conversation,
"temperature": 0.7,
**kwargs
}
async with ClientSession(
headers=headers
) as session:
async with session.post(f"{cls.url}/api/converse-edge", json=data, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
line = line.decode()
if line.startswith("data: "):
if line.startswith("data: [DONE]"):
break
line = json.loads(line[6:-1])
content = line["choices"][0]["delta"].get("content")
if content:
yield content

@ -1,91 +0,0 @@
from __future__ import annotations
import json
import requests
from ..base_provider import AbstractProvider
from ...typing import Messages, CreateResult
class VoiGpt(AbstractProvider):
"""
VoiGpt - A provider for VoiGpt.com
**Note** : to use this provider you have to get your csrf token/cookie from the voigpt.com website
Args:
model: The model to use
messages: The messages to send
stream: Whether to stream the response
proxy: The proxy to use
access_token: The access token to use
**kwargs: Additional keyword arguments
Returns:
A CreateResult object
"""
url = "https://voigpt.com"
working = False
supports_gpt_35_turbo = True
supports_message_history = True
supports_stream = False
_access_token: str = None
@classmethod
def create_completion(
cls,
model: str,
messages: Messages,
stream: bool,
proxy: str = None,
access_token: str = None,
**kwargs
) -> CreateResult:
if not model:
model = "gpt-3.5-turbo"
if not access_token:
access_token = cls._access_token
if not access_token:
headers = {
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
"accept-language": "de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US;q=0.6",
"sec-ch-ua": "\"Google Chrome\";v=\"119\", \"Chromium\";v=\"119\", \"Not?A_Brand\";v=\"24\"",
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": "\"Linux\"",
"sec-fetch-dest": "document",
"sec-fetch-mode": "navigate",
"sec-fetch-site": "none",
"sec-fetch-user": "?1",
"upgrade-insecure-requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
}
req_response = requests.get(cls.url, headers=headers)
access_token = cls._access_token = req_response.cookies.get("csrftoken")
headers = {
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US;q=0.6",
"Cookie": f"csrftoken={access_token};",
"Origin": "https://voigpt.com",
"Referer": "https://voigpt.com/",
"Sec-Ch-Ua": "'Google Chrome';v='119', 'Chromium';v='119', 'Not?A_Brand';v='24'",
"Sec-Ch-Ua-Mobile": "?0",
"Sec-Ch-Ua-Platform": "'Windows'",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
"X-Csrftoken": access_token,
}
payload = {
"messages": messages,
}
request_url = f"{cls.url}/generate_response/"
req_response = requests.post(request_url, headers=headers, json=payload)
try:
response = json.loads(req_response.text)
yield response["response"]
except:
raise RuntimeError(f"Response: {req_response.text}")
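As the docstring above notes, VoiGpt required a CSRF token taken from the voigpt.com cookies. A hypothetical invocation of this (now removed) provider, using a placeholder token, would have looked roughly like this:

messages = [{"role": "user", "content": "Hello"}]
for chunk in VoiGpt.create_completion(
    model="gpt-3.5-turbo",
    messages=messages,
    stream=False,
    access_token="<csrftoken copied from voigpt.com>",  # placeholder, not a real token
):
    print(chunk, end="")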

@ -1,65 +0,0 @@
from __future__ import annotations
import random, string, time
from aiohttp import ClientSession
from ..base_provider import AsyncProvider
class Wewordle(AsyncProvider):
url = "https://wewordle.org"
working = False
supports_gpt_35_turbo = True
@classmethod
async def create_async(
cls,
model: str,
messages: list[dict[str, str]],
proxy: str = None,
**kwargs
) -> str:
headers = {
"accept" : "*/*",
"pragma" : "no-cache",
"Content-Type" : "application/json",
"Connection" : "keep-alive"
}
_user_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=16))
_app_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=31))
_request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime())
data = {
"user" : _user_id,
"messages" : messages,
"subscriber": {
"originalPurchaseDate" : None,
"originalApplicationVersion" : None,
"allPurchaseDatesMillis" : {},
"entitlements" : {"active": {}, "all": {}},
"allPurchaseDates" : {},
"allExpirationDatesMillis" : {},
"allExpirationDates" : {},
"originalAppUserId" : f"$RCAnonymousID:{_app_id}",
"latestExpirationDate" : None,
"requestDate" : _request_date,
"latestExpirationDateMillis" : None,
"nonSubscriptionTransactions" : [],
"originalPurchaseDateMillis" : None,
"managementURL" : None,
"allPurchasedProductIdentifiers": [],
"firstSeen" : _request_date,
"activeSubscriptions" : [],
}
}
async with ClientSession(
headers=headers
) as session:
async with session.post(f"{cls.url}/gptapi/v1/android/turbo", proxy=proxy, json=data) as response:
response.raise_for_status()
content = (await response.json())["message"]["content"]
if content:
return content

@ -1,57 +0,0 @@
from __future__ import annotations
import random
import requests
from ...typing import Any, CreateResult
from ..base_provider import AbstractProvider, format_prompt
class Wuguokai(AbstractProvider):
url = 'https://chat.wuguokai.xyz'
supports_gpt_35_turbo = True
working = False
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
stream: bool,
**kwargs: Any,
) -> CreateResult:
headers = {
'authority': 'ai-api.wuguokai.xyz',
'accept': 'application/json, text/plain, */*',
'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
'content-type': 'application/json',
'origin': 'https://chat.wuguokai.xyz',
'referer': 'https://chat.wuguokai.xyz/',
'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-site',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
}
data ={
"prompt": format_prompt(messages),
"options": {},
"userId": f"#/chat/{random.randint(1,99999999)}",
"usingContext": True
}
response = requests.post(
"https://ai-api20.wuguokai.xyz/api/chat-process",
headers=headers,
timeout=3,
json=data,
proxies=kwargs.get('proxy', {}),
)
_split = response.text.split("> 若回答失败请重试或多刷新几次界面后重试")
if response.status_code != 200:
raise Exception(f"Error: {response.status_code} {response.reason}")
if len(_split) > 1:
yield _split[1].strip()
else:
yield _split[0].strip()

@ -1,58 +0,0 @@
from __future__ import annotations
import json
from ...requests import StreamSession
from ..base_provider import AsyncGeneratorProvider
from ...typing import AsyncResult, Messages
class Ylokh(AsyncGeneratorProvider):
url = "https://chat.ylokh.xyz"
working = False
supports_message_history = True
supports_gpt_35_turbo = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
stream: bool = True,
proxy: str = None,
timeout: int = 120,
**kwargs
) -> AsyncResult:
model = model if model else "gpt-3.5-turbo"
headers = {"Origin": cls.url, "Referer": f"{cls.url}/"}
data = {
"messages": messages,
"model": model,
"temperature": 1,
"presence_penalty": 0,
"top_p": 1,
"frequency_penalty": 0,
"allow_fallback": True,
"stream": stream,
**kwargs
}
async with StreamSession(
headers=headers,
proxies={"https": proxy},
timeout=timeout
) as session:
async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data) as response:
response.raise_for_status()
if stream:
async for line in response.iter_lines():
line = line.decode()
if line.startswith("data: "):
if line.startswith("data: [DONE]"):
break
line = json.loads(line[6:])
content = line["choices"][0]["delta"].get("content")
if content:
yield content
else:
chat = await response.json()
yield chat["choices"][0]["message"].get("content")

@ -1,61 +0,0 @@
from __future__ import annotations
import random
from ...requests import StreamSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, format_prompt
class Yqcloud(AsyncGeneratorProvider):
url = "https://chat9.yqcloud.top/"
working = False
supports_gpt_35_turbo = True
@staticmethod
async def create_async_generator(
model: str,
messages: Messages,
proxy: str = None,
timeout: int = 120,
**kwargs,
) -> AsyncResult:
async with StreamSession(
headers=_create_header(), proxies={"https": proxy}, timeout=timeout
) as session:
payload = _create_payload(messages, **kwargs)
async with session.post("https://api.aichatos.cloud/api/generateStream", json=payload) as response:
response.raise_for_status()
async for chunk in response.iter_content():
if chunk:
chunk = chunk.decode()
if "sorry, 您的ip已由于触发防滥用检测而被封禁" in chunk:
raise RuntimeError("IP address is blocked by abuse detection.")
yield chunk
def _create_header():
return {
"accept" : "application/json, text/plain, */*",
"content-type" : "application/json",
"origin" : "https://chat9.yqcloud.top",
"referer" : "https://chat9.yqcloud.top/"
}
def _create_payload(
messages: Messages,
system_message: str = "",
user_id: int = None,
**kwargs
):
if not user_id:
user_id = random.randint(1690000544336, 2093025544336)
return {
"prompt": format_prompt(messages),
"network": True,
"system": system_message,
"withoutContext": False,
"stream": True,
"userId": f"#/chat/{user_id}"
}

@ -1,34 +0,0 @@
from .AiService import AiService
from .CodeLinkAva import CodeLinkAva
from .DfeHub import DfeHub
from .EasyChat import EasyChat
from .Forefront import Forefront
from .GetGpt import GetGpt
from .Lockchat import Lockchat
from .Wewordle import Wewordle
from .Equing import Equing
from .Wuguokai import Wuguokai
from .V50 import V50
from .FastGpt import FastGpt
from .Aivvm import Aivvm
from .Vitalentum import Vitalentum
from .H2o import H2o
from .Myshell import Myshell
from .Acytoo import Acytoo
from .Aibn import Aibn
from .Ails import Ails
from .ChatgptDuo import ChatgptDuo
from .Cromicle import Cromicle
from .Opchatgpts import Opchatgpts
from .Yqcloud import Yqcloud
from .Aichat import Aichat
from .Berlin import Berlin
from .Phind import Phind
from .AiAsk import AiAsk
from .ChatAnywhere import ChatAnywhere
from .FakeGpt import FakeGpt
from .GeekGpt import GeekGpt
from .GPTalk import GPTalk
from .Hashnode import Hashnode
from .Ylokh import Ylokh
from .OpenAssistant import OpenAssistant

@ -15,7 +15,7 @@ from ..helper import get_last_user_message
from ..openai.har_file import get_headers
class HarProvider(AsyncGeneratorProvider, ProviderModelMixin):
label = "LM Arena"
label = "LM Arena (Har)"
url = "https://lmarena.ai"
api_endpoint = "/queue/join?"
working = True

@ -1,74 +0,0 @@
from __future__ import annotations
import json
from aiohttp import ClientSession, FormData
from ...typing import AsyncResult, Messages, MediaListType
from ...requests import raise_for_status
from ...errors import ResponseError
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt, get_random_string
from ...image import to_bytes, is_accepted_format
class Qwen_QVQ_72B(AsyncGeneratorProvider, ProviderModelMixin):
label = "Qwen QVQ-72B"
url = "https://qwen-qvq-72b-preview.hf.space"
api_endpoint = "/gradio_api/call/generate"
working = True
default_model = "qwen-qvq-72b-preview"
default_vision_model = default_model
model_aliases = {"qvq-72b": default_vision_model}
vision_models = list(model_aliases.keys())
models = vision_models
@classmethod
async def create_async_generator(
cls, model: str, messages: Messages,
media: MediaListType = None,
api_key: str = None,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"Accept": "application/json",
}
if api_key is not None:
headers["Authorization"] = f"Bearer {api_key}"
async with ClientSession(headers=headers) as session:
if media:
data = FormData()
data_bytes = to_bytes(media[0][0])
data.add_field("files", data_bytes, content_type=is_accepted_format(data_bytes), filename=media[0][1])
url = f"{cls.url}/gradio_api/upload?upload_id={get_random_string()}"
async with session.post(url, data=data, proxy=proxy) as response:
await raise_for_status(response)
image = await response.json()
data = {"data": [{"path": image[0]}, format_prompt(messages)]}
else:
data = {"data": [None, format_prompt(messages)]}
async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
await raise_for_status(response)
event_id = (await response.json()).get("event_id")
async with session.get(f"{cls.url}{cls.api_endpoint}/{event_id}") as event_response:
await raise_for_status(event_response)
event = None
text_position = 0
async for chunk in event_response.content:
if chunk.startswith(b"event: "):
event = chunk[7:].decode(errors="replace").strip()
if chunk.startswith(b"data: "):
if event == "error":
raise ResponseError(f"GPU token limit exceeded: {chunk.decode(errors='replace')}")
if event in ("complete", "generating"):
try:
data = json.loads(chunk[6:])
except (json.JSONDecodeError, KeyError, TypeError) as e:
raise RuntimeError(f"Failed to read response: {chunk.decode(errors='replace')}", e)
if event == "generating":
if isinstance(data[0], str):
yield data[0][text_position:]
text_position = len(data[0])
else:
break

@ -21,7 +21,7 @@ class Qwen_Qwen_2_5_Max(AsyncGeneratorProvider, ProviderModelMixin):
supports_message_history = False
default_model = "qwen-qwen2-5-max"
model_aliases = {"qwen-2-5-max": default_model}
model_aliases = {"qwen-2.5-max": default_model}
models = list(model_aliases.keys())
@classmethod

@ -19,7 +19,7 @@ class StabilityAI_SD35Large(AsyncGeneratorProvider, ProviderModelMixin):
default_model = 'stabilityai-stable-diffusion-3-5-large'
default_image_model = default_model
model_aliases = {"stable-diffusion-3.5-large": default_model}
model_aliases = {"sd-3.5-large": default_model}
image_models = list(model_aliases.keys())
models = image_models

@ -1,78 +0,0 @@
from __future__ import annotations
from aiohttp import ClientSession
import json
from ...typing import AsyncResult, Messages
from ...providers.response import ImageResponse
from ...errors import ResponseError
from ...requests.raise_for_status import raise_for_status
from ..helper import format_image_prompt
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
class Voodoohop_Flux1Schnell(AsyncGeneratorProvider, ProviderModelMixin):
label = "Voodoohop Flux-1-Schnell"
url = "https://voodoohop-flux-1-schnell.hf.space"
api_endpoint = "https://voodoohop-flux-1-schnell.hf.space/call/infer"
working = True
default_model = "voodoohop-flux-1-schnell"
default_image_model = default_model
model_aliases = {
"flux-schnell": default_image_model,
"flux": default_image_model
}
image_models = list(model_aliases.keys())
models = image_models
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
prompt: str = None,
width: int = 768,
height: int = 768,
num_inference_steps: int = 2,
seed: int = 0,
randomize_seed: bool = True,
**kwargs
) -> AsyncResult:
width = max(32, width - (width % 8))
height = max(32, height - (height % 8))
prompt = format_image_prompt(messages, prompt)
payload = {
"data": [
prompt,
seed,
randomize_seed,
width,
height,
num_inference_steps
]
}
async with ClientSession() as session:
async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
await raise_for_status(response)
response_data = await response.json()
event_id = response_data['event_id']
while True:
async with session.get(f"{cls.api_endpoint}/{event_id}", proxy=proxy) as status_response:
await raise_for_status(status_response)
while not status_response.content.at_eof():
event = await status_response.content.readuntil(b'\n\n')
if event.startswith(b'event:'):
event_parts = event.split(b'\ndata: ')
if len(event_parts) < 2:
continue
event_type = event_parts[0].split(b': ')[1]
data = event_parts[1]
if event_type == b'error':
raise ResponseError(f"Error generating image: {data}")
elif event_type == b'complete':
json_data = json.loads(data)
image_url = json_data[0]['url']
yield ImageResponse(urls=[image_url], alt=prompt)
return

@ -10,14 +10,12 @@ from .BlackForestLabs_Flux1Dev import BlackForestLabs_Flux1Dev
from .CohereForAI_C4AI_Command import CohereForAI_C4AI_Command
from .DeepseekAI_JanusPro7b import DeepseekAI_JanusPro7b
from .Microsoft_Phi_4_Multimodal import Microsoft_Phi_4_Multimodal
-from .Qwen_QVQ_72B import Qwen_QVQ_72B
from .Qwen_Qwen_2_5 import Qwen_Qwen_2_5
from .Qwen_Qwen_2_5M import Qwen_Qwen_2_5M
from .Qwen_Qwen_2_5_Max import Qwen_Qwen_2_5_Max
from .Qwen_Qwen_2_72B import Qwen_Qwen_2_72B
from .Qwen_Qwen_3 import Qwen_Qwen_3
from .StabilityAI_SD35Large import StabilityAI_SD35Large
-from .Voodoohop_Flux1Schnell import Voodoohop_Flux1Schnell
class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://huggingface.co/spaces"
@ -26,20 +24,18 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
default_model = Qwen_Qwen_2_72B.default_model
default_image_model = BlackForestLabs_Flux1Dev.default_model
-default_vision_model = Qwen_QVQ_72B.default_model
+default_vision_model = Microsoft_Phi_4_Multimodal.default_model
providers = [
BlackForestLabs_Flux1Dev,
CohereForAI_C4AI_Command,
DeepseekAI_JanusPro7b,
Microsoft_Phi_4_Multimodal,
-Qwen_QVQ_72B,
Qwen_Qwen_2_5,
Qwen_Qwen_2_5M,
Qwen_Qwen_2_5_Max,
Qwen_Qwen_2_72B,
Qwen_Qwen_3,
StabilityAI_SD35Large,
-Voodoohop_Flux1Schnell,
]
@classmethod

@ -5,7 +5,7 @@ from ...providers.response import ImageResponse
from ...errors import MissingAuthError
from ...typing import AsyncResult, Messages, Cookies
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..bing.create_images import create_images, create_session
+from .bing.create_images import create_images, create_session
from ..helper import format_image_prompt
class BingCreateImages(AsyncGeneratorProvider, ProviderModelMixin):

@ -13,8 +13,8 @@ try:
except ImportError:
has_requirements = False
-from ..helper import get_connector
-from ...errors import MissingRequirementsError, RateLimitError
+from ...helper import get_connector
+from ....errors import MissingRequirementsError, RateLimitError
BING_URL = "https://www.bing.com"
TIMEOUT_LOGIN = 1200

@ -1,73 +0,0 @@
from __future__ import annotations
import time
from hashlib import sha256
from aiohttp import BaseConnector, ClientSession
from ...errors import RateLimitError
from ...requests import raise_for_status
from ...requests.aiohttp import get_connector
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
class AIChatFree(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://aichatfree.info"
working = False
supports_stream = True
supports_message_history = True
default_model = 'gemini-1.5-pro'
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
connector: BaseConnector = None,
**kwargs,
) -> AsyncResult:
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:122.0) Gecko/20100101 Firefox/122.0",
"Accept": "*/*",
"Accept-Language": "en-US,en;q=0.5",
"Accept-Encoding": "gzip, deflate, br",
"Content-Type": "text/plain;charset=UTF-8",
"Referer": f"{cls.url}/",
"Origin": cls.url,
}
async with ClientSession(
connector=get_connector(connector, proxy), headers=headers
) as session:
timestamp = int(time.time() * 1e3)
data = {
"messages": [
{
"role": "model" if message["role"] == "assistant" else "user",
"parts": [{"text": message["content"]}],
}
for message in messages
],
"time": timestamp,
"pass": None,
"sign": generate_signature(timestamp, messages[-1]["content"]),
}
async with session.post(
f"{cls.url}/api/generate", json=data, proxy=proxy
) as response:
if response.status == 500:
if "Quota exceeded" in await response.text():
raise RateLimitError(
f"Response {response.status}: Rate limit reached"
)
await raise_for_status(response)
async for chunk in response.content.iter_any():
yield chunk.decode(errors="ignore")
def generate_signature(time: int, text: str, secret: str = ""):
message = f"{time}:{text}:{secret}"
return sha256(message.encode()).hexdigest()

@ -1,275 +0,0 @@
import json
import random
import re
import requests
from aiohttp import ClientSession
from typing import List
from ...typing import AsyncResult, Messages
from ...providers.response import ImageResponse, FinishReason, Usage
from ...requests.raise_for_status import raise_for_status
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ... import debug
def split_message(message: str, max_length: int = 1000) -> List[str]:
"""Splits the message into parts up to (max_length)."""
chunks = []
while len(message) > max_length:
split_point = message.rfind(' ', 0, max_length)
if split_point == -1:
split_point = max_length
chunks.append(message[:split_point])
message = message[split_point:].strip()
if message:
chunks.append(message)
return chunks
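# Usage sketch (hypothetical values): messages longer than max_length are split
# at the last space before the limit, falling back to a hard split when no space
# is found, e.g.
#   split_message("a" * 1500)       -> ["a" * 1000, "a" * 500]
#   split_message("short message")  -> ["short message"]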
class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://api.airforce"
api_endpoint_completions = "https://api.airforce/chat/completions"
api_endpoint_imagine2 = "https://api.airforce/imagine2"
working = False
supports_stream = True
supports_system_message = True
supports_message_history = True
default_model = "llama-3.1-70b-chat"
default_image_model = "flux"
models = []
image_models = []
hidden_models = {"Flux-1.1-Pro"}
additional_models_imagine = ["flux-1.1-pro", "midjourney", "dall-e-3"]
model_aliases = {
# Alias mappings for models
"openchat-3.5": "openchat-3.5-0106",
"deepseek-coder": "deepseek-coder-6.7b-instruct",
"hermes-2-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
"hermes-2-pro": "hermes-2-pro-mistral-7b",
"openhermes-2.5": "openhermes-2.5-mistral-7b",
"lfm-40b": "lfm-40b-moe",
"german-7b": "discolm-german-7b-v1",
"llama-2-7b": "llama-2-7b-chat-int8",
"llama-3.1-70b": "llama-3.1-70b-chat",
"llama-3.1-8b": "llama-3.1-8b-chat",
"llama-3.1-70b": "llama-3.1-70b-turbo",
"llama-3.1-8b": "llama-3.1-8b-turbo",
"neural-7b": "neural-chat-7b-v3-1",
"zephyr-7b": "zephyr-7b-beta",
"evil": "any-uncensored",
"sdxl": "stable-diffusion-xl-lightning",
"sdxl": "stable-diffusion-xl-base",
"flux-pro": "flux-1.1-pro",
"llama-3.1-8b": "llama-3.1-8b-chat"
}
@classmethod
def get_models(cls):
"""Get available models with error handling"""
if not cls.image_models:
try:
response = requests.get(
f"{cls.url}/imagine2/models",
headers={
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
}
)
response.raise_for_status()
cls.image_models = response.json()
if isinstance(cls.image_models, list):
cls.image_models.extend(cls.additional_models_imagine)
else:
cls.image_models = cls.additional_models_imagine.copy()
except Exception as e:
debug.log(f"Error fetching image models: {e}")
cls.image_models = cls.additional_models_imagine.copy()
if not cls.models:
try:
response = requests.get(
f"{cls.url}/models",
headers={
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
}
)
response.raise_for_status()
data = response.json()
if isinstance(data, dict) and 'data' in data:
cls.models = [model['id'] for model in data['data']]
cls.models.extend(cls.image_models)
cls.models = [model for model in cls.models if model not in cls.hidden_models]
else:
cls.models = list(cls.model_aliases.keys())
except Exception as e:
debug.log(f"Error fetching text models: {e}")
cls.models = list(cls.model_aliases.keys())
return cls.models or list(cls.model_aliases.keys())
@classmethod
def get_model(cls, model: str) -> str:
"""Get the actual model name from alias"""
return cls.model_aliases.get(model, model or cls.default_model)
@classmethod
def _filter_content(cls, part_response: str) -> str:
"""
Filters out unwanted content from the partial response.
"""
part_response = re.sub(
r"One message exceeds the \d+chars per message limit\..+https:\/\/discord\.com\/invite\/\S+",
'',
part_response
)
part_response = re.sub(
r"Rate limit \(\d+\/minute\) exceeded\. Join our discord for more: .+https:\/\/discord\.com\/invite\/\S+",
'',
part_response
)
return part_response
@classmethod
def _filter_response(cls, response: str) -> str:
"""
Filters the full response to remove system errors and other unwanted text.
"""
if "Model not found or too long input. Or any other error (xD)" in response:
raise ValueError(response)
filtered_response = re.sub(r"\[ERROR\] '\w{8}-\w{4}-\w{4}-\w{4}-\w{12}'", '', response) # any-uncensored
filtered_response = re.sub(r'<\|im_end\|>', '', filtered_response) # remove <|im_end|> token
filtered_response = re.sub(r'</s>', '', filtered_response) # neural-chat-7b-v3-1
filtered_response = re.sub(r'^(Assistant: |AI: |ANSWER: |Output: )', '', filtered_response) # phi-2
filtered_response = cls._filter_content(filtered_response)
return filtered_response
@classmethod
async def generate_image(
cls,
model: str,
prompt: str,
size: str,
seed: int,
proxy: str = None
) -> AsyncResult:
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:133.0) Gecko/20100101 Firefox/133.0",
"Accept": "image/avif,image/webp,image/png,image/svg+xml,image/*;q=0.8,*/*;q=0.5",
"Accept-Language": "en-US,en;q=0.5",
"Accept-Encoding": "gzip, deflate, br",
"Content-Type": "application/json",
}
params = {"model": model, "prompt": prompt, "size": size, "seed": seed}
async with ClientSession(headers=headers) as session:
async with session.get(cls.api_endpoint_imagine2, params=params, proxy=proxy) as response:
if response.status == 200:
image_url = str(response.url)
yield ImageResponse(urls=image_url, alt=prompt)
else:
error_text = await response.text()
raise RuntimeError(f"Image generation failed: {response.status} - {error_text}")
@classmethod
async def generate_text(
cls,
model: str,
messages: Messages,
max_tokens: int,
temperature: float,
top_p: float,
stream: bool,
proxy: str = None
) -> AsyncResult:
"""
Generates text, buffers the response, filters it, and returns the final result.
"""
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:133.0) Gecko/20100101 Firefox/133.0",
"Accept": "application/json, text/event-stream",
"Accept-Language": "en-US,en;q=0.5",
"Accept-Encoding": "gzip, deflate, br",
"Content-Type": "application/json",
}
final_messages = []
for message in messages:
message_chunks = split_message(message["content"], max_length=1000)
final_messages.extend([{"role": message["role"], "content": chunk} for chunk in message_chunks])
data = {
"messages": final_messages,
"model": model,
"temperature": temperature,
"top_p": top_p,
"stream": stream,
}
if max_tokens != 512:
data["max_tokens"] = max_tokens
async with ClientSession(headers=headers) as session:
async with session.post(cls.api_endpoint_completions, json=data, proxy=proxy) as response:
await raise_for_status(response)
if stream:
idx = 0
async for line in response.content:
line = line.decode('utf-8').strip()
if line.startswith('data: '):
try:
json_str = line[6:] # Remove 'data: ' prefix
chunk = json.loads(json_str)
if 'choices' in chunk and chunk['choices']:
delta = chunk['choices'][0].get('delta', {})
if 'content' in delta:
chunk = cls._filter_response(delta['content'])
if chunk:
yield chunk
idx += 1
except json.JSONDecodeError:
continue
if idx == 512:
yield FinishReason("length")
else:
# Non-streaming response
result = await response.json()
if "usage" in result:
yield Usage(**result["usage"])
if result["usage"]["completion_tokens"] == 512:
yield FinishReason("length")
if 'choices' in result and result['choices']:
message = result['choices'][0].get('message', {})
content = message.get('content', '')
filtered_response = cls._filter_response(content)
yield filtered_response
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
prompt: str = None,
proxy: str = None,
max_tokens: int = 512,
temperature: float = 1,
top_p: float = 1,
stream: bool = True,
size: str = "1:1",
seed: int = None,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
if model in cls.image_models:
if prompt is None:
prompt = messages[-1]['content']
if seed is None:
seed = random.randint(0, 10000)
async for result in cls.generate_image(model, prompt, size, seed, proxy):
yield result
else:
async for result in cls.generate_text(model, messages, max_tokens, temperature, top_p, stream, proxy):
yield result

@ -1,88 +0,0 @@
from __future__ import annotations
from aiohttp import ClientSession
import base64
import json
from ...typing import AsyncResult, Messages
from ...requests.raise_for_status import raise_for_status
from ...providers.response import FinishReason
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
class AutonomousAI(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.autonomous.ai/anon/"
api_endpoints = {
"llama": "https://chatgpt.autonomous.ai/api/v1/ai/chat",
"qwen_coder": "https://chatgpt.autonomous.ai/api/v1/ai/chat",
"hermes": "https://chatgpt.autonomous.ai/api/v1/ai/chat-hermes",
"vision": "https://chatgpt.autonomous.ai/api/v1/ai/chat-vision",
"summary": "https://chatgpt.autonomous.ai/api/v1/ai/summary"
}
working = False
supports_stream = True
supports_system_message = True
supports_message_history = True
default_model = "llama"
models = [default_model, "qwen_coder", "hermes", "vision", "summary"]
model_aliases = {
"llama-3.3-70b": default_model,
"qwen-2.5-coder-32b": "qwen_coder",
"hermes-3": "hermes",
"llama-3.2-90b": "vision",
"llama-3.2-70b": "summary",
}
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
stream: bool = False,
**kwargs
) -> AsyncResult:
api_endpoint = cls.api_endpoints[model]
headers = {
'accept': '*/*',
'accept-language': 'en-US,en;q=0.9',
'content-type': 'application/json',
'country-code': 'US',
'origin': 'https://www.autonomous.ai',
'referer': 'https://www.autonomous.ai/',
'time-zone': 'America/New_York',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
}
async with ClientSession(headers=headers) as session:
message_json = json.dumps(messages)
encoded_message = base64.b64encode(message_json.encode()).decode(errors="ignore")
data = {
"messages": encoded_message,
"threadId": model,
"stream": stream,
"aiAgent": model
}
async with session.post(api_endpoint, json=data, proxy=proxy) as response:
await raise_for_status(response)
async for chunk in response.content:
if chunk:
chunk_str = chunk.decode()
if chunk_str == "data: [DONE]":
continue
try:
# Remove "data: " prefix and parse JSON
chunk_data = json.loads(chunk_str.replace("data: ", ""))
if "choices" in chunk_data and chunk_data["choices"]:
delta = chunk_data["choices"][0].get("delta", {})
if "content" in delta and delta["content"]:
yield delta["content"]
if "finish_reason" in chunk_data and chunk_data["finish_reason"]:
yield FinishReason(chunk_data["finish_reason"])
except json.JSONDecodeError:
continue
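
A minimal standalone sketch of the payload built above: the OpenAI-style message list is JSON-serialized and base64-encoded before being posted, with the agent name doubling as the thread id (field names are taken from the deleted provider; the helper name is illustrative):

import base64
import json

def build_autonomous_payload(messages: list, model: str, stream: bool = False) -> dict:
    # JSON-serialize the message list, then base64-encode the text,
    # matching the "messages"/"threadId"/"aiAgent" fields used above.
    encoded = base64.b64encode(json.dumps(messages).encode()).decode()
    return {
        "messages": encoded,
        "threadId": model,
        "stream": stream,
        "aiAgent": model,
    }

# Example: build_autonomous_payload([{"role": "user", "content": "Hi"}], "llama")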


@@ -1,77 +0,0 @@
from __future__ import annotations
import json
from aiohttp import ClientSession, ClientTimeout, StreamReader
from ...typing import AsyncResult, Messages
from ...requests.raise_for_status import raise_for_status
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt
class DarkAI(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://darkai.foundation/chat"
api_endpoint = "https://darkai.foundation/chat"
working = False
supports_stream = True
default_model = 'llama-3-70b'
models = [
'gpt-4o',
'gpt-3.5-turbo',
default_model,
]
model_aliases = {
"llama-3.1-70b": "llama-3-70b",
}
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
headers = {
"accept": "text/event-stream",
"content-type": "application/json",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
}
timeout = ClientTimeout(total=600) # Increase timeout to 10 minutes
async with ClientSession(headers=headers, timeout=timeout) as session:
prompt = format_prompt(messages)
data = {
"query": prompt,
"model": model,
}
async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
await raise_for_status(response)
reader: StreamReader = response.content
buffer = b""
while True:
chunk = await reader.read(1024) # Read in smaller chunks
if not chunk:
break
buffer += chunk
while b"\n" in buffer:
line, buffer = buffer.split(b"\n", 1)
line = line.strip()
if line:
try:
line_str = line.decode()
if line_str.startswith('data: '):
chunk_data = json.loads(line_str[6:])
if chunk_data['event'] == 'text-chunk':
chunk = chunk_data['data']['text']
yield chunk
elif chunk_data['event'] == 'stream-end':
return
except json.JSONDecodeError:
pass
except Exception:
pass
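
The deleted DarkAI provider parses its event stream by hand: bytes are accumulated into a buffer, split on newlines, and each "data: " line is decoded as JSON. A minimal synchronous sketch of that buffering loop over an already-fetched byte iterable (the function name is illustrative, not part of the original code):

import json
from typing import Iterable, Iterator

def iter_text_chunks(byte_chunks: Iterable[bytes]) -> Iterator[str]:
    # Yield text from 'data: {...}' events, stopping on a stream-end event.
    buffer = b""
    for chunk in byte_chunks:
        buffer += chunk
        while b"\n" in buffer:
            line, buffer = buffer.split(b"\n", 1)
            line = line.strip()
            if not line.startswith(b"data: "):
                continue
            try:
                event = json.loads(line[6:])
            except json.JSONDecodeError:
                continue
            if event.get("event") == "text-chunk":
                yield event["data"]["text"]
            elif event.get("event") == "stream-end":
                return

# Example:
# list(iter_text_chunks([b'data: {"event": "text-chunk", "data": {"text": "Hi"}}\n',
#                        b'data: {"event": "stream-end"}\n']))  # -> ["Hi"]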


@@ -1,75 +0,0 @@
from __future__ import annotations
import time, json
from ...typing import CreateResult, Messages
from ..base_provider import AbstractProvider
from ..helper import format_prompt
class MyShell(AbstractProvider):
url = "https://app.myshell.ai/chat"
working = False
supports_gpt_35_turbo = True
supports_stream = True
@classmethod
def create_completion(
cls,
model: str,
messages: Messages,
stream: bool,
proxy: str = None,
timeout: int = 120,
webdriver = None,
**kwargs
) -> CreateResult:
with WebDriverSession(webdriver, "", proxy=proxy) as driver:
bypass_cloudflare(driver, cls.url, timeout)
# Send request with message
data = {
"botId": "4738",
"conversation_scenario": 3,
"message": format_prompt(messages),
"messageType": 1
}
script = """
response = await fetch("https://api.myshell.ai/v1/bot/chat/send_message", {
"headers": {
"accept": "application/json",
"content-type": "application/json",
"myshell-service-name": "organics-api",
"visitor-id": localStorage.getItem("mix_visitorId")
},
"body": '{body}',
"method": "POST"
})
window._reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
"""
driver.execute_script(script.replace("{body}", json.dumps(data)))
script = """
chunk = await window._reader.read();
if (chunk.done) {
return null;
}
content = '';
chunk.value.split('\\n').forEach((line, index) => {
if (line.startsWith('data: ')) {
try {
const data = JSON.parse(line.substring('data: '.length));
if ('content' in data) {
content += data['content'];
}
} catch(e) {}
}
});
return content;
"""
while True:
chunk = driver.execute_script(script)
if chunk:
yield chunk
elif chunk != "":
break
else:
time.sleep(0.1)


@@ -1,22 +1,21 @@
from __future__ import annotations
from aiohttp import ClientSession
import json
import time
import re
import random
import json # Ensure json is imported if not already
import uuid # Import the uuid module
import json
import uuid
import asyncio
from typing import Optional, Dict, Any, List, Union # Assuming these are used elsewhere or can be pruned if not
from typing import Optional, Dict, Any
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages, MediaListType
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..providers.response import FinishReason, JsonConversation
from ..tools.media import merge_media
from ..image import to_data_uri, is_data_uri_an_image
from ..errors import RateLimitError, ModelNotFoundError
from .. import debug
from ...typing import AsyncResult, Messages, MediaListType
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ...providers.response import FinishReason, JsonConversation
from ...image import to_data_uri
from ...errors import RateLimitError, ModelNotFoundError
from ... import debug
class AuthData:
"""
@@ -95,7 +94,7 @@ class PuterJS(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://docs.puter.com/playground"
api_endpoint = "https://api.puter.com/drivers/call"
working = True
working = False
needs_auth = False
supports_stream = True
supports_system_message = True
@@ -769,7 +768,7 @@ class PuterJS(AsyncGeneratorProvider, ProviderModelMixin):
proxy: str = None,
stream: bool = True,
conversation: Optional[JsonConversation] = None,
return_conversation: bool = False,
return_conversation: bool = True,
media: MediaListType = None, # Add media parameter for images
**kwargs
) -> AsyncResult:
@@ -847,8 +846,7 @@ class PuterJS(AsyncGeneratorProvider, ProviderModelMixin):
if auth_data.is_rate_limited():
wait_time = auth_data.rate_limited_until - time.time()
if wait_time > 0:
yield f"Rate limited. Please try again in {int(wait_time)} seconds."
return
raise RateLimitError(f"Rate limited. Please try again in {int(wait_time)} seconds.")
async with ClientSession() as session:
# Step 1: Create a temporary account (if needed)
@@ -859,16 +857,14 @@ class PuterJS(AsyncGeneratorProvider, ProviderModelMixin):
auth_data.auth_token = signup_data.get("token")
if not auth_data.auth_token:
yield f"Error: No auth token in response for model {model}"
return
raise RuntimeError(f"Error: No auth token in response for model {model}")
# Get app token
app_token_data = await cls._get_app_token(session, auth_data.auth_token, proxy)
auth_data.app_token = app_token_data.get("token")
if not auth_data.app_token:
yield f"Error: No app token in response for model {model}"
return
raise RuntimeError(f"Error: No app token in response for model {model}")
# Mark tokens as valid
auth_data.created_at = time.time()
@@ -880,11 +876,9 @@ class PuterJS(AsyncGeneratorProvider, ProviderModelMixin):
except RateLimitError as e:
# Set rate limit and inform user
auth_data.set_rate_limit(cls.RATE_LIMIT_DELAY)
yield str(e)
return
raise e
except Exception as e:
yield f"Error during authentication for model {model}: {str(e)}"
return
raise RuntimeError(f"Error during authentication for model {model}: {str(e)}")
# Step 3: Make the chat request with proper image handling
try:
@@ -1218,8 +1212,7 @@ class PuterJS(AsyncGeneratorProvider, ProviderModelMixin):
await asyncio.sleep(min(cls.RATE_LIMIT_DELAY, 10)) # Wait but cap at 10 seconds for retries
continue
else:
yield str(e)
return
raise RuntimeError(str(e))
except Exception as e:
# For network errors or other exceptions
@@ -1230,13 +1223,10 @@ class PuterJS(AsyncGeneratorProvider, ProviderModelMixin):
await asyncio.sleep(cls.RETRY_DELAY * (attempt + 1))
continue
else:
yield f"Error: {str(e)}"
return
raise RuntimeError(str(e))
except Exception as e:
# If any error occurs outside the retry loop
if "token" in str(e).lower() or "auth" in str(e).lower():
auth_data.invalidate()
yield f"Error: {str(e)}"
return
raise RuntimeError(str(e))
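
Because PuterJS now raises RateLimitError and RuntimeError instead of yielding error strings, a caller that previously scanned the stream for error text would wrap the generator in try/except. A hedged sketch of that consumption pattern (the call site below is illustrative, not part of the diff):

from g4f.errors import RateLimitError

async def consume(provider, model, messages):
    try:
        async for chunk in provider.create_async_generator(model, messages):
            if isinstance(chunk, str):
                print(chunk, end="")
    except RateLimitError as e:
        # Previously surfaced as a yielded "Rate limited..." string.
        print(f"\nRate limited: {e}")
    except RuntimeError as e:
        # Authentication and request failures now raise instead of yielding.
        print(f"\nRequest failed: {e}")

# Example: asyncio.run(consume(PuterJS, "gpt-4o", [{"role": "user", "content": "Hi"}]))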


@@ -1,123 +0,0 @@
from __future__ import annotations
import json
import asyncio
from aiohttp import ClientSession, ContentTypeError
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ...requests.aiohttp import get_connector
from ...requests.raise_for_status import raise_for_status
from ..helper import format_prompt
from ...providers.response import ImageResponse
class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://replicate.com"
api_endpoint = "https://homepage.replicate.com/api/prediction"
working = False
supports_stream = True
default_model = 'google-deepmind/gemma-2b-it'
default_image_model = 'stability-ai/stable-diffusion-3'
image_models = [
'stability-ai/stable-diffusion-3',
'bytedance/sdxl-lightning-4step',
'playgroundai/playground-v2.5-1024px-aesthetic',
]
text_models = [
'google-deepmind/gemma-2b-it',
]
models = text_models + image_models
model_aliases = {
# image_models
"sd-3": "stability-ai/stable-diffusion-3",
"sdxl": "bytedance/sdxl-lightning-4step",
"playground-v2.5": "playgroundai/playground-v2.5-1024px-aesthetic",
# text_models
"gemma-2b": "google-deepmind/gemma-2b-it",
}
model_versions = {
# image_models
'stability-ai/stable-diffusion-3': "527d2a6296facb8e47ba1eaf17f142c240c19a30894f437feee9b91cc29d8e4f",
'bytedance/sdxl-lightning-4step': "5f24084160c9089501c1b3545d9be3c27883ae2239b6f412990e82d4a6210f8f",
'playgroundai/playground-v2.5-1024px-aesthetic': "a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24",
# text_models
"google-deepmind/gemma-2b-it": "dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626",
}
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
prompt: str = None,
proxy: str = None,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
headers = {
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/json",
"origin": "https://replicate.com",
"referer": "https://replicate.com/",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36"
}
async with ClientSession(headers=headers, connector=get_connector(proxy=proxy)) as session:
if prompt is None:
if model in cls.image_models:
prompt = messages[-1]['content']
else:
prompt = format_prompt(messages)
data = {
"model": model,
"version": cls.model_versions[model],
"input": {"prompt": prompt},
}
async with session.post(cls.api_endpoint, json=data) as response:
await raise_for_status(response)
result = await response.json()
prediction_id = result['id']
poll_url = f"https://homepage.replicate.com/api/poll?id={prediction_id}"
max_attempts = 30
delay = 5
for _ in range(max_attempts):
async with session.get(poll_url) as response:
await raise_for_status(response)
try:
result = await response.json()
except ContentTypeError:
text = await response.text()
try:
result = json.loads(text)
except json.JSONDecodeError:
raise ValueError(f"Unexpected response format: {text}")
if result['status'] == 'succeeded':
if model in cls.image_models:
image_url = result['output'][0]
yield ImageResponse(image_url, prompt)
return
else:
for chunk in result['output']:
yield chunk
break
elif result['status'] == 'failed':
raise Exception(f"Prediction failed: {result.get('error')}")
await asyncio.sleep(delay)
if result['status'] != 'succeeded':
raise Exception("Prediction timed out")


@@ -1,238 +0,0 @@
from __future__ import annotations
import json
import aiohttp
from pathlib import Path
try:
from bs4 import BeautifulSoup
HAS_BEAUTIFULSOUP = True
except ImportError:
HAS_BEAUTIFULSOUP = False
BeautifulSoup = None
from aiohttp import ClientTimeout
from ...errors import MissingRequirementsError
from ...typing import AsyncResult, Messages
from ...cookies import get_cookies_dir
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt
from ... import debug
class RobocodersAPI(AsyncGeneratorProvider, ProviderModelMixin):
label = "API Robocoders AI"
url = "https://api.robocoders.ai/docs"
api_endpoint = "https://api.robocoders.ai/chat"
working = False
supports_message_history = True
default_model = 'GeneralCodingAgent'
agent = [default_model, "RepoAgent", "FrontEndAgent"]
models = [*agent]
CACHE_DIR = Path(get_cookies_dir())
CACHE_FILE = CACHE_DIR / "robocoders.json"
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
timeout = ClientTimeout(total=600)
async with aiohttp.ClientSession(timeout=timeout) as session:
# Load or create access token and session ID
access_token, session_id = await cls._get_or_create_access_and_session(session)
if not access_token or not session_id:
raise Exception("Failed to initialize API interaction")
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {access_token}"
}
prompt = format_prompt(messages)
data = {
"sid": session_id,
"prompt": prompt,
"agent": model
}
async with session.post(cls.api_endpoint, headers=headers, json=data, proxy=proxy) as response:
if response.status == 401: # Unauthorized, refresh token
cls._clear_cached_data()
raise Exception("Unauthorized: Invalid token, please retry.")
elif response.status == 422:
raise Exception("Validation Error: Invalid input.")
elif response.status >= 500:
raise Exception(f"Server Error: {response.status}")
elif response.status != 200:
raise Exception(f"Unexpected Error: {response.status}")
async for line in response.content:
if line:
try:
# Decode bytes into a string
line_str = line.decode('utf-8')
response_data = json.loads(line_str)
# Get the message from the 'args.content' or 'message' field
message = (response_data.get('args', {}).get('content') or
response_data.get('message', ''))
if message:
yield message
# Check for reaching the resource limit
if (response_data.get('action') == 'message' and
response_data.get('args', {}).get('wait_for_response')):
# Automatically continue the dialog
continue_data = {
"sid": session_id,
"prompt": "continue",
"agent": model
}
async with session.post(
cls.api_endpoint,
headers=headers,
json=continue_data,
proxy=proxy
) as continue_response:
if continue_response.status == 200:
async for continue_line in continue_response.content:
if continue_line:
try:
continue_line_str = continue_line.decode('utf-8')
continue_data = json.loads(continue_line_str)
continue_message = (
continue_data.get('args', {}).get('content') or
continue_data.get('message', '')
)
if continue_message:
yield continue_message
except json.JSONDecodeError:
debug.log(f"Failed to decode continue JSON: {continue_line}")
except Exception as e:
debug.log(f"Error processing continue response: {e}")
except json.JSONDecodeError:
debug.log(f"Failed to decode JSON: {line}")
except Exception as e:
debug.log(f"Error processing response: {e}")
@staticmethod
async def _get_or_create_access_and_session(session: aiohttp.ClientSession):
RobocodersAPI.CACHE_DIR.mkdir(exist_ok=True) # Ensure cache directory exists
# Load data from cache
if RobocodersAPI.CACHE_FILE.exists():
with open(RobocodersAPI.CACHE_FILE, "r") as f:
data = json.load(f)
access_token = data.get("access_token")
session_id = data.get("sid")
# Validate loaded data
if access_token and session_id:
return access_token, session_id
# If data not valid, create new access token and session ID
access_token = await RobocodersAPI._fetch_and_cache_access_token(session)
session_id = await RobocodersAPI._create_and_cache_session(session, access_token)
return access_token, session_id
@staticmethod
async def _fetch_and_cache_access_token(session: aiohttp.ClientSession) -> str:
if not HAS_BEAUTIFULSOUP:
raise MissingRequirementsError('Install "beautifulsoup4" package | pip install -U beautifulsoup4')
url_auth = 'https://api.robocoders.ai/auth'
headers_auth = {
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
}
async with session.get(url_auth, headers=headers_auth) as response:
if response.status == 200:
html = await response.text()
soup = BeautifulSoup(html, 'html.parser')
token_element = soup.find('pre', id='token')
if token_element:
token = token_element.text.strip()
# Cache the token
RobocodersAPI._save_cached_data({"access_token": token})
return token
return None
@staticmethod
async def _create_and_cache_session(session: aiohttp.ClientSession, access_token: str) -> str:
url_create_session = 'https://api.robocoders.ai/create-session'
headers_create_session = {
'Authorization': f'Bearer {access_token}'
}
async with session.get(url_create_session, headers=headers_create_session) as response:
if response.status == 200:
data = await response.json()
session_id = data.get('sid')
# Cache session ID
RobocodersAPI._update_cached_data({"sid": session_id})
return session_id
elif response.status == 401:
RobocodersAPI._clear_cached_data()
raise Exception("Unauthorized: Invalid token during session creation.")
elif response.status == 422:
raise Exception("Validation Error: Check input parameters.")
return None
@staticmethod
def _save_cached_data(new_data: dict):
"""Save new data to cache file"""
RobocodersAPI.CACHE_DIR.mkdir(exist_ok=True)
RobocodersAPI.CACHE_FILE.touch(exist_ok=True)
with open(RobocodersAPI.CACHE_FILE, "w") as f:
json.dump(new_data, f)
@staticmethod
def _update_cached_data(updated_data: dict):
"""Update existing cache data with new values"""
data = {}
if RobocodersAPI.CACHE_FILE.exists():
with open(RobocodersAPI.CACHE_FILE, "r") as f:
try:
data = json.load(f)
except json.JSONDecodeError:
# If cache file is corrupted, start with empty dict
data = {}
data.update(updated_data)
with open(RobocodersAPI.CACHE_FILE, "w") as f:
json.dump(data, f)
@staticmethod
def _clear_cached_data():
"""Remove cache file"""
try:
if RobocodersAPI.CACHE_FILE.exists():
RobocodersAPI.CACHE_FILE.unlink()
except Exception as e:
debug.log(f"Error clearing cache: {e}")
@staticmethod
def _get_cached_data() -> dict:
"""Get all cached data"""
if RobocodersAPI.CACHE_FILE.exists():
try:
with open(RobocodersAPI.CACHE_FILE, "r") as f:
return json.load(f)
except json.JSONDecodeError:
return {}
return {}
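
The cache helpers above follow a simple read-merge-write pattern on a single JSON file. A generic sketch of the same idea (the path is illustrative, not the provider's actual cache location):

import json
from pathlib import Path

CACHE_FILE = Path("cache") / "tokens.json"  # illustrative path

def read_cache() -> dict:
    # Return cached data, tolerating a missing or corrupted file.
    if CACHE_FILE.exists():
        try:
            return json.loads(CACHE_FILE.read_text())
        except json.JSONDecodeError:
            return {}
    return {}

def update_cache(new_values: dict) -> None:
    # Merge new values into the existing cache and write it back.
    CACHE_FILE.parent.mkdir(parents=True, exist_ok=True)
    data = read_cache()
    data.update(new_values)
    CACHE_FILE.write_text(json.dumps(data))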


@@ -1,9 +1,8 @@
from .AI365VIP import AI365VIP
from .AIChatFree import AIChatFree
from .Aichat import Aichat
from .AiChatOnline import AiChatOnline
from .AiChats import AiChats
from .Airforce import Airforce
from .AutonomousAI import AutonomousAI
from .Ails import Ails
from .AIUncensored import AIUncensored
from .AllenAI import AllenAI
from .AmigoChat import AmigoChat
@@ -13,21 +12,23 @@ from .Chatgpt4Online import Chatgpt4Online
from .ChatGptEs import ChatGptEs
from .ChatgptFree import ChatgptFree
from .ChatGptt import ChatGptt
from .DarkAI import DarkAI
from .Equing import Equing
from .FlowGpt import FlowGpt
from .FreeNetfly import FreeNetfly
from .FreeRouter import FreeRouter
from .Glider import Glider
from .GPROChat import GPROChat
from .Koala import Koala
from .Liaobots import Liaobots
from .Lockchat import Lockchat
from .MagickPen import MagickPen
from .MyShell import MyShell
from .Phind import Phind
from .Poe import Poe
from .Prodia import Prodia
from .PuterJS import PuterJS
from .Raycast import Raycast
from .ReplicateHome import ReplicateHome
from .RobocodersAPI import RobocodersAPI
from .RubiksAI import RubiksAI
from .Theb import Theb
from .TypeGPT import TypeGPT
from .Upstage import Upstage
from .Vercel import Vercel

File diff suppressed because it is too large.


@@ -1,5 +1,7 @@
from __future__ import annotations
import re
from typing import Dict, List, Set, Optional, Tuple, Any
from ..typing import AsyncResult, Messages, MediaListType
from ..errors import ModelNotFoundError
from ..image import is_data_an_audio
@@ -7,7 +9,7 @@ from ..providers.retry_provider import IterListProvider
from ..providers.types import ProviderType
from ..Provider.needs_auth import OpenaiChat, CopilotAccount
from ..Provider.hf_space import HuggingSpace
from ..Provider import Cloudflare, Gemini, Grok, DeepSeekAPI, PerplexityLabs, LambdaChat, PollinationsAI, FreeRouter
from ..Provider import Cloudflare, Gemini, Grok, DeepSeekAPI, PerplexityLabs, LambdaChat, PollinationsAI
from ..Provider import Microsoft_Phi_4_Multimodal, DeepInfraChat, Blackbox, EdgeTTS, gTTS, MarkItDown
from ..Provider import HarProvider, DDG, HuggingFace, HuggingFaceMedia
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
@@ -108,6 +110,8 @@ class AnyProvider(AsyncGeneratorProvider, ProviderModelMixin):
cls.image_models = []
cls.vision_models = []
cls.video_models = []
# Get models from the models registry
model_with_providers = {
model: [
provider for provider in providers
@@ -122,6 +126,8 @@ class AnyProvider(AsyncGeneratorProvider, ProviderModelMixin):
model: len(providers) for model, providers in model_with_providers.items() if len(providers) > 1
}
all_models = [cls.default_model] + list(model_with_providers.keys())
# Process special providers
for provider in [OpenaiChat, CopilotAccount, PollinationsAI, HuggingSpace, Cloudflare, PerplexityLabs, Gemini, Grok, DDG]:
provider: ProviderType = provider
if not provider.working or provider.get_parent() in ignored:
@@ -139,41 +145,26 @@ class AnyProvider(AsyncGeneratorProvider, ProviderModelMixin):
cls.image_models.extend(provider.image_models)
cls.vision_models.extend(provider.vision_models)
cls.video_models.extend(provider.video_models)
# Clean model names function
def clean_name(name: str) -> str:
return name.split("/")[-1].split(":")[0].lower(
).replace("-instruct", ""
).replace("-chat", ""
).replace("-08-2024", ""
).replace("-03-2025", ""
).replace("-20241022", ""
).replace("-20240904", ""
).replace("-2025-04-16", ""
).replace("-2025-04-14", ""
).replace("-0125", ""
).replace("-2407", ""
).replace("-2501", ""
).replace("-0324", ""
).replace("-2409", ""
).replace("-2410", ""
).replace("-2411", ""
).replace("-1119", ""
).replace("-0919", ""
).replace("-02-24", ""
).replace("-03-25", ""
).replace("-03-26", ""
).replace("-01-21", ""
).replace("-002", ""
).replace("_", "."
).replace("c4ai-", ""
).replace("-preview", ""
).replace("-experimental", ""
).replace("-v1", ""
).replace("-fp8", ""
).replace("-bf16", ""
).replace("-hf", ""
).replace("flux.1-", "flux-"
).replace("llama3", "llama-3"
).replace("meta-llama-", "llama-")
name = name.split("/")[-1].split(":")[0].lower()
# Date patterns
name = re.sub(r'-\d{4}-\d{2}-\d{2}', '', name)
name = re.sub(r'-\d{8}', '', name)
name = re.sub(r'-\d{4}', '', name)
name = re.sub(r'-\d{2}-\d{2}', '', name)
# Version patterns
name = re.sub(r'-(instruct|chat|preview|experimental|v\d+|fp8|bf16|hf)$', '', name)
# Other replacements
name = name.replace("_", ".")
name = name.replace("c4ai-", "")
name = name.replace("meta-llama-", "llama-")
name = name.replace("llama3", "llama-3")
name = name.replace("flux.1-", "flux-")
return name
# Process HAR providers
for provider in [HarProvider, LambdaChat, DeepInfraChat, HuggingFace, HuggingFaceMedia]:
if not provider.working or provider.get_parent() in ignored:
continue
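
The rewritten clean_name above trades the long chain of str.replace calls for a few regular expressions that strip date stamps and variant suffixes before applying the remaining vendor-specific renames. A standalone sketch with sample inputs (expected outputs are inferred from the patterns, not taken from a test suite):

import re

def clean_name(name: str) -> str:
    # Keep only the last path segment, drop any ":tag", lowercase.
    name = name.split("/")[-1].split(":")[0].lower()
    # Strip trailing date stamps such as -2025-04-16, -20241022, -2407, -03-25.
    name = re.sub(r'-\d{4}-\d{2}-\d{2}', '', name)
    name = re.sub(r'-\d{8}', '', name)
    name = re.sub(r'-\d{4}', '', name)
    name = re.sub(r'-\d{2}-\d{2}', '', name)
    # Strip variant suffixes like -instruct, -preview, -v1, -fp8.
    name = re.sub(r'-(instruct|chat|preview|experimental|v\d+|fp8|bf16|hf)$', '', name)
    # Remaining vendor-specific renames.
    name = name.replace("_", ".")
    name = name.replace("c4ai-", "")
    name = name.replace("meta-llama-", "llama-")
    name = name.replace("llama3", "llama-3")
    name = name.replace("flux.1-", "flux-")
    return name

# clean_name("meta-llama/Meta-Llama-3.1-70B-Instruct")  # -> "llama-3.1-70b"
# clean_name("black-forest-labs/FLUX.1-schnell")        # -> "flux-schnell"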
@@ -188,11 +179,18 @@ class AnyProvider(AsyncGeneratorProvider, ProviderModelMixin):
cls.image_models.extend([clean_name(model) for model in provider.image_models])
cls.vision_models.extend([clean_name(model) for model in provider.vision_models])
cls.video_models.extend([clean_name(model) for model in provider.video_models])
# Process audio providers
for provider in [Microsoft_Phi_4_Multimodal, PollinationsAI]:
if provider.working and provider.get_parent() not in ignored:
cls.audio_models.update(provider.audio_models)
# Update model counts
cls.models_count.update({model: all_models.count(model) for model in all_models if all_models.count(model) > cls.models_count.get(model, 0)})
# Deduplicate and store
cls.models_storage[ignored_key] = list(dict.fromkeys([model if model else cls.default_model for model in all_models]))
return cls.models_storage[ignored_key]
@classmethod
@@ -207,6 +205,7 @@ class AnyProvider(AsyncGeneratorProvider, ProviderModelMixin):
) -> AsyncResult:
cls.get_models(ignored=ignored)
providers = []
if model and ":" in model:
providers = model.split(":")
model = providers.pop()
@@ -238,7 +237,7 @@ class AnyProvider(AsyncGeneratorProvider, ProviderModelMixin):
providers.append(provider)
else:
for provider in [
OpenaiChat, Cloudflare, HarProvider, PerplexityLabs, Gemini, Grok, DeepSeekAPI, FreeRouter, Blackbox,
OpenaiChat, Cloudflare, HarProvider, PerplexityLabs, Gemini, Grok, DeepSeekAPI, Blackbox,
HuggingSpace, LambdaChat, CopilotAccount, PollinationsAI, DeepInfraChat, DDG, HuggingFace, HuggingFaceMedia,
]:
if provider.working:
@@ -247,10 +246,13 @@ class AnyProvider(AsyncGeneratorProvider, ProviderModelMixin):
if model in models.__models__:
for provider in models.__models__[model][1]:
providers.append(provider)
providers = [provider for provider in providers if provider.working and provider.get_parent() not in ignored]
providers = list({provider.__name__: provider for provider in providers}.values())
if len(providers) == 0:
raise ModelNotFoundError(f"Model {model} not found in any provider.")
async for chunk in IterListProvider(providers).create_async_generator(
model,
messages,