Updated some providers, added new providers and new models

kqlio67 2025-02-03 14:17:33 +02:00
parent 50afc2316d
commit ec8caec579
6 changed files with 352 additions and 92 deletions

@@ -39,8 +39,9 @@ This document provides an overview of various AI providers and models, including
|----------|-------------|--------------|---------------|--------|--------|------|------|
|[aichatfree.info](https://aichatfree.info)|No auth required|`g4f.Provider.AIChatFree`|`gemini-1.5-pro` _**(1+)**_|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|
|[autonomous.ai](https://www.autonomous.ai/anon/)|No auth required|`g4f.Provider.AutonomousAI`|`llama-3.3-70b, qwen-2.5-coder-32b, hermes-3, llama-3.2-90b, llama-3.2-70b`|✔|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|
-|[blackbox.ai](https://www.blackbox.ai)|No auth required|`g4f.Provider.Blackbox`|`blackboxai, gpt-4, gpt-4o, gemini-1.5-flash, gemini-1.5-pro, claude-3.5-sonnet, blackboxai-pro, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b, llama-3.3-70b, mixtral-7b, deepseek-chat, dbrx-instruct, qwq-32b, hermes-2-dpo, deepseek-r1` _**(+31)**_|`flux`|`blackboxai, gpt-4o, gemini-1.5-pro, gemini-1.5-flash, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b`|✔|![](https://img.shields.io/badge/Active-brightgreen)|
-|[cablyai.com](https://cablyai.com)|No auth required|`g4f.Provider.CablyAI`|`cably-80b`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|
+|[blackbox.ai](https://www.blackbox.ai)|No auth required|`g4f.Provider.Blackbox`|`blackboxai, gemini-1.5-flash, gemini-1.5-pro, blackboxai-pro, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b, llama-3.3-70b, mixtral-small-28b, deepseek-chat, dbrx-instruct, qwq-32b, hermes-2-dpo, deepseek-r1` _**(+34)**_|`flux`|`blackboxai, gpt-4o, gemini-1.5-pro, gemini-1.5-flash, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b`|✔|![](https://img.shields.io/badge/Active-brightgreen)|
+|[api.blackbox.ai](https://api.blackbox.ai)|No auth required|`g4f.Provider.BlackboxAPI`|`deepseek-v3, deepseek-r1, deepseek-chat, mixtral-small-28b, dbrx-instruct, qwq-32b, hermes-2-dpo`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|
+|[cablyai.com](https://cablyai.com)|Optional API key|`g4f.Provider.CablyAI`|`gpt-4o-mini, llama-3.1-8b, deepseek-v3, deepseek-r1, o3-mini-low` _**(2+)**_|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|
|[chatglm.cn](https://chatglm.cn)|No auth required|`g4f.Provider.ChatGLM`|`glm-4`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|
|[chatgpt.com](https://chatgpt.com)|No auth required|`g4f.Provider.ChatGpt`|✔ _**(+7)**_|❌|❌|✔|![Error](https://img.shields.io/badge/HTTPError-f48d37)|
|[chatgpt.es](https://chatgpt.es)|No auth required|`g4f.Provider.ChatGptEs`|`gpt-4, gpt-4o, gpt-4o-mini`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|
@@ -126,18 +127,19 @@ This document provides an overview of various AI providers and models, including
|-------|---------------|-----------|---------|
|gpt-3|OpenAI|1+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-3-5-turbo)|
|gpt-3.5-turbo|OpenAI|1+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-3-5-turbo)|
-|gpt-4|OpenAI|11+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4)|
-|gpt-4o|OpenAI|9+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o)|
-|gpt-4o-mini|OpenAI|8+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o-mini)|
+|gpt-4|OpenAI|10+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4)|
+|gpt-4o|OpenAI|8+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o)|
+|gpt-4o-mini|OpenAI|9+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o-mini)|
|o1|OpenAI|1+ Providers|[openai.com](https://openai.com/index/introducing-openai-o1-preview/)|
|o1-preview|OpenAI|1+ Providers|[openai.com](https://openai.com/index/introducing-openai-o1-preview/)|
|o1-mini|OpenAI|1+ Providers|[openai.com](https://openai.com/index/openai-o1-mini-advancing-cost-efficient-reasoning/)|
+|o3-mini-low|OpenAI|1+ Providers|[openai.com](https://openai.com/index/openai-o3-mini/)|
|gigachat|GigaChat|1+ Providers|[developers.sber.ru/gigachat](https://developers.sber.ru/gigachat)|
|meta-ai|Meta|1+ Providers|[ai.meta.com](https://ai.meta.com/)|
|llama-2-7b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-2-7b)|
|llama-3-8b|Meta Llama|2+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3/)|
|llama-3-70b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Meta-Llama-3-70B)|
-|llama-3.1-8b|Meta Llama|6+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)|
+|llama-3.1-8b|Meta Llama|7+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)|
|llama-3.1-70b|Meta Llama|6+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)|
|llama-3.1-405b|Meta Llama|2+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-3.1-405B)|
|llama-3.2-1b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-3.2-1B)|
@@ -145,10 +147,10 @@ This document provides an overview of various AI providers and models, including
|llama-3.2-11b|Meta Llama|3+ Providers|[ai.meta.com](https://ai.meta.com/blog/llama-3-2-connect-2024-vision-edge-mobile-devices/)|
|llama-3.2-90b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-3.2-90B-Vision)|
|llama-3.3-70b|Meta Llama|6+ Providers|[ai.meta.com](https://ai.meta.com/blog/llama-3-3/)|
-|mixtral-7b|Mistral|1+|[mistral.ai](https://mistral.ai/news/mixtral-of-experts/)|
|mixtral-8x7b|Mistral|2+ Providers|[mistral.ai](https://mistral.ai/news/mixtral-of-experts/)|
|mistral-nemo|Mistral|3+ Providers|[huggingface.co](https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407)|
-|hermes-2-dpo|NousResearch|1+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO)|
+|mixtral-small-28b|Mistral|2+ Providers|[mistral.ai](https://mistral.ai/news/mixtral-small-28b/)|
+|hermes-2-dpo|NousResearch|2+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO)|
|phi-3.5-mini|Microsoft|1+ Providers|[huggingface.co](https://huggingface.co/microsoft/Phi-3.5-mini-instruct)|
|wizardlm-2-7b|Microsoft|1+ Providers|[wizardlm.github.io](https://wizardlm.github.io/WizardLM2/)|
|wizardlm-2-8x22b|Microsoft|2+ Providers|[wizardlm.github.io](https://wizardlm.github.io/WizardLM2/)|
@@ -161,7 +163,7 @@ This document provides an overview of various AI providers and models, including
|claude-3-haiku|Anthropic|2+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-haiku)|
|claude-3-sonnet|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-family)|
|claude-3-opus|Anthropic|2+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-family)|
-|claude-3.5-sonnet|Anthropic|3+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-5-sonnet)|
+|claude-3.5-sonnet|Anthropic|2+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-5-sonnet)|
|reka-core|Reka AI|1+ Providers|[reka.ai](https://www.reka.ai/ourmodels)|
|blackboxai|Blackbox AI|1+ Providers|[docs.blackbox.chat](https://docs.blackbox.chat/blackbox-ai-1)|
|blackboxai-pro|Blackbox AI|1+ Providers|[docs.blackbox.chat](https://docs.blackbox.chat/blackbox-ai-1)|
@@ -174,18 +176,18 @@ This document provides an overview of various AI providers and models, including
|qwen-2.5-72b|Qwen|3+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen2.5-72B-Instruct)|
|qwen-2.5-coder-32b|Qwen|4+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen2.5-Coder-32B)|
|qwen-2.5-1m-demo|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen2.5-1M-Demo)|
-|qwq-32b|Qwen|4+ Providers|[huggingface.co](https://huggingface.co/Qwen/QwQ-32B-Preview)|
+|qwq-32b|Qwen|5+ Providers|[huggingface.co](https://huggingface.co/Qwen/QwQ-32B-Preview)|
|qvq-72b|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/QVQ-72B-Preview)|
|pi|Inflection|1+ Providers|[inflection.ai](https://inflection.ai/blog/inflection-2-5)|
-|deepseek-chat|DeepSeek|3+ Providers|[huggingface.co](https://huggingface.co/deepseek-ai/deepseek-llm-67b-chat)|
-|deepseek-v3|DeepSeek|2+ Providers|[api-docs.deepseek.com](https://api-docs.deepseek.com/news/news250120)|
-|deepseek-r1|DeepSeek|6+ Providers|[api-docs.deepseek.com](https://api-docs.deepseek.com/news/news250120)|
+|deepseek-chat|DeepSeek|4+ Providers|[huggingface.co](https://huggingface.co/deepseek-ai/deepseek-llm-67b-chat)|
+|deepseek-v3|DeepSeek|4+ Providers|[api-docs.deepseek.com](https://api-docs.deepseek.com/news/news250120)|
+|deepseek-r1|DeepSeek|8+ Providers|[api-docs.deepseek.com](https://api-docs.deepseek.com/news/news250120)|
|grok-2|x.ai|1+ Providers|[x.ai](https://x.ai/blog/grok-2)|
|sonar|Perplexity AI|1+ Providers|[docs.perplexity.ai](https://docs.perplexity.ai/)|
|sonar-pro|Perplexity AI|1+ Providers|[docs.perplexity.ai](https://docs.perplexity.ai/)|
|sonar-reasoning|Perplexity AI|1+ Providers|[docs.perplexity.ai](https://docs.perplexity.ai/)|
|nemotron-70b|Nvidia|3+ Providers|[build.nvidia.com](https://build.nvidia.com/nvidia/llama-3_1-nemotron-70b-instruct)|
-|dbrx-instruct|Databricks|1+ Providers|[huggingface.co](https://huggingface.co/databricks/dbrx-instruct)|
+|dbrx-instruct|Databricks|2+ Providers|[huggingface.co](https://huggingface.co/databricks/dbrx-instruct)|
|p1|PollinationsAI|1+ Providers|[pollinations.ai](https://pollinations.ai/)|
|cably-80b|CablyAI|1+ Providers|[cablyai.com](https://cablyai.com)|
|glm-4|THUDM|1+ Providers|[github.com/THUDM](https://github.com/THUDM/GLM-4)|
@@ -193,7 +195,6 @@ This document provides an overview of various AI providers and models, including
|evil|Evil Mode - Experimental|1+ Providers||
---
### Image Models
| Model | Base Provider | Providers | Website |
|-------|---------------|-----------|---------|
@@ -207,6 +208,7 @@ This document provides an overview of various AI providers and models, including
|midjourney|Midjourney|1+ Providers|[docs.midjourney.com](https://docs.midjourney.com/docs/model-versions)|
## Conclusion and Usage Tips
This document provides a comprehensive overview of various AI providers and models available for text generation, image generation, and vision tasks. **When choosing a provider or model, consider the following factors:**
1. **Availability**: Check the status of the provider to ensure it's currently active and accessible.
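For instance, a minimal availability check that pins one provider through g4f's client API (the model and prompt here are just placeholders; any model the provider's row lists will do):

```python
from g4f.client import Client
from g4f.Provider import Blackbox

# Pin an explicit provider instead of letting g4f auto-select one.
client = Client(provider=Blackbox)
response = client.chat.completions.create(
    model="deepseek-r1",
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)
```

If the provider is down, swapping in another entry from the table (or omitting `provider` entirely) is usually enough.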

@@ -38,16 +38,17 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
default_model = "blackboxai"
default_vision_model = default_model
default_image_model = 'ImageGeneration'
image_models = [default_image_model, "ImageGeneration2"]
vision_models = [default_vision_model, 'gpt-4o', 'gemini-pro', 'deepseek-v3', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b']
reasoning_models = ['deepseek-r1']
image_models = [default_image_model]
vision_models = [default_vision_model, 'gpt-4o', 'o3-mini', 'gemini-pro', 'DeepSeek-V3', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b']
reasoning_models = ['DeepSeek-R1']
userSelectedModel = ['gpt-4o', 'gemini-pro', 'claude-sonnet-3.5', 'deepseek-r1', 'deepseek-v3', 'blackboxai-pro']
userSelectedModel = ['gpt-4o', 'o3-mini', 'claude-sonnet-3.5', 'gemini-pro', 'blackboxai-pro']
agentMode = {
'ImageGeneration': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
'DeepSeek-V3': {'mode': True, 'id': "deepseek-chat", 'name': "DeepSeek-V3"},
'DeepSeek-R1': {'mode': True, 'id': "deepseek-reasoner", 'name': "DeepSeek-R1"},
'Meta-Llama-3.3-70B-Instruct-Turbo': {'mode': True, 'id': "meta-llama/Llama-3.3-70B-Instruct-Turbo", 'name': "Meta-Llama-3.3-70B-Instruct-Turbo"},
'Mistral-(7B)-Instruct-v0.2': {'mode': True, 'id': "mistralai/Mistral-7B-Instruct-v0.2", 'name': "Mistral-(7B)-Instruct-v0.2"},
'Mistral-Small-24B-Instruct-2501': {'mode': True, 'id': "mistralai/Mistral-Small-24B-Instruct-2501", 'name': "Mistral-Small-24B-Instruct-2501"},
'DeepSeek-LLM-Chat-(67B)': {'mode': True, 'id': "deepseek-ai/deepseek-llm-67b-chat", 'name': "DeepSeek-LLM-Chat-(67B)"},
'DBRX-Instruct': {'mode': True, 'id': "databricks/dbrx-instruct", 'name': "DBRX-Instruct"},
'Qwen-QwQ-32B-Preview': {'mode': True, 'id': "Qwen/QwQ-32B-Preview", 'name': "Qwen-QwQ-32B-Preview"},
@@ -96,12 +97,12 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
    models = list(dict.fromkeys([default_model, *userSelectedModel, *reasoning_models, *image_models, *list(agentMode.keys()), *list(trendingAgentMode.keys())]))

    model_aliases = {
        "gpt-4": "gpt-4o",
-        "gemini-1.5-flash": "gemini-1.5-flash",
        "gemini-1.5-pro": "gemini-pro",
        "claude-3.5-sonnet": "claude-sonnet-3.5",
+        "deepseek-v3": "DeepSeek-V3",
+        "deepseek-r1": "DeepSeek-R1",
        "llama-3.3-70b": "Meta-Llama-3.3-70B-Instruct-Turbo",
-        "mixtral-7b": "Mistral-(7B)-Instruct-v0.2",
+        "mixtral-small-28b": "Mistral-Small-24B-Instruct-2501",
        "deepseek-chat": "DeepSeek-LLM-Chat-(67B)",
        "dbrx-instruct": "DBRX-Instruct",
        "qwq-32b": "Qwen-QwQ-32B-Preview",
@@ -196,7 +197,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
        }

        async with ClientSession(headers=headers) as session:
-            if model == "ImageGeneration2":
+            if model in cls.image_models:
                prompt = format_image_prompt(messages, prompt)
                data = {
                    "query": format_image_prompt(messages, prompt),
@@ -294,14 +295,30 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                if not text_to_yield or text_to_yield.isspace():
                    return

                if model in cls.image_models:
                    image_url_match = re.search(r'!\[.*?\]\((.*?)\)', text_to_yield)
                    if image_url_match:
                        image_url = image_url_match.group(1)
                        prompt = format_image_prompt(messages, prompt)
                        yield ImageResponse(images=[image_url], alt=prompt)
-                if model in cls.reasoning_models and "\n\n\n" in text_to_yield:
-                    think_split = text_to_yield.split("\n\n\n", 1)
-                    if len(think_split) > 1:
-                        think_content, answer = think_split[0].strip(), think_split[1].strip()
-                        yield Reasoning(status=think_content)
-                        yield answer
+                else:
+                    if "Generated by BLACKBOX.AI" in text_to_yield:
+                        yield text_to_yield
+                    elif "<think>" in text_to_yield:
+                        pre_think, rest = text_to_yield.split('<think>', 1)
+                        think_content, post_think = rest.split('</think>', 1)
+                        pre_think = pre_think.strip()
+                        think_content = think_content.strip()
+                        post_think = post_think.strip()
+                        if pre_think:
+                            yield pre_think
+                        if think_content:
+                            yield Reasoning(status=think_content)
+                        if post_think:
+                            yield post_think
+                    elif "Generated by BLACKBOX.AI" in text_to_yield:
+                        conversation.validated_value = await cls.fetch_validated(force_refresh=True)
+                        if conversation.validated_value:
+                            data["validated"] = conversation.validated_value

g4f/Provider/BlackboxAPI.py (new file, +103)
@@ -0,0 +1,103 @@
from __future__ import annotations

import json
from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..requests.raise_for_status import raise_for_status
from ..providers.response import Reasoning
from .helper import format_prompt

class BlackboxAPI(AsyncGeneratorProvider, ProviderModelMixin):
    label = "Blackbox AI API"
    url = "https://api.blackbox.ai"
    api_endpoint = "https://api.blackbox.ai/api/chat"

    working = True
    needs_auth = False
    supports_stream = False
    supports_system_message = True
    supports_message_history = True

    default_model = 'deepseek-ai/DeepSeek-V3'
    reasoning_models = ['deepseek-ai/DeepSeek-R1']
    models = [
        default_model,
        'mistralai/Mistral-Small-24B-Instruct-2501',
        'deepseek-ai/deepseek-llm-67b-chat',
        'databricks/dbrx-instruct',
        'Qwen/QwQ-32B-Preview',
        'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO'
    ] + reasoning_models

    model_aliases = {
        "deepseek-v3": "deepseek-ai/DeepSeek-V3",
        "deepseek-r1": "deepseek-ai/DeepSeek-R1",
        "deepseek-chat": "deepseek-ai/deepseek-llm-67b-chat",
        "mixtral-small-28b": "mistralai/Mistral-Small-24B-Instruct-2501",
        "dbrx-instruct": "databricks/dbrx-instruct",
        "qwq-32b": "Qwen/QwQ-32B-Preview",
        "hermes-2-dpo": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
    }

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        max_tokens: int = None,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)

        headers = {
            "Content-Type": "application/json",
        }

        async with ClientSession(headers=headers) as session:
            data = {
                "messages": messages,
                "model": model,
                "max_tokens": max_tokens
            }
            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                await raise_for_status(response)

                is_reasoning = False
                current_reasoning = ""

                async for chunk in response.content:
                    if not chunk:
                        continue
                    text = chunk.decode(errors='ignore')

                    # Fold <think>...</think> spans into Reasoning chunks for reasoning models.
                    if model in cls.reasoning_models:
                        if "<think>" in text:
                            text = text.replace("<think>", "")
                            is_reasoning = True
                            current_reasoning = text
                            continue

                        if "</think>" in text:
                            text = text.replace("</think>", "")
                            is_reasoning = False
                            current_reasoning += text
                            yield Reasoning(status=current_reasoning.strip())
                            current_reasoning = ""
                            continue

                        if is_reasoning:
                            current_reasoning += text
                            continue

                    try:
                        if text:
                            yield text
                    except Exception:
                        return

                if is_reasoning and current_reasoning:
                    yield Reasoning(status=current_reasoning.strip())
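A possible smoke test for the new provider, driving the async generator directly (the prompt is a placeholder; reasoning chunks arrive as `Reasoning` objects, everything else as plain strings):

```python
import asyncio
from g4f.Provider import BlackboxAPI
from g4f.providers.response import Reasoning

async def main():
    async for chunk in BlackboxAPI.create_async_generator(
        model="deepseek-r1",  # resolved to 'deepseek-ai/DeepSeek-R1' via model_aliases
        messages=[{"role": "user", "content": "Why is the sky blue?"}],
    ):
        if isinstance(chunk, Reasoning):
            print(f"[reasoning] {chunk}")
        else:
            print(chunk, end="")

asyncio.run(main())
```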

@@ -1,37 +1,166 @@
from __future__ import annotations

+import json
+from typing import AsyncGenerator
+from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
-from .template import OpenaiTemplate
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..requests.raise_for_status import raise_for_status
+from ..providers.response import FinishReason, Reasoning

-class CablyAI(OpenaiTemplate):
+class CablyAI(AsyncGeneratorProvider, ProviderModelMixin):
    label = "CablyAI"
    url = "https://cablyai.com"
-    login_url = None
-    needs_auth = False
-    api_base = "https://cablyai.com/v1"
-    working = True
+    api_endpoint = "https://cablyai.com/v1/chat/completions"
+    api_key = "sk-your-openai-api-key"

-    default_model = "Cably-80B"
-    models = [default_model]
-    model_aliases = {"cably-80b": default_model}
+    working = True
+    needs_auth = False
+    supports_stream = True
+    supports_system_message = True
+    supports_message_history = True
+
+    default_model = 'gpt-4o-mini'
+    reasoning_models = ['deepseek-r1-uncensored']
+    models = [
+        default_model,
+        'searchgpt',
+        'llama-3.1-8b-instruct',
+        'deepseek-v3',
+        'tinyswallow1.5b',
+        'andy-3.5',
+        'o3-mini-low',
+    ] + reasoning_models
+
+    model_aliases = {
+        "gpt-4o-mini": "searchgpt",
+        "llama-3.1-8b": "llama-3.1-8b-instruct",
+        "deepseek-r1": "deepseek-r1-uncensored",
+    }

    @classmethod
-    def create_async_generator(
+    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
+        api_key: str = None,
+        stream: bool = True,
+        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
+        model = cls.get_model(model)
+        api_key = api_key or cls.api_key
        headers = {
-            'Accept': '*/*',
-            'Accept-Language': 'en-US,en;q=0.9',
-            'Content-Type': 'application/json',
-            'Origin': 'https://cablyai.com',
-            'Referer': 'https://cablyai.com/chat',
-            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
+            "Accept": "*/*",
+            "Accept-Language": "en-US,en;q=0.9",
+            "Authorization": f"Bearer {api_key}",
+            "Content-Type": "application/json",
+            "Origin": cls.url,
+            "Referer": f"{cls.url}/chat",
+            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
        }
-        return super().create_async_generator(
-            model=model,
-            messages=messages,
-            headers=headers,
-            **kwargs
-        )
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "model": model,
+                "messages": messages,
+                "stream": stream
+            }
+            async with session.post(
+                cls.api_endpoint,
+                json=data,
+                proxy=proxy
+            ) as response:
+                await raise_for_status(response)
+                if stream:
+                    reasoning_buffer = []
+                    in_reasoning = False
+                    async for line in response.content:
+                        if not line:
+                            continue
+                        line = line.decode('utf-8').strip()
+                        if not line.startswith("data: "):
+                            continue
+                        if line == "data: [DONE]":
+                            if in_reasoning and reasoning_buffer:
+                                yield Reasoning(status="".join(reasoning_buffer).strip())
+                            yield FinishReason("stop")
+                            return
+                        try:
+                            json_data = json.loads(line[6:])
+                            delta = json_data["choices"][0].get("delta", {})
+                            content = delta.get("content", "")
+                            finish_reason = json_data["choices"][0].get("finish_reason")
+                            if finish_reason:
+                                if in_reasoning and reasoning_buffer:
+                                    yield Reasoning(status="".join(reasoning_buffer).strip())
+                                yield FinishReason(finish_reason)
+                                return
+                            if model in cls.reasoning_models:
+                                # Handle an opening <think> tag in this delta
+                                if "<think>" in content:
+                                    pre, _, post = content.partition("<think>")
+                                    if pre:
+                                        yield pre
+                                    in_reasoning = True
+                                    content = post
+                                # Handle a closing </think> tag in this delta
+                                if "</think>" in content:
+                                    in_reasoning = False
+                                    thought, _, post = content.partition("</think>")
+                                    if thought:
+                                        reasoning_buffer.append(thought)
+                                    if reasoning_buffer:
+                                        yield Reasoning(status="".join(reasoning_buffer).strip())
+                                        reasoning_buffer.clear()
+                                    if post:
+                                        yield post
+                                    continue
+                                # Buffer deltas while inside a <think> block
+                                if in_reasoning:
+                                    reasoning_buffer.append(content)
+                                else:
+                                    if content:
+                                        yield content
+                            else:
+                                if content:
+                                    yield content
+                        except json.JSONDecodeError:
+                            continue
+                        except Exception:
+                            yield FinishReason("error")
+                            return
+                else:
+                    try:
+                        response_data = await response.json()
+                        message = response_data["choices"][0]["message"]
+                        content = message["content"]
+                        if model in cls.reasoning_models and "<think>" in content:
+                            think_start = content.find("<think>") + 7
+                            think_end = content.find("</think>")
+                            if think_start > 6 and think_end > 0:
+                                reasoning = content[think_start:think_end].strip()
+                                yield Reasoning(status=reasoning)
+                                content = content[think_end + 8:].strip()
+                        yield content
+                        yield FinishReason("stop")
+                    except Exception:
+                        yield FinishReason("error")

@@ -15,6 +15,7 @@ from .mini_max import HailuoAI, MiniMax
from .template import OpenaiTemplate, BackendApi
from .Blackbox import Blackbox
+from .BlackboxAPI import BlackboxAPI
from .CablyAI import CablyAI
from .ChatGLM import ChatGLM
from .ChatGpt import ChatGpt

@@ -6,6 +6,7 @@ from .Provider import IterListProvider, ProviderType
from .Provider import (
    ### no auth required ###
    Blackbox,
+    BlackboxAPI,
    CablyAI,
    ChatGLM,
    ChatGptEs,
@@ -123,20 +124,20 @@ gpt_35_turbo = Model(
gpt_4 = Model(
    name = 'gpt-4',
    base_provider = 'OpenAI',
-    best_provider = IterListProvider([DDG, Blackbox, Jmuz, ChatGptEs, ChatGptt, PollinationsAI, Yqcloud, Copilot, OpenaiChat, Liaobots, Mhystical])
+    best_provider = IterListProvider([DDG, Jmuz, ChatGptEs, ChatGptt, PollinationsAI, Yqcloud, Copilot, OpenaiChat, Liaobots, Mhystical])
)

# gpt-4o
gpt_4o = VisionModel(
    name = 'gpt-4o',
    base_provider = 'OpenAI',
-    best_provider = IterListProvider([Blackbox, ChatGptt, Jmuz, ChatGptEs, PollinationsAI, DarkAI, Copilot, Liaobots, OpenaiChat])
+    best_provider = IterListProvider([ChatGptt, Jmuz, ChatGptEs, PollinationsAI, DarkAI, Copilot, Liaobots, OpenaiChat])
)

gpt_4o_mini = Model(
    name = 'gpt-4o-mini',
    base_provider = 'OpenAI',
-    best_provider = IterListProvider([DDG, ChatGptEs, ChatGptt, Jmuz, PollinationsAI, OIVSCode, Liaobots, OpenaiChat])
+    best_provider = IterListProvider([DDG, ChatGptEs, ChatGptt, Jmuz, PollinationsAI, OIVSCode, CablyAI, Liaobots, OpenaiChat])
)
# o1
@@ -158,6 +159,13 @@ o1_mini = Model(
    best_provider = Liaobots
)

+# o3
+o3_mini_low = Model(
+    name = 'o3-mini-low',
+    base_provider = 'OpenAI',
+    best_provider = CablyAI
+)

### GigaChat ###
gigachat = Model(
    name = 'GigaChat:latest',
@@ -195,7 +203,7 @@ llama_3_70b = Model(
llama_3_1_8b = Model(
    name = "llama-3.1-8b",
    base_provider = "Meta Llama",
-    best_provider = IterListProvider([Blackbox, DeepInfraChat, Glider, Jmuz, PollinationsAI, Cloudflare])
+    best_provider = IterListProvider([Blackbox, DeepInfraChat, Glider, Jmuz, PollinationsAI, CablyAI, Cloudflare])
)
llama_3_1_70b = Model(
@@ -243,12 +251,6 @@ llama_3_3_70b = Model(
)

### Mistral ###
-mixtral_7b = Model(
-    name = "mixtral-7b",
-    base_provider = "Mistral",
-    best_provider = Blackbox
-)

mixtral_8x7b = Model(
    name = "mixtral-8x7b",
    base_provider = "Mistral",
@@ -261,11 +263,17 @@ mistral_nemo = Model(
    best_provider = IterListProvider([PollinationsAI, HuggingChat, HuggingFace])
)

+mixtral_small_28b = Model(
+    name = "mixtral-small-28b",
+    base_provider = "Mistral",
+    best_provider = IterListProvider([Blackbox, BlackboxAPI])
+)

### NousResearch ###
hermes_2_dpo = Model(
    name = "hermes-2-dpo",
    base_provider = "NousResearch",
-    best_provider = Blackbox
+    best_provider = IterListProvider([Blackbox, BlackboxAPI])
)
@@ -356,7 +364,7 @@ claude_3_opus = Model(
claude_3_5_sonnet = Model(
    name = 'claude-3.5-sonnet',
    base_provider = 'Anthropic',
-    best_provider = IterListProvider([Blackbox, Jmuz, Liaobots])
+    best_provider = IterListProvider([Jmuz, Liaobots])
)
### Reka AI ###
@@ -434,7 +442,7 @@ qwen_2_5_1m = Model(
qwq_32b = Model(
    name = 'qwq-32b',
    base_provider = 'Qwen',
-    best_provider = IterListProvider([Blackbox, DeepInfraChat, Jmuz, HuggingChat])
+    best_provider = IterListProvider([Blackbox, BlackboxAPI, DeepInfraChat, Jmuz, HuggingChat])
)

qvq_72b = VisionModel(
    name = 'qvq-72b',
@@ -453,19 +461,19 @@ pi = Model(
deepseek_chat = Model(
    name = 'deepseek-chat',
    base_provider = 'DeepSeek',
-    best_provider = IterListProvider([Blackbox, Jmuz, PollinationsAI])
+    best_provider = IterListProvider([Blackbox, BlackboxAPI, Jmuz, PollinationsAI])
)

deepseek_v3 = Model(
    name = 'deepseek-v3',
    base_provider = 'DeepSeek',
-    best_provider = IterListProvider([Blackbox, DeepInfraChat])
+    best_provider = IterListProvider([Blackbox, BlackboxAPI, DeepInfraChat, CablyAI])
)

deepseek_r1 = Model(
    name = 'deepseek-r1',
    base_provider = 'DeepSeek',
-    best_provider = IterListProvider([Blackbox, Glider, PollinationsAI, Jmuz, HuggingChat, HuggingFace])
+    best_provider = IterListProvider([Blackbox, BlackboxAPI, Glider, PollinationsAI, Jmuz, CablyAI, HuggingChat, HuggingFace])
)
### x.ai ###
@@ -505,7 +513,7 @@ nemotron_70b = Model(
dbrx_instruct = Model(
    name = 'dbrx-instruct',
    base_provider = 'Databricks',
-    best_provider = Blackbox
+    best_provider = IterListProvider([Blackbox, BlackboxAPI])
)
### PollinationsAI ###
@@ -657,9 +665,9 @@ class ModelUtils:
        llama_3_3_70b.name: llama_3_3_70b,

        ### Mistral ###
-        mixtral_7b.name: mixtral_7b,
        mixtral_8x7b.name: mixtral_8x7b,
        mistral_nemo.name: mistral_nemo,
+        mixtral_small_28b.name: mixtral_small_28b,

        ### NousResearch ###
        hermes_2_dpo.name: hermes_2_dpo,
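With the registry updated, the new entries resolve through `ModelUtils.convert` like any other model (a quick check; the exact provider list depends on the installed g4f version):

```python
from g4f.models import ModelUtils

model = ModelUtils.convert["mixtral-small-28b"]
print(model.name)           # mixtral-small-28b
print(model.base_provider)  # Mistral
print(model.best_provider)  # IterListProvider over Blackbox and BlackboxAPI
```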