Update model list; fix model list in CablyAI

This commit is contained in:
hlohaus 2025-02-05 14:06:41 +01:00
parent 88918cb897
commit be8c3f7cd2
3 changed files with 30 additions and 142 deletions

View file

@@ -1,27 +1,17 @@
from __future__ import annotations
import json
from typing import AsyncGenerator
from aiohttp import ClientSession
from ..errors import ModelNotSupportedError
from .template import OpenaiTemplate
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..requests.raise_for_status import raise_for_status
from ..providers.response import FinishReason, Reasoning
class CablyAI(AsyncGeneratorProvider, ProviderModelMixin):
class CablyAI(OpenaiTemplate):
label = "CablyAI"
url = "https://cablyai.com"
login_url = url
api_endpoint = "https://cablyai.com/v1/chat/completions"
api_base = "https://cablyai.com/v1"
api_key = "sk-your-openai-api-key"
working = True
needs_auth = False
supports_stream = True
supports_system_message = True
supports_message_history = True
default_model = 'gpt-4o-mini'
reasoning_models = ['deepseek-r1-uncensored']
@@ -36,131 +26,23 @@ class CablyAI(AsyncGeneratorProvider, ProviderModelMixin):
] + reasoning_models
model_aliases = {
"searchgpt": "searchgpt (free)",
"gpt-4o-mini": "searchgpt",
"llama-3.1-8b": "llama-3.1-8b-instruct",
"deepseek-r1": "deepseek-r1-uncensored",
}
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
api_key: str = None,
stream: bool = True,
proxy: str = None,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
api_key = api_key or cls.api_key
headers = {
"Accept": "*/*",
"Accept-Language": "en-US,en;q=0.9",
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json",
"Origin": cls.url,
"Referer": f"{cls.url}/chat",
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
}
def get_models(cls, api_key: str = None, api_base: str = None) -> list[str]:
    """Return the provider's model list, prefixing free-tier models.

    Fetches the upstream model list via the OpenaiTemplate parent, then
    prepends a "<model> (free)" alias for every model that also appears in
    cls.fallback_models, followed by the plain upstream names.
    """
    models = super().get_models(api_key, api_base)  # dropped stray semicolon
    # Free-tier aliases first so they take precedence in listings.
    return [f"{m} (free)" for m in models if m in cls.fallback_models] + models
async with ClientSession(headers=headers) as session:
data = {
"model": model,
"messages": messages,
"stream": stream
}
async with session.post(
cls.api_endpoint,
json=data,
proxy=proxy
) as response:
await raise_for_status(response)
if stream:
reasoning_buffer = []
in_reasoning = False
async for line in response.content:
if not line:
continue
line = line.decode('utf-8').strip()
if not line.startswith("data: "):
continue
if line == "data: [DONE]":
if in_reasoning and reasoning_buffer:
yield Reasoning(status="".join(reasoning_buffer).strip())
yield FinishReason("stop")
return
try:
json_data = json.loads(line[6:])
delta = json_data["choices"][0].get("delta", {})
content = delta.get("content", "")
finish_reason = json_data["choices"][0].get("finish_reason")
if finish_reason:
if in_reasoning and reasoning_buffer:
yield Reasoning(status="".join(reasoning_buffer).strip())
yield FinishReason(finish_reason)
return
if model in cls.reasoning_models:
# Processing the beginning of a tag
if "<think>" in content:
pre, _, post = content.partition("<think>")
if pre:
yield pre
in_reasoning = True
content = post
# Tag end processing
if "</think>" in content:
in_reasoning = False
thought, _, post = content.partition("</think>")
if thought:
reasoning_buffer.append(thought)
if reasoning_buffer:
yield Reasoning(status="".join(reasoning_buffer).strip())
reasoning_buffer.clear()
if post:
yield post
continue
# Buffering content inside tags
if in_reasoning:
reasoning_buffer.append(content)
else:
if content:
yield content
else:
if content:
yield content
except json.JSONDecodeError:
continue
except Exception:
yield FinishReason("error")
return
else:
try:
response_data = await response.json()
message = response_data["choices"][0]["message"]
content = message["content"]
if model in cls.reasoning_models and "<think>" in content:
think_start = content.find("<think>") + 7
think_end = content.find("</think>")
if think_start > 6 and think_end > 0:
reasoning = content[think_start:think_end].strip()
yield Reasoning(status=reasoning)
content = content[think_end + 8:].strip()
yield content
yield FinishReason("stop")
except Exception:
yield FinishReason("error")
@classmethod
def get_model(cls, model: str, **kwargs) -> str:
    """Resolve a model name, stripping any " (free)" suffix.

    Raises:
        ModelNotSupportedError: if neither the model nor its
        "<model> (free)" alias is known to this provider.
    """
    try:
        model = super().get_model(model, **kwargs)
        return model.split(" (free)")[0]
    except ModelNotSupportedError:
        # BUGFIX: was f"f{model} (free)" — the stray leading "f" inside the
        # f-string produced e.g. "fgpt-4o-mini (free)", so this fallback
        # membership check could never match.
        if f"{model} (free)" in cls.models:
            return model
        raise

View file

@@ -14,6 +14,7 @@ from ... import debug
class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin):
api_base = ""
api_key = None
supports_message_history = True
supports_system_message = True
default_model = ""
@@ -28,6 +29,8 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin
headers = {}
if api_base is None:
api_base = cls.api_base
if api_key is None and cls.api_key is not None:
api_key = cls.api_key
if api_key is not None:
headers["authorization"] = f"Bearer {api_key}"
response = requests.get(f"{api_base}/models", headers=headers, verify=cls.ssl)
@@ -66,6 +69,8 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin
extra_data: dict = {},
**kwargs
) -> AsyncResult:
if api_key is None and cls.api_key is not None:
api_key = cls.api_key
if cls.needs_auth and api_key is None:
raise MissingAuthError('Add a "api_key"')
async with StreamSession(

View file

@@ -25,6 +25,7 @@ from .Provider import (
PerplexityLabs,
Pi,
PollinationsAI,
PollinationsImage,
TeachAnything,
Yqcloud,
@@ -536,7 +537,7 @@ evil = Model(
sdxl_turbo = ImageModel(
name = 'sdxl-turbo',
base_provider = 'Stability AI',
best_provider = IterListProvider([PollinationsAI, ImageLabs])
best_provider = IterListProvider([PollinationsImage, ImageLabs])
)
sd_3_5 = ImageModel(
@@ -549,13 +550,13 @@ sd_3_5 = ImageModel(
flux = ImageModel(
name = 'flux',
base_provider = 'Black Forest Labs',
best_provider = IterListProvider([Blackbox, PollinationsAI, HuggingSpace])
best_provider = IterListProvider([Blackbox, PollinationsImage, HuggingSpace])
)
flux_pro = ImageModel(
name = 'flux-pro',
base_provider = 'Black Forest Labs',
best_provider = PollinationsAI
best_provider = PollinationsImage
)
flux_dev = ImageModel(
@@ -575,14 +576,14 @@ flux_schnell = ImageModel(
dall_e_3 = ImageModel(
name = 'dall-e-3',
base_provider = 'OpenAI',
best_provider = IterListProvider([PollinationsAI, CopilotAccount, OpenaiAccount, MicrosoftDesigner, BingCreateImages])
best_provider = IterListProvider([PollinationsImage, CopilotAccount, OpenaiAccount, MicrosoftDesigner, BingCreateImages])
)
### Midjourney ###
midjourney = ImageModel(
name = 'midjourney',
base_provider = 'Midjourney',
best_provider = PollinationsAI
best_provider = PollinationsImage
)
class ModelUtils:
@@ -754,8 +755,8 @@ demo_models = {
qwq_32b.name: [qwq_32b, [HuggingFace]],
llama_3_3_70b.name: [llama_3_3_70b, [HuggingFace]],
sd_3_5.name: [sd_3_5, [HuggingSpace, HuggingFace]],
flux_dev.name: [flux_dev, [PollinationsAI, HuggingSpace, HuggingFace]],
flux_schnell.name: [flux_schnell, [PollinationsAI, HuggingFace, HuggingSpace, PollinationsAI]],
flux_dev.name: [flux_dev, [PollinationsImage, HuggingSpace, HuggingFace]],
flux_schnell.name: [flux_schnell, [PollinationsImage, HuggingFace, HuggingSpace]],
}
# Create a list of all models and his providers