minor improvements for release

This commit is contained in:
Tekky 2024-11-15 11:58:58 +01:00
parent 66d3cecc59
commit 6ac545445a
8 changed files with 196 additions and 187 deletions

View file

@@ -12,6 +12,7 @@ class Pi(AbstractProvider):
supports_stream = True
_session = None
default_model = "pi"
models = [default_model]
@classmethod
def create_completion(

View file

@@ -12,7 +12,6 @@ from .not_working import *
from .local import *
from .AIUncensored import AIUncensored
from .Allyfy import Allyfy
from .Airforce import Airforce
from .Bing import Bing
from .Blackbox import Blackbox

View file

@@ -57,6 +57,7 @@ class Gemini(AsyncGeneratorProvider):
default_model = 'gemini'
image_models = ["gemini"]
default_vision_model = "gemini"
models = ["gemini", "gemini-1.5-flash", "gemini-1.5-pro"]
_cookies: Cookies = None
_snlm0e: str = None
_sid: str = None

View file

@@ -26,6 +26,7 @@ class Poe(AbstractProvider):
needs_auth = True
supports_gpt_35_turbo = True
supports_stream = True
models = models.keys()
@classmethod
def create_completion(

View file

@@ -16,6 +16,11 @@ class Raycast(AbstractProvider):
needs_auth = True
working = True
models = [
"gpt-3.5-turbo",
"gpt-4"
]
@staticmethod
def create_completion(
model: str,
@@ -25,6 +30,9 @@ class Raycast(AbstractProvider):
**kwargs,
) -> CreateResult:
auth = kwargs.get('auth')
if not auth:
raise ValueError("Raycast needs an auth token, pass it with the `auth` parameter")
headers = {
'Accept': 'application/json',
'Accept-Language': 'en-US,en;q=0.9',

View file

@@ -38,6 +38,7 @@ class Theb(AbstractProvider):
supports_gpt_35_turbo = True
supports_gpt_4 = True
supports_stream = True
models = models.keys()
@classmethod
def create_completion(

View file

@@ -4,15 +4,15 @@ import asyncio
import json
import uuid
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt
class Allyfy(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://allyfy.chat"
api_endpoint = "https://chatbot.allyfy.chat/api/v1/message/stream/super/chat"
working = True
working = False
supports_stream = True
supports_system_message = True
supports_message_history = True

View file

@@ -9,7 +9,6 @@ from .Provider import (
AIChatFree,
Airforce,
AIUncensored,
Allyfy,
Bing,
Blackbox,
ChatGpt,
@@ -599,203 +598,202 @@ class ModelUtils:
"""
convert: dict[str, Model] = {
############
### Text ###
############
############
### Text ###
############
### OpenAI ###
# gpt-3.5
'gpt-3.5-turbo': gpt_35_turbo,
### OpenAI ###
# gpt-3.5
'gpt-3.5-turbo': gpt_35_turbo,
# gpt-4
'gpt-4o': gpt_4o,
'gpt-4o-mini': gpt_4o_mini,
'gpt-4': gpt_4,
'gpt-4-turbo': gpt_4_turbo,
# gpt-4
'gpt-4o': gpt_4o,
'gpt-4o-mini': gpt_4o_mini,
'gpt-4': gpt_4,
'gpt-4-turbo': gpt_4_turbo,
# o1
'o1': o1,
'o1-mini': o1_mini,
# o1
'o1': o1,
'o1-mini': o1_mini,
### Meta ###
"meta-ai": meta,
### Meta ###
"meta-ai": meta,
# llama-2
'llama-2-7b': llama_2_7b,
# llama-2
'llama-2-7b': llama_2_7b,
# llama-3
'llama-3-8b': llama_3_8b,
# llama-3
'llama-3-8b': llama_3_8b,
# llama-3.1
'llama-3.1-8b': llama_3_1_8b,
'llama-3.1-70b': llama_3_1_70b,
'llama-3.1-405b': llama_3_1_405b,
# llama-3.1
'llama-3.1-8b': llama_3_1_8b,
'llama-3.1-70b': llama_3_1_70b,
'llama-3.1-405b': llama_3_1_405b,
# llama-3.2
'llama-3.2-1b': llama_3_2_1b,
'llama-3.2-11b': llama_3_2_11b,
# llama-3.2
'llama-3.2-1b': llama_3_2_1b,
'llama-3.2-11b': llama_3_2_11b,
### Mistral ###
'mistral-7b': mistral_7b,
'mixtral-8x7b': mixtral_8x7b,
'mistral-nemo': mistral_nemo,
### Mistral ###
'mistral-7b': mistral_7b,
'mixtral-8x7b': mixtral_8x7b,
'mistral-nemo': mistral_nemo,
### NousResearch ###
'hermes-2-pro': hermes_2_pro,
'hermes-2-dpo': hermes_2_dpo,
'hermes-3': hermes_3,
### NousResearch ###
'hermes-2-pro': hermes_2_pro,
'hermes-2-dpo': hermes_2_dpo,
'hermes-3': hermes_3,
### Microsoft ###
'phi-2': phi_2,
'phi-3.5-mini': phi_3_5_mini,
### Microsoft ###
'phi-2': phi_2,
'phi-3.5-mini': phi_3_5_mini,
### Google ###
# gemini
'gemini': gemini,
'gemini-pro': gemini_pro,
'gemini-flash': gemini_flash,
### Google ###
# gemini
'gemini': gemini,
'gemini-pro': gemini_pro,
'gemini-flash': gemini_flash,
# gemma
'gemma-2b': gemma_2b,
# gemma
'gemma-2b': gemma_2b,
### Anthropic ###
'claude-2.1': claude_2_1,
### Anthropic ###
'claude-2.1': claude_2_1,
# claude 3
'claude-3-opus': claude_3_opus,
'claude-3-sonnet': claude_3_sonnet,
'claude-3-haiku': claude_3_haiku,
# claude 3
'claude-3-opus': claude_3_opus,
'claude-3-sonnet': claude_3_sonnet,
'claude-3-haiku': claude_3_haiku,
# claude 3.5
'claude-3.5-sonnet': claude_3_5_sonnet,
# claude 3.5
'claude-3.5-sonnet': claude_3_5_sonnet,
### Reka AI ###
'reka-core': reka_core,
### Reka AI ###
'reka-core': reka_core,
### Blackbox AI ###
'blackboxai': blackboxai,
'blackboxai-pro': blackboxai_pro,
### Blackbox AI ###
'blackboxai': blackboxai,
'blackboxai-pro': blackboxai_pro,
### CohereForAI ###
'command-r+': command_r_plus,
### CohereForAI ###
'command-r+': command_r_plus,
### GigaChat ###
'gigachat': gigachat,
### GigaChat ###
'gigachat': gigachat,
### Qwen ###
# qwen 1.5
'qwen-1.5-7b': qwen_1_5_7b,
### Qwen ###
# qwen 1.5
'qwen-1.5-7b': qwen_1_5_7b,
# qwen 2
'qwen-2-72b': qwen_2_72b,
# qwen 2
'qwen-2-72b': qwen_2_72b,
# qwen 2.5
'qwen-2.5-coder-32b': qwen_2_5_coder_32b,
# qwen 2.5
'qwen-2.5-coder-32b': qwen_2_5_coder_32b,
### Upstage ###
'solar-mini': solar_mini,
'solar-pro': solar_pro,
### Upstage ###
'solar-mini': solar_mini,
'solar-pro': solar_pro,
### Inflection ###
'pi': pi,
### Inflection ###
'pi': pi,
### DeepSeek ###
'deepseek-coder': deepseek_coder,
### DeepSeek ###
'deepseek-coder': deepseek_coder,
### Yorickvp ###
'llava-13b': llava_13b,
### Yorickvp ###
'llava-13b': llava_13b,
### WizardLM ###
'wizardlm-2-8x22b': wizardlm_2_8x22b,
### WizardLM ###
'wizardlm-2-8x22b': wizardlm_2_8x22b,
### OpenChat ###
'openchat-3.5': openchat_3_5,
### OpenChat ###
'openchat-3.5': openchat_3_5,
### x.ai ###
'grok-2': grok_2,
'grok-2-mini': grok_2_mini,
'grok-beta': grok_beta,
### x.ai ###
'grok-2': grok_2,
'grok-2-mini': grok_2_mini,
'grok-beta': grok_beta,
### Perplexity AI ###
'sonar-online': sonar_online,
'sonar-chat': sonar_chat,
### Perplexity AI ###
'sonar-online': sonar_online,
'sonar-chat': sonar_chat,
### TheBloke ###
'german-7b': german_7b,
### TheBloke ###
'german-7b': german_7b,
### Nvidia ###
'nemotron-70b': nemotron_70b,
### Nvidia ###
'nemotron-70b': nemotron_70b,
### Teknium ###
'openhermes-2.5': openhermes_2_5,
### Teknium ###
'openhermes-2.5': openhermes_2_5,
### Liquid ###
'lfm-40b': lfm_40b,
### Liquid ###
'lfm-40b': lfm_40b,
### DiscoResearch ###
'german-7b': german_7b,
### DiscoResearch ###
'german-7b': german_7b,
### HuggingFaceH4 ###
'zephyr-7b': zephyr_7b,
### HuggingFaceH4 ###
'zephyr-7b': zephyr_7b,
### Inferless ###
'neural-7b': neural_7b,
### Inferless ###
'neural-7b': neural_7b,
#############
### Image ###
#############
#############
### Image ###
#############
### Stability AI ###
'sdxl': sdxl,
'sd-3': sd_3,
### Stability AI ###
'sdxl': sdxl,
'sd-3': sd_3,
### Playground ###
'playground-v2.5': playground_v2_5,
### Playground ###
'playground-v2.5': playground_v2_5,
### Flux AI ###
'flux': flux,
'flux-pro': flux_pro,
'flux-realism': flux_realism,
'flux-anime': flux_anime,
'flux-3d': flux_3d,
'flux-disney': flux_disney,
'flux-pixel': flux_pixel,
'flux-4o': flux_4o,
### Flux AI ###
'flux': flux,
'flux-pro': flux_pro,
'flux-realism': flux_realism,
'flux-anime': flux_anime,
'flux-3d': flux_3d,
'flux-disney': flux_disney,
'flux-pixel': flux_pixel,
'flux-4o': flux_4o,
### Other ###
'any-dark': any_dark,
### Other ###
'any-dark': any_dark,
}
_all_models = list(ModelUtils.convert.keys())