Mirror of https://github.com/xtekky/gpt4free.git (synced 2026-01-06 01:02:13 -08:00)

Commit 18b309257c (parent d29f0af7d3)
Update (g4f/models.py g4f/Provider/ docs/providers-and-models.md)

18 changed files with 5 additions and 1470 deletions
docs/providers-and-models.md
@@ -49,20 +49,6 @@ This document provides an overview of various AI providers and models, including
 |[liaobots.work](https://liaobots.work)|`g4f.Provider.Liaobots`|`gpt-3.5-turbo, gpt-4o-mini, gpt-4o, gpt-4-turbo, grok-2, grok-2-mini, claude-3-opus, claude-3-sonnet, claude-3-5-sonnet, claude-3-haiku, claude-2.1, gemini-flash, gemini-pro`|❌|❌|✔||❌|
 |[magickpen.com](https://magickpen.com)|`g4f.Provider.MagickPen`|`gpt-4o-mini`|❌|❌|✔||❌|
 |[meta.ai](https://www.meta.ai)|`g4f.Provider.MetaAI`|✔|✔|?|?||✔|
-|[nexra.aryahcr.cc/bing](https://nexra.aryahcr.cc/documentation/bing/en)|`g4f.Provider.NexraBing`|✔|❌|❌|✔||❌|
-|[nexra.aryahcr.cc/blackbox](https://nexra.aryahcr.cc/documentation/blackbox/en)|`g4f.Provider.NexraBlackbox`|`blackboxai`|❌|❌|✔||❌|
-|[nexra.aryahcr.cc/chatgpt](https://nexra.aryahcr.cc/documentation/chatgpt/en)|`g4f.Provider.NexraChatGPT`|`gpt-4, gpt-3.5-turbo, gpt-3, gpt-4o`|❌|❌|✔||❌|
-|[nexra.aryahcr.cc/dall-e](https://nexra.aryahcr.cc/documentation/dall-e/en)|`g4f.Provider.NexraDallE`|❌|`dalle`|❌|❌||❌|
-|[nexra.aryahcr.cc/dall-e](https://nexra.aryahcr.cc/documentation/dall-e/en)|`g4f.Provider.NexraDallE2`|❌|`dalle-2`|❌|❌||❌|
-|[nexra.aryahcr.cc/emi](https://nexra.aryahcr.cc/documentation/emi/en)|`g4f.Provider.NexraEmi`|❌|`emi`|❌|❌||❌|
-|[nexra.aryahcr.cc/flux-pro](https://nexra.aryahcr.cc/documentation/flux-pro/en)|`g4f.Provider.NexraFluxPro`|❌|`flux-pro`|❌|❌||❌|
-|[nexra.aryahcr.cc/gemini-pro](https://nexra.aryahcr.cc/documentation/gemini-pro/en)|`g4f.Provider.NexraGeminiPro`|`gemini-pro`|❌|❌|❌||❌|
-|[nexra.aryahcr.cc/midjourney](https://nexra.aryahcr.cc/documentation/midjourney/en)|`g4f.Provider.NexraMidjourney`|❌|`midjourney`|❌|❌||❌|
-|[nexra.aryahcr.cc/prodia](https://nexra.aryahcr.cc/documentation/prodia/en)|`g4f.Provider.NexraProdiaAI`|❌|✔|❌|❌||❌|
-|[nexra.aryahcr.cc/qwen](https://nexra.aryahcr.cc/documentation/qwen/en)|`g4f.Provider.NexraQwen`|`qwen`|❌|❌|✔||❌|
-|[nexra.aryahcr.cc/stable-diffusion](https://nexra.aryahcr.cc/documentation/stable-diffusion/en)|`g4f.Provider.NexraSD15`|❌|`sd-1.5`|❌|❌||❌|
-|[nexra.aryahcr.cc/stable-diffusion](https://nexra.aryahcr.cc/documentation/stable-diffusion/en)|`g4f.Provider.NexraSDLora`|❌|`sdxl-lora`|❌|❌||❌|
-|[nexra.aryahcr.cc/stable-diffusion](https://nexra.aryahcr.cc/documentation/stable-diffusion/en)|`g4f.Provider.NexraSDTurbo`|❌|`sdxl-turbo`|❌|❌||❌|
 |[platform.openai.com](https://platform.openai.com/)|`g4f.Provider.Openai`|✔|❌|✔|||✔|
 |[chatgpt.com](https://chatgpt.com/)|`g4f.Provider.OpenaiChat`|`gpt-4o, gpt-4o-mini, gpt-4`|❌|✔|||✔|
 |[www.perplexity.ai](https://www.perplexity.ai)|`g4f.Provider.PerplexityAi`|✔|❌|❌|?||❌|
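Note on the table above: each row's provider column names a class that can be passed straight to the client. A minimal sketch using one of the rows that survives this change (the call shape assumes the `g4f.ChatCompletion` interface of this release; newer releases use `g4f.client.Client`):

```python
# Minimal sketch: pin a provider from the table instead of letting g4f choose.
# Model and provider names come from the liaobots.work row above.
import g4f

response = g4f.ChatCompletion.create(
    model="gpt-4o-mini",
    provider=g4f.Provider.Liaobots,
    messages=[{"role": "user", "content": "Say hello"}],
)
print(response)
```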
g4f/Provider/__init__.py
@@ -11,8 +11,6 @@ from .needs_auth import *
 from .not_working import *
 from .local import *
 
-from .nexra import *
-
 from .AI365VIP import AI365VIP
 from .AIChatFree import AIChatFree
 from .AIUncensored import AIUncensored
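Note: `from .nexra import *` was the single line exporting the Nexra classes into the `g4f.Provider` namespace (the subpackage `__init__.py` deleted at the end of this diff re-exported all fourteen). After this change the old imports fail; a quick check (hypothetical session):

```python
# After this commit, Nexra classes are gone from the g4f.Provider namespace.
try:
    from g4f.Provider import NexraBing  # worked while `from .nexra import *` existed
except ImportError as e:
    print(f"expected failure: {e}")
```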
g4f/Provider/nexra/NexraBing.py
@@ -1,93 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ..helper import format_prompt
-
-class NexraBing(AbstractProvider, ProviderModelMixin):
-    label = "Nexra Bing"
-    url = "https://nexra.aryahcr.cc/documentation/bing/en"
-    api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
-    working = True
-    supports_stream = True
-
-    default_model = 'Balanced'
-    models = [default_model, 'Creative', 'Precise']
-
-    model_aliases = {
-        "gpt-4": "Balanced",
-        "gpt-4": "Creative",
-        "gpt-4": "Precise",
-    }
-
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        if model in cls.models:
-            return model
-        elif model in cls.model_aliases:
-            return cls.model_aliases[model]
-        else:
-            return cls.default_model
-
-    @classmethod
-    def create_completion(
-        cls,
-        model: str,
-        messages: Messages,
-        stream: bool = False,
-        proxy: str = None,
-        markdown: bool = False,
-        **kwargs
-    ) -> CreateResult:
-        model = cls.get_model(model)
-
-        headers = {
-            'Content-Type': 'application/json'
-        }
-
-        data = {
-            "messages": [
-                {
-                    "role": "user",
-                    "content": format_prompt(messages)
-                }
-            ],
-            "conversation_style": model,
-            "markdown": markdown,
-            "stream": stream,
-            "model": "Bing"
-        }
-
-        response = requests.post(cls.api_endpoint, headers=headers, json=data, stream=True)
-
-        return cls.process_response(response)
-
-    @classmethod
-    def process_response(cls, response):
-        if response.status_code != 200:
-            yield f"Error: {response.status_code}"
-            return
-
-        full_message = ""
-        for chunk in response.iter_content(chunk_size=None):
-            if chunk:
-                messages = chunk.decode('utf-8').split('\x1e')
-                for message in messages:
-                    try:
-                        json_data = json.loads(message)
-                        if json_data.get('finish', False):
-                            return
-                        current_message = json_data.get('message', '')
-                        if current_message:
-                            new_content = current_message[len(full_message):]
-                            if new_content:
-                                yield new_content
-                                full_message = current_message
-                    except json.JSONDecodeError:
-                        continue
-
-        if not full_message:
-            yield "No message received"
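Two details in the removed file are worth flagging: `model_aliases` repeats the `"gpt-4"` key three times, so at runtime only the last binding (`"Precise"`) survived, and the stream is split on `"\x1e"`, the ASCII record separator Bing places between JSON frames. A standalone sketch of that framing logic (the sample payload is invented):

```python
import json

# Frames in the Bing-style streaming format used above; payload is made up.
raw = b'{"message": "Hel", "finish": false}\x1e{"message": "Hello", "finish": false}\x1e{"finish": true}\x1e'

full_message = ""
for frame in raw.decode("utf-8").split("\x1e"):
    if not frame:
        continue  # the trailing separator leaves one empty frame
    data = json.loads(frame)
    if data.get("finish", False):
        break
    current = data.get("message", "")
    if current:
        print("delta:", current[len(full_message):])  # only the new suffix
        full_message = current
```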
g4f/Provider/nexra/NexraBlackbox.py
@@ -1,100 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ..helper import format_prompt
-
-class NexraBlackbox(AbstractProvider, ProviderModelMixin):
-    label = "Nexra Blackbox"
-    url = "https://nexra.aryahcr.cc/documentation/blackbox/en"
-    api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
-    working = True
-    supports_stream = True
-
-    default_model = "blackbox"
-    models = [default_model]
-    model_aliases = {"blackboxai": "blackbox",}
-
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        if model in cls.models:
-            return model
-        elif model in cls.model_aliases:
-            return cls.model_aliases[model]
-        else:
-            return cls.default_model
-
-    @classmethod
-    def create_completion(
-        cls,
-        model: str,
-        messages: Messages,
-        stream: bool,
-        proxy: str = None,
-        markdown: bool = False,
-        websearch: bool = False,
-        **kwargs
-    ) -> CreateResult:
-        model = cls.get_model(model)
-
-        headers = {
-            'Content-Type': 'application/json'
-        }
-
-        data = {
-            "messages": [
-                {
-                    "role": "user",
-                    "content": format_prompt(messages)
-                }
-            ],
-            "websearch": websearch,
-            "stream": stream,
-            "markdown": markdown,
-            "model": model
-        }
-
-        response = requests.post(cls.api_endpoint, headers=headers, json=data, stream=stream)
-
-        if stream:
-            return cls.process_streaming_response(response)
-        else:
-            return cls.process_non_streaming_response(response)
-
-    @classmethod
-    def process_non_streaming_response(cls, response):
-        if response.status_code == 200:
-            try:
-                full_response = ""
-                for line in response.iter_lines(decode_unicode=True):
-                    if line:
-                        data = json.loads(line)
-                        if data.get('finish'):
-                            break
-                        message = data.get('message', '')
-                        if message:
-                            full_response = message
-                return full_response
-            except json.JSONDecodeError:
-                return "Error: Unable to decode JSON response"
-        else:
-            return f"Error: {response.status_code}"
-
-    @classmethod
-    def process_streaming_response(cls, response):
-        previous_message = ""
-        for line in response.iter_lines(decode_unicode=True):
-            if line:
-                try:
-                    data = json.loads(line)
-                    if data.get('finish'):
-                        break
-                    message = data.get('message', '')
-                    if message and message != previous_message:
-                        yield message[len(previous_message):]
-                        previous_message = message
-                except json.JSONDecodeError:
-                    pass
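The streaming handler above relies on the API resending the whole accumulated message on every line; the generator slices off the previously seen prefix to produce deltas. The same logic, isolated (sample frames invented):

```python
import json

def deltas(lines):
    # Each line carries the full message so far; yield only the new suffix.
    previous = ""
    for line in lines:
        data = json.loads(line)
        if data.get("finish"):
            break
        message = data.get("message", "")
        if message and message != previous:
            yield message[len(previous):]
            previous = message

frames = ['{"message": "Hi"}', '{"message": "Hi there"}', '{"finish": true}']
print(list(deltas(frames)))  # -> ['Hi', ' there']
```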
g4f/Provider/nexra/NexraChatGPT.py
@@ -1,285 +0,0 @@
-from __future__ import annotations
-
-import asyncio
-import json
-import requests
-from typing import Any, Dict
-
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..helper import format_prompt
-
-
-class NexraChatGPT(AsyncGeneratorProvider, ProviderModelMixin):
-    label = "Nexra ChatGPT"
-    url = "https://nexra.aryahcr.cc/documentation/chatgpt/en"
-    api_endpoint_nexra_chatgpt = "https://nexra.aryahcr.cc/api/chat/gpt"
-    api_endpoint_nexra_chatgpt4o = "https://nexra.aryahcr.cc/api/chat/complements"
-    api_endpoint_nexra_chatgpt_v2 = "https://nexra.aryahcr.cc/api/chat/complements"
-    api_endpoint_nexra_gptweb = "https://nexra.aryahcr.cc/api/chat/gptweb"
-    working = True
-    supports_system_message = True
-    supports_message_history = True
-    supports_stream = True
-
-    default_model = 'gpt-3.5-turbo'
-    nexra_chatgpt = [
-        'gpt-4', 'gpt-4-0613', 'gpt-4-0314', 'gpt-4-32k-0314',
-        default_model, 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0301',
-        'text-davinci-003', 'text-davinci-002', 'code-davinci-002', 'gpt-3', 'text-curie-001', 'text-babbage-001', 'text-ada-001', 'davinci', 'curie', 'babbage', 'ada', 'babbage-002', 'davinci-002'
-    ]
-    nexra_chatgpt4o = ['gpt-4o']
-    nexra_chatgptv2 = ['chatgpt']
-    nexra_gptweb = ['gptweb']
-    models = nexra_chatgpt + nexra_chatgpt4o + nexra_chatgptv2 + nexra_gptweb
-
-    model_aliases = {
-        "gpt-4": "gpt-4-0613",
-        "gpt-4-32k": "gpt-4-32k-0314",
-        "gpt-3.5-turbo": "gpt-3.5-turbo-16k",
-        "gpt-3.5-turbo-0613": "gpt-3.5-turbo-16k-0613",
-        "gpt-3": "text-davinci-003",
-        "text-davinci-002": "code-davinci-002",
-        "text-curie-001": "text-babbage-001",
-        "text-ada-001": "davinci",
-        "curie": "babbage",
-        "ada": "babbage-002",
-        "davinci-002": "davinci-002",
-        "chatgpt": "chatgpt",
-        "gptweb": "gptweb"
-    }
-
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        if model in cls.models:
-            return model
-        elif model in cls.model_aliases:
-            return cls.model_aliases[model]
-        else:
-            return cls.default_model
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        stream: bool = False,
-        proxy: str = None,
-        markdown: bool = False,
-        **kwargs
-    ) -> AsyncResult:
-        if model in cls.nexra_chatgpt:
-            async for chunk in cls._create_async_generator_nexra_chatgpt(model, messages, proxy, **kwargs):
-                yield chunk
-        elif model in cls.nexra_chatgpt4o:
-            async for chunk in cls._create_async_generator_nexra_chatgpt4o(model, messages, stream, proxy, markdown, **kwargs):
-                yield chunk
-        elif model in cls.nexra_chatgptv2:
-            async for chunk in cls._create_async_generator_nexra_chatgpt_v2(model, messages, stream, proxy, markdown, **kwargs):
-                yield chunk
-        elif model in cls.nexra_gptweb:
-            async for chunk in cls._create_async_generator_nexra_gptweb(model, messages, proxy, **kwargs):
-                yield chunk
-
-    @classmethod
-    async def _create_async_generator_nexra_chatgpt(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        markdown: bool = False,
-        **kwargs
-    ) -> AsyncResult:
-        model = cls.get_model(model)
-
-        headers = {
-            "Content-Type": "application/json"
-        }
-
-        prompt = format_prompt(messages)
-        data = {
-            "messages": messages,
-            "prompt": prompt,
-            "model": model,
-            "markdown": markdown
-        }
-
-        loop = asyncio.get_event_loop()
-        try:
-            response = await loop.run_in_executor(None, cls._sync_post_request, cls.api_endpoint_nexra_chatgpt, data, headers, proxy)
-            filtered_response = cls._filter_response(response)
-
-            for chunk in filtered_response:
-                yield chunk
-        except Exception as e:
-            print(f"Error during API request (nexra_chatgpt): {e}")
-
-    @classmethod
-    async def _create_async_generator_nexra_chatgpt4o(
-        cls,
-        model: str,
-        messages: Messages,
-        stream: bool = False,
-        proxy: str = None,
-        markdown: bool = False,
-        **kwargs
-    ) -> AsyncResult:
-        model = cls.get_model(model)
-
-        headers = {
-            "Content-Type": "application/json"
-        }
-
-        prompt = format_prompt(messages)
-        data = {
-            "messages": [
-                {
-                    "role": "user",
-                    "content": prompt
-                }
-            ],
-            "stream": stream,
-            "markdown": markdown,
-            "model": model
-        }
-
-        loop = asyncio.get_event_loop()
-        try:
-            response = await loop.run_in_executor(None, cls._sync_post_request, cls.api_endpoint_nexra_chatgpt4o, data, headers, proxy, stream)
-
-            if stream:
-                async for chunk in cls._process_streaming_response(response):
-                    yield chunk
-            else:
-                for chunk in cls._process_non_streaming_response(response):
-                    yield chunk
-        except Exception as e:
-            print(f"Error during API request (nexra_chatgpt4o): {e}")
-
-    @classmethod
-    async def _create_async_generator_nexra_chatgpt_v2(
-        cls,
-        model: str,
-        messages: Messages,
-        stream: bool = False,
-        proxy: str = None,
-        markdown: bool = False,
-        **kwargs
-    ) -> AsyncResult:
-        model = cls.get_model(model)
-
-        headers = {
-            "Content-Type": "application/json"
-        }
-
-        prompt = format_prompt(messages)
-        data = {
-            "messages": [
-                {
-                    "role": "user",
-                    "content": prompt
-                }
-            ],
-            "stream": stream,
-            "markdown": markdown,
-            "model": model
-        }
-
-        loop = asyncio.get_event_loop()
-        try:
-            response = await loop.run_in_executor(None, cls._sync_post_request, cls.api_endpoint_nexra_chatgpt_v2, data, headers, proxy, stream)
-
-            if stream:
-                async for chunk in cls._process_streaming_response(response):
-                    yield chunk
-            else:
-                for chunk in cls._process_non_streaming_response(response):
-                    yield chunk
-        except Exception as e:
-            print(f"Error during API request (nexra_chatgpt_v2): {e}")
-
-    @classmethod
-    async def _create_async_generator_nexra_gptweb(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        markdown: bool = False,
-        **kwargs
-    ) -> AsyncResult:
-        model = cls.get_model(model)
-
-        headers = {
-            "Content-Type": "application/json"
-        }
-
-        prompt = format_prompt(messages)
-        data = {
-            "prompt": prompt,
-            "markdown": markdown,
-        }
-
-        loop = asyncio.get_event_loop()
-        try:
-            response = await loop.run_in_executor(None, cls._sync_post_request, cls.api_endpoint_nexra_gptweb, data, headers, proxy)
-
-            for chunk in response.iter_content(1024):
-                if chunk:
-                    decoded_chunk = chunk.decode().lstrip('_')
-                    try:
-                        response_json = json.loads(decoded_chunk)
-                        if response_json.get("status"):
-                            yield response_json.get("gpt", "")
-                    except json.JSONDecodeError:
-                        continue
-        except Exception as e:
-            print(f"Error during API request (nexra_gptweb): {e}")
-
-    @staticmethod
-    def _sync_post_request(url: str, data: Dict[str, Any], headers: Dict[str, str], proxy: str = None, stream: bool = False) -> requests.Response:
-        proxies = {
-            "http": proxy,
-            "https": proxy,
-        } if proxy else None
-
-        try:
-            response = requests.post(url, json=data, headers=headers, proxies=proxies, stream=stream)
-            response.raise_for_status()
-            return response
-        except requests.RequestException as e:
-            print(f"Request failed: {e}")
-            raise
-
-    @staticmethod
-    def _process_non_streaming_response(response: requests.Response) -> str:
-        if response.status_code == 200:
-            try:
-                content = response.text.lstrip('_')
-                data = json.loads(content)
-                return data.get('message', '')
-            except json.JSONDecodeError:
-                return "Error: Unable to decode JSON response"
-        else:
-            return f"Error: {response.status_code}"
-
-    @staticmethod
-    async def _process_streaming_response(response: requests.Response):
-        full_message = ""
-        for line in response.iter_lines(decode_unicode=True):
-            if line:
-                try:
-                    line = line.lstrip('_')
-                    data = json.loads(line)
-                    if data.get('finish'):
-                        break
-                    message = data.get('message', '')
-                    if message:
-                        yield message[len(full_message):]
-                        full_message = message
-                except json.JSONDecodeError:
-                    pass
-
-    @staticmethod
-    def _filter_response(response: requests.Response) -> str:
-        response_json = response.json()
-        return response_json.get("gpt", "")
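The file above bridges blocking `requests` calls into async generators with `loop.run_in_executor`; note that `_process_streaming_response` is declared `async` but still walks the response with a plain `for`, so each line read blocks the event loop. The executor pattern in isolation (URL and timeout are placeholders):

```python
import asyncio
import requests

async def post_async(url: str, payload: dict) -> str:
    # Run the blocking requests call on the default thread pool so the
    # event loop stays free; mirrors _sync_post_request above.
    loop = asyncio.get_event_loop()
    response = await loop.run_in_executor(
        None, lambda: requests.post(url, json=payload, timeout=30)
    )
    response.raise_for_status()
    return response.text

# asyncio.run(post_async("https://example.com/api", {"prompt": "hi"}))
```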
g4f/Provider/nexra/NexraDallE.py
@@ -1,63 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ...image import ImageResponse
-
-class NexraDallE(AbstractProvider, ProviderModelMixin):
-    label = "Nexra DALL-E"
-    url = "https://nexra.aryahcr.cc/documentation/dall-e/en"
-    api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
-    working = True
-
-    default_model = "dalle"
-    models = [default_model]
-
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        return cls.default_model
-
-    @classmethod
-    def create_completion(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        response: str = "url",  # base64 or url
-        **kwargs
-    ) -> CreateResult:
-        model = cls.get_model(model)
-
-        headers = {
-            'Content-Type': 'application/json'
-        }
-
-        data = {
-            "prompt": messages[-1]["content"],
-            "model": model,
-            "response": response
-        }
-
-        response = requests.post(cls.api_endpoint, headers=headers, json=data)
-
-        result = cls.process_response(response)
-        yield result
-
-    @classmethod
-    def process_response(cls, response):
-        if response.status_code == 200:
-            try:
-                content = response.text.strip()
-                content = content.lstrip('_')
-                data = json.loads(content)
-                if data.get('status') and data.get('images'):
-                    image_url = data['images'][0]
-                    return ImageResponse(images=[image_url], alt="Generated Image")
-                else:
-                    return "Error: No image URL found in the response"
-            except json.JSONDecodeError as e:
-                return f"Error: Unable to decode JSON response. Details: {str(e)}"
-        else:
-            return f"Error: {response.status_code}, Response: {response.text}"
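All six image providers removed in this commit share the shape seen above: POST a prompt, strip the `'_'` prefix Nexra puts in front of the JSON, then read `status` and `images`. The parsing step in isolation (payload invented to match that shape):

```python
import json

payload = '_{"status": true, "images": ["https://example.com/out.png"]}'

content = payload.strip().lstrip('_')  # Nexra prefixes the JSON with underscores
data = json.loads(content)
if data.get("status") and data.get("images"):
    print("image URL:", data["images"][0])
else:
    print("no image in response")
```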
g4f/Provider/nexra/NexraDallE2.py
@@ -1,63 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ...image import ImageResponse
-
-class NexraDallE2(AbstractProvider, ProviderModelMixin):
-    label = "Nexra DALL-E 2"
-    url = "https://nexra.aryahcr.cc/documentation/dall-e/en"
-    api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
-    working = True
-
-    default_model = "dalle2"
-    models = [default_model]
-
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        return cls.default_model
-
-    @classmethod
-    def create_completion(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        response: str = "url",  # base64 or url
-        **kwargs
-    ) -> CreateResult:
-        model = cls.get_model(model)
-
-        headers = {
-            'Content-Type': 'application/json'
-        }
-
-        data = {
-            "prompt": messages[-1]["content"],
-            "model": model,
-            "response": response
-        }
-
-        response = requests.post(cls.api_endpoint, headers=headers, json=data)
-
-        result = cls.process_response(response)
-        yield result
-
-    @classmethod
-    def process_response(cls, response):
-        if response.status_code == 200:
-            try:
-                content = response.text.strip()
-                content = content.lstrip('_')
-                data = json.loads(content)
-                if data.get('status') and data.get('images'):
-                    image_url = data['images'][0]
-                    return ImageResponse(images=[image_url], alt="Generated Image")
-                else:
-                    return "Error: No image URL found in the response"
-            except json.JSONDecodeError as e:
-                return f"Error: Unable to decode JSON response. Details: {str(e)}"
-        else:
-            return f"Error: {response.status_code}, Response: {response.text}"
g4f/Provider/nexra/NexraEmi.py
@@ -1,63 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ...image import ImageResponse
-
-class NexraEmi(AbstractProvider, ProviderModelMixin):
-    label = "Nexra Emi"
-    url = "https://nexra.aryahcr.cc/documentation/emi/en"
-    api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
-    working = True
-
-    default_model = "emi"
-    models = [default_model]
-
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        return cls.default_model
-
-    @classmethod
-    def create_completion(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        response: str = "url",  # base64 or url
-        **kwargs
-    ) -> CreateResult:
-        model = cls.get_model(model)
-
-        headers = {
-            'Content-Type': 'application/json'
-        }
-
-        data = {
-            "prompt": messages[-1]["content"],
-            "model": model,
-            "response": response
-        }
-
-        response = requests.post(cls.api_endpoint, headers=headers, json=data)
-
-        result = cls.process_response(response)
-        yield result
-
-    @classmethod
-    def process_response(cls, response):
-        if response.status_code == 200:
-            try:
-                content = response.text.strip()
-                content = content.lstrip('_')
-                data = json.loads(content)
-                if data.get('status') and data.get('images'):
-                    image_url = data['images'][0]
-                    return ImageResponse(images=[image_url], alt="Generated Image")
-                else:
-                    return "Error: No image URL found in the response"
-            except json.JSONDecodeError as e:
-                return f"Error: Unable to decode JSON response. Details: {str(e)}"
-        else:
-            return f"Error: {response.status_code}, Response: {response.text}"
g4f/Provider/nexra/NexraFluxPro.py
@@ -1,70 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ...image import ImageResponse
-
-class NexraFluxPro(AbstractProvider, ProviderModelMixin):
-    url = "https://nexra.aryahcr.cc/documentation/flux-pro/en"
-    api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
-    working = True
-
-    default_model = 'flux'
-    models = [default_model]
-    model_aliases = {
-        "flux-pro": "flux",
-    }
-
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        if model in cls.models:
-            return model
-        elif model in cls.model_aliases:
-            return cls.model_aliases[model]
-        else:
-            return cls.default_model
-
-    @classmethod
-    def create_completion(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        response: str = "url",  # base64 or url
-        **kwargs
-    ) -> CreateResult:
-        model = cls.get_model(model)
-
-        headers = {
-            'Content-Type': 'application/json'
-        }
-
-        data = {
-            "prompt": messages[-1]["content"],
-            "model": model,
-            "response": response
-        }
-
-        response = requests.post(cls.api_endpoint, headers=headers, json=data)
-
-        result = cls.process_response(response)
-        yield result
-
-    @classmethod
-    def process_response(cls, response):
-        if response.status_code == 200:
-            try:
-                content = response.text.strip()
-                content = content.lstrip('_')
-                data = json.loads(content)
-                if data.get('status') and data.get('images'):
-                    image_url = data['images'][0]
-                    return ImageResponse(images=[image_url], alt="Generated Image")
-                else:
-                    return "Error: No image URL found in the response"
-            except json.JSONDecodeError as e:
-                return f"Error: Unable to decode JSON response. Details: {str(e)}"
-        else:
-            return f"Error: {response.status_code}, Response: {response.text}"
g4f/Provider/nexra/NexraGeminiPro.py
@@ -1,86 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ..helper import format_prompt
-
-class NexraGeminiPro(AbstractProvider, ProviderModelMixin):
-    label = "Nexra Gemini PRO"
-    url = "https://nexra.aryahcr.cc/documentation/gemini-pro/en"
-    api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
-    working = True
-    supports_stream = True
-
-    default_model = 'gemini-pro'
-    models = [default_model]
-
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        return cls.default_model
-
-    @classmethod
-    def create_completion(
-        cls,
-        model: str,
-        messages: Messages,
-        stream: bool,
-        proxy: str = None,
-        markdown: bool = False,
-        **kwargs
-    ) -> CreateResult:
-        model = cls.get_model(model)
-
-        headers = {
-            'Content-Type': 'application/json'
-        }
-
-        data = {
-            "messages": [
-                {
-                    "role": "user",
-                    "content": format_prompt(messages)
-                }
-            ],
-            "stream": stream,
-            "markdown": markdown,
-            "model": model
-        }
-
-        response = requests.post(cls.api_endpoint, headers=headers, json=data, stream=stream)
-
-        if stream:
-            return cls.process_streaming_response(response)
-        else:
-            return cls.process_non_streaming_response(response)
-
-    @classmethod
-    def process_non_streaming_response(cls, response):
-        if response.status_code == 200:
-            try:
-                content = response.text.lstrip('_')
-                data = json.loads(content)
-                return data.get('message', '')
-            except json.JSONDecodeError:
-                return "Error: Unable to decode JSON response"
-        else:
-            return f"Error: {response.status_code}"
-
-    @classmethod
-    def process_streaming_response(cls, response):
-        full_message = ""
-        for line in response.iter_lines(decode_unicode=True):
-            if line:
-                try:
-                    line = line.lstrip('_')
-                    data = json.loads(line)
-                    if data.get('finish'):
-                        break
-                    message = data.get('message', '')
-                    if message:
-                        yield message[len(full_message):]
-                        full_message = message
-                except json.JSONDecodeError:
-                    pass
g4f/Provider/nexra/NexraMidjourney.py
@@ -1,63 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ...image import ImageResponse
-
-class NexraMidjourney(AbstractProvider, ProviderModelMixin):
-    label = "Nexra Midjourney"
-    url = "https://nexra.aryahcr.cc/documentation/midjourney/en"
-    api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
-    working = True
-
-    default_model = "midjourney"
-    models = [default_model]
-
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        return cls.default_model
-
-    @classmethod
-    def create_completion(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        response: str = "url",  # base64 or url
-        **kwargs
-    ) -> CreateResult:
-        model = cls.get_model(model)
-
-        headers = {
-            'Content-Type': 'application/json'
-        }
-
-        data = {
-            "prompt": messages[-1]["content"],
-            "model": model,
-            "response": response
-        }
-
-        response = requests.post(cls.api_endpoint, headers=headers, json=data)
-
-        result = cls.process_response(response)
-        yield result
-
-    @classmethod
-    def process_response(cls, response):
-        if response.status_code == 200:
-            try:
-                content = response.text.strip()
-                content = content.lstrip('_')
-                data = json.loads(content)
-                if data.get('status') and data.get('images'):
-                    image_url = data['images'][0]
-                    return ImageResponse(images=[image_url], alt="Generated Image")
-                else:
-                    return "Error: No image URL found in the response"
-            except json.JSONDecodeError as e:
-                return f"Error: Unable to decode JSON response. Details: {str(e)}"
-        else:
-            return f"Error: {response.status_code}, Response: {response.text}"
g4f/Provider/nexra/NexraProdiaAI.py
@@ -1,151 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ...image import ImageResponse
-
-class NexraProdiaAI(AbstractProvider, ProviderModelMixin):
-    label = "Nexra Prodia AI"
-    url = "https://nexra.aryahcr.cc/documentation/prodia/en"
-    api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
-    working = True
-
-    default_model = 'absolutereality_v181.safetensors [3d9d4d2b]'
-    models = [
-        '3Guofeng3_v34.safetensors [50f420de]',
-        'absolutereality_V16.safetensors [37db0fc3]',
-        default_model,
-        'amIReal_V41.safetensors [0a8a2e61]',
-        'analog-diffusion-1.0.ckpt [9ca13f02]',
-        'aniverse_v30.safetensors [579e6f85]',
-        'anythingv3_0-pruned.ckpt [2700c435]',
-        'anything-v4.5-pruned.ckpt [65745d25]',
-        'anythingV5_PrtRE.safetensors [893e49b9]',
-        'AOM3A3_orangemixs.safetensors [9600da17]',
-        'blazing_drive_v10g.safetensors [ca1c1eab]',
-        'breakdomain_I2428.safetensors [43cc7d2f]',
-        'breakdomain_M2150.safetensors [15f7afca]',
-        'cetusMix_Version35.safetensors [de2f2560]',
-        'childrensStories_v13D.safetensors [9dfaabcb]',
-        'childrensStories_v1SemiReal.safetensors [a1c56dbb]',
-        'childrensStories_v1ToonAnime.safetensors [2ec7b88b]',
-        'Counterfeit_v30.safetensors [9e2a8f19]',
-        'cuteyukimixAdorable_midchapter3.safetensors [04bdffe6]',
-        'cyberrealistic_v33.safetensors [82b0d085]',
-        'dalcefo_v4.safetensors [425952fe]',
-        'deliberate_v2.safetensors [10ec4b29]',
-        'deliberate_v3.safetensors [afd9d2d4]',
-        'dreamlike-anime-1.0.safetensors [4520e090]',
-        'dreamlike-diffusion-1.0.safetensors [5c9fd6e0]',
-        'dreamlike-photoreal-2.0.safetensors [fdcf65e7]',
-        'dreamshaper_6BakedVae.safetensors [114c8abb]',
-        'dreamshaper_7.safetensors [5cf5ae06]',
-        'dreamshaper_8.safetensors [9d40847d]',
-        'edgeOfRealism_eorV20.safetensors [3ed5de15]',
-        'EimisAnimeDiffusion_V1.ckpt [4f828a15]',
-        'elldreths-vivid-mix.safetensors [342d9d26]',
-        'epicphotogasm_xPlusPlus.safetensors [1a8f6d35]',
-        'epicrealism_naturalSinRC1VAE.safetensors [90a4c676]',
-        'epicrealism_pureEvolutionV3.safetensors [42c8440c]',
-        'ICantBelieveItsNotPhotography_seco.safetensors [4e7a3dfd]',
-        'indigoFurryMix_v75Hybrid.safetensors [91208cbb]',
-        'juggernaut_aftermath.safetensors [5e20c455]',
-        'lofi_v4.safetensors [ccc204d6]',
-        'lyriel_v16.safetensors [68fceea2]',
-        'majicmixRealistic_v4.safetensors [29d0de58]',
-        'mechamix_v10.safetensors [ee685731]',
-        'meinamix_meinaV9.safetensors [2ec66ab0]',
-        'meinamix_meinaV11.safetensors [b56ce717]',
-        'neverendingDream_v122.safetensors [f964ceeb]',
-        'openjourney_V4.ckpt [ca2f377f]',
-        'pastelMixStylizedAnime_pruned_fp16.safetensors [793a26e8]',
-        'portraitplus_V1.0.safetensors [1400e684]',
-        'protogenx34.safetensors [5896f8d5]',
-        'Realistic_Vision_V1.4-pruned-fp16.safetensors [8d21810b]',
-        'Realistic_Vision_V2.0.safetensors [79587710]',
-        'Realistic_Vision_V4.0.safetensors [29a7afaa]',
-        'Realistic_Vision_V5.0.safetensors [614d1063]',
-        'Realistic_Vision_V5.1.safetensors [a0f13c83]',
-        'redshift_diffusion-V10.safetensors [1400e684]',
-        'revAnimated_v122.safetensors [3f4fefd9]',
-        'rundiffusionFX25D_v10.safetensors [cd12b0ee]',
-        'rundiffusionFX_v10.safetensors [cd4e694d]',
-        'sdv1_4.ckpt [7460a6fa]',
-        'v1-5-pruned-emaonly.safetensors [d7049739]',
-        'v1-5-inpainting.safetensors [21c7ab71]',
-        'shoninsBeautiful_v10.safetensors [25d8c546]',
-        'theallys-mix-ii-churned.safetensors [5d9225a4]',
-        'timeless-1.0.ckpt [7c4971d4]',
-        'toonyou_beta6.safetensors [980f6b15]',
-    ]
-
-    model_aliases = {}
-
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        if model in cls.models:
-            return model
-        elif model in cls.model_aliases:
-            return cls.model_aliases[model]
-        else:
-            return cls.default_model
-
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        return cls.default_model
-
-    @classmethod
-    def create_completion(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        response: str = "url",  # base64 or url
-        steps: str = 25,  # Min: 1, Max: 30
-        cfg_scale: str = 7,  # Min: 0, Max: 20
-        sampler: str = "DPM++ 2M Karras",  # Select from: "Euler", "Euler a", "Heun", "DPM++ 2M Karras", "DPM++ SDE Karras", "DDIM"
-        negative_prompt: str = "",  # Indicates what the AI should not do
-        **kwargs
-    ) -> CreateResult:
-        model = cls.get_model(model)
-
-        headers = {
-            'Content-Type': 'application/json'
-        }
-
-        data = {
-            "prompt": messages[-1]["content"],
-            "model": "prodia",
-            "response": response,
-            "data": {
-                "model": model,
-                "steps": steps,
-                "cfg_scale": cfg_scale,
-                "sampler": sampler,
-                "negative_prompt": negative_prompt
-            }
-        }
-
-        response = requests.post(cls.api_endpoint, headers=headers, json=data)
-
-        result = cls.process_response(response)
-        yield result
-
-    @classmethod
-    def process_response(cls, response):
-        if response.status_code == 200:
-            try:
-                content = response.text.strip()
-                content = content.lstrip('_')  # Remove leading underscores
-                data = json.loads(content)
-                if data.get('status') and data.get('images'):
-                    image_url = data['images'][0]
-                    return ImageResponse(images=[image_url], alt="Generated Image")
-                else:
-                    return "Error: No image URL found in the response"
-            except json.JSONDecodeError as e:
-                return f"Error: Unable to decode JSON response. Details: {str(e)}"
-        else:
-            return f"Error: {response.status_code}, Response: {response.text}"
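Two notes on the file above: the class body defines `get_model` twice, so Python keeps only the second binding and the alias-checking version above it was dead code, and unlike the other image providers the generation options travel in a nested `data` object while the top-level `model` is pinned to `"prodia"`. The request body in isolation (prompt is a placeholder; values mirror the defaults in the signature above):

```python
# Sketch of the payload NexraProdiaAI built for its POST request.
payload = {
    "prompt": "a watercolor lighthouse at dusk",  # placeholder prompt
    "model": "prodia",        # fixed top-level model name
    "response": "url",        # or "base64"
    "data": {                 # per-checkpoint generation options
        "model": "absolutereality_v181.safetensors [3d9d4d2b]",
        "steps": 25,          # Min: 1, Max: 30
        "cfg_scale": 7,       # Min: 0, Max: 20
        "sampler": "DPM++ 2M Karras",
        "negative_prompt": "",
    },
}
```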
g4f/Provider/nexra/NexraQwen.py
@@ -1,86 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ..helper import format_prompt
-
-class NexraQwen(AbstractProvider, ProviderModelMixin):
-    label = "Nexra Qwen"
-    url = "https://nexra.aryahcr.cc/documentation/qwen/en"
-    api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
-    working = True
-    supports_stream = True
-
-    default_model = 'qwen'
-    models = [default_model]
-
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        return cls.default_model
-
-    @classmethod
-    def create_completion(
-        cls,
-        model: str,
-        messages: Messages,
-        stream: bool,
-        proxy: str = None,
-        markdown: bool = False,
-        **kwargs
-    ) -> CreateResult:
-        model = cls.get_model(model)
-
-        headers = {
-            'Content-Type': 'application/json'
-        }
-
-        data = {
-            "messages": [
-                {
-                    "role": "user",
-                    "content": format_prompt(messages)
-                }
-            ],
-            "stream": stream,
-            "markdown": markdown,
-            "model": model
-        }
-
-        response = requests.post(cls.api_endpoint, headers=headers, json=data, stream=stream)
-
-        if stream:
-            return cls.process_streaming_response(response)
-        else:
-            return cls.process_non_streaming_response(response)
-
-    @classmethod
-    def process_non_streaming_response(cls, response):
-        if response.status_code == 200:
-            try:
-                content = response.text.lstrip('_')
-                data = json.loads(content)
-                return data.get('message', '')
-            except json.JSONDecodeError:
-                return "Error: Unable to decode JSON response"
-        else:
-            return f"Error: {response.status_code}"
-
-    @classmethod
-    def process_streaming_response(cls, response):
-        full_message = ""
-        for line in response.iter_lines(decode_unicode=True):
-            if line:
-                try:
-                    line = line.lstrip('_')
-                    data = json.loads(line)
-                    if data.get('finish'):
-                        break
-                    message = data.get('message', '')
-                    if message is not None and message != full_message:
-                        yield message[len(full_message):]
-                        full_message = message
-                except json.JSONDecodeError:
-                    pass
g4f/Provider/nexra/NexraSD15.py
@@ -1,72 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ...image import ImageResponse
-
-class NexraSD15(AbstractProvider, ProviderModelMixin):
-    label = "Nexra Stable Diffusion 1.5"
-    url = "https://nexra.aryahcr.cc/documentation/stable-diffusion/en"
-    api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
-    working = True
-
-    default_model = 'stablediffusion-1.5'
-    models = [default_model]
-
-    model_aliases = {
-        "sd-1.5": "stablediffusion-1.5",
-    }
-
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        if model in cls.models:
-            return model
-        elif model in cls.model_aliases:
-            return cls.model_aliases[model]
-        else:
-            return cls.default_model
-
-    @classmethod
-    def create_completion(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        response: str = "url",  # base64 or url
-        **kwargs
-    ) -> CreateResult:
-        model = cls.get_model(model)
-
-        headers = {
-            'Content-Type': 'application/json'
-        }
-
-        data = {
-            "prompt": messages[-1]["content"],
-            "model": model,
-            "response": response
-        }
-
-        response = requests.post(cls.api_endpoint, headers=headers, json=data)
-
-        result = cls.process_response(response)
-        yield result
-
-    @classmethod
-    def process_response(cls, response):
-        if response.status_code == 200:
-            try:
-                content = response.text.strip()
-                content = content.lstrip('_')
-                data = json.loads(content)
-                if data.get('status') and data.get('images'):
-                    image_url = data['images'][0]
-                    return ImageResponse(images=[image_url], alt="Generated Image")
-                else:
-                    return "Error: No image URL found in the response"
-            except json.JSONDecodeError as e:
-                return f"Error: Unable to decode JSON response. Details: {str(e)}"
-        else:
-            return f"Error: {response.status_code}, Response: {response.text}"
g4f/Provider/nexra/NexraSDLora.py
@@ -1,69 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ...image import ImageResponse
-
-class NexraSDLora(AbstractProvider, ProviderModelMixin):
-    label = "Nexra Stable Diffusion Lora"
-    url = "https://nexra.aryahcr.cc/documentation/stable-diffusion/en"
-    api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
-    working = True
-
-    default_model = "sdxl-lora"
-    models = [default_model]
-
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        return cls.default_model
-
-    @classmethod
-    def create_completion(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        response: str = "url",  # base64 or url
-        guidance: str = 0.3,  # Min: 0, Max: 5
-        steps: str = 2,  # Min: 2, Max: 10
-        **kwargs
-    ) -> CreateResult:
-        model = cls.get_model(model)
-
-        headers = {
-            'Content-Type': 'application/json'
-        }
-
-        data = {
-            "prompt": messages[-1]["content"],
-            "model": model,
-            "response": response,
-            "data": {
-                "guidance": guidance,
-                "steps": steps
-            }
-        }
-
-        response = requests.post(cls.api_endpoint, headers=headers, json=data)
-
-        result = cls.process_response(response)
-        yield result
-
-    @classmethod
-    def process_response(cls, response):
-        if response.status_code == 200:
-            try:
-                content = response.text.strip()
-                content = content.lstrip('_')
-                data = json.loads(content)
-                if data.get('status') and data.get('images'):
-                    image_url = data['images'][0]
-                    return ImageResponse(images=[image_url], alt="Generated Image")
-                else:
-                    return "Error: No image URL found in the response"
-            except json.JSONDecodeError as e:
-                return f"Error: Unable to decode JSON response. Details: {str(e)}"
-        else:
-            return f"Error: {response.status_code}, Response: {response.text}"
g4f/Provider/nexra/NexraSDTurbo.py
@@ -1,69 +0,0 @@
-from __future__ import annotations
-
-import json
-import requests
-
-from ...typing import CreateResult, Messages
-from ..base_provider import ProviderModelMixin, AbstractProvider
-from ...image import ImageResponse
-
-class NexraSDTurbo(AbstractProvider, ProviderModelMixin):
-    label = "Nexra Stable Diffusion Turbo"
-    url = "https://nexra.aryahcr.cc/documentation/stable-diffusion/en"
-    api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
-    working = True
-
-    default_model = "sdxl-turbo"
-    models = [default_model]
-
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        return cls.default_model
-
-    @classmethod
-    def create_completion(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        response: str = "url",  # base64 or url
-        strength: str = 0.7,  # Min: 0, Max: 1
-        steps: str = 2,  # Min: 1, Max: 10
-        **kwargs
-    ) -> CreateResult:
-        model = cls.get_model(model)
-
-        headers = {
-            'Content-Type': 'application/json'
-        }
-
-        data = {
-            "prompt": messages[-1]["content"],
-            "model": model,
-            "response": response,
-            "data": {
-                "strength": strength,
-                "steps": steps
-            }
-        }
-
-        response = requests.post(cls.api_endpoint, headers=headers, json=data)
-
-        result = cls.process_response(response)
-        yield result
-
-    @classmethod
-    def process_response(cls, response):
-        if response.status_code == 200:
-            try:
-                content = response.text.strip()
-                content = content.lstrip('_')  # Remove the leading underscore
-                data = json.loads(content)
-                if data.get('status') and data.get('images'):
-                    image_url = data['images'][0]
-                    return ImageResponse(images=[image_url], alt="Generated Image")
-                else:
-                    return "Error: No image URL found in the response"
-            except json.JSONDecodeError as e:
-                return f"Error: Unable to decode JSON response. Details: {str(e)}"
-        else:
-            return f"Error: {response.status_code}, Response: {response.text}"
g4f/Provider/nexra/__init__.py
@@ -1,14 +0,0 @@
-from .NexraBing import NexraBing
-from .NexraBlackbox import NexraBlackbox
-from .NexraChatGPT import NexraChatGPT
-from .NexraDallE import NexraDallE
-from .NexraDallE2 import NexraDallE2
-from .NexraEmi import NexraEmi
-from .NexraFluxPro import NexraFluxPro
-from .NexraGeminiPro import NexraGeminiPro
-from .NexraMidjourney import NexraMidjourney
-from .NexraProdiaAI import NexraProdiaAI
-from .NexraQwen import NexraQwen
-from .NexraSD15 import NexraSD15
-from .NexraSDLora import NexraSDLora
-from .NexraSDTurbo import NexraSDTurbo
g4f/models.py (112 changes)
@@ -32,19 +32,6 @@ from .Provider import (
     Liaobots,
     MagickPen,
     MetaAI,
-    NexraBing,
-    NexraBlackbox,
-    NexraChatGPT,
-    NexraDallE,
-    NexraDallE2,
-    NexraEmi,
-    NexraFluxPro,
-    NexraGeminiPro,
-    NexraMidjourney,
-    NexraQwen,
-    NexraSD15,
-    NexraSDLora,
-    NexraSDTurbo,
     OpenaiChat,
     PerplexityLabs,
     Pi,
@@ -107,25 +94,18 @@ default = Model(
 ############
 
 ### OpenAI ###
-# gpt-3
-gpt_3 = Model(
-    name = 'gpt-3',
-    base_provider = 'OpenAI',
-    best_provider = NexraChatGPT
-)
-
 # gpt-3.5
 gpt_35_turbo = Model(
     name = 'gpt-3.5-turbo',
     base_provider = 'OpenAI',
-    best_provider = IterListProvider([DarkAI, NexraChatGPT, Airforce, Liaobots, Allyfy])
+    best_provider = IterListProvider([DarkAI, Airforce, Liaobots, Allyfy])
 )
 
 # gpt-4
 gpt_4o = Model(
     name = 'gpt-4o',
     base_provider = 'OpenAI',
-    best_provider = IterListProvider([Blackbox, ChatGptEs, DarkAI, NexraChatGPT, Airforce, ChatGpt, Liaobots, OpenaiChat])
+    best_provider = IterListProvider([Blackbox, ChatGptEs, DarkAI, Airforce, ChatGpt, Liaobots, OpenaiChat])
 )
 
 gpt_4o_mini = Model(
@@ -143,7 +123,7 @@ gpt_4_turbo = Model(
 gpt_4 = Model(
     name = 'gpt-4',
     base_provider = 'OpenAI',
-    best_provider = IterListProvider([Chatgpt4Online, Ai4Chat, NexraBing, NexraChatGPT, ChatGpt, Airforce, Bing, OpenaiChat, gpt_4_turbo.best_provider, gpt_4o.best_provider, gpt_4o_mini.best_provider])
+    best_provider = IterListProvider([Chatgpt4Online, Ai4Chat, ChatGpt, Airforce, Bing, OpenaiChat, gpt_4_turbo.best_provider, gpt_4o.best_provider, gpt_4o_mini.best_provider])
 )
 
 # o1
@@ -342,7 +322,7 @@ phi_3_5_mini = Model(
 gemini_pro = Model(
     name = 'gemini-pro',
     base_provider = 'Google DeepMind',
-    best_provider = IterListProvider([GeminiPro, Blackbox, AIChatFree, FreeGpt, NexraGeminiPro, Airforce, Liaobots])
+    best_provider = IterListProvider([GeminiPro, Blackbox, AIChatFree, FreeGpt, Airforce, Liaobots])
 )
 
 gemini_flash = Model(
@@ -430,7 +410,7 @@ reka_core = Model(
 blackboxai = Model(
     name = 'blackboxai',
     base_provider = 'Blackbox AI',
-    best_provider = IterListProvider([Blackbox, NexraBlackbox])
+    best_provider = Blackbox
 )
 
 blackboxai_pro = Model(
@@ -501,12 +481,6 @@ qwen_2_5_72b = Model(
     best_provider = Airforce
 )
 
-qwen = Model(
-    name = 'qwen',
-    base_provider = 'Qwen',
-    best_provider = NexraQwen
-)
-
 ### Upstage ###
 solar_10_7b = Model(
     name = 'solar-10-7b',
@@ -683,20 +657,6 @@ zephyr_7b = Model(
 #############
 
 ### Stability AI ###
-sdxl_turbo = Model(
-    name = 'sdxl-turbo',
-    base_provider = 'Stability AI',
-    best_provider = NexraSDTurbo
-
-)
-
-sdxl_lora = Model(
-    name = 'sdxl-lora',
-    base_provider = 'Stability AI',
-    best_provider = NexraSDLora
-
-)
-
 sdxl = Model(
     name = 'sdxl',
     base_provider = 'Stability AI',
@@ -704,13 +664,6 @@ sdxl = Model(
 
 )
 
-sd_1_5 = Model(
-    name = 'sd-1.5',
-    base_provider = 'Stability AI',
-    best_provider = IterListProvider([NexraSD15])
-
-)
-
 sd_3 = Model(
     name = 'sd-3',
     base_provider = 'Stability AI',
@@ -735,13 +688,6 @@ flux = Model(
 
 )
 
-flux_pro = Model(
-    name = 'flux-pro',
-    base_provider = 'Flux AI',
-    best_provider = IterListProvider([NexraFluxPro])
-
-)
-
 flux_realism = Model(
     name = 'flux-realism',
     base_provider = 'Flux AI',
@@ -792,37 +738,7 @@ flux_schnell = Model(
 )
 
-
-### OpenAI ###
-dalle_2 = Model(
-    name = 'dalle-2',
-    base_provider = 'OpenAI',
-    best_provider = NexraDallE2
-
-)
-
-dalle = Model(
-    name = 'dalle',
-    base_provider = 'OpenAI',
-    best_provider = NexraDallE
-
-)
-
-### Midjourney ###
-midjourney = Model(
-    name = 'midjourney',
-    base_provider = 'Midjourney',
-    best_provider = NexraMidjourney
-
-)
-
 ### Other ###
-emi = Model(
-    name = 'emi',
-    base_provider = '',
-    best_provider = NexraEmi
-
-)
-
 any_dark = Model(
     name = 'any-dark',
     base_provider = '',
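The hunks above all edit `Model` registry entries: each entry names a default provider chain, and `IterListProvider` tries its members in order until one succeeds. A hedged sketch of the pattern (import paths assumed from how models.py itself imports these names; providers are examples):

```python
from g4f.models import Model
from g4f.Provider import IterListProvider, Blackbox, Airforce

# Same shape as the registry entries edited above.
my_model = Model(
    name="gpt-4o",
    base_provider="OpenAI",
    best_provider=IterListProvider([Blackbox, Airforce]),
)
```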
@@ -844,9 +760,6 @@ class ModelUtils:
         ############
 
         ### OpenAI ###
-        # gpt-3
-        'gpt-3': gpt_3,
-
         # gpt-3.5
         'gpt-3.5-turbo': gpt_35_turbo,
 
@@ -959,8 +872,6 @@ class ModelUtils:
 
 
         ### Qwen ###
-        'qwen': qwen,
-
         # qwen 1.5
         'qwen-1.5-5b': qwen_1_5_5b,
         'qwen-1.5-7b': qwen_1_5_7b,
@@ -1063,9 +974,6 @@ class ModelUtils:
 
         ### Stability AI ###
         'sdxl': sdxl,
-        'sdxl-lora': sdxl_lora,
-        'sdxl-turbo': sdxl_turbo,
-        'sd-1.5': sd_1_5,
         'sd-3': sd_3,
 
 
@@ -1075,7 +983,6 @@ class ModelUtils:
 
         ### Flux AI ###
         'flux': flux,
-        'flux-pro': flux_pro,
         'flux-realism': flux_realism,
         'flux-anime': flux_anime,
         'flux-3d': flux_3d,
@@ -1085,16 +992,7 @@ class ModelUtils:
         'flux-schnell': flux_schnell,
 
 
-        ### OpenAI ###
-        'dalle': dalle,
-        'dalle-2': dalle_2,
-
-        ### Midjourney ###
-        'midjourney': midjourney,
-
-
         ### Other ###
-        'emi': emi,
         'any-dark': any_dark,
     }
 
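`ModelUtils.convert` is the string-to-`Model` lookup the client uses, so every key deleted in these hunks is a model name that stops resolving. A quick check (hedged; assumes the public `g4f.models` module layout):

```python
from g4f.models import ModelUtils

model = ModelUtils.convert["gpt-4o"]  # still registered after this commit
print(model.name, model.base_provider)

try:
    ModelUtils.convert["sdxl-turbo"]  # key removed in this commit
except KeyError:
    print("'sdxl-turbo' no longer resolves to a Model")
```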