Mirror of https://github.com/xtekky/gpt4free.git, synced 2025-12-15 14:51:19 -08:00

Commit 315a2f2595 (parent a591c5d409): Add streaming and system messages support in Airforce

7 changed files with 140 additions and 139 deletions
@@ -26,4 +26,9 @@ class TestProviderHasModel(unittest.IsolatedAsyncioTestCase):
         except (MissingRequirementsError, MissingAuthError):
             return
         if self.cache[provider.__name__]:
-            self.assertIn(model, self.cache[provider.__name__], provider.__name__)
+            self.assertIn(model, self.cache[provider.__name__], provider.__name__)
+
+    async def test_all_providers_working(self):
+        for model, providers in __models__.values():
+            for provider in providers:
+                self.assertTrue(provider.working, f"{provider.__name__} in {model.name}")
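
The new test_all_providers_working walks __models__, which after this commit maps each model name to a (model, providers) tuple (see the g4f/models.py hunks below). A minimal sketch of the shape the test iterates, using hypothetical stand-in classes rather than g4f's own:

# Sketch of the structure the new test iterates; DummyModel and
# DummyProvider are hypothetical stand-ins, not g4f classes.
class DummyProvider:
    working = True  # class attribute, like g4f provider classes

class DummyModel:
    name = "dummy-model"

__models__ = {"dummy-model": (DummyModel(), [DummyProvider])}

for model, providers in __models__.values():
    for provider in providers:
        assert provider.working, f"{provider.__name__} in {model.name}"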
@@ -5,8 +5,10 @@ import requests
 from aiohttp import ClientSession
+from typing import List
 from requests.packages.urllib3.exceptions import InsecureRequestWarning

 from ..typing import AsyncResult, Messages
 from ..image import ImageResponse
+from ..requests.raise_for_status import raise_for_status
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin

 from .. import debug
@@ -32,7 +34,7 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
     api_endpoint_imagine2 = "https://api.airforce/imagine2"

     working = True
-    supports_stream = False
+    supports_stream = True
     supports_system_message = True
     supports_message_history = True
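
With supports_stream flipped to True, Airforce responses can be consumed incrementally through g4f's public API. A usage sketch (the chosen model name is illustrative, not a guarantee of availability):

# Usage sketch: streaming from the Airforce provider via g4f.
# Assumes g4f is installed; the model name is illustrative.
import g4f

for chunk in g4f.ChatCompletion.create(
    model="llama-3.1-70b",
    provider=g4f.Provider.Airforce,
    messages=[
        {"role": "system", "content": "Answer briefly."},  # system messages now supported
        {"role": "user", "content": "Hello"},
    ],
    stream=True,
):
    print(chunk, end="", flush=True)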
@@ -87,7 +89,7 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
             debug.log(f"Error fetching text models: {e}")

         return cls.models

     @classmethod
     async def check_api_key(cls, api_key: str) -> bool:
         """
@@ -95,12 +97,11 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
         """
         if not api_key or api_key == "null":
             return True  # No restrictions if no key.
-

         headers = {
             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
             "Accept": "*/*",
         }

         try:
             async with ClientSession(headers=headers) as session:
                 async with session.get(f"https://api.airforce/check?key={api_key}") as response:
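
check_api_key treats a missing or literal "null" key as unrestricted and otherwise validates it against https://api.airforce/check. A hedged sketch of calling it from async code (the error handling is illustrative):

# Sketch: validating a key before use. Assumes the provider class is
# importable from g4f.Provider; the RuntimeError message is illustrative.
import asyncio
from g4f.Provider import Airforce

async def main() -> None:
    if not await Airforce.check_api_key("my-api-key"):
        raise RuntimeError("api.airforce rejected this key")

asyncio.run(main())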
@@ -195,11 +196,13 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
             "Content-Type": "application/json",
             "Authorization": f"Bearer {api_key}",
         }
-        full_message = "\n".join([msg['content'] for msg in messages])
-        message_chunks = split_message(full_message, max_length=1000)

+        final_messages = []
+        for message in messages:
+            message_chunks = split_message(message["content"], max_length=1000)
+            final_messages.extend([{"role": message["role"], "content": chunk} for chunk in message_chunks])
         data = {
-            "messages": [{"role": "user", "content": chunk} for chunk in message_chunks],
+            "messages": final_messages,
             "model": model,
             "max_tokens": max_tokens,
             "temperature": temperature,
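
The old code joined every message into one string and resent the chunks as "user" turns; the new code splits each message on its own and keeps its original role, which is what lets system messages survive chunking. split_message itself is defined elsewhere in Airforce.py; a plausible sketch of such a helper (an assumption, not the module's exact code):

# Hedged sketch of a split_message-style helper: chunk text to at most
# max_length characters, preferring to cut at a space. The real helper
# in Airforce.py may differ in detail.
from typing import List

def split_message(message: str, max_length: int = 1000) -> List[str]:
    chunks: List[str] = []
    while len(message) > max_length:
        cut = message.rfind(" ", 0, max_length)  # last space inside the limit
        if cut == -1:
            cut = max_length  # no space found: hard cut
        chunks.append(message[:cut])
        message = message[cut:].lstrip()
    if message:
        chunks.append(message)
    return chunks

With a helper like this, a 2500-character system prompt becomes three chunks that all keep "role": "system".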
@@ -209,10 +212,9 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):

         async with ClientSession(headers=headers) as session:
             async with session.post(cls.api_endpoint_completions, json=data, proxy=proxy) as response:
-                response.raise_for_status()
-                buffer = []  # Buffer to collect partial responses
+                await raise_for_status(response)
+
+                if stream:
                     async for line in response.content:
                         line = line.decode('utf-8').strip()
                         if line.startswith('data: '):
@@ -222,12 +224,11 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
                             if 'choices' in chunk and chunk['choices']:
                                 delta = chunk['choices'][0].get('delta', {})
                                 if 'content' in delta:
-                                    buffer.append(delta['content'])
+                                    chunk = cls._filter_response(delta['content'])
+                                    if chunk:
+                                        yield chunk
                             except json.JSONDecodeError:
                                 continue
-                # Combine the buffered response and filter it
-                filtered_response = cls._filter_response(''.join(buffer))
-                yield filtered_response
+                else:
+                    # Non-streaming response
+                    result = await response.json()
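
The streaming branch reads the body line by line, parses SSE-style "data: " lines, and yields each filtered delta immediately instead of buffering the whole reply. A self-contained sketch of that parse loop over canned payloads (the byte strings stand in for lines read from response.content, and the "[DONE]" sentinel is the common OpenAI-style convention, assumed here rather than shown in the diff):

# Sketch of the SSE-style parsing in the streaming branch.
import json

lines = [
    b'data: {"choices": [{"delta": {"content": "Hel"}}]}\n',
    b'data: {"choices": [{"delta": {"content": "lo"}}]}\n',
    b'data: [DONE]\n',
]

for raw in lines:
    line = raw.decode("utf-8").strip()
    if not line.startswith("data: "):
        continue
    payload = line[len("data: "):]
    if payload == "[DONE]":  # assumed end-of-stream sentinel
        break
    try:
        chunk = json.loads(payload)
    except json.JSONDecodeError:
        continue  # skip malformed lines, as the provider does
    if chunk.get("choices"):
        delta = chunk["choices"][0].get("delta", {})
        if "content" in delta:
            print(delta["content"], end="")  # the provider yields this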
@@ -47,7 +47,7 @@ class Copilot(AbstractProvider, ProviderModelMixin):

     websocket_url = "wss://copilot.microsoft.com/c/api/chat?api-version=2"
     conversation_url = f"{url}/c/api/conversations"

     _access_token: str = None
     _cookies: CookieJar = None
@@ -94,20 +94,20 @@ class Copilot(AbstractProvider, ProviderModelMixin):
         ) as session:
             if cls._access_token is not None:
                 cls._cookies = session.cookies.jar
-            if cls._access_token is None:
-                try:
-                    url = "https://copilot.microsoft.com/cl/eus-sc/collect"
-                    headers = {
-                        "Accept": "application/x-clarity-gzip",
-                        "referrer": "https://copilot.microsoft.com/onboarding"
-                    }
-                    response = session.post(url, headers=headers, data=get_clarity())
-                    clarity_token = json.loads(response.text.split(" ", maxsplit=1)[-1])[0]["value"]
-                    debug.log(f"Copilot: Clarity Token: ...{clarity_token[-12:]}")
-                except Exception as e:
-                    debug.log(f"Copilot: {e}")
-            else:
-                clarity_token = None
+            # if cls._access_token is None:
+            #     try:
+            #         url = "https://copilot.microsoft.com/cl/eus-sc/collect"
+            #         headers = {
+            #             "Accept": "application/x-clarity-gzip",
+            #             "referrer": "https://copilot.microsoft.com/onboarding"
+            #         }
+            #         response = session.post(url, headers=headers, data=get_clarity())
+            #         clarity_token = json.loads(response.text.split(" ", maxsplit=1)[-1])[0]["value"]
+            #         debug.log(f"Copilot: Clarity Token: ...{clarity_token[-12:]}")
+            #     except Exception as e:
+            #         debug.log(f"Copilot: {e}")
+            #     else:
+            #         clarity_token = None
             response = session.get("https://copilot.microsoft.com/c/api/user")
             raise_for_status(response)
             user = response.json().get('firstName')
@@ -121,6 +121,14 @@ class Copilot(AbstractProvider, ProviderModelMixin):
                 if return_conversation:
                     yield Conversation(conversation_id)
                 prompt = format_prompt(messages)
+                if len(prompt) > 10000:
+                    if len(messages) > 6:
+                        prompt = format_prompt(messages[:3]+messages[-3:])
+                    elif len(messages) > 2:
+                        prompt = format_prompt(messages[:2]+messages[-1:])
+                    if len(prompt) > 10000:
+                        prompt = messages[-1]["content"]
+                    debug.log(f"Copilot: Trim messages to: {len(prompt)}")
                 debug.log(f"Copilot: Created conversation: {conversation_id}")
             else:
                 conversation_id = conversation.conversation_id
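
The added block keeps the head and tail of long histories and, if the prompt is still over 10000 characters, falls back to the newest message alone. The same logic as a standalone function (a sketch; format_prompt is stubbed minimally here, while g4f's real helper formats roles more carefully):

# Sketch of Copilot's prompt trimming as a pure function. The
# format_prompt stub is a simplification of g4f's helper.
def format_prompt(messages: list) -> str:
    return "\n".join(m["content"] for m in messages)

def trim_prompt(messages: list, limit: int = 10000) -> str:
    prompt = format_prompt(messages)
    if len(prompt) > limit:
        if len(messages) > 6:
            prompt = format_prompt(messages[:3] + messages[-3:])  # keep both ends
        elif len(messages) > 2:
            prompt = format_prompt(messages[:2] + messages[-1:])
        if len(prompt) > limit:
            prompt = messages[-1]["content"]  # last resort: newest message only
    return prompt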
@@ -138,14 +146,15 @@ class Copilot(AbstractProvider, ProviderModelMixin):
                 )
                 raise_for_status(response)
                 uploaded_images.append({"type":"image", "url": response.json().get("url")})
                 break

         wss = session.ws_connect(cls.websocket_url)
-        if clarity_token is not None:
-            wss.send(json.dumps({
-                "event": "challengeResponse",
-                "token": clarity_token,
-                "method":"clarity"
-            }).encode(), CurlWsFlag.TEXT)
+        # if clarity_token is not None:
+        #     wss.send(json.dumps({
+        #         "event": "challengeResponse",
+        #         "token": clarity_token,
+        #         "method":"clarity"
+        #     }).encode(), CurlWsFlag.TEXT)
         wss.send(json.dumps({
             "event": "send",
             "conversationId": conversation_id,
@@ -44,6 +44,8 @@ class arkReq:
         self.userAgent = userAgent

 def get_har_files():
+    if not os.access(get_cookies_dir(), os.R_OK):
+        raise NoValidHarFileError("har_and_cookies dir is not readable")
     harPath = []
     for root, _, files in os.walk(get_cookies_dir()):
         for file in files:
@@ -1028,7 +1028,7 @@ ul {
 .buttons {
     align-items: flex-start;
     flex-wrap: wrap;
-    gap: 15px;
+    gap: 12px;
 }

 .mobile-sidebar {
@@ -120,11 +120,11 @@ class Api:
         }

     def _create_response_stream(self, kwargs: dict, conversation_id: str, provider: str, download_images: bool = True) -> Iterator:
-        def log_handler(text: str):
+        def decorated_log(text: str):
             debug.logs.append(text)
             if debug.logging:
-                print(text)
-        debug.log_handler = log_handler
+                debug.log_handler(text)
+        debug.log = decorated_log
         proxy = os.environ.get("G4F_PROXY")
         provider = kwargs.get("provider")
         model, provider_handler = get_model_and_provider(
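
The renamed decorated_log no longer replaces debug's output path outright: it records every line in debug.logs (so the API can return them with the response) and then forwards to the original debug.log_handler when logging is enabled. The pattern in isolation, with a SimpleNamespace standing in for g4f's debug module:

# Sketch of the log-decoration pattern; SimpleNamespace is a stand-in
# for g4f's debug module.
from types import SimpleNamespace

debug = SimpleNamespace(logs=[], logging=True, log_handler=print)

def decorated_log(text: str) -> None:
    debug.logs.append(text)      # keep a copy for the API response
    if debug.logging:
        debug.log_handler(text)  # delegate to the original handler

debug.log = decorated_log
debug.log("provider selected")   # captured in debug.logs and printed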
g4f/models.py
@@ -4,7 +4,6 @@ from dataclasses import dataclass

 from .Provider import IterListProvider, ProviderType
 from .Provider import (
-    AIChatFree,
     Blackbox,
     Blackbox2,
     BingCreateImages,
@@ -72,10 +71,10 @@ default = Model(
         ReplicateHome,
         Blackbox2,
         Blackbox,
+        Copilot,
         Free2GPT,
         DeepInfraChat,
         Airforce,
-        ChatGptEs,
         Cloudflare,
         Mhystical,
         PollinationsAI,
@@ -256,7 +255,7 @@ phi_3_5_mini = Model(
 gemini_pro = Model(
     name = 'gemini-pro',
     base_provider = 'Google DeepMind',
-    best_provider = IterListProvider([Blackbox, AIChatFree, Gemini, GeminiPro, Liaobots])
+    best_provider = IterListProvider([Blackbox, Gemini, GeminiPro, Liaobots])
 )

 gemini_flash = Model(
@@ -408,7 +407,6 @@ grok_beta = Model(
     best_provider = Liaobots
 )

-
 ### Perplexity AI ###
 sonar_online = Model(
     name = 'sonar-online',
@@ -429,7 +427,6 @@ nemotron_70b = Model(
     best_provider = IterListProvider([DeepInfraChat, HuggingChat, HuggingFace])
 )

-
 ### Teknium ###
 openhermes_2_5 = Model(
     name = 'openhermes-2.5',
@@ -466,13 +463,6 @@ neural_7b = Model(
     best_provider = Airforce
 )

-### PollinationsAI ###
-p1 = Model(
-    name = 'p1',
-    base_provider = 'PollinationsAI',
-    best_provider = PollinationsAI
-)
-
 ### Uncensored AI ###
 evil = Model(
     name = 'evil',
@@ -529,7 +519,6 @@ playground_v2_5 = ImageModel(
     best_provider = ReplicateHome
 )

-
 ### Flux AI ###
 flux = ImageModel(
     name = 'flux',
@@ -629,184 +618,181 @@ class ModelUtils:
         'gpt-3': gpt_35_turbo,

         # gpt-3.5
-        'gpt-3.5-turbo': gpt_35_turbo,
+        gpt_35_turbo.name: gpt_35_turbo,

         # gpt-4
-        'gpt-4': gpt_4,
-        'gpt-4-turbo': gpt_4_turbo,
+        gpt_4.name: gpt_4,
+        gpt_4_turbo.name: gpt_4_turbo,

         # gpt-4o
-        'gpt-4o': gpt_4o,
-        'gpt-4o-mini': gpt_4o_mini,
+        gpt_4o.name: gpt_4o,
+        gpt_4o_mini.name: gpt_4o_mini,

         # o1
-        'o1-preview': o1_preview,
-        'o1-mini': o1_mini,
+        o1_preview.name: o1_preview,
+        o1_mini.name: o1_mini,

         ### Meta ###
-        "meta-ai": meta,
+        meta.name: meta,

         # llama-2
-        'llama-2-7b': llama_2_7b,
+        llama_2_7b.name: llama_2_7b,

         # llama-3
-        'llama-3-8b': llama_3_8b,
+        llama_3_8b.name: llama_3_8b,

         # llama-3.1
-        'llama-3.1-8b': llama_3_1_8b,
-        'llama-3.1-70b': llama_3_1_70b,
-        'llama-3.1-405b': llama_3_1_405b,
+        llama_3_1_8b.name: llama_3_1_8b,
+        llama_3_1_70b.name: llama_3_1_70b,
+        llama_3_1_405b.name: llama_3_1_405b,

         # llama-3.2
-        'llama-3.2-1b': llama_3_2_1b,
-        'llama-3.2-11b': llama_3_2_11b,
+        llama_3_2_1b.name: llama_3_2_1b,
+        llama_3_2_11b.name: llama_3_2_11b,

         # llama-3.3
-        'llama-3.3-70b': llama_3_3_70b,
+        llama_3_3_70b.name: llama_3_3_70b,

         ### Mistral ###
-        'mixtral-8x7b': mixtral_8x7b,
-        'mistral-nemo': mistral_nemo,
-        'mistral-large': mistral_large,
+        mixtral_8x7b.name: mixtral_8x7b,
+        mistral_nemo.name: mistral_nemo,
+        mistral_large.name: mistral_large,

         ### NousResearch ###
-        'hermes-2-dpo': hermes_2_dpo,
-        'hermes-2-pro': hermes_2_pro,
-        'hermes-3': hermes_3,
+        hermes_2_dpo.name: hermes_2_dpo,
+        hermes_2_pro.name: hermes_2_pro,
+        hermes_3.name: hermes_3,

         ### Microsoft ###
-        'phi-2': phi_2,
-        'phi-3.5-mini': phi_3_5_mini,
+        phi_2.name: phi_2,
+        phi_3_5_mini.name: phi_3_5_mini,

         ### Google ###
         # gemini
-        'gemini': gemini,
-        'gemini-pro': gemini_pro,
-        'gemini-flash': gemini_flash,
+        gemini.name: gemini,
+        gemini_pro.name: gemini_pro,
+        gemini_flash.name: gemini_flash,

         # gemma
-        'gemma-2b': gemma_2b,
+        gemma_2b.name: gemma_2b,

         ### Anthropic ###
         # claude 3
-        'claude-3-opus': claude_3_opus,
-        'claude-3-sonnet': claude_3_sonnet,
-        'claude-3-haiku': claude_3_haiku,
+        claude_3_opus.name: claude_3_opus,
+        claude_3_sonnet.name: claude_3_sonnet,
+        claude_3_haiku.name: claude_3_haiku,

         # claude 3.5
-        'claude-3.5-sonnet': claude_3_5_sonnet,
+        claude_3_5_sonnet.name: claude_3_5_sonnet,

         ### Reka AI ###
-        'reka-core': reka_core,
+        reka_core.name: reka_core,

         ### Blackbox AI ###
-        'blackboxai': blackboxai,
-        'blackboxai-pro': blackboxai_pro,
+        blackboxai.name: blackboxai,
+        blackboxai_pro.name: blackboxai_pro,

         ### CohereForAI ###
-        'command-r+': command_r_plus,
-        'command-r': command_r,
+        command_r_plus.name: command_r_plus,
+        command_r.name: command_r,

         ### GigaChat ###
-        'gigachat': gigachat,
+        gigachat.name: gigachat,

         ### Qwen ###
         # qwen 1_5
-        'qwen-1.5-7b': qwen_1_5_7b,
+        qwen_1_5_7b.name: qwen_1_5_7b,

         # qwen 2
-        'qwen-2-72b': qwen_2_72b,
+        qwen_2_72b.name: qwen_2_72b,

         # qwen 2.5
-        'qwen-2.5-72b': qwen_2_5_72b,
-        'qwen-2.5-coder-32b': qwen_2_5_coder_32b,
-        'qwq-32b': qwq_32b,
+        qwen_2_5_72b.name: qwen_2_5_72b,
+        qwen_2_5_coder_32b.name: qwen_2_5_coder_32b,
+        qwq_32b.name: qwq_32b,

         ### Inflection ###
-        'pi': pi,
+        pi.name: pi,

         ### WizardLM ###
-        'wizardlm-2-8x22b': wizardlm_2_8x22b,
+        wizardlm_2_8x22b.name: wizardlm_2_8x22b,

         ### OpenChat ###
-        'openchat-3.5': openchat_3_5,
+        openchat_3_5.name: openchat_3_5,

         ### x.ai ###
-        'grok-beta': grok_beta,
+        grok_beta.name: grok_beta,

         ### Perplexity AI ###
-        'sonar-online': sonar_online,
-        'sonar-chat': sonar_chat,
+        sonar_online.name: sonar_online,
+        sonar_chat.name: sonar_chat,

         ### DeepSeek ###
-        'deepseek-coder': deepseek_coder,
+        deepseek_coder.name: deepseek_coder,

         ### TheBloke ###
-        'german-7b': german_7b,
+        german_7b.name: german_7b,

         ### Nvidia ###
-        'nemotron-70b': nemotron_70b,
+        nemotron_70b.name: nemotron_70b,

         ### Teknium ###
-        'openhermes-2.5': openhermes_2_5,
+        openhermes_2_5.name: openhermes_2_5,

         ### Liquid ###
-        'lfm-40b': lfm_40b,
+        lfm_40b.name: lfm_40b,

         ### HuggingFaceH4 ###
-        'zephyr-7b': zephyr_7b,
+        zephyr_7b.name: zephyr_7b,

         ### Inferless ###
-        'neural-7b': neural_7b,
-
-        ### PollinationsAI ###
-        'p1': p1,
+        neural_7b.name: neural_7b,

         ### Uncensored AI ###
-        'evil': evil,
+        evil.name: evil,

         ### Other ###
-        'midijourney': midijourney,
-        'turbo': turbo,
-        'unity': unity,
-        'rtist': rtist,
+        midijourney.name: midijourney,
+        turbo.name: turbo,
+        unity.name: unity,
+        rtist.name: rtist,

         #############
         ### Image ###
         #############

         ### Stability AI ###
-        'sdxl': sdxl,
-        'sd-3': sd_3,
+        sdxl.name: sdxl,
+        sd_3.name: sd_3,

         ### Playground ###
-        'playground-v2.5': playground_v2_5,
+        playground_v2_5.name: playground_v2_5,

         ### Flux AI ###
-        'flux': flux,
-        'flux-pro': flux_pro,
-        'flux-dev': flux_dev,
-        'flux-realism': flux_realism,
-        'flux-cablyai': flux_cablyai,
-        'flux-anime': flux_anime,
-        'flux-3d': flux_3d,
-        'flux-disney': flux_disney,
-        'flux-pixel': flux_pixel,
-        'flux-4o': flux_4o,
+        flux.name: flux,
+        flux_pro.name: flux_pro,
+        flux_dev.name: flux_dev,
+        flux_realism.name: flux_realism,
+        flux_cablyai.name: flux_cablyai,
+        flux_anime.name: flux_anime,
+        flux_3d.name: flux_3d,
+        flux_disney.name: flux_disney,
+        flux_pixel.name: flux_pixel,
+        flux_4o.name: flux_4o,

         ### OpenAI ###
-        'dall-e-3': dall_e_3,
+        dall_e_3.name: dall_e_3,

         ### Midjourney ###
-        'midjourney': midjourney,
+        midjourney.name: midjourney,

         ### Other ###
-        'any-dark': any_dark,
+        any_dark.name: any_dark,
     }

-# Create a list of all working models
-__models__ = {model.name: (model, providers) for model, providers in [
-    (model, [provider for provider in providers if provider.working])
+# Create a list of all models and his providers
+__models__ = {
+    model.name: (model, providers)
+    for model, providers in [
+        (model, model.best_provider.providers
+            if isinstance(model.best_provider, IterListProvider)
@@ -814,7 +800,5 @@ __models__ = {model.name: (model, providers) for model, providers in [
             if model.best_provider is not None
             else [])
         for model in ModelUtils.convert.values()]
-] if providers}
-# Update the ModelUtils.convert with the working models
-ModelUtils.convert = {model.name: model for model, _ in __models__.values()}
-_all_models = list(ModelUtils.convert.keys())
+}
+_all_models = list(__models__.keys())
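
The rebuilt __models__ pairs every model in ModelUtils.convert with a concrete provider list: an IterListProvider is unwrapped into its providers, a single best_provider becomes a one-element list, and None becomes an empty list; ModelUtils.convert itself is no longer overwritten. An equivalent loop form of the comprehension fragments visible above (a sketch that assumes the file's ModelUtils and IterListProvider are in scope; any filtering of non-working providers in the full file may differ):

# Loop-form sketch of the rebuilt __models__ mapping; relies on the
# file's own ModelUtils and IterListProvider names.
__models__ = {}
for model in ModelUtils.convert.values():
    if isinstance(model.best_provider, IterListProvider):
        providers = model.best_provider.providers
    elif model.best_provider is not None:
        providers = [model.best_provider]
    else:
        providers = []
    __models__[model.name] = (model, providers)

_all_models = list(__models__.keys())

The new test_all_providers_working above asserts provider.working over exactly this mapping.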