Update docs/providers-and-models.md g4f/models.py g4f/Provider/Blackbox.py g4f/Provider/FreeGpt.py g4f/Provider/GizAI.py g4f/Provider/Liaobots.py g4f/Provider/needs_auth/GeminiPro.py

This commit is contained in:
kqlio67 2025-01-03 09:11:48 +02:00
parent 63c68c12e9
commit ee0affd8dc
7 changed files with 87 additions and 43 deletions

View file

@@ -22,7 +22,7 @@ This document provides an overview of various AI providers and models, including
|----------|-------------|--------------|---------------|--------|--------|------|------| |----------|-------------|--------------|---------------|--------|--------|------|------|
|[api.airforce](https://api.airforce)|`g4f.Provider.Airforce`|`phi-2, openchat-3.5, deepseek-coder, hermes-2-dpo, hermes-2-pro, openhermes-2.5, lfm-40b, german-7b, llama-2-7b, llama-3.1-8b, llama-3.1-70b, neural-7b, zephyr-7b, evil,`|`sdxl, flux-pro, flux, flux-realism, flux-anime, flux-3d, flux-disney, flux-pixel, flux-4o, any-dark, midjourney, dall-e-3`|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌+✔| |[api.airforce](https://api.airforce)|`g4f.Provider.Airforce`|`phi-2, openchat-3.5, deepseek-coder, hermes-2-dpo, hermes-2-pro, openhermes-2.5, lfm-40b, german-7b, llama-2-7b, llama-3.1-8b, llama-3.1-70b, neural-7b, zephyr-7b, evil,`|`sdxl, flux-pro, flux, flux-realism, flux-anime, flux-3d, flux-disney, flux-pixel, flux-4o, any-dark, midjourney, dall-e-3`|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌+✔|
|[amigochat.io](https://amigochat.io/chat/)|`g4f.Provider.AmigoChat`|✔|✔|❌|✔|![Error](https://img.shields.io/badge/RateLimit-f48d37)|❌| |[amigochat.io](https://amigochat.io/chat/)|`g4f.Provider.AmigoChat`|✔|✔|❌|✔|![Error](https://img.shields.io/badge/RateLimit-f48d37)|❌|
|[blackbox.ai](https://www.blackbox.ai)|`g4f.Provider.Blackbox`|`blackboxai, gpt-4, gpt-4o, gemini-pro, claude-3.5-sonnet, blackboxai-pro, llama-3.1-8b, llama-3.1-70b, llama-3.3-70b, mixtral-7b, deepseek-chat, dbrx-instruct, qwq-32b, hermes-2-dpo`|`flux`|`blackboxai, gpt-4o, gemini-pro, gemini-flash, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b`|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌| |[blackbox.ai](https://www.blackbox.ai)|`g4f.Provider.Blackbox`|`blackboxai, gpt-4, gpt-4o, gemini-1.5-pro, claude-3.5-sonnet, blackboxai-pro, llama-3.1-8b, llama-3.1-70b, llama-3.3-70b, mixtral-7b, deepseek-chat, dbrx-instruct, qwq-32b, hermes-2-dpo`|`flux`|`blackboxai, gpt-4o, gemini-1.5-pro, gemini-1.5-flash, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b`|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌|
|[blackbox.ai](https://www.blackbox.ai)|`g4f.Provider.BlackboxCreateAgent`|`llama-3.1-70b`|`flux`|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌| |[blackbox.ai](https://www.blackbox.ai)|`g4f.Provider.BlackboxCreateAgent`|`llama-3.1-70b`|`flux`|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌|
|[chatgpt.com](https://chatgpt.com)|`g4f.Provider.ChatGpt`|✔|❌|❌|✔|![Error](https://img.shields.io/badge/HTTPError-f48d37)|❌| |[chatgpt.com](https://chatgpt.com)|`g4f.Provider.ChatGpt`|✔|❌|❌|✔|![Error](https://img.shields.io/badge/HTTPError-f48d37)|❌|
|[chatgpt.es](https://chatgpt.es)|`g4f.Provider.ChatGptEs`|`gpt-4, gpt-4o, gpt-4o-mini`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌| |[chatgpt.es](https://chatgpt.es)|`g4f.Provider.ChatGptEs`|`gpt-4, gpt-4o, gpt-4o-mini`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌|
@@ -34,9 +34,9 @@ This document provides an overview of various AI providers and models, including
|[deepinfra.com/chat](https://deepinfra.com/chat)|`g4f.Provider.DeepInfraChat`|`llama-3.1-8b, llama-3.1-70b, qwq-32b, wizardlm-2-8x22b, qwen-2-72b, qwen-2.5-coder-32b, nemotron-70b`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌| |[deepinfra.com/chat](https://deepinfra.com/chat)|`g4f.Provider.DeepInfraChat`|`llama-3.1-8b, llama-3.1-70b, qwq-32b, wizardlm-2-8x22b, qwen-2-72b, qwen-2.5-coder-32b, nemotron-70b`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌|
|[black-forest-labs-flux-1-dev.hf.space](https://black-forest-labs-flux-1-dev.hf.space)|`g4f.Provider.Flux`|❌|`flux-dev`|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌| |[black-forest-labs-flux-1-dev.hf.space](https://black-forest-labs-flux-1-dev.hf.space)|`g4f.Provider.Flux`|❌|`flux-dev`|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌|
|[chat10.free2gpt.xyz](https://chat10.free2gpt.xyz)|`g4f.Provider.Free2GPT`|`mistral-7b`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌| |[chat10.free2gpt.xyz](https://chat10.free2gpt.xyz)|`g4f.Provider.Free2GPT`|`mistral-7b`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌|
|[freegptsnav.aifree.site](https://freegptsnav.aifree.site)|`g4f.Provider.FreeGpt`|`gemini-pro`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌| |[freegptsnav.aifree.site](https://freegptsnav.aifree.site)|`g4f.Provider.FreeGpt`|`gemini-1.5-pro`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌|
|[app.giz.ai/assistant](https://app.giz.ai/assistant)|`g4f.Provider.GizAI`|`gemini-flash`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌| |[app.giz.ai/assistant](https://app.giz.ai/assistant)|`g4f.Provider.GizAI`|`gemini-1.5-flash`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌|
|[liaobots.work](https://liaobots.work)|`g4f.Provider.Liaobots`|`grok-beta, gpt-4o-mini, gpt-4o, gpt-4, o1-preview, o1-mini, claude-3-opus, claude-3.5-sonnet, claude-3-sonnet, gemini-flash, gemini-pro`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌| |[liaobots.work](https://liaobots.work)|`g4f.Provider.Liaobots`|`grok-2, gpt-4o-mini, gpt-4o, gpt-4, o1-preview, o1-mini, claude-3-opus, claude-3.5-sonnet, claude-3-sonnet, gemini-1.5-flash, gemini-1.5-pro, gemini-2.0-flash, gemini-2.0-flash-thinking`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌|
|[mhystical.cc](https://mhystical.cc)|`g4f.Provider.Mhystical`|`gpt-4`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌| |[mhystical.cc](https://mhystical.cc)|`g4f.Provider.Mhystical`|`gpt-4`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌|
|[labs.perplexity.ai](https://labs.perplexity.ai)|`g4f.Provider.PerplexityLabs`|`sonar-online, sonar-chat, llama-3.3-70b, llama-3.1-8b, llama-3.1-70b, lfm-40b`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌| |[labs.perplexity.ai](https://labs.perplexity.ai)|`g4f.Provider.PerplexityLabs`|`sonar-online, sonar-chat, llama-3.3-70b, llama-3.1-8b, llama-3.1-70b, lfm-40b`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌|
|[pi.ai/talk](https://pi.ai/talk)|`g4f.Provider.Pi`|`pi`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌| |[pi.ai/talk](https://pi.ai/talk)|`g4f.Provider.Pi`|`pi`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌|
@@ -64,7 +64,7 @@ This document provides an overview of various AI providers and models, including
|[deepinfra.com](https://deepinfra.com)|`g4f.Provider.DeepInfra`|✔|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|✔| |[deepinfra.com](https://deepinfra.com)|`g4f.Provider.DeepInfra`|✔|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|✔|
|[deepinfra.com](https://deepinfra.com)|`g4f.Provider.DeepInfraImage`|❌|✔|❌|❌|![](https://img.shields.io/badge/Active-brightgreen)|✔| |[deepinfra.com](https://deepinfra.com)|`g4f.Provider.DeepInfraImage`|❌|✔|❌|❌|![](https://img.shields.io/badge/Active-brightgreen)|✔|
|[gemini.google.com](https://gemini.google.com)|`g4f.Provider.Gemini`|`gemini`|`gemini`|`gemini`|❌|![](https://img.shields.io/badge/Active-brightgreen)|✔| |[gemini.google.com](https://gemini.google.com)|`g4f.Provider.Gemini`|`gemini`|`gemini`|`gemini`|❌|![](https://img.shields.io/badge/Active-brightgreen)|✔|
|[ai.google.dev](https://ai.google.dev)|`g4f.Provider.GeminiPro`|`gemini-pro`|❌|`gemini-pro`|❌|![](https://img.shields.io/badge/Active-brightgreen)|✔| |[ai.google.dev](https://ai.google.dev)|`g4f.Provider.GeminiPro`|`gemini-1.5-flash, gemini-1.5-pro, gemini-2.0-flash`|❌|`gemini-1.5-pro`|❌|![](https://img.shields.io/badge/Active-brightgreen)|✔|
|[github.com/copilot](https://github.com/copilot)|`g4f.Provider.GithubCopilot`|✔|❌|❌|❌|![](https://img.shields.io/badge/Active-brightgreen)|✔| |[github.com/copilot](https://github.com/copilot)|`g4f.Provider.GithubCopilot`|✔|❌|❌|❌|![](https://img.shields.io/badge/Active-brightgreen)|✔|
|[glhf.chat](https://glhf.chat)|`g4f.Provider.GlhfChat`|✔|❌|❌|❌|![](https://img.shields.io/badge/Active-brightgreen)|✔| |[glhf.chat](https://glhf.chat)|`g4f.Provider.GlhfChat`|✔|❌|❌|❌|![](https://img.shields.io/badge/Active-brightgreen)|✔|
|[console.groq.com/playground](https://console.groq.com/playground)|`g4f.Provider.Groq`|✔|❌|✔|❌|![](https://img.shields.io/badge/Active-brightgreen)|✔| |[console.groq.com/playground](https://console.groq.com/playground)|`g4f.Provider.Groq`|✔|❌|✔|❌|![](https://img.shields.io/badge/Active-brightgreen)|✔|
@@ -118,8 +118,10 @@ This document provides an overview of various AI providers and models, including
|phi-2|Microsoft|1+ Providers|[huggingface.co](https://huggingface.co/microsoft/phi-2)| |phi-2|Microsoft|1+ Providers|[huggingface.co](https://huggingface.co/microsoft/phi-2)|
|phi-3.5-mini|Microsoft|2+ Providers|[huggingface.co](https://huggingface.co/microsoft/Phi-3.5-mini-instruct)| |phi-3.5-mini|Microsoft|2+ Providers|[huggingface.co](https://huggingface.co/microsoft/Phi-3.5-mini-instruct)|
|gemini|Google DeepMind|1+ Providers|[deepmind.google](http://deepmind.google/technologies/gemini/)| |gemini|Google DeepMind|1+ Providers|[deepmind.google](http://deepmind.google/technologies/gemini/)|
|gemini-flash|Google DeepMind|4+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/flash/)| |gemini-1.5-flash|Google DeepMind|4+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/flash/)|
|gemini-pro|Google DeepMind|4+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/pro/)| |gemini-1.5-pro|Google DeepMind|4+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/pro/)|
|gemini-2.0-flash|Google DeepMind|2+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/flash/)|
|gemini-2.0-flash-thinking|Google DeepMind|1+ Providers|[deepmind.google](https://ai.google.dev/gemini-api/docs/thinking-mode)|
|gemma-2b|Google|1+ Providers|[huggingface.co](https://huggingface.co/google/gemma-2b)| |gemma-2b|Google|1+ Providers|[huggingface.co](https://huggingface.co/google/gemma-2b)|
|claude-3-haiku|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-haiku)| |claude-3-haiku|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-haiku)|
|claude-3-sonnet|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-family)| |claude-3-sonnet|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-family)|
@@ -140,7 +142,7 @@ This document provides an overview of various AI providers and models, including
|deepseek-coder|DeepSeek|1+ Providers|[huggingface.co](https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Instruct)| |deepseek-coder|DeepSeek|1+ Providers|[huggingface.co](https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Instruct)|
|wizardlm-2-8x22b|WizardLM|1+ Providers|[huggingface.co](https://huggingface.co/alpindale/WizardLM-2-8x22B)| |wizardlm-2-8x22b|WizardLM|1+ Providers|[huggingface.co](https://huggingface.co/alpindale/WizardLM-2-8x22B)|
|openchat-3.5|OpenChat|1+ Providers|[huggingface.co](https://huggingface.co/openchat/openchat_3.5)| |openchat-3.5|OpenChat|1+ Providers|[huggingface.co](https://huggingface.co/openchat/openchat_3.5)|
|grok-beta|x.ai|1+ Providers|[x.ai](https://x.ai/blog/grok-2)| |grok-2|x.ai|1+ Providers|[x.ai](https://x.ai/blog/grok-2)|
|sonar-online|Perplexity AI|1+ Providers|[docs.perplexity.ai](https://docs.perplexity.ai/)| |sonar-online|Perplexity AI|1+ Providers|[docs.perplexity.ai](https://docs.perplexity.ai/)|
|sonar-chat|Perplexity AI|1+ Providers|[docs.perplexity.ai](https://docs.perplexity.ai/)| |sonar-chat|Perplexity AI|1+ Providers|[docs.perplexity.ai](https://docs.perplexity.ai/)|
|nemotron-70b|Nvidia|3+ Providers|[build.nvidia.com](https://build.nvidia.com/nvidia/llama-3_1-nemotron-70b-instruct)| |nemotron-70b|Nvidia|3+ Providers|[build.nvidia.com](https://build.nvidia.com/nvidia/llama-3_1-nemotron-70b-instruct)|

View file

@@ -151,7 +151,8 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
model_aliases = { model_aliases = {
### chat ### ### chat ###
"gpt-4": "gpt-4o", "gpt-4": "gpt-4o",
"gemini-flash": "gemini-1.5-flash", "gemini-1.5-flash": "gemini-1.5-flash",
"gemini-1.5-pro": "gemini-pro",
"claude-3.5-sonnet": "claude-sonnet-3.5", "claude-3.5-sonnet": "claude-sonnet-3.5",
"llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct-Turbo", "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
"mixtral-7b": "mistralai/Mistral-7B-Instruct-v0.2", "mixtral-7b": "mistralai/Mistral-7B-Instruct-v0.2",

View file

@@ -26,7 +26,7 @@ class FreeGpt(AsyncGeneratorProvider, ProviderModelMixin):
supports_message_history = True supports_message_history = True
supports_system_message = True supports_system_message = True
default_model = 'gemini-pro' default_model = 'gemini-1.5-pro'
@classmethod @classmethod
async def create_async_generator( async def create_async_generator(

View file

@@ -18,7 +18,7 @@ class GizAI(AsyncGeneratorProvider, ProviderModelMixin):
default_model = 'chat-gemini-flash' default_model = 'chat-gemini-flash'
models = [default_model] models = [default_model]
model_aliases = {"gemini-flash": "chat-gemini-flash",} model_aliases = {"gemini-1.5-flash": "chat-gemini-flash",}
@classmethod @classmethod
def get_model(cls, model: str) -> str: def get_model(cls, model: str) -> str:

View file

@@ -18,8 +18,8 @@ models = {
"tokenLimit": 7800, "tokenLimit": 7800,
"context": "8K", "context": "8K",
}, },
"gpt-4o-2024-08-06": { "gpt-4o-2024-11-20": {
"id": "gpt-4o-2024-08-06", "id": "gpt-4o-2024-11-20",
"name": "GPT-4o", "name": "GPT-4o",
"model": "ChatGPT", "model": "ChatGPT",
"provider": "OpenAI", "provider": "OpenAI",
@@ -54,9 +54,9 @@ models = {
"tokenLimit": 100000, "tokenLimit": 100000,
"context": "128K", "context": "128K",
}, },
"grok-beta": { "grok-2": {
"id": "grok-beta", "id": "grok-2",
"name": "Grok-Beta", "name": "Grok-2",
"model": "Grok", "model": "Grok",
"provider": "x.ai", "provider": "x.ai",
"maxLength": 400000, "maxLength": 400000,
@@ -99,7 +99,7 @@ models = {
"tokenLimit": 200000, "tokenLimit": 200000,
"context": "200K", "context": "200K",
}, },
"claude-3-opus-20240229-t": { "claude-3-opus-20240229-t": {
"id": "claude-3-opus-20240229-t", "id": "claude-3-opus-20240229-t",
"name": "Claude-3-Opus-T", "name": "Claude-3-Opus-T",
"model": "Claude", "model": "Claude",
@@ -109,7 +109,7 @@ models = {
"context": "200K", "context": "200K",
}, },
"claude-3-5-sonnet-20241022-t": { "claude-3-5-sonnet-20241022-t": {
"id": "claude-3-5-sonnet-20241022-t", "id": "claude-3-5-sonnet-20241022-t",
"name": "Claude-3.5-Sonnet-V2-T", "name": "Claude-3.5-Sonnet-V2-T",
"model": "Claude", "model": "Claude",
"provider": "Anthropic", "provider": "Anthropic",
@@ -117,6 +117,24 @@ models = {
"tokenLimit": 200000, "tokenLimit": 200000,
"context": "200K", "context": "200K",
}, },
"gemini-2.0-flash-exp": {
"id": "gemini-2.0-flash-exp",
"name": "Gemini-2.0-Flash-Exp",
"model": "Gemini",
"provider": "Google",
"maxLength": 4000000,
"tokenLimit": 1000000,
"context": "1024K",
},
"gemini-2.0-flash-thinking-exp": {
"id": "gemini-2.0-flash-thinking-exp",
"name": "Gemini-2.0-Flash-Thinking-Exp",
"model": "Gemini",
"provider": "Google",
"maxLength": 4000000,
"tokenLimit": 1000000,
"context": "1024K",
},
"gemini-1.5-flash-002": { "gemini-1.5-flash-002": {
"id": "gemini-1.5-flash-002", "id": "gemini-1.5-flash-002",
"name": "Gemini-1.5-Flash-1M", "name": "Gemini-1.5-Flash-1M",
@@ -128,29 +146,28 @@ models = {
}, },
"gemini-1.5-pro-002": { "gemini-1.5-pro-002": {
"id": "gemini-1.5-pro-002", "id": "gemini-1.5-pro-002",
"name": "Gemini-1.5-Pro-1M", "name": "Gemini-1.5-Pro-1M",
"model": "Gemini", "model": "Gemini",
"provider": "Google", "provider": "Google",
"maxLength": 4000000, "maxLength": 4000000,
"tokenLimit": 1000000, "tokenLimit": 1000000,
"context": "1024K", "context": "1024K",
} },
} }
class Liaobots(AsyncGeneratorProvider, ProviderModelMixin): class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://liaobots.site" url = "https://liaobots.site"
working = True working = True
supports_message_history = True supports_message_history = True
supports_system_message = True supports_system_message = True
default_model = "gpt-4o-2024-08-06" default_model = "gpt-4o-2024-11-20"
models = list(models.keys()) models = list(models.keys())
model_aliases = { model_aliases = {
"gpt-4o-mini": "gpt-4o-mini-free", "gpt-4o-mini": "gpt-4o-mini-free",
"gpt-4o": "gpt-4o-2024-08-06", "gpt-4o": default_model,
"gpt-4o-mini": "gpt-4o-mini-2024-07-18", "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
"gpt-4": "gpt-4o-2024-08-06", "gpt-4": default_model,
"o1-preview": "o1-preview-2024-09-12", "o1-preview": "o1-preview-2024-09-12",
"o1-mini": "o1-mini-2024-09-12", "o1-mini": "o1-mini-2024-09-12",
@@ -162,8 +179,10 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
"claude-3-opus": "claude-3-opus-20240229-t", "claude-3-opus": "claude-3-opus-20240229-t",
"claude-3.5-sonnet": "claude-3-5-sonnet-20241022-t", "claude-3.5-sonnet": "claude-3-5-sonnet-20241022-t",
"gemini-flash": "gemini-1.5-flash-002", "gemini-2.0-flash": "gemini-2.0-flash-exp",
"gemini-pro": "gemini-1.5-pro-002" "gemini-2.0-flash-thinking": "gemini-2.0-flash-thinking-exp",
"gemini-1.5-flash": "gemini-1.5-flash-002",
"gemini-1.5-pro": "gemini-1.5-pro-002"
} }
_auth_code = "" _auth_code = ""

View file

@@ -27,8 +27,10 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
default_vision_model = default_model default_vision_model = default_model
fallback_models = [default_model, "gemini-2.0-flash-exp", "gemini-pro", "gemini-1.5-flash", "gemini-1.5-flash-8b"] fallback_models = [default_model, "gemini-2.0-flash-exp", "gemini-pro", "gemini-1.5-flash", "gemini-1.5-flash-8b"]
model_aliases = { model_aliases = {
"gemini-flash": "gemini-1.5-flash", "gemini-1.5-flash": "gemini-1.5-flash",
"gemini-flash": "gemini-1.5-flash-8b", "gemini-1.5-flash": "gemini-1.5-flash-8b",
"gemini-1.5-pro": "gemini-pro",
"gemini-2.0-flash": "gemini-2.0-flash-exp",
} }
@classmethod @classmethod

View file

@@ -252,24 +252,38 @@ phi_3_5_mini = Model(
### Google DeepMind ### ### Google DeepMind ###
# gemini # gemini
gemini_pro = Model(
name = 'gemini-pro',
base_provider = 'Google DeepMind',
best_provider = IterListProvider([Blackbox, Gemini, GeminiPro, Liaobots])
)
gemini_flash = Model(
name = 'gemini-flash',
base_provider = 'Google DeepMind',
best_provider = IterListProvider([Blackbox, Gemini, GeminiPro, Liaobots])
)
gemini = Model( gemini = Model(
name = 'gemini', name = 'gemini',
base_provider = 'Google DeepMind', base_provider = 'Google',
best_provider = Gemini best_provider = Gemini
) )
# gemini-1.5
gemini_1_5_pro = Model(
name = 'gemini-1.5-pro',
base_provider = 'Google DeepMind',
best_provider = IterListProvider([Blackbox, Gemini, GeminiPro, Liaobots])
)
gemini_1_5_flash = Model(
name = 'gemini-1.5-flash',
base_provider = 'Google DeepMind',
best_provider = IterListProvider([Blackbox, Gemini, GeminiPro, Liaobots])
)
# gemini-2.0
gemini_2_0_flash = Model(
name = 'gemini-2.0-flash',
base_provider = 'Google DeepMind',
best_provider = IterListProvider([GeminiPro, Liaobots])
)
gemini_2_0_flash_thinking = Model(
name = 'gemini-2.0-flash-thinking',
base_provider = 'Google DeepMind',
best_provider = Liaobots
)
# gemma # gemma
gemma_2b = Model( gemma_2b = Model(
name = 'gemma-2b', name = 'gemma-2b',
@@ -696,8 +710,14 @@ class ModelUtils:
### Google ### ### Google ###
# gemini # gemini
gemini.name: gemini, gemini.name: gemini,
gemini_pro.name: gemini_pro,
gemini_flash.name: gemini_flash, # gemini-1.5
gemini_1_5_pro.name: gemini_1_5_pro,
gemini_1_5_flash.name: gemini_1_5_flash,
# gemini-2.0
gemini_2_0_flash.name: gemini_2_0_flash,
gemini_2_0_flash_thinking.name: gemini_2_0_flash_thinking,
# gemma # gemma
gemma_2b.name: gemma_2b, gemma_2b.name: gemma_2b,