mirror of
https://github.com/xtekky/gpt4free.git
synced 2025-12-15 14:51:19 -08:00
Major Provider Updates and Model Support Enhancements (#2467)
* refactor(g4f/Provider/Airforce.py): improve model handling and filtering - Add hidden_models set to exclude specific models - Add evil alias for uncensored model handling - Extend filtering for model-specific response tokens - Add response buffering for streamed content - Update model fetching with error handling * refactor(g4f/Provider/Blackbox.py): improve caching and model handling - Add caching system for validated values with file-based storage - Rename 'flux' model to 'ImageGeneration' and update references - Add temperature, top_p and max_tokens parameters to generator - Simplify HTTP headers and remove redundant options - Add model alias mapping for ImageGeneration - Add file system utilities for cache management * feat(g4f/Provider/RobocodersAPI.py): add caching and error handling - Add file-based caching system for access tokens and sessions - Add robust error handling with specific error messages - Add automatic dialog continuation on resource limits - Add HTML parsing with BeautifulSoup for token extraction - Add debug logging for error tracking - Add timeout configuration for API requests * refactor(g4f/Provider/DarkAI.py): update DarkAI default model and aliases - Change default model from llama-3-405b to llama-3-70b - Remove llama-3-405b from supported models list - Remove llama-3.1-405b from model aliases * feat(g4f/Provider/Blackbox2.py): add image generation support - Add image model 'flux' with dedicated API endpoint - Refactor generator to support both text and image outputs - Extract headers into reusable static method - Add type hints for AsyncGenerator return type - Split generation logic into _generate_text and _generate_image methods - Add ImageResponse handling for image generation results BREAKING CHANGE: create_async_generator now returns AsyncGenerator instead of AsyncResult * refactor(g4f/Provider/ChatGptEs.py): update ChatGptEs model configuration - Update models list to include gpt-3.5-turbo - Remove chatgpt-4o-latest from 
supported models - Remove model_aliases mapping for gpt-4o * feat(g4f/Provider/DeepInfraChat.py): add Accept-Language header support - Add Accept-Language header for internationalization - Maintain existing header configuration - Improve request compatibility with language preferences * refactor(g4f/Provider/needs_auth/Gemini.py): add ProviderModelMixin inheritance - Add ProviderModelMixin to class inheritance - Import ProviderModelMixin from base_provider - Move BaseConversation import to base_provider imports * refactor(g4f/Provider/Liaobots.py): update model details and aliases - Add version suffix to o1 model IDs - Update model aliases for o1-preview and o1-mini - Standardize version format across model definitions * refactor(g4f/Provider/PollinationsAI.py): enhance model support and generation - Split generation logic into dedicated image/text methods - Add additional text models including sur and claude - Add width/height parameters for image generation - Add model existence validation - Add hasattr checks for model lists initialization * chore(gitignore): add provider cache directory - Add g4f/Provider/.cache to gitignore patterns * refactor(g4f/Provider/ReplicateHome.py): update model configuration - Update default model to gemma-2b-it - Add default_image_model configuration - Remove llava-13b from supported models - Simplify request headers * feat(g4f/models.py): expand provider and model support - Add new providers DarkAI and PollinationsAI - Add new models for Mistral, Flux and image generation - Update provider lists for existing models - Add P1 and Evil models with experimental providers BREAKING CHANGE: Remove llava-13b model support * refactor(Airforce): Update type hint for split_message return - Change return type of from to for consistency with import. - Maintain overall functionality and structure of the class. - Ensure compatibility with type hinting standards in Python. 
* refactor(g4f/Provider/Airforce.py): Update type hint for split_message return - Change return type of 'split_message' from 'list[str]' to 'List[str]' for consistency with import. - Maintain overall functionality and structure of the 'Airforce' class. - Ensure compatibility with type hinting standards in Python. * feat(g4f/Provider/RobocodersAPI.py): Add support for optional BeautifulSoup dependency - Introduce a check for the BeautifulSoup library and handle its absence gracefully. - Raise an error if BeautifulSoup is not installed, prompting the user to install it. - Remove direct import of BeautifulSoup to avoid import errors when the library is missing. --------- Co-authored-by: kqlio67 <>
This commit is contained in:
parent
5969983d83
commit
a358b28f47
16 changed files with 586 additions and 259 deletions
113
g4f/models.py
113
g4f/models.py
|
|
@@ -14,6 +14,7 @@ from .Provider import (
|
|||
Cloudflare,
|
||||
Copilot,
|
||||
CopilotAccount,
|
||||
DarkAI,
|
||||
DDG,
|
||||
DeepInfraChat,
|
||||
Free2GPT,
|
||||
|
|
@@ -33,6 +34,7 @@ from .Provider import (
|
|||
PerplexityLabs,
|
||||
Pi,
|
||||
Pizzagpt,
|
||||
PollinationsAI,
|
||||
Reka,
|
||||
ReplicateHome,
|
||||
RubiksAI,
|
||||
|
|
@@ -93,20 +95,20 @@ default = Model(
|
|||
gpt_35_turbo = Model(
|
||||
name = 'gpt-3.5-turbo',
|
||||
base_provider = 'OpenAI',
|
||||
best_provider = Blackbox
|
||||
best_provider = IterListProvider([Blackbox, ChatGptEs, PollinationsAI, DarkAI])
|
||||
)
|
||||
|
||||
# gpt-4
|
||||
gpt_4o = Model(
|
||||
name = 'gpt-4o',
|
||||
base_provider = 'OpenAI',
|
||||
best_provider = IterListProvider([Blackbox, ChatGptEs, ChatGpt, AmigoChat, Airforce, Liaobots, OpenaiChat])
|
||||
best_provider = IterListProvider([Blackbox, ChatGptEs, PollinationsAI, DarkAI, ChatGpt, AmigoChat, Airforce, Liaobots, OpenaiChat])
|
||||
)
|
||||
|
||||
gpt_4o_mini = Model(
|
||||
name = 'gpt-4o-mini',
|
||||
base_provider = 'OpenAI',
|
||||
best_provider = IterListProvider([DDG, ChatGptEs, Pizzagpt, ChatGpt, AmigoChat, Airforce, RubiksAI, MagickPen, Liaobots, OpenaiChat])
|
||||
best_provider = IterListProvider([DDG, Blackbox, ChatGptEs, Pizzagpt, ChatGpt, AmigoChat, Airforce, RubiksAI, MagickPen, Liaobots, OpenaiChat])
|
||||
)
|
||||
|
||||
gpt_4_turbo = Model(
|
||||
|
|
@@ -118,7 +120,7 @@ gpt_4_turbo = Model(
|
|||
gpt_4 = Model(
|
||||
name = 'gpt-4',
|
||||
base_provider = 'OpenAI',
|
||||
best_provider = IterListProvider([DDG, Copilot, OpenaiChat, Liaobots, Airforce])
|
||||
best_provider = IterListProvider([DDG, Blackbox, PollinationsAI, Copilot, OpenaiChat, Liaobots, Airforce])
|
||||
)
|
||||
|
||||
# o1
|
||||
|
|
@@ -171,7 +173,7 @@ llama_3_1_8b = Model(
|
|||
llama_3_1_70b = Model(
|
||||
name = "llama-3.1-70b",
|
||||
base_provider = "Meta Llama",
|
||||
best_provider = IterListProvider([DDG, DeepInfraChat, Blackbox, Blackbox2, TeachAnything, Airforce, RubiksAI, HuggingChat, HuggingFace, PerplexityLabs])
|
||||
best_provider = IterListProvider([DDG, DeepInfraChat, Blackbox, Blackbox2, TeachAnything, PollinationsAI, DarkAI, Airforce, RubiksAI, HuggingChat, HuggingFace, PerplexityLabs])
|
||||
)
|
||||
|
||||
llama_3_1_405b = Model(
|
||||
|
|
@@ -228,7 +230,13 @@ mistral_tiny = Model(
|
|||
mistral_nemo = Model(
|
||||
name = "mistral-nemo",
|
||||
base_provider = "Mistral",
|
||||
best_provider = IterListProvider([HuggingChat, AmigoChat, HuggingFace])
|
||||
best_provider = IterListProvider([PollinationsAI, HuggingChat, AmigoChat, HuggingFace])
|
||||
)
|
||||
|
||||
mistral_large = Model(
|
||||
name = "mistral-large",
|
||||
base_provider = "Mistral",
|
||||
best_provider = PollinationsAI
|
||||
)
|
||||
|
||||
### NousResearch ###
|
||||
|
|
@@ -320,7 +328,7 @@ claude_3_haiku = Model(
|
|||
claude_3_5_sonnet = Model(
|
||||
name = 'claude-3.5-sonnet',
|
||||
base_provider = 'Anthropic',
|
||||
best_provider = IterListProvider([Blackbox, AmigoChat, Liaobots])
|
||||
best_provider = IterListProvider([Blackbox, PollinationsAI, AmigoChat, Liaobots])
|
||||
)
|
||||
|
||||
claude_3_5_haiku = Model(
|
||||
|
|
@@ -353,7 +361,7 @@ blackboxai_pro = Model(
|
|||
command_r_plus = Model(
|
||||
name = 'command-r-plus',
|
||||
base_provider = 'CohereForAI',
|
||||
best_provider = IterListProvider([HuggingChat, AmigoChat])
|
||||
best_provider = IterListProvider([PollinationsAI, HuggingChat, AmigoChat])
|
||||
)
|
||||
|
||||
### Qwen ###
|
||||
|
|
@@ -381,7 +389,7 @@ qwen_2_5_72b = Model(
|
|||
qwen_2_5_coder_32b = Model(
|
||||
name = 'qwen-2.5-coder-32b',
|
||||
base_provider = 'Qwen',
|
||||
best_provider = IterListProvider([DeepInfraChat, HuggingChat, HuggingFace])
|
||||
best_provider = IterListProvider([DeepInfraChat, PollinationsAI, HuggingChat, HuggingFace])
|
||||
)
|
||||
|
||||
qwq_32b = Model(
|
||||
|
|
@@ -431,13 +439,6 @@ wizardlm_2_8x22b = Model(
|
|||
best_provider = DeepInfraChat
|
||||
)
|
||||
|
||||
### Yorickvp ###
|
||||
llava_13b = Model(
|
||||
name = 'llava-13b',
|
||||
base_provider = 'Yorickvp',
|
||||
best_provider = ReplicateHome
|
||||
)
|
||||
|
||||
### OpenChat ###
|
||||
openchat_3_5 = Model(
|
||||
name = 'openchat-3.5',
|
||||
|
|
@@ -551,11 +552,18 @@ jamba_mini = Model(
|
|||
best_provider = AmigoChat
|
||||
)
|
||||
|
||||
### llmplayground.net ###
|
||||
any_uncensored = Model(
|
||||
name = 'any-uncensored',
|
||||
base_provider = 'llmplayground.net',
|
||||
best_provider = Airforce
|
||||
### PollinationsAI ###
|
||||
p1 = Model(
|
||||
name = 'p1',
|
||||
base_provider = 'PollinationsAI',
|
||||
best_provider = PollinationsAI
|
||||
)
|
||||
|
||||
### Uncensored AI ###
|
||||
evil = Model(
|
||||
name = 'evil',
|
||||
base_provider = 'Evil Mode - Experimental',
|
||||
best_provider = IterListProvider([PollinationsAI, Airforce])
|
||||
)
|
||||
|
||||
#############
|
||||
|
|
@@ -588,13 +596,13 @@ playground_v2_5 = ImageModel(
|
|||
flux = ImageModel(
|
||||
name = 'flux',
|
||||
base_provider = 'Flux AI',
|
||||
best_provider = IterListProvider([Blackbox, Airforce])
|
||||
best_provider = IterListProvider([Blackbox, Blackbox2, PollinationsAI, Airforce])
|
||||
)
|
||||
|
||||
flux_pro = ImageModel(
|
||||
name = 'flux-pro',
|
||||
base_provider = 'Flux AI',
|
||||
best_provider = Airforce
|
||||
best_provider = IterListProvider([PollinationsAI, Airforce])
|
||||
)
|
||||
|
||||
flux_dev = ImageModel(
|
||||
|
|
@@ -606,19 +614,25 @@ flux_dev = ImageModel(
|
|||
flux_realism = ImageModel(
|
||||
name = 'flux-realism',
|
||||
base_provider = 'Flux AI',
|
||||
best_provider = IterListProvider([Airforce, AmigoChat])
|
||||
best_provider = IterListProvider([PollinationsAI, Airforce, AmigoChat])
|
||||
)
|
||||
|
||||
flux_cablyai = Model(
|
||||
name = 'flux-cablyai',
|
||||
base_provider = 'Flux AI',
|
||||
best_provider = PollinationsAI
|
||||
)
|
||||
|
||||
flux_anime = ImageModel(
|
||||
name = 'flux-anime',
|
||||
base_provider = 'Flux AI',
|
||||
best_provider = Airforce
|
||||
best_provider = IterListProvider([PollinationsAI, Airforce])
|
||||
)
|
||||
|
||||
flux_3d = ImageModel(
|
||||
name = 'flux-3d',
|
||||
base_provider = 'Flux AI',
|
||||
best_provider = Airforce
|
||||
best_provider = IterListProvider([PollinationsAI, Airforce])
|
||||
)
|
||||
|
||||
flux_disney = ImageModel(
|
||||
|
|
@@ -653,11 +667,36 @@ recraft_v3 = ImageModel(
|
|||
best_provider = AmigoChat
|
||||
)
|
||||
|
||||
### Midjourney ###
|
||||
midijourney = Model(
|
||||
name = 'midijourney',
|
||||
base_provider = 'Midjourney',
|
||||
best_provider = PollinationsAI
|
||||
)
|
||||
|
||||
### Other ###
|
||||
any_dark = ImageModel(
|
||||
name = 'any-dark',
|
||||
base_provider = 'Other',
|
||||
best_provider = Airforce
|
||||
best_provider = IterListProvider([PollinationsAI, Airforce])
|
||||
)
|
||||
|
||||
turbo = Model(
|
||||
name = 'turbo',
|
||||
base_provider = 'Other',
|
||||
best_provider = PollinationsAI
|
||||
)
|
||||
|
||||
unity = Model(
|
||||
name = 'unity',
|
||||
base_provider = 'Other',
|
||||
best_provider = PollinationsAI
|
||||
)
|
||||
|
||||
rtist = Model(
|
||||
name = 'rtist',
|
||||
base_provider = 'Other',
|
||||
best_provider = PollinationsAI
|
||||
)
|
||||
|
||||
class ModelUtils:
|
||||
|
|
@@ -716,6 +755,7 @@ class ModelUtils:
|
|||
'mixtral-8x7b': mixtral_8x7b,
|
||||
'mistral-tiny': mistral_tiny,
|
||||
'mistral-nemo': mistral_nemo,
|
||||
'mistral-large': mistral_large,
|
||||
|
||||
### NousResearch ###
|
||||
'mixtral-8x7b-dpo': mixtral_8x7b_dpo,
|
||||
|
|
@@ -778,9 +818,6 @@ class ModelUtils:
|
|||
### Inflection ###
|
||||
'pi': pi,
|
||||
|
||||
### Yorickvp ###
|
||||
'llava-13b': llava_13b,
|
||||
|
||||
### WizardLM ###
|
||||
'wizardlm-2-8x22b': wizardlm_2_8x22b,
|
||||
|
||||
|
|
@@ -830,9 +867,12 @@ class ModelUtils:
|
|||
### Gryphe ###
|
||||
'mythomax-13b': mythomax_13b,
|
||||
|
||||
### llmplayground.net ###
|
||||
'any-uncensored': any_uncensored,
|
||||
|
||||
### PollinationsAI ###
|
||||
'p1': p1,
|
||||
|
||||
### Uncensored AI ###
|
||||
'evil': evil,
|
||||
|
||||
#############
|
||||
### Image ###
|
||||
#############
|
||||
|
|
@@ -849,6 +889,7 @@ class ModelUtils:
|
|||
'flux-pro': flux_pro,
|
||||
'flux-dev': flux_dev,
|
||||
'flux-realism': flux_realism,
|
||||
'flux-cablyai': flux_cablyai,
|
||||
'flux-anime': flux_anime,
|
||||
'flux-3d': flux_3d,
|
||||
'flux-disney': flux_disney,
|
||||
|
|
@@ -861,8 +902,14 @@ class ModelUtils:
|
|||
### Recraft ###
|
||||
'recraft-v3': recraft_v3,
|
||||
|
||||
### Midjourney ###
|
||||
'midijourney': midijourney,
|
||||
|
||||
### Other ###
|
||||
'any-dark': any_dark,
|
||||
'turbo': turbo,
|
||||
'unity': unity,
|
||||
'rtist': rtist,
|
||||
}
|
||||
|
||||
# Create a list of all working models
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue