Mirror of https://github.com/xtekky/gpt4free.git, synced 2025-12-06 02:30:41 -08:00.
* feat: add agent CLI, new providers, and update models
- Add a new `agent` mode to the CLI, a feature-rich AI coding assistant with capabilities for file system operations, code execution, git integration, and interactive chat.
- Add new provider `OperaAria` with support for vision, streaming, and conversation history.
- Add new provider `Startnest` with support for `gpt-4o-mini`, vision, and streaming.
- Move providers `FreeGpt` and `Websim` to the `not_working` directory.
- Delete the `OIVSCodeSer2` provider.
- Rename the CLI `client` mode to `chat` and refactor its argument parsing (see the parser sketch after this list).
- In `g4f/Provider/DeepInfraChat.py`:
- Add a new `api_endpoint` attribute.
- Extensively update and reorganize the `models` list and `model_aliases` dictionary with numerous new models.
- In `g4f/Provider/LambdaChat.py`:
- Change the `default_model` to `deepseek-v3-0324`.
- In `g4f/Provider/Together.py`:
- Update aliases for `llama-3.1-405b`, `deepseek-r1`, and `flux`.
- Add new models including `gemma-3-27b`, `gemma-3n-e4b`, and `qwen-3-32b`.
- In `g4f/models.py`:
- Add new model definitions for `aria`, `deepseek_v3_0324_turbo`, `deepseek_r1_0528_turbo`, and several `gemma` variants.
- Remove the `mixtral_8x22b` model definition.
- Update the `best_provider` lists for `default`, `default_vision`, `gpt_4o_mini`, `gemini-1.5-pro`, `gemini-1.5-flash`, and others to reflect provider changes.
- In `g4f/Provider/__init__.py`:
- Add `OperaAria` and `Startnest` to the list of imported providers.
- Remove `FreeGpt`, `OIVSCodeSer2`, and `Websim` from imports.
- In `requirements.txt`:
- Add `rich` as a new dependency for the agent CLI.
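Under the hood, the `chat` rename and the new `agent` mode amount to new subcommands in the CLI parser. A minimal sketch of that wiring follows; only the mode names `chat` and `agent` come from this log, and every flag shown is a hypothetical placeholder:

```python
import argparse

def build_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(prog="g4f")
    subparsers = parser.add_subparsers(dest="mode", required=True)

    # Former `client` mode, renamed to `chat`
    chat = subparsers.add_parser("chat", help="interactive chat client")
    chat.add_argument("--model", default=None, help="model name or alias (hypothetical flag)")

    # New coding-assistant mode with file, exec, and git tooling
    agent = subparsers.add_parser("agent", help="AI coding assistant")
    agent.add_argument("--cwd", default=".", help="working directory for file operations (hypothetical flag)")
    return parser

if __name__ == "__main__":
    print(build_parser().parse_args().mode)
```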
* feat: add gemma-3-4b alias to DeepInfraChat
In `g4f/Provider/DeepInfraChat.py`, add the `gemma-3-4b` alias to the `model_aliases` dictionary.
The new alias points to the `google/gemma-3-4b-it` model.
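Assuming a local g4f checkout, the new alias resolves through the provider's existing lookup (the full provider file is shown further below):

```python
from g4f.Provider import DeepInfraChat

# The commit adds exactly this mapping to model_aliases:
assert DeepInfraChat.model_aliases["gemma-3-4b"] == "google/gemma-3-4b-it"

# The existing get_model classmethod resolves the alias:
print(DeepInfraChat.get_model("gemma-3-4b"))  # -> google/gemma-3-4b-it
```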
* feat: add OIVSCodeSer2 provider
- Create the new provider file `g4f/Provider/OIVSCodeSer2.py`.
- The provider supports its model and includes a custom helper method.
- Import and include `OIVSCodeSer2` in `g4f/Provider/__init__.py` (see the registration sketch after this list).
- Add `OIVSCodeSer2` to the `best_provider` lists of the affected models in `g4f/models.py`.
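For context, provider registration in g4f typically follows the pattern below; the model entry is a placeholder, since the specific names were elided from this log:

```python
# g4f/Provider/__init__.py — import and re-export the new provider:
from .OIVSCodeSer2 import OIVSCodeSer2

# g4f/models.py — add the provider to a model's best_provider list
# (the model and provider set below are hypothetical placeholders):
# some_model = Model(
#     name="some-model",
#     base_provider="OpenAI",
#     best_provider=IterListProvider([OIVSCodeSer2])
# )
```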
* refactor: Migrate from duckduckgo-search to ddgs library
* Replaced the `duckduckgo-search` dependency with the new `ddgs` library.
* In `g4f/tools/web_search.py`, updated the imports from `duckduckgo_search` to `ddgs` and the exception from `DuckDuckGoSearchException` to `DDGSError`.
* Modified the search function in `g4f/tools/web_search.py` to use a `DDGS` context manager instead of a global instance (see the sketch after this list).
* Updated the function to catch the new `DDGSError` exception.
* Updated the web search import and installation instructions to use `ddgs`.
* Removed unnecessary comments and simplified f-string formatting in the affected files.
* Added `ddgs` and related dependencies to `requirements.txt`.
* Added the new dependencies to the extras in `setup.py`.
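A minimal sketch of the context-manager pattern this commit adopts; the helper name is illustrative, and the `DDGSError` import path is an assumption based on the names in this log:

```python
from ddgs import DDGS                  # replaces: from duckduckgo_search import DDGS
from ddgs.exceptions import DDGSError  # exception name from this log; import path assumed

def search_text(query: str, max_results: int = 5) -> list:
    # One short-lived DDGS session per call, instead of a module-level global instance
    try:
        with DDGS() as ddgs:
            return list(ddgs.text(query, max_results=max_results))
    except DDGSError:
        return []
```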
* test: Update web search tests for ddgs library and cleanup code
* In `etc/unittest/web_search.py`, updated imports from `duckduckgo_search` to `ddgs` and `DDGSError`.
* Added an import for `MissingRequirementsError` in `etc/unittest/web_search.py`.
* Modified exception handling in web search tests to catch both `DDGSError` and `MissingRequirementsError` (see the sketch after this list).
* Removed temporary modification comments in `g4f/cli/agent/agent.py` and `g4f/tools/web_search.py`.
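The dual exception handling might look like this in a test; the test and function names are illustrative:

```python
import unittest

from ddgs.exceptions import DDGSError  # import path assumed
from g4f.errors import MissingRequirementsError

class TestWebSearch(unittest.TestCase):
    def test_search_tolerates_missing_backend(self):
        try:
            from g4f.tools.web_search import search  # helper name assumed
            # ... exercise the search path here ...
        except (DDGSError, MissingRequirementsError):
            self.skipTest("search backend unavailable")

if __name__ == "__main__":
    unittest.main()
```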
* fix: remove unstable CLI feature due to critical errors
- Remove the experimental CLI coding assistant feature due to multiple stability issues and critical errors in production environments
- Delete the entire `g4f/cli/agent/` directory and all related functionality
- Remove `rich` dependency from `requirements.txt` as it was only used by the removed feature
- Remove `rich` from the `all` extras in `setup.py`
- Revert CLI mode naming from `chat` back to `client` for consistency
- Clean up the CLI argument parsing to remove references to the removed functionality
- Remove installation instructions and imports related to the unstable feature from documentation
This removal is necessary due to:
- Unpredictable behavior causing data loss risks
- Incompatibility with certain system configurations
- Security concerns with unrestricted file system access
- Excessive resource consumption in production environments
Note: This feature may be reintroduced in the future with a more stable and secure implementation that addresses the current limitations and safety concerns.
---------
Co-authored-by: kqlio67 <kqlio67@users.noreply.github.com>
from __future__ import annotations

import random

from .template import OpenaiTemplate
from ..errors import ModelNotFoundError
from .. import debug


class DeepInfraChat(OpenaiTemplate):
    parent = "DeepInfra"
    url = "https://deepinfra.com/chat"
    login_url = "https://deepinfra.com/dash/api_keys"
    api_base = "https://api.deepinfra.com/v1/openai"
    api_endpoint = "https://api.deepinfra.com/v1/openai/chat/completions"
    working = True

    default_model = 'deepseek-ai/DeepSeek-V3-0324'
    default_vision_model = 'microsoft/Phi-4-multimodal-instruct'
    vision_models = [
        default_vision_model,
        'meta-llama/Llama-3.2-90B-Vision-Instruct'
    ]
    models = [
        # cognitivecomputations
        'cognitivecomputations/dolphin-2.6-mixtral-8x7b',
        'cognitivecomputations/dolphin-2.9.1-llama-3-70b',

        # deepinfra
        'deepinfra/airoboros-70b',

        # deepseek-ai
        default_model,
        'deepseek-ai/DeepSeek-V3-0324-Turbo',
        'deepseek-ai/DeepSeek-R1-0528-Turbo',
        'deepseek-ai/DeepSeek-R1-0528',
        'deepseek-ai/DeepSeek-Prover-V2-671B',
        'deepseek-ai/DeepSeek-V3',
        'deepseek-ai/DeepSeek-R1',
        'deepseek-ai/DeepSeek-R1-Turbo',
        'deepseek-ai/DeepSeek-R1-Distill-Llama-70B',
        'deepseek-ai/DeepSeek-R1-Distill-Qwen-32B',

        # google (gemma)
        'google/gemma-1.1-7b-it',
        'google/gemma-2-9b-it',
        'google/gemma-2-27b-it',
        'google/gemma-3-4b-it',
        'google/gemma-3-12b-it',
        'google/gemma-3-27b-it',

        # google (codegemma)
        'google/codegemma-7b-it',

        # lizpreciatior
        'lizpreciatior/lzlv_70b_fp16_hf',

        # meta-llama
        'meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8',
        'meta-llama/Llama-4-Scout-17B-16E-Instruct',
        'meta-llama/Meta-Llama-3.1-8B-Instruct',
        'meta-llama/Llama-3.3-70B-Instruct-Turbo',
        'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo',

        # microsoft
        'microsoft/phi-4-reasoning-plus',
        'microsoft/phi-4',
        'microsoft/WizardLM-2-8x22B',
        'microsoft/WizardLM-2-7B',

        # mistralai
        'mistralai/Mistral-Small-3.1-24B-Instruct-2503',

        # Qwen
        'Qwen/Qwen3-235B-A22B',
        'Qwen/Qwen3-30B-A3B',
        'Qwen/Qwen3-32B',
        'Qwen/Qwen3-14B',
        'Qwen/QwQ-32B',
    ] + vision_models

    model_aliases = {
        # cognitivecomputations
        "dolphin-2.6": "cognitivecomputations/dolphin-2.6-mixtral-8x7b",
        "dolphin-2.9": "cognitivecomputations/dolphin-2.9.1-llama-3-70b",

        # deepinfra
        "airoboros-70b": "deepinfra/airoboros-70b",

        # deepseek-ai
        "deepseek-prover-v2": "deepseek-ai/DeepSeek-Prover-V2-671B",
        "deepseek-prover-v2-671b": "deepseek-ai/DeepSeek-Prover-V2-671B",
        "deepseek-r1": ["deepseek-ai/DeepSeek-R1", "deepseek-ai/DeepSeek-R1-0528"],
        "deepseek-r1-0528": "deepseek-ai/DeepSeek-R1-0528",
        "deepseek-r1-0528-turbo": "deepseek-ai/DeepSeek-R1-0528-Turbo",
        "deepseek-r1-distill-llama-70b": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
        "deepseek-r1-distill-qwen-32b": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
        "deepseek-r1-turbo": "deepseek-ai/DeepSeek-R1-Turbo",
        "deepseek-v3": ["deepseek-ai/DeepSeek-V3", "deepseek-ai/DeepSeek-V3-0324"],
        "deepseek-v3-0324": "deepseek-ai/DeepSeek-V3-0324",
        "deepseek-v3-0324-turbo": "deepseek-ai/DeepSeek-V3-0324-Turbo",

        # google
        "codegemma-7b": "google/codegemma-7b-it",
        "gemma-1.1-7b": "google/gemma-1.1-7b-it",
        "gemma-2-27b": "google/gemma-2-27b-it",
        "gemma-2-9b": "google/gemma-2-9b-it",
        "gemma-3-4b": "google/gemma-3-4b-it",
        "gemma-3-12b": "google/gemma-3-12b-it",
        "gemma-3-27b": "google/gemma-3-27b-it",

        # lizpreciatior
        "lzlv-70b": "lizpreciatior/lzlv_70b_fp16_hf",

        # meta-llama
        "llama-3.1-8b": "meta-llama/Meta-Llama-3.1-8B-Instruct",
        "llama-3.2-90b": "meta-llama/Llama-3.2-90B-Vision-Instruct",
        "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct",
        "llama-4-maverick": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
        "llama-4-scout": "meta-llama/Llama-4-Scout-17B-16E-Instruct",

        # microsoft
        "phi-4": "microsoft/phi-4",
        "phi-4-multimodal": default_vision_model,
        "phi-4-reasoning-plus": "microsoft/phi-4-reasoning-plus",
        "wizardlm-2-7b": "microsoft/WizardLM-2-7B",
        "wizardlm-2-8x22b": "microsoft/WizardLM-2-8x22B",

        # mistralai
        "mistral-small-3.1-24b": "mistralai/Mistral-Small-3.1-24B-Instruct-2503",

        # Qwen
        "qwen-3-14b": "Qwen/Qwen3-14B",
        "qwen-3-30b": "Qwen/Qwen3-30B-A3B",
        "qwen-3-32b": "Qwen/Qwen3-32B",
        "qwen-3-235b": "Qwen/Qwen3-235B-A22B",
        "qwq-32b": "Qwen/QwQ-32B",
    }

    @classmethod
    def get_model(cls, model: str, **kwargs) -> str:
        """Get the internal model name from the user-provided model name."""
        # kwargs can contain api_key, api_base, etc. but we don't need them for model selection
        if not model:
            return cls.default_model

        # Check if the model exists directly in our models list
        if model in cls.models:
            return model

        # Check if there's an alias for this model
        if model in cls.model_aliases:
            alias = cls.model_aliases[model]
            # If the alias is a list, randomly select one of the options
            if isinstance(alias, list):
                selected_model = random.choice(alias)
                debug.log(f"DeepInfraChat: Selected model '{selected_model}' from alias '{model}'")
                return selected_model
            debug.log(f"DeepInfraChat: Using model '{alias}' for alias '{model}'")
            return alias

        raise ModelNotFoundError(f"Model {model} not found")
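For reference, a quick usage sketch of the resolution logic above, assuming a local g4f checkout:

```python
from g4f.Provider import DeepInfraChat

print(DeepInfraChat.get_model(""))             # empty -> default: deepseek-ai/DeepSeek-V3-0324
print(DeepInfraChat.get_model("qwq-32b"))      # alias -> Qwen/QwQ-32B
print(DeepInfraChat.get_model("deepseek-r1"))  # list alias -> R1 or R1-0528, chosen at random
```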