mirror of https://github.com/xtekky/gpt4free.git
synced 2025-12-06 02:30:41 -08:00

Update models and providers with improved documentation and code structure (#2786)

* docs(docs/providers-and-models.md): update documentation structure and model listings
* refactor(g4f/debug.py): add type hints and docstrings
* refactor(g4f/tools/run_tools.py): restructure tool handling and improve modularity
* refactor(g4f/providers/response.py): enhance type hints and code documentation
* feat(g4f/models.py): update model providers and add new models
* feat(g4f/Provider/Blackbox.py): add encrypted session handling and model updates
* fix(g4f/Provider/ChatGptEs.py): migrate to curl_cffi for request handling and improve error resilience
* feat(g4f/Provider/DeepInfraChat.py): update default model and add new DeepSeek variants
* feat(g4f/Provider/Free2GPT.py): add Gemini models and streamline headers
* feat(g4f/Provider/FreeGpt.py): add support for Gemini 1.5 Flash model
* feat(g4f/Provider/Liaobots.py): add Claude 3.7 models and update default GPT-4o
* fix(g4f/Provider/PollinationsAI.py): correct model mappings and generation parameters
* feat(g4f/Provider/PollinationsImage.py): add class identifier label
* chore(g4f/Provider/TeachAnything.py): update default model and simplify model handling
* (g4f/Provider/Mhystical.py): remove class implementation
* chore(g4f/Provider/Prodia.py > g4f/Provider/not_working/Prodia.py): mark Prodia provider as non-working
* feat(g4f/Provider/Blackbox.py): add Claude 3.7 Sonnet model alias
* chore(g4f/models.py): update model configurations
* fix(g4f/Provider/ChatGptEs.py): improve request reliability and nonce detection

--------

Co-authored-by: kqlio67 <>

This commit is contained in:
parent 17a99b684a
commit c79635aaeb

18 changed files with 778 additions and 548 deletions
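Before the per-file diffs, a quick orientation: the providers and models touched below are normally driven through g4f's client interface. A minimal sketch, assuming the client API of this g4f release (the model name is one of those listed in the updated tables):

```python
from g4f.client import Client

client = Client()
response = client.chat.completions.create(
    model="gpt-4o-mini",  # served by several of the no-auth providers below
    messages=[{"role": "user", "content": "Say hello"}],
)
print(response.choices[0].message.content)
```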
docs/providers-and-models.md

@@ -11,7 +11,10 @@ This document provides an overview of various AI providers and models, including
## Table of Contents
- [Providers](#providers)
  - [No auth required](#providers-not-needs-auth)
  - [HuggingFace](#providers-huggingface)
  - [HuggingSpace](#providers-huggingspace)
  - [Local](#providers-local)
  - [MiniMax](#providers-minimax)
  - [Needs auth](#providers-needs-auth)
- [Models](#models)
  - [Text Models](#text-models)
@@ -38,7 +41,7 @@ This document provides an overview of various AI providers and models, including
| Website | API Credentials | Provider | Text Models | Image Models | Vision (Image Upload) | Stream | Status |
|----------|-------------|--------------|---------------|--------|--------|------|------|
|[playground.allenai.org](https://playground.allenai.org)|No auth required|`g4f.Provider.AllenAI`|`tulu-3-405b, olmo-2-13b, tulu-3-1-8b, tulu-3-70b, olmoe-0125`|❌|❌|✔||
|[blackbox.ai](https://www.blackbox.ai)|No auth required|`g4f.Provider.Blackbox`|`blackboxai, gemini-1.5-flash, blackboxai-pro, llama-3.1-8b, llama-3.1-70b, llama-3-1-405b, llama-3.3-70b, mixtral-small-28b, deepseek-chat, dbrx-instruct, qwq-32b, hermes-2-dpo, deepseek-v3, deepseek-r1, gemini-2.0-flash` _**(+35)**_|`flux`|`blackboxai, gpt-4o, o3-mini, gemini-1.5-pro, gemini-1.5-flash, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b, gemini-2.0-flash`|✔||
|[blackbox.ai](https://www.blackbox.ai)|No auth required|`g4f.Provider.Blackbox`|`blackboxai, gemini-1.5-flash, blackboxai-pro, llama-3.1-8b, llama-3.1-70b, llama-3-1-405b, llama-3.3-70b, mixtral-small-28b, deepseek-chat, dbrx-instruct, qwq-32b, hermes-2-dpo, deepseek-v3, deepseek-r1, gemini-2.0-flash, gpt-4o, o1, o3-mini, gemini-1.5-pro, claude-3.7-sonnet` _**(+29)**_|`flux`|`blackboxai, gpt-4o, o1, o3-mini, gemini-1.5-pro, gemini-1.5-flash, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b, gemini-2.0-flash, deepseek-v3`|✔||
|[chatglm.cn](https://chatglm.cn)|No auth required|`g4f.Provider.ChatGLM`|`glm-4`|❌|❌|✔||
|[chatgpt.com](https://chatgpt.com)|No auth required|`g4f.Provider.ChatGpt`|✔ _**(+7)**_|❌|❌|✔||
|[chatgpt.es](https://chatgpt.es)|No auth required|`g4f.Provider.ChatGptEs`|`gpt-4, gpt-4o, gpt-4o-mini`|❌|❌|✔||
@@ -46,27 +49,34 @@ This document provides an overview of various AI providers and models, including
|[copilot.microsoft.com](https://copilot.microsoft.com)|Optional API key|`g4f.Provider.Copilot`|`gpt-4, gpt-4o`|❌|❌|✔||
|[duckduckgo.com/aichat](https://duckduckgo.com/aichat)|No auth required|`g4f.Provider.DDG`|`gpt-4, gpt-4o-mini, llama-3.3-70b, claude-3-haiku, o3-mini, mixtral-small-24b`|❌|❌|✔||
|[deepinfra.com/chat](https://deepinfra.com/chat)|No auth required|`g4f.Provider.DeepInfraChat`|`llama-3.1-8b, llama-3.2-90b, llama-3.3-70b, deepseek-v3, mixtral-small-28b, deepseek-r1, phi-4, wizardlm-2-8x22b, qwen-2.5-72b, yi-34b, qwen-2-72b, dolphin-2.6, dolphin-2.9, dbrx-instruct, airoboros-70b, lzlv-70b, wizardlm-2-7b, mixtral-8x22b, minicpm-2.5`|❌|`llama-3.2-90b, minicpm-2.5`|✔||
|[chat10.free2gpt.xyz](https://chat10.free2gpt.xyz)|No auth required|`g4f.Provider.Free2GPT`|`mistral-7b`|❌|❌|✔||
|[freegptsnav.aifree.site](https://freegptsnav.aifree.site)|No auth required|`g4f.Provider.FreeGpt`|`gemini-1.5-pro`|❌|❌|✔||
|[chat10.free2gpt.xyz](https://chat10.free2gpt.xyz)|No auth required|`g4f.Provider.Free2GPT`|`gemini-1.5-pro, gemini-1.5-flash`|❌|❌|✔||
|[freegptsnav.aifree.site](https://freegptsnav.aifree.site)|No auth required|`g4f.Provider.FreeGpt`|`gemini-1.5-pro, gemini-1.5-flash`|❌|❌|✔||
|[app.giz.ai/assistant](https://app.giz.ai/assistant)|No auth required|`g4f.Provider.GizAI`|`gemini-1.5-flash`|❌|❌|✔||
|[glider.so](https://glider.so)|No auth required|`g4f.Provider.Glider`|`llama-3.1-70b, llama-3.1-8b, llama-3.2-3b, deepseek-r1`|❌|❌|✔||
|[hailuo.ai](https://www.hailuo.ai)|No auth required|`g4f.Provider.HailuoAI`|`MiniMax` _**(1)**_|❌|❌|✔||
|[editor.imagelabs.net](https://editor.imagelabs.net)|No auth required|`g4f.Provider.ImageLabs`|`gemini-1.5-pro`|❌|❌|✔||
|[editor.imagelabs.net](editor.imagelabs.net)|No auth required|`g4f.Provider.ImageLabs`|❌|`sdxl-turbo`|❌|✔||
|[huggingface.co/spaces](https://huggingface.co/spaces)|Optional API key|`g4f.Provider.HuggingSpace`|`qvq-72b, qwen-2-72b, command-r, command-r-plus, command-r7b`|`flux-dev, flux-schnell, sd-3.5`|❌|✔||
|[jmuz.me](https://jmuz.me)|Optional API key|`g4f.Provider.Jmuz`|`claude-3-haiku, claude-3-opus, claude-3-haiku, claude-3.5-sonnet, deepseek-r1, deepseek-chat, gemini-exp, gemini-1.5-flash, gemini-1.5-pro, gemini-2.0-flash-thinking, gpt-4, gpt-4o, gpt-4o-mini, llama-3-70b, llama-3-8b, llama-3.1-405b, llama-3.1-70b, llama-3.1-8b, llama-3.2-11b, llama-3.2-90b, llama-3.3-70b, mixtral-8x7b, qwen-2.5-72b, qwen-2.5-coder-32b, qwq-32b, wizardlm-2-8x22b`|❌|❌|✔||
|[liaobots.work](https://liaobots.work)|[Automatic cookies](https://liaobots.work)|`g4f.Provider.Liaobots`|`gpt-4o-mini, gpt-4o, gpt-4, o1-preview, deepseek-r1, deepseek-v3, claude-3-opus, claude-3.5-sonnet, claude-3-sonnet, gemini-2.0-flash, gemini-2.0-flash-thinking, grok-3, grok-3-r1, o3-mini`|❌|❌|✔||
|[mhystical.cc](https://mhystical.cc)|[Optional API key](https://mhystical.cc/dashboard)|`g4f.Provider.Mhystical`|`gpt-4`|❌|❌|✔||
|[liaobots.work](https://liaobots.work)|[Automatic cookies](https://liaobots.work)|`g4f.Provider.Liaobots`|`claude-3.5-sonnet, claude-3.7-sonnet, claude-3.7-sonnet-thinking, claude-3-opus, claude-3-sonnet, deepseek-r1, deepseek-v3, gemini-2.0-flash, gemini-2.0-flash-thinking, gemini-2.0-pro, gpt-4, gpt-4o, gpt-4o-mini, grok-3, grok-3-r1, o3-mini`|❌|❌|✔||
|[oi-vscode-server.onrender.com](https://oi-vscode-server.onrender.com)|No auth required|`g4f.Provider.OIVSCode`|`gpt-4o-mini, deepseek-v3`|❌|`gpt-4o-mini`|✔||
|[labs.perplexity.ai](https://labs.perplexity.ai)|No auth required|`g4f.Provider.PerplexityLabs`|`sonar, sonar-pro, sonar-reasoning, sonar-reasoning-pro`|❌|❌|✔||
|[pi.ai/talk](https://pi.ai/talk)|[Manual cookies](https://pi.ai/talk)|`g4f.Provider.Pi`|`pi`|❌|❌|✔||
|[pizzagpt.it](https://www.pizzagpt.it)|No auth required|`g4f.Provider.Pizzagpt`|`gpt-4o-mini`|❌|❌|✔||
|[pollinations.ai](https://pollinations.ai)|No auth required|`g4f.Provider.PollinationsAI`|`gpt-4o-mini, gpt-4o, qwen-2.5-coder-32b, llama-3.3-70b, mistral-nemo, deepseek-chat, llama-3.1-8b, deepseek-r1, gemini-2.0-flash, gemini-2.0-flash-thinking` _**(3+)**_|`flux, flux-pro, flux-dev, flux-schnell, dall-e-3, sdxl-turbo`|gpt-4o, gpt-4o-mini|✔||
|[pollinations.ai](https://pollinations.ai)|No auth required|`g4f.Provider.PollinationsAI`|`gpt-4o-mini, gpt-4o, o1-mini, qwen-2.5-coder-32b, llama-3.3-70b, mistral-nemo, llama-3.1-8b, deepseek-r1, phi-4` _**(6+)**_|`flux, flux-pro, flux-dev, flux-schnell, dall-e-3, sdxl-turbo`|gpt-4o, gpt-4o-mini, o1-mini|✔||
|[pollinations.ai](https://pollinations.ai)|No auth required|`g4f.Provider.PollinationsImage`|❌|`flux, flux-pro, flux-dev, flux-schnell, dall-e-3, sdxl-turbo`|❌|✔||
|[app.prodia.com](https://app.prodia.com)|No auth required|`g4f.Provider.Prodia`|❌|✔ _**(46)**_|❌|❌||
|[teach-anything.com](https://www.teach-anything.com)|No auth required|`g4f.Provider.TeachAnything`|`llama-3.1-70b`|❌|❌|✔||
|[teach-anything.com](https://www.teach-anything.com)|No auth required|`g4f.Provider.TeachAnything`|`gemini-1.5-pro, gemini-1.5-flash`|❌|❌|✔||
|[you.com](https://you.com)|[Manual cookies](https://you.com)|`g4f.Provider.You`|✔|✔|✔|✔||
|[chat9.yqcloud.top](https://chat9.yqcloud.top)|No auth required|`g4f.Provider.Yqcloud`|`gpt-4`|✔|✔|✔||

---
### Providers HuggingFace
| Website | API Credentials | Provider | Text Models | Image Models | Vision (Image Upload) | Stream | Status |
|----------|-------------|--------------|---------------|--------|--------|------|------|
|[huggingface.co/chat](https://huggingface.co/chat)|[Manual cookies](https://huggingface.co/chat)|`g4f.Provider.HuggingChat`|`qwen-2.5-72b, llama-3.3-70b, command-r-plus, deepseek-r1, qwq-32b, nemotron-70b, llama-3.2-11b, mistral-nemo, phi-3.5-mini`|`flux-dev, flux-schnell`|❌|✔||
|[huggingface.co/chat](https://huggingface.co/chat)|[API key / Cookies](https://huggingface.co/settings/tokens)|`g4f.Provider.HuggingFace`|✔ _**(47+)**_|✔ _**(9+)**_|❌|✔||
|[api-inference.huggingface.co](https://api-inference.huggingface.co)|[Get API key](https://huggingface.co/settings/tokens)|`g4f.Provider.HuggingFaceAPI`|✔ _**(9+)**_|✔ _**(2+)**_|✔ _**(1+)**_|❌||✔|

---
### Providers HuggingSpace
| Website | API Credentials | Provider | Text Models | Image Models | Vision (Image Upload) | Stream | Status | Auth |
@@ -74,6 +84,7 @@ This document provides an overview of various AI providers and models, including
|[black-forest-labs-flux-1-dev.hf.space](https://black-forest-labs-flux-1-dev.hf.space)|[Get API key](https://huggingface.co/settings/tokens)|`g4f.Provider.BlackForestLabsFlux1Dev`|❌|`flux-dev`|❌|✔||
|[black-forest-labs-flux-1-schnell.hf.space](https://black-forest-labs-flux-1-schnell.hf.space)|[Get API key](https://huggingface.co/settings/tokens)|`g4f.Provider.BlackForestLabsFlux1Schnell`|❌|`flux-schnell`|❌|✔||
|[cohereforai-c4ai-command.hf.space](https://cohereforai-c4ai-command.hf.space)|[Get API key](https://huggingface.co/settings/tokens)|`g4f.Provider.CohereForAI`|`command-r, command-r-plus, command-r7b`|❌|❌|✔||
|[roxky-flux-1-dev.hf.space](https://roxky-flux-1-dev.hf.space)|[Get API key](https://huggingface.co/settings/tokens)|❌|❌|✔ _**(3)**_|✔||
|[huggingface.co/spaces/deepseek-ai/Janus-Pro-7B](https://huggingface.co/spaces/deepseek-ai/Janus-Pro-7B)|[Get API key](https://huggingface.co/settings/tokens)|`g4f.Provider.Janus_Pro_7B`|✔|✔|❌|✔||
|[qwen-qvq-72b-preview.hf.space](https://qwen-qvq-72b-preview.hf.space)|[Get API key](https://huggingface.co/settings/tokens)|`g4f.Provider.Qwen_QVQ_72B`|`qvq-72b`|❌|❌|✔||
|[qwen-qwen2-5-1m-demo.hf.space](https://qwen-qwen2-5-1m-demo.hf.space)|[Get API key](https://huggingface.co/settings/tokens)|`g4f.Provider.Qwen_Qwen_2_5M_Demo`|`qwen-2.5-1m-demo`|❌|❌|✔||
@@ -81,6 +92,19 @@ This document provides an overview of various AI providers and models, including
|[stabilityai-stable-diffusion-3-5-large.hf.space](https://stabilityai-stable-diffusion-3-5-large.hf.space)|[Get API key](https://huggingface.co/settings/tokens)|`g4f.Provider.StableDiffusion35Large`|❌|`sd-3.5`|❌|✔||
|[voodoohop-flux-1-schnell.hf.space](https://voodoohop-flux-1-schnell.hf.space)|[Get API key](https://huggingface.co/settings/tokens)|`g4f.Provider.VoodoohopFlux1Schnell`|❌|`flux-schnell`|❌|✔||

### Providers Local
| Website | API Credentials | Provider | Text Models | Image Models | Vision (Image Upload) | Stream | Status |
|----------|-------------|--------------|---------------|--------|--------|------|------|
|[]( )|No auth required|`g4f.Provider.Local`|✔|❌|❌|✔||
|[ollama.com](https://ollama.com)|No auth required|`g4f.Provider.Ollama`|✔|❌|❌|✔||

---
### Providers MiniMax
| Website | API Credentials | Provider | Text Models | Image Models | Vision (Image Upload) | Stream | Status |
|----------|-------------|--------------|---------------|--------|--------|------|------|
|[hailuo.ai/chat](https://www.hailuo.ai/chat)|[Get API key](https://intl.minimaxi.com/user-center/basic-information/interface-key)|`g4f.Provider.MiniMax`|`MiniMax` _**(1)**_|❌|❌|✔||

---
### Providers Needs Auth
| Website | API Credentials | Provider | Text Models | Image Models | Vision (Image Upload) | Stream | Status |
@@ -98,13 +122,9 @@ This document provides an overview of various AI providers and models, including
|[github.com/copilot](https://github.com/copilot)|[Manual cookies](https://github.com/copilot)|`g4f.Provider.GithubCopilot`|✔ _**(4+)**_|❌|❌|❌||
|[glhf.chat](https://glhf.chat)|[Get API key](https://glhf.chat/user-settings/api)|`g4f.Provider.GlhfChat`|✔ _**(22+)**_|❌|❌|❌||
|[console.groq.com/playground](https://console.groq.com/playground)|[Get API key](https://console.groq.com/keys)|`g4f.Provider.Groq`|✔ _**(18+)**_|❌|✔|❌||
|[huggingface.co/chat](https://huggingface.co/chat)|[Manual cookies](https://huggingface.co/chat)|`g4f.Provider.HuggingChat`|`qwen-2.5-72b, llama-3.3-70b, command-r-plus, deepseek-r1, qwq-32b, nemotron-70b, llama-3.2-11b, mistral-nemo, phi-3.5-mini`|`flux-dev, flux-schnell`|❌|✔||
|[huggingface.co/chat](https://huggingface.co/chat)|[API key / Cookies](https://huggingface.co/settings/tokens)|`g4f.Provider.HuggingFace`|✔ _**(47+)**_|✔ _**(9+)**_|❌|✔||
|[api-inference.huggingface.co](https://api-inference.huggingface.co)|[Get API key](https://huggingface.co/settings/tokens)|`g4f.Provider.HuggingFaceAPI`|✔ _**(9+)**_|✔ _**(2+)**_|✔ _**(1+)**_|❌||✔|
|[meta.ai](https://www.meta.ai)|[Manual cookies](https://www.meta.ai)|`g4f.Provider.MetaAI`|`meta-ai`|❌|❌|✔||✔|
|[meta.ai](https://www.meta.ai)|[Manual cookies](https://www.meta.ai)|`g4f.Provider.MetaAIAccount`|❌|`meta-ai`|❌|✔||
|[designer.microsoft.com](https://designer.microsoft.com)|[Manual cookies](https://designer.microsoft.com)|`g4f.Provider.MicrosoftDesigner`|❌|`dall-e-3`|❌|❌||
|[hailuo.ai/chat](https://www.hailuo.ai/chat)|[Get API key](https://intl.minimaxi.com/user-center/basic-information/interface-key)|`g4f.Provider.MiniMax`|`MiniMax` _**(1)**_|❌|❌|✔||
|[platform.openai.com](https://platform.openai.com)|[Get API key](https://platform.openai.com/settings/organization/api-keys)|`g4f.Provider.OpenaiAPI`|✔|❌|❌|✔||
|[chatgpt.com](https://chatgpt.com)|[Manual cookies](https://chatgpt.com)|`g4f.Provider.OpenaiChat`|`gpt-4o, gpt-4o-mini, gpt-4` _**(8+)**_|✔_**(1)**_|✔_**(8+)**_|✔||
|[perplexity.ai](https://www.perplexity.ai)|[Get API key](https://www.perplexity.ai/settings/api)|`g4f.Provider.PerplexityApi`|✔ _**(6+)**_|❌|❌|✔||
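The `g4f.Provider.*` classes in these tables can also be inspected programmatically via the `ProviderModelMixin` helpers that the Blackbox diff below relies on; a small sketch (attribute values reflect this commit and may drift in later releases):

```python
from g4f.Provider import Blackbox, DeepInfraChat

print(Blackbox.get_models())        # model ids behind the "(+29)" note above
print(DeepInfraChat.default_model)  # 'deepseek-ai/DeepSeek-V3-Turbo' after this commit
```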
@@ -121,44 +141,47 @@ This document provides an overview of various AI providers and models, including
| Model | Base Provider | Providers | Website |
|-------|---------------|-----------|---------|
|gpt-4|OpenAI|8+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4)|
|gpt-4o|OpenAI|6+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o)|
|gpt-4o|OpenAI|7+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o)|
|gpt-4o-mini|OpenAI|7+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o-mini)|
|o1|OpenAI|1+ Providers|[openai.com](https://openai.com/index/introducing-openai-o1-preview/)|
|o1-preview|OpenAI|1+ Providers|[openai.com](https://openai.com/index/introducing-openai-o1-preview/)|
|o3-mini|OpenAI|2+ Providers|[openai.com](https://openai.com/index/openai-o3-mini/)|
|o1|OpenAI|2+ Providers|[openai.com](https://openai.com/index/introducing-openai-o1-preview/)|
|o1-mini|OpenAI|1+ Providers|[openai.com](https://openai.com/index/openai-o1-mini-advancing-cost-efficient-reasoning/)|
|o3-mini|OpenAI|3+ Providers|[openai.com](https://openai.com/index/openai-o3-mini/)|
|gigachat|GigaChat|1+ Providers|[developers.sber.ru/gigachat](https://developers.sber.ru/gigachat)|
|meta-ai|Meta|1+ Providers|[ai.meta.com](https://ai.meta.com/)|
|llama-2-7b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-2-7b)|
|llama-3-8b|Meta Llama|2+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3/)|
|llama-3-70b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Meta-Llama-3-70B)|
|llama-3.1-8b|Meta Llama|6+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)|
|llama-3.1-70b|Meta Llama|5+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)|
|llama-3.1-70b|Meta Llama|3+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)|
|llama-3.1-405b|Meta Llama|2+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-3.1-405B)|
|llama-3.2-1b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-3.2-1B)|
|llama-3.2-3b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-3.2-3B)|
|llama-3.2-11b|Meta Llama|3+ Providers|[ai.meta.com](https://ai.meta.com/blog/llama-3-2-connect-2024-vision-edge-mobile-devices/)|
|llama-3.2-90b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-3.2-90B-Vision)|
|llama-3.3-70b|Meta Llama|6+ Providers|[ai.meta.com](https://ai.meta.com/blog/llama-3-3/)|
|llama-3.2-90b|Meta Llama|2+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-3.2-90B-Vision)|
|llama-3.3-70b|Meta Llama|7+ Providers|[ai.meta.com](https://ai.meta.com/blog/llama-3-3/)|
|mixtral-8x7b|Mistral|1+ Providers|[mistral.ai](https://mistral.ai/news/mixtral-of-experts/)|
|mixtral-8x22b|Mistral|1+ Providers|[huggingface.co](https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1)|
|mistral-nemo|Mistral|3+ Providers|[huggingface.co](https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407)|
|mixtral-small-24b|Mistral|1+ Providers|[huggingface.co](https://huggingface.co/mistralai/Mistral-Small-24B-Instruct-2501)|
|mixtral-small-28b|Mistral|3+ Providers|[mistral.ai](https://mistral.ai/news/mixtral-small-28b/)|
|mixtral-small-28b|Mistral|2+ Providers|[mistral.ai](https://mistral.ai/news/mixtral-small-28b/)|
|hermes-2-dpo|NousResearch|1+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO)|
|phi-3.5-mini|Microsoft|1+ Providers|[huggingface.co](https://huggingface.co/microsoft/Phi-3.5-mini-instruct)|
|phi-4|Microsoft|1+ Providers|[techcommunity.microsoft.com](https://techcommunity.microsoft.com/blog/aiplatformblog/introducing-phi-4-microsoft%E2%80%99s-newest-small-language-model-specializing-in-comple/4357090)|
|phi-4|Microsoft|2+ Providers|[techcommunity.microsoft.com](https://techcommunity.microsoft.com/blog/aiplatformblog/introducing-phi-4-microsoft%E2%80%99s-newest-small-language-model-specializing-in-comple/4357090)|
|wizardlm-2-7b|Microsoft|1+ Providers|[wizardlm.github.io](https://wizardlm.github.io/WizardLM2/)|
|wizardlm-2-8x22b|Microsoft|2+ Providers|[wizardlm.github.io](https://wizardlm.github.io/WizardLM2/)|
|gemini-2.0|Google DeepMind|1+|[deepmind.google](http://deepmind.google/technologies/gemini/)|
|gemini-2.0|Google DeepMind|1+ Providers|[deepmind.google](http://deepmind.google/technologies/gemini/)|
|gemini-exp|Google DeepMind|1+ Providers|[blog.google](https://blog.google/feed/gemini-exp-1206/)|
|gemini-1.5-flash|Google DeepMind|3+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/flash/)|
|gemini-1.5-pro|Google DeepMind|2+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/pro/)|
|gemini-2.0-flash|Google DeepMind|4+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/flash/)|
|gemini-1.5-flash|Google DeepMind|6+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/flash/)|
|gemini-1.5-pro|Google DeepMind|6+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/pro/)|
|gemini-2.0-flash|Google DeepMind|3+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/flash/)|
|gemini-2.0-flash-thinking|Google DeepMind|1+ Providers|[ai.google.dev](https://ai.google.dev/gemini-api/docs/thinking-mode)|
|gemini-2.0-pro|Google DeepMind|1+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/flash-thinking/)|
|claude-3-haiku|Anthropic|2+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-haiku)|
|claude-3-sonnet|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-family)|
|claude-3-opus|Anthropic|2+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-family)|
|claude-3.5-sonnet|Anthropic|2+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-5-sonnet)|
|claude-3.7-sonnet|Anthropic|2+ Providers|[anthropic.com](https://www.anthropic.com/claude/sonnet)|
|claude-3.7-sonnet-thinking|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/claude/sonnet)|
|reka-core|Reka AI|1+ Providers|[reka.ai](https://www.reka.ai/ourmodels)|
|blackboxai|Blackbox AI|1+ Providers|[docs.blackbox.chat](https://docs.blackbox.chat/blackbox-ai-1)|
|blackboxai-pro|Blackbox AI|1+ Providers|[docs.blackbox.chat](https://docs.blackbox.chat/blackbox-ai-1)|
@@ -168,17 +191,18 @@ This document provides an overview of various AI providers and models, including
|qwen-1.5-7b|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen1.5-7B)|
|qwen-2-72b|Qwen|2+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen2-72B)|
|qwen-2-vl-7b|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen2-VL-7B)|
|qwen-2.5-72b|Qwen|2+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen2.5-72B-Instruct)|
|qwen-2.5-72b|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen2.5-72B-Instruct)|
|qwen-2.5-coder-32b|Qwen|3+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen2.5-Coder-32B)|
|qwen-2.5-1m-demo|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen2.5-1M-Demo)|
|qwq-32b|Qwen|3+ Providers|[huggingface.co](https://huggingface.co/Qwen/QwQ-32B-Preview)|
|qvq-72b|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/QVQ-72B-Preview)|
|pi|Inflection|1+ Providers|[inflection.ai](https://inflection.ai/blog/inflection-2-5)|
|deepseek-chat|DeepSeek|3+ Providers|[huggingface.co](https://huggingface.co/deepseek-ai/deepseek-llm-67b-chat)|
|deepseek-chat|DeepSeek|2+ Providers|[huggingface.co](https://huggingface.co/deepseek-ai/deepseek-llm-67b-chat)|
|deepseek-v3|DeepSeek|4+ Providers|[api-docs.deepseek.com](https://api-docs.deepseek.com/news/news250120)|
|deepseek-r1|DeepSeek|8+ Providers|[api-docs.deepseek.com](https://api-docs.deepseek.com/news/news250120)|
|grok-3|x.ai|1+|[x.ai](https://x.ai/blog/grok-3)|
|grok-3-r1|x.ai|1+|[x.ai](https://x.ai/blog/grok-3)|
|janus-pro-7b|DeepSeek|2+ Providers|[api-docs.deepseek.com](https://api-docs.deepseek.com/docs/janus-pro-7b)|
|grok-3|x.ai|1+ Providers|[x.ai](https://x.ai/blog/grok-3)|
|grok-3-r1|x.ai|1+ Providers|[x.ai](https://x.ai/blog/grok-3)|
|sonar|Perplexity AI|1+ Providers|[sonar.perplexity.ai](https://sonar.perplexity.ai/)|
|sonar-pro|Perplexity AI|1+ Providers|[sonar.perplexity.ai](https://sonar.perplexity.ai/)|
|sonar-reasoning|Perplexity AI|1+ Providers|[sonar.perplexity.ai](https://sonar.perplexity.ai/)|
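The Models tables mirror the registry in `g4f/models.py` (diffed further down); the provider chain behind a row's "N+ Providers" count can be inspected through `ModelUtils`. A sketch, assuming the registry layout of this release:

```python
from g4f.models import ModelUtils

model = ModelUtils.convert["llama-3.3-70b"]  # model name -> Model instance
print(model.name, model.base_provider)       # e.g. 'llama-3.3-70b' 'Meta Llama'
print(model.best_provider)                   # typically an IterListProvider over the providers counted above
```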

g4f/Provider/Blackbox.py
@@ -5,6 +5,7 @@ import re
import json
import random
import string
import base64
from pathlib import Path
from typing import Optional
@@ -35,32 +36,28 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
    supports_system_message = True
    supports_message_history = True

    default_model = "BLACKBOXAI"
    default_model = "blackboxai"
    default_vision_model = default_model
    default_image_model = 'ImageGeneration'

    image_models = [default_image_model]
    vision_models = [default_vision_model, 'GPT-4o', 'o3-mini', 'Gemini-PRO', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b', 'Gemini-Flash-2.0']

    premium_models = ['GPT-4o', 'o1', 'o3-mini', 'Gemini-PRO', 'Claude-Sonnet-3.5']
    vision_models = [default_vision_model, 'gpt-4o', 'o1', 'o3-mini', 'gemini-pro', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b', 'gemini-2.0-flash', 'deepseek-v3']

    userSelectedModel = ['DeepSeek-V3', 'DeepSeek-R1', 'BLACKBOXAI-PRO', 'Meta-Llama-3.3-70B-Instruct-Turbo', 'Mistral-Small-24B-Instruct-2501', 'DeepSeek-LLM-Chat-(67B)', 'DBRX-Instruct', 'Qwen-QwQ-32B-Preview', 'Nous-Hermes-2-Mixtral-8x7B-DPO', 'Gemini-Flash-2.0'] + premium_models
    userSelectedModel = ['gpt-4o', 'o1', 'o3-mini', 'gemini-pro', 'claude-sonnet-3.7', 'deepseek-v3', 'deepseek-r1', 'blackboxai-pro', 'Meta-Llama-3.3-70B-Instruct-Turbo', 'Mistral-Small-24B-Instruct-2501', 'DeepSeek-LLM-Chat-(67B)', 'dbrx-instruct', 'Qwen-QwQ-32B-Preview', 'Nous-Hermes-2-Mixtral-8x7B-DPO', 'gemini-2.0-flash']

    agentMode = {
        'DeepSeek-V3': {'mode': True, 'id': "deepseek-chat", 'name': "DeepSeek-V3"},
        'DeepSeek-R1': {'mode': True, 'id': "deepseek-reasoner", 'name': "DeepSeek-R1"},
        'deepseek-v3': {'mode': True, 'id': "deepseek-chat", 'name': "DeepSeek-V3"},
        'deepseek-r1': {'mode': True, 'id': "deepseek-reasoner", 'name': "DeepSeek-R1"},
        'Meta-Llama-3.3-70B-Instruct-Turbo': {'mode': True, 'id': "meta-llama/Llama-3.3-70B-Instruct-Turbo", 'name': "Meta-Llama-3.3-70B-Instruct-Turbo"},
        'Mistral-Small-24B-Instruct-2501': {'mode': True, 'id': "mistralai/Mistral-Small-24B-Instruct-2501", 'name': "Mistral-Small-24B-Instruct-2501"},
        'DeepSeek-LLM-Chat-(67B)': {'mode': True, 'id': "deepseek-ai/deepseek-llm-67b-chat", 'name': "DeepSeek-LLM-Chat-(67B)"},
        'DBRX-Instruct': {'mode': True, 'id': "databricks/dbrx-instruct", 'name': "DBRX-Instruct"},
        'dbrx-instruct': {'mode': True, 'id': "databricks/dbrx-instruct", 'name': "DBRX-Instruct"},
        'Qwen-QwQ-32B-Preview': {'mode': True, 'id': "Qwen/QwQ-32B-Preview", 'name': "Qwen-QwQ-32B-Preview"},
        'Nous-Hermes-2-Mixtral-8x7B-DPO': {'mode': True, 'id': "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", 'name': "Nous-Hermes-2-Mixtral-8x7B-DPO"},
        'Gemini-Flash-2.0': {'mode': True, 'id': "Gemini/Gemini-Flash-2.0", 'name': "Gemini-Flash-2.0"},
        'gemini-2.0-flash': {'mode': True, 'id': "Gemini/Gemini-Flash-2.0", 'name': "Gemini-Flash-2.0"},
    }

    trendingAgentMode = {
        "o1": {'mode': True, 'id': 'o1'},
        "o3-mini": {'mode': True, 'id': 'o3-mini'},
        "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
        "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
        'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
@@ -77,7 +74,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
        'PyTorch Agent': {'mode': True, 'id': "PyTorch Agent"},
        'React Agent': {'mode': True, 'id': "React Agent"},
        'Xcode Agent': {'mode': True, 'id': "Xcode Agent"},
        'BLACKBOXAI-PRO': {'mode': True, 'id': "BLACKBOXAI-PRO"},
        'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"},
        'Heroku Agent': {'mode': True, 'id': "Heroku Agent"},
        'Godot Agent': {'mode': True, 'id': "Godot Agent"},
        'Go Agent': {'mode': True, 'id': "Go Agent"},
@@ -100,38 +97,20 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
    models = list(dict.fromkeys([default_model, *userSelectedModel, *image_models, *list(agentMode.keys()), *list(trendingAgentMode.keys())]))

    model_aliases = {
        "blackboxai": "BLACKBOXAI",
        "gemini-1.5-flash": "gemini-1.5-flash",
        "deepseek-v3": "DeepSeek-V3",
        "deepseek-r1": "DeepSeek-R1",
        "gemini-1.5-pro": "gemini-pro",
        "llama-3.3-70b": "Meta-Llama-3.3-70B-Instruct-Turbo",
        "mixtral-small-28b": "Mistral-Small-24B-Instruct-2501",
        "deepseek-chat": "DeepSeek-LLM-Chat-(67B)",
        "dbrx-instruct": "DBRX-Instruct",
        "qwq-32b": "Qwen-QwQ-32B-Preview",
        "hermes-2-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
        "gemini-2.0-flash": "Gemini-Flash-2.0",
        "blackboxai-pro": "BLACKBOXAI-PRO",
        "claude-3.7-sonnet": "claude-sonnet-3.7",
        "flux": "ImageGeneration",
    }

    @classmethod
    def get_models(cls) -> list[str]:
        models = super().get_models()
        filtered = [m for m in models if m not in cls.premium_models]
        filtered += [f"{m} (Premium)" for m in cls.premium_models]
        return filtered

    @classmethod
    def get_model(cls, model: str, **kwargs) -> str:
        try:
            model = super().get_model(model, **kwargs)
            return model.split(" (Premium)")[0]
        except ModelNotSupportedError:
            base_model = model.split(" (Premium)")[0]
            if base_model in cls.premium_models:
                return base_model
            raise

    ENCRYPTED_SESSION = "eyJ1c2VyIjogeyJuYW1lIjogIkJMQUNLQk9YIEFJIiwgImVtYWlsIjogImdpc2VsZUBibGFja2JveC5haSIsICJpbWFnZSI6ICJodHRwczovL3l0My5nb29nbGV1c2VyY29udGVudC5jb20vQjd6RVlVSzUxWnNQYmFSUFVhMF9ZbnQ1WV9URFZoTE4tVjAzdndRSHM0eF96a2g4a1psLXkxcXFxb3hoeFFzcS1wUVBHS0R0WFE9czE2MC1jLWstYzB4MDBmZmZmZmYtbm8tcmoifSwgImV4cGlyZXMiOiBudWxsfQ=="
    ENCRYPTED_SUBSCRIPTION_CACHE = "eyJzdGF0dXMiOiAiUFJFTUlVTSIsICJleHBpcnlUaW1lc3RhbXAiOiBudWxsLCAibGFzdENoZWNrZWQiOiBudWxsLCAiaXNUcmlhbFN1YnNjcmlwdGlvbiI6IHRydWV9"
    ENCRYPTED_IS_PREMIUM = "dHJ1ZQ=="

    @classmethod
    async def fetch_validated(cls, url: str = "https://www.blackbox.ai", force_refresh: bool = False) -> Optional[str]:
@@ -192,7 +171,21 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
    def generate_id(cls, length: int = 7) -> str:
        chars = string.ascii_letters + string.digits
        return ''.join(random.choice(chars) for _ in range(length))

    @staticmethod
    def decrypt_data(encrypted_data):
        try:
            return json.loads(base64.b64decode(encrypted_data).decode('utf-8'))
        except:
            return None

    @staticmethod
    def decrypt_bool(encrypted_data):
        try:
            return base64.b64decode(encrypted_data).decode('utf-8').lower() == 'true'
        except:
            return False

    @classmethod
    async def create_async_generator(
        cls,
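The three `ENCRYPTED_*` constants are plain base64 rather than real encryption, as `decrypt_data`/`decrypt_bool` above make explicit; their contents are easy to verify (the import path assumes the in-tree module layout):

```python
import base64
import json

from g4f.Provider.Blackbox import Blackbox

session = json.loads(base64.b64decode(Blackbox.ENCRYPTED_SESSION))
print(session["user"]["email"])  # the fixed pseudo-session shipped with the provider
print(base64.b64decode(Blackbox.ENCRYPTED_IS_PREMIUM).decode())  # "dHJ1ZQ==" -> "true"
```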
@@ -308,9 +301,9 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                "additionalInfo": "",
                "enableNewChats": False
            },
            "session": None,
            "isPremium": False,
            "subscriptionCache": None,
            "session": cls.decrypt_data(cls.ENCRYPTED_SESSION),
            "isPremium": cls.decrypt_bool(cls.ENCRYPTED_IS_PREMIUM),
            "subscriptionCache": cls.decrypt_data(cls.ENCRYPTED_SUBSCRIPTION_CACHE),
            "beastMode": False,
            "webSearchMode": False
        }

g4f/Provider/ChatGptEs.py
@@ -2,13 +2,18 @@ from __future__ import annotations

import os
import re
import json

from aiohttp import ClientSession
try:
    from curl_cffi.requests import Session
    has_curl_cffi = True
except ImportError:
    has_curl_cffi = False

from ..typing import AsyncResult, Messages
from ..requests.raise_for_status import raise_for_status
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
from ..errors import MissingRequirementsError

class ChatGptEs(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://chatgpt.es"
@@ -23,7 +28,7 @@ class ChatGptEs(AsyncGeneratorProvider, ProviderModelMixin):
    models = ['gpt-4', default_model, 'gpt-4o-mini']

    SYSTEM_PROMPT = "Your default language is English. Always respond in English unless the user's message is in a different language. If the user's message is not in English, respond in the language of the user's message. Maintain this language behavior throughout the conversation unless explicitly instructed otherwise. User input:"

    @classmethod
    async def create_async_generator(
        cls,
@@ -32,39 +37,101 @@ class ChatGptEs(AsyncGeneratorProvider, ProviderModelMixin):
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        if not has_curl_cffi:
            raise MissingRequirementsError('Install or update "curl_cffi" package | pip install -U curl_cffi')

        model = cls.get_model(model)
        prompt = f"{cls.SYSTEM_PROMPT} {format_prompt(messages)}"

        headers = {
            "authority": "chatgpt.es",
            "accept": "application/json",
        # Use curl_cffi with automatic Cloudflare bypass
        session = Session()
        session.headers.update({
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36",
            "referer": cls.url,
            "origin": cls.url,
            "referer": f"{cls.url}/chat",
            "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "application/x-www-form-urlencoded; charset=UTF-8",
            "x-requested-with": "XMLHttpRequest",
        })

        if proxy:
            session.proxies = {"https": proxy, "http": proxy}

        # First request to get nonce and post_id
        initial_response = session.get(cls.url, impersonate="chrome110")
        initial_text = initial_response.text

        # More comprehensive nonce extraction
        nonce_patterns = [
            r'<input\s+type=[\'"]hidden[\'"]\s+name=[\'"]_wpnonce[\'"]\s+value=[\'"]([^\'"]+)[\'"]',
            r'"_wpnonce":"([^"]+)"',
            r'var\s+wpaicg_nonce\s*=\s*[\'"]([^\'"]+)[\'"]',
            r'wpaicg_nonce\s*:\s*[\'"]([^\'"]+)[\'"]'
        ]

        nonce_ = None
        for pattern in nonce_patterns:
            match = re.search(pattern, initial_text)
            if match:
                nonce_ = match.group(1)
                break

        if not nonce_:
            # Try to find any nonce-like pattern as a last resort
            general_nonce = re.search(r'nonce[\'"]?\s*[=:]\s*[\'"]([a-zA-Z0-9]+)[\'"]', initial_text)
            if general_nonce:
                nonce_ = general_nonce.group(1)
            else:
                # Fallback, but this likely won't work
                nonce_ = "8cf9917be2"

        # Look for post_id in HTML
        post_id_patterns = [
            r'<input\s+type=[\'"]hidden[\'"]\s+name=[\'"]post_id[\'"]\s+value=[\'"]([^\'"]+)[\'"]',
            r'"post_id":"([^"]+)"',
            r'var\s+post_id\s*=\s*[\'"]?(\d+)[\'"]?'
        ]

        post_id = None
        for pattern in post_id_patterns:
            match = re.search(pattern, initial_text)
            if match:
                post_id = match.group(1)
                break

        if not post_id:
            post_id = "106"  # Default from curl example

        client_id = os.urandom(5).hex()

        # Prepare data
        data = {
            '_wpnonce': nonce_,
            'post_id': post_id,
            'url': cls.url,
            'action': 'wpaicg_chat_shortcode_message',
            'message': prompt,
            'bot_id': '0',
            'chatbot_identity': 'shortcode',
            'wpaicg_chat_client_id': client_id,
            'wpaicg_chat_history': json.dumps([f"Human: {prompt}"])
        }

        async with ClientSession(headers=headers) as session:
            initial_response = await session.get(cls.url)
            nonce_ = re.findall(r'data-nonce="(.+?)"', await initial_response.text())[0]
            post_id = re.findall(r'data-post-id="(.+?)"', await initial_response.text())[0]

            prompt = f"{cls.SYSTEM_PROMPT} {format_prompt(messages)}"

            payload = {
                'check_51710191': '1',
                '_wpnonce': nonce_,
                'post_id': post_id,
                'url': cls.url,
                'action': 'wpaicg_chat_shortcode_message',
                'message': prompt,
                'bot_id': '0',
                'chatbot_identity': 'shortcode',
                'wpaicg_chat_client_id': os.urandom(5).hex(),
                'wpaicg_chat_history': None
            }

            async with session.post(cls.api_endpoint, headers=headers, data=payload) as response:
                await raise_for_status(response)
                result = await response.json()
                if "Du musst das Kästchen anklicken!" in result['data']:
                    raise ValueError(result['data'])
                yield result['data']
        # Execute POST request
        response = session.post(
            cls.api_endpoint,
            data=data,
            impersonate="chrome110"
        )

        if response.status_code != 200:
            raise ValueError(f"Error: {response.status_code} - {response.text}")

        result = response.json()
        if "data" in result:
            if isinstance(result['data'], str) and "Du musst das Kästchen anklicken!" in result['data']:
                raise ValueError(result['data'])
            yield result['data']
        else:
            raise ValueError(f"Unexpected response format: {result}")

g4f/Provider/DeepInfraChat.py
@@ -9,16 +9,18 @@ class DeepInfraChat(OpenaiTemplate):
    api_base = "https://api.deepinfra.com/v1/openai"
    working = True

    default_model = 'meta-llama/Llama-3.3-70B-Instruct-Turbo'
    default_model = 'deepseek-ai/DeepSeek-V3-Turbo'
    default_vision_model = 'meta-llama/Llama-3.2-90B-Vision-Instruct'
    vision_models = [default_vision_model, 'openbmb/MiniCPM-Llama3-V-2_5']
    models = [
        'meta-llama/Meta-Llama-3.1-8B-Instruct',
        default_model,
        'meta-llama/Llama-3.3-70B-Instruct-Turbo',
        'meta-llama/Llama-3.3-70B-Instruct',
        'deepseek-ai/DeepSeek-V3',
        default_model,
        'mistralai/Mistral-Small-24B-Instruct-2501',
        'deepseek-ai/DeepSeek-R1',
        'deepseek-ai/DeepSeek-R1-Turbo',
        'deepseek-ai/DeepSeek-R1-Distill-Llama-70B',
        'deepseek-ai/DeepSeek-R1-Distill-Qwen-32B',
        'microsoft/phi-4',
@@ -40,7 +42,9 @@ class DeepInfraChat(OpenaiTemplate):
        "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
        "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct",
        "deepseek-v3": "deepseek-ai/DeepSeek-V3",
        "deepseek-v3": default_model,
        "mixtral-small-28b": "mistralai/Mistral-Small-24B-Instruct-2501",
        "deepseek-r1": "deepseek-ai/DeepSeek-R1-Turbo",
        "deepseek-r1": "deepseek-ai/DeepSeek-R1",
        "deepseek-r1-distill-llama": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
        "deepseek-r1-distill-qwen": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",

g4f/Provider/Free2GPT.py
@@ -14,10 +14,12 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin

class Free2GPT(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://chat10.free2gpt.xyz"

    working = True
    supports_message_history = True
    default_model = 'mistral-7b'
    models = [default_model]

    default_model = 'gemini-1.5-pro'
    models = [default_model, 'gemini-1.5-flash']

    @classmethod
    async def create_async_generator(
@@ -36,15 +38,6 @@ class Free2GPT(AsyncGeneratorProvider, ProviderModelMixin):
            "Content-Type": "text/plain;charset=UTF-8",
            "Referer": f"{cls.url}/",
            "Origin": cls.url,
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Sec-Ch-Ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
            "Sec-Ch-Ua-Mobile": "?0",
            "Sec-Ch-Ua-Platform": '"Linux"',
            "Cache-Control": "no-cache",
            "Pragma": "no-cache",
            "Priority": "u=1, i",
        }
        async with ClientSession(
            connector=get_connector(connector, proxy), headers=headers

g4f/Provider/FreeGpt.py
@@ -27,6 +27,7 @@ class FreeGpt(AsyncGeneratorProvider, ProviderModelMixin):
    supports_system_message = True

    default_model = 'gemini-1.5-pro'
    models = [default_model, 'gemini-1.5-flash']

    @classmethod
    async def create_async_generator(

g4f/Provider/Liaobots.py
@@ -10,29 +10,65 @@ from .helper import get_connector
from ..requests import raise_for_status

models = {
    "gpt-4o-mini-free": {
        "id": "gpt-4o-mini-free",
        "name": "GPT-4o-Mini-Free",
        "model": "ChatGPT",
        "provider": "OpenAI",
        "maxLength": 31200,
        "tokenLimit": 7800,
        "context": "8K",
    },
    "grok-3": {
        "id": "grok-3",
        "name": "Grok-3",
        "model": "Grok",
        "provider": "x.ai",
    "claude-3-5-sonnet-20241022": {
        "id": "claude-3-5-sonnet-20241022",
        "name": "Claude-3.5-Sonnet-V2",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "grok-3-r1": {
        "id": "grok-3-r1",
        "name": "Grok-3-Thinking",
        "model": "Grok",
        "provider": "x.ai",
    "claude-3-5-sonnet-20241022-t": {
        "id": "claude-3-5-sonnet-20241022-t",
        "name": "Claude-3.5-Sonnet-V2-T",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "claude-3-7-sonnet-20250219": {
        "id": "claude-3-7-sonnet-20250219",
        "name": "Claude-3.7-Sonnet",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "claude-3-7-sonnet-20250219-t": {
        "id": "claude-3-7-sonnet-20250219-t",
        "name": "Claude-3.7-Sonnet-T",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "claude-3-7-sonnet-20250219-thinking": {
        "id": "claude-3-7-sonnet-20250219-thinking",
        "name": "Claude-3.7-Sonnet-Thinking",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "claude-3-opus-20240229": {
        "id": "claude-3-opus-20240229",
        "name": "Claude-3-Opus",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "claude-3-sonnet-20240229": {
        "id": "claude-3-sonnet-20240229",
        "name": "Claude-3-Sonnet",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
@@ -64,96 +100,6 @@ models = {
        "tokenLimit": 100000,
        "context": "128K",
    },
    "gpt-4o-2024-11-20": {
        "id": "gpt-4o-2024-11-20",
        "name": "GPT-4o",
        "model": "ChatGPT",
        "provider": "OpenAI",
        "maxLength": 260000,
        "tokenLimit": 126000,
        "context": "128K",
    },
    "gpt-4o-mini-2024-07-18": {
        "id": "gpt-4o-mini-2024-07-18",
        "name": "GPT-4o-Mini",
        "model": "ChatGPT",
        "provider": "OpenAI",
        "maxLength": 260000,
        "tokenLimit": 126000,
        "context": "128K",
    },
    "o3-mini": {
        "id": "o3-mini",
        "name": "o3-mini",
        "model": "o3",
        "provider": "OpenAI",
        "maxLength": 400000,
        "tokenLimit": 100000,
        "context": "128K",
    },
    "o1-preview-2024-09-12": {
        "id": "o1-preview-2024-09-12",
        "name": "o1-preview",
        "model": "o1",
        "provider": "OpenAI",
        "maxLength": 400000,
        "tokenLimit": 100000,
        "context": "128K",
    },
    "claude-3-opus-20240229": {
        "id": "claude-3-opus-20240229",
        "name": "Claude-3-Opus",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "claude-3-5-sonnet-20240620": {
        "id": "claude-3-5-sonnet-20240620",
        "name": "Claude-3.5-Sonnet",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "claude-3-5-sonnet-20241022": {
        "id": "claude-3-5-sonnet-20241022",
        "name": "Claude-3.5-Sonnet-V2",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "claude-3-sonnet-20240229": {
        "id": "claude-3-sonnet-20240229",
        "name": "Claude-3-Sonnet",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "claude-3-opus-20240229-t": {
        "id": "claude-3-opus-20240229-t",
        "name": "Claude-3-Opus-T",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "claude-3-5-sonnet-20241022-t": {
        "id": "claude-3-5-sonnet-20241022-t",
        "name": "Claude-3.5-Sonnet-V2-T",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "gemini-2.0-flash": {
        "id": "gemini-2.0-flash",
        "name": "Gemini-2.0-Flash",
@@ -181,34 +127,93 @@ models = {
        "tokenLimit": 1000000,
        "context": "1024K",
    },
    "gpt-4o-2024-08-06": {
        "id": "gpt-4o-2024-08-06",
        "name": "GPT-4o",
        "model": "ChatGPT",
        "provider": "OpenAI",
        "maxLength": 260000,
        "tokenLimit": 126000,
        "context": "128K",
    },
    "gpt-4o-mini-2024-07-18": {
        "id": "gpt-4o-mini-2024-07-18",
        "name": "GPT-4o-Mini",
        "model": "ChatGPT",
        "provider": "OpenAI",
        "maxLength": 260000,
        "tokenLimit": 126000,
        "context": "128K",
    },
    "gpt-4o-mini-free": {
        "id": "gpt-4o-mini-free",
        "name": "GPT-4o-Mini-Free",
        "model": "ChatGPT",
        "provider": "OpenAI",
        "maxLength": 31200,
        "tokenLimit": 7800,
        "context": "8K",
    },
    "grok-3": {
        "id": "grok-3",
        "name": "Grok-3",
        "model": "Grok",
        "provider": "x.ai",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "grok-3-r1": {
        "id": "grok-3-r1",
        "name": "Grok-3-Thinking",
        "model": "Grok",
        "provider": "x.ai",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "o3-mini": {
        "id": "o3-mini",
        "name": "o3-mini",
        "model": "o3",
        "provider": "OpenAI",
        "maxLength": 400000,
        "tokenLimit": 100000,
        "context": "128K",
    },
}


class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://liaobots.site"
    working = True
    supports_message_history = True
    supports_system_message = True

    default_model = "gpt-4o-2024-11-20"
    default_model = "gpt-4o-2024-08-06"
    models = list(models.keys())
    model_aliases = {
        "gpt-4o-mini": "gpt-4o-mini-free",
        "gpt-4o": default_model,
        "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
        "gpt-4": default_model,

        "o1-preview": "o1-preview-2024-09-12",
        # Anthropic
        "claude-3.5-sonnet": "claude-3-5-sonnet-20241022",
        "claude-3.5-sonnet": "claude-3-5-sonnet-20241022-t",
        "claude-3.7-sonnet": "claude-3-7-sonnet-20250219",
        "claude-3.7-sonnet": "claude-3-7-sonnet-20250219-t",
        "claude-3.7-sonnet-thinking": "claude-3-7-sonnet-20250219-thinking",
        "claude-3-opus": "claude-3-opus-20240229",
        "claude-3-sonnet": "claude-3-sonnet-20240229",

        # DeepSeek
        "deepseek-r1": "deepseek-r1-distill-llama-70b",

        "claude-3-opus": "claude-3-opus-20240229",
        "claude-3.5-sonnet": "claude-3-5-sonnet-20240620",
        "claude-3.5-sonnet": "claude-3-5-sonnet-20241022",
        "claude-3-sonnet": "claude-3-sonnet-20240229",
        "claude-3-opus": "claude-3-opus-20240229-t",
        "claude-3.5-sonnet": "claude-3-5-sonnet-20241022-t",

        # Google
        "gemini-2.0-flash-thinking": "gemini-2.0-flash-thinking-exp",
        "gemini-2.0-pro": "gemini-2.0-pro-exp",

        # OpenAI
        "gpt-4": default_model,
        "gpt-4o": default_model,
        "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
        "gpt-4o-mini": "gpt-4o-mini-free",
    }

    _auth_code = ""

g4f/Provider/Mhystical.py
@@ -1,49 +0,0 @@
from __future__ import annotations

from ..typing import AsyncResult, Messages
from .template import OpenaiTemplate

class Mhystical(OpenaiTemplate):
    url = "https://mhystical.cc"
    api_endpoint = "https://api.mhystical.cc/v1/completions"
    login_url = "https://mhystical.cc/dashboard"
    api_key = "mhystical"

    working = True
    supports_stream = False  # Set to False, as streaming is not specified in ChatifyAI
    supports_system_message = False

    default_model = 'gpt-4'
    models = [default_model]

    @classmethod
    def get_model(cls, model: str, **kwargs) -> str:
        cls.last_model = cls.default_model
        return cls.default_model

    @classmethod
    def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        stream: bool = False,
        api_key: str = None,
        **kwargs
    ) -> AsyncResult:
        headers = {
            "x-api-key": cls.api_key,
            "Content-Type": "application/json",
            "accept": "*/*",
            "cache-control": "no-cache",
            "origin": cls.url,
            "referer": f"{cls.url}/",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
        }
        return super().create_async_generator(
            model=model,
            messages=messages,
            stream=cls.supports_stream,
            api_endpoint=cls.api_endpoint,
            headers=headers,
            **kwargs
        )

g4f/Provider/PollinationsAI.py
@@ -13,7 +13,7 @@ from ..image import to_data_uri
from ..errors import ModelNotFoundError
from ..requests.raise_for_status import raise_for_status
from ..requests.aiohttp import get_connector
from ..providers.response import ImageResponse, ImagePreview, FinishReason, Usage, Audio
from ..providers.response import ImageResponse, ImagePreview, FinishReason, Usage
from .. import debug

DEFAULT_HEADERS = {
@@ -32,8 +32,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
    supports_message_history = True

    # API endpoints
    text_api_endpoint = "https://text.pollinations.ai"
    openai_endpoint = "https://text.pollinations.ai/openai"
    text_api_endpoint = "https://text.pollinations.ai/openai"
    image_api_endpoint = "https://image.pollinations.ai/"

    # Models configuration
@@ -44,24 +43,21 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
    image_models = [default_image_model]
    extra_image_models = ["flux-pro", "flux-dev", "flux-schnell", "midjourney", "dall-e-3"]
    vision_models = [default_vision_model, "gpt-4o-mini", "o1-mini"]
    extra_text_models = ["claude", "claude-email", "deepseek-reasoner", "deepseek-r1"] + vision_models
    audio_models = {}
    extra_text_models = vision_models
    _models_loaded = False
    model_aliases = {
        ### Text Models ###
        "gpt-4o-mini": "openai",
        "gpt-4": "openai-large",
        "gpt-4o": "openai-large",
        "o1-mini": "openai-reasoning",
        "qwen-2.5-coder-32b": "qwen-coder",
        "llama-3.3-70b": "llama",
        "mistral-nemo": "mistral",
        "gpt-4o": "searchgpt",
        "deepseek-chat": "claude-hybridspace",
        "gpt-4o-mini": "searchgpt",
        "llama-3.1-8b": "llamalight",
        "gpt-4o-vision": "gpt-4o",
        "gpt-4o-mini-vision": "gpt-4o-mini",
        "deepseek-chat": "claude-email",
        "deepseek-r1": "deepseek-reasoner",
        "llama-3.3-70b": "llama-scaleway",
        "phi-4": "phi",
        "gemini-2.0": "gemini",
        "gemini-2.0-flash": "gemini",
        "gemini-2.0-flash-thinking": "gemini-thinking",
@@ -92,17 +88,10 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
            # Update of text models
            text_response = requests.get("https://text.pollinations.ai/models")
            text_response.raise_for_status()
            models = text_response.json()
            original_text_models = [
                model.get("name")
                for model in models
                if model.get("type") == "chat"
                for model in text_response.json()
            ]
            cls.audio_models = {
                model.get("name"): model.get("voices")
                for model in models
                if model.get("audio")
            }

            # Combining text models
            combined_text = (
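The `get_models` logic above builds its lists from the public endpoint shown in the diff; the same data can be pulled directly (a live network call; the response shape is as of this commit):

```python
import requests

payload = requests.get("https://text.pollinations.ai/models").json()
text_models = [m.get("name") for m in payload]
audio_models = {m.get("name"): m.get("voices") for m in payload if m.get("audio")}
print(text_models[:5], list(audio_models))
```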
@@ -273,18 +262,8 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
                "seed": seed,
                "cache": cache
            })
            if "gemini" in model:
                data.pop("seed")
            if model in cls.audio_models:
                data["voice"] = random.choice(cls.audio_models[model])
                url = f"{cls.text_api_endpoint}"
            else:
                url = cls.openai_endpoint
            async with session.post(url, json=data) as response:
            async with session.post(cls.text_api_endpoint, json=data) as response:
                await raise_for_status(response)
                if response.headers["content-type"] == "audio/mpeg":
                    yield Audio(await response.read())
                    return
                result = await response.json()
                choice = result["choices"][0]
                message = choice.get("message", {})

g4f/Provider/PollinationsImage.py
@@ -7,6 +7,7 @@ from ..typing import AsyncResult, Messages
from .PollinationsAI import PollinationsAI

class PollinationsImage(PollinationsAI):
    label = "PollinationsImage"
    default_model = "flux"
    default_vision_model = None
    default_image_model = default_model

g4f/Provider/TeachAnything.py
@@ -12,19 +12,11 @@ from .helper import format_prompt
class TeachAnything(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://www.teach-anything.com"
    api_endpoint = "/api/generate"

    working = True
    default_model = "llama-3.1-70b"
    models = [default_model]

    @classmethod
    def get_model(cls, model: str) -> str:
        if model in cls.models:
            return model
        elif model in cls.model_aliases:
            return cls.model_aliases[model]
        else:
            return cls.default_model

    default_model = 'gemini-1.5-pro'
    models = [default_model, 'gemini-1.5-flash']

    @classmethod
    async def create_async_generator(

g4f/Provider/__init__.py
@@ -30,14 +30,12 @@ from .Glider import Glider
from .ImageLabs import ImageLabs
from .Jmuz import Jmuz
from .Liaobots import Liaobots
from .Mhystical import Mhystical
from .OIVSCode import OIVSCode
from .PerplexityLabs import PerplexityLabs
from .Pi import Pi
from .Pizzagpt import Pizzagpt
from .PollinationsAI import PollinationsAI
from .PollinationsImage import PollinationsImage
from .Prodia import Prodia
from .TeachAnything import TeachAnything
from .You import You
from .Yqcloud import Yqcloud

g4f/Provider/Prodia.py > g4f/Provider/not_working/Prodia.py
@@ -4,14 +4,15 @@ from aiohttp import ClientSession
 import asyncio
 import random

-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..providers.response import ImageResponse
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...providers.response import ImageResponse

 class Prodia(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://app.prodia.com"
     api_endpoint = "https://api.prodia.com/generate"
-    working = True
+
+    working = False

     default_model = 'absolutereality_v181.safetensors [3d9d4d2b]'
     default_image_model = default_model
@@ -19,6 +19,7 @@ from .Koala import Koala
 from .MagickPen import MagickPen
 from .MyShell import MyShell
 from .Poe import Poe
+from .Prodia import Prodia
 from .Raycast import Raycast
 from .ReplicateHome import ReplicateHome
 from .RobocodersAPI import RobocodersAPI
g4f/debug.py
@@ -1,17 +1,21 @@
 import sys
+from typing import Callable, List, Optional, Any

+# Warning: name could conflict with Python's built-in logging module
 logging: bool = False
 version_check: bool = True
-version: str = None
-log_handler: callable = print
-logs: list = []
+version: Optional[str] = None
+log_handler: Callable = print  # More specifically: Callable[[Any, Optional[Any]], None]
+logs: List[str] = []

-def log(text, file = None):
+def log(text: Any, file: Optional[Any] = None) -> None:
+    """Log a message if logging is enabled."""
     if logging:
         log_handler(text, file=file)

-def error(error, name: str = None):
+def error(error: Any, name: Optional[str] = None) -> None:
+    """Log an error message to stderr."""
     log(
         error if isinstance(error, str) else f"{type(error).__name__ if name is None else name}: {error}",
         file=sys.stderr
     )
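Since `log_handler` keeps the print-style `(text, file=...)` signature, callers can swap in any compatible callable. A short usage sketch based on the module shown above, routing g4f debug output into the standard library's logging instead of `print`:

```python
import logging as stdlib_logging
from g4f import debug

stdlib_logging.basicConfig(level=stdlib_logging.INFO)
logger = stdlib_logging.getLogger("g4f")

def handler(text, file=None):
    # `file` is part of the print-style signature; it is ignored here.
    logger.info(text)

debug.logging = True        # enable logging
debug.log_handler = handler
debug.log("hello from g4f.debug")
```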
@@ -4,7 +4,7 @@ from dataclasses import dataclass

 from .Provider import IterListProvider, ProviderType
 from .Provider import (
-    ### no auth required ###
+    ### No Auth Required ###
     AllenAI,
     Blackbox,
     ChatGLM,
@@ -13,6 +13,8 @@ from .Provider import (
     Copilot,
     DDG,
     DeepInfraChat,
+    Free2GPT,
+    FreeGpt,
     HuggingSpace,
     G4F,
     Janus_Pro_7B,
@@ -20,7 +22,6 @@ from .Provider import (
     ImageLabs,
     Jmuz,
     Liaobots,
-    Mhystical,
     OIVSCode,
     PerplexityLabs,
     Pi,
@@ -29,7 +30,7 @@ from .Provider import (
     TeachAnything,
     Yqcloud,

-    ### needs auth ###
+    ### Needs Auth ###
     BingCreateImages,
     CopilotAccount,
     Gemini,
@@ -80,9 +81,13 @@ default = Model(
         Blackbox,
         Copilot,
         DeepInfraChat,
-        ChatGptEs,
+        AllenAI,
         PollinationsAI,
+        OIVSCode,
+        ChatGptEs,
+        Free2GPT,
+        FreeGpt,
+        Glider,
         OpenaiChat,
         Jmuz,
         Cloudflare,
@@ -115,14 +120,14 @@ default_vision = Model(
 gpt_4 = Model(
     name = 'gpt-4',
     base_provider = 'OpenAI',
-    best_provider = IterListProvider([DDG, Jmuz, ChatGptEs, PollinationsAI, Yqcloud, Copilot, OpenaiChat, Liaobots, Mhystical])
+    best_provider = IterListProvider([DDG, Jmuz, ChatGptEs, PollinationsAI, Yqcloud, Copilot, OpenaiChat, Liaobots])
 )

 # gpt-4o
 gpt_4o = VisionModel(
     name = 'gpt-4o',
     base_provider = 'OpenAI',
-    best_provider = IterListProvider([Jmuz, ChatGptEs, PollinationsAI, Copilot, Liaobots, OpenaiChat])
+    best_provider = IterListProvider([Blackbox, Jmuz, ChatGptEs, PollinationsAI, Copilot, Liaobots, OpenaiChat])
 )

 gpt_4o_mini = Model(
@@ -135,20 +140,20 @@ gpt_4o_mini = Model(
 o1 = Model(
     name = 'o1',
     base_provider = 'OpenAI',
-    best_provider = OpenaiAccount
+    best_provider = IterListProvider([Blackbox, OpenaiAccount])
 )

-o1_preview = Model(
-    name = 'o1-preview',
+o1_mini = Model(
+    name = 'o1-mini',
     base_provider = 'OpenAI',
-    best_provider = Liaobots
+    best_provider = PollinationsAI
 )

 # o3
 o3_mini = Model(
     name = 'o3-mini',
     base_provider = 'OpenAI',
-    best_provider = IterListProvider([DDG, Liaobots])
+    best_provider = IterListProvider([DDG, Blackbox, Liaobots])
 )

 ### GigaChat ###
@@ -194,7 +199,7 @@ llama_3_1_8b = Model(
 llama_3_1_70b = Model(
     name = "llama-3.1-70b",
     base_provider = "Meta Llama",
-    best_provider = IterListProvider([Blackbox, Glider, Jmuz, TeachAnything])
+    best_provider = IterListProvider([Blackbox, Glider, Jmuz])
 )

 llama_3_1_405b = Model(
@@ -289,7 +294,7 @@ phi_3_5_mini = Model(
 phi_4 = Model(
     name = "phi-4",
     base_provider = "Microsoft",
-    best_provider = DeepInfraChat
+    best_provider = IterListProvider([DeepInfraChat, PollinationsAI])
 )

 # wizardlm
@@ -324,13 +329,13 @@ gemini_exp = Model(
 gemini_1_5_flash = Model(
     name = 'gemini-1.5-flash',
     base_provider = 'Google DeepMind',
-    best_provider = IterListProvider([Blackbox, Jmuz, GeminiPro])
+    best_provider = IterListProvider([Blackbox, Free2GPT, FreeGpt, TeachAnything, Jmuz, GeminiPro])
 )

 gemini_1_5_pro = Model(
     name = 'gemini-1.5-pro',
     base_provider = 'Google DeepMind',
-    best_provider = IterListProvider([Jmuz, GeminiPro])
+    best_provider = IterListProvider([Blackbox, Free2GPT, FreeGpt, TeachAnything, Jmuz, GeminiPro])
 )

 # gemini-2.0
@@ -346,6 +351,12 @@ gemini_2_0_flash_thinking = Model(
     best_provider = Liaobots
 )

+gemini_2_0_pro = Model(
+    name = 'gemini-2.0-pro',
+    base_provider = 'Google DeepMind',
+    best_provider = Liaobots
+)

 ### Anthropic ###
 # claude 3
 claude_3_haiku = Model(
@@ -374,6 +385,19 @@ claude_3_5_sonnet = Model(
     best_provider = IterListProvider([Jmuz, Liaobots])
 )

+# claude 3.7
+claude_3_7_sonnet = Model(
+    name = 'claude-3.7-sonnet',
+    base_provider = 'Anthropic',
+    best_provider = IterListProvider([Blackbox, Liaobots])
+)
+
+claude_3_7_sonnet_thinking = Model(
+    name = 'claude-3.7-sonnet-thinking',
+    base_provider = 'Anthropic',
+    best_provider = Liaobots
+)

 ### Reka AI ###
 reka_core = Model(
     name = 'reka-core',
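Once registered in `ModelUtils` below, the new Claude entry is reachable through the public client. A short usage sketch (the client API itself is unchanged by this diff; `IterListProvider` tries Blackbox first, then falls through to Liaobots on failure):

```python
from g4f.client import Client

client = Client()
response = client.chat.completions.create(
    model="claude-3.7-sonnet",
    messages=[{"role": "user", "content": "Say hello"}],
)
print(response.choices[0].message.content)
```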
@@ -468,7 +492,7 @@ pi = Model(
 deepseek_chat = Model(
     name = 'deepseek-chat',
     base_provider = 'DeepSeek',
-    best_provider = IterListProvider([Blackbox, Jmuz, PollinationsAI])
+    best_provider = IterListProvider([Blackbox, Jmuz])
 )

 deepseek_v3 = Model(
@@ -720,7 +744,7 @@ class ModelUtils:

         # o1
         o1.name: o1,
-        o1_preview.name: o1_preview,
+        o1_mini.name: o1_mini,

         # o3
         o3_mini.name: o3_mini,
@@ -777,6 +801,7 @@ class ModelUtils:
         gemini_1_5_flash.name: gemini_1_5_flash,
         gemini_2_0_flash.name: gemini_2_0_flash,
         gemini_2_0_flash_thinking.name: gemini_2_0_flash_thinking,
+        gemini_2_0_pro.name: gemini_2_0_pro,

         ### Anthropic ###
         # claude 3
@@ -786,6 +811,10 @@ class ModelUtils:

         # claude 3.5
         claude_3_5_sonnet.name: claude_3_5_sonnet,

+        # claude 3.7
+        claude_3_7_sonnet.name: claude_3_7_sonnet,
+        claude_3_7_sonnet_thinking.name: claude_3_7_sonnet_thinking,
+
         ### Reka AI ###
         reka_core.name: reka_core,
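A hedged lookup sketch: `ModelUtils.convert` (assumed unchanged by this diff) maps the registered name strings above to their `Model` instances, which is how string model arguments are resolved at runtime:

```python
from g4f.models import ModelUtils

model = ModelUtils.convert["claude-3.7-sonnet"]
print(model.base_provider)   # -> Anthropic
print(model.best_provider)   # -> the IterListProvider([Blackbox, Liaobots]) defined above
```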
@@ -1,68 +1,112 @@
 from __future__ import annotations

 import re
 import base64
-from typing import Union
+from typing import Union, Dict, List, Optional
 from abc import abstractmethod
 from urllib.parse import quote_plus, unquote_plus

 def quote_url(url: str) -> str:
-    url = unquote_plus(url)
-    url = url.split("//", maxsplit=1)
+    """
+    Quote parts of a URL while preserving the domain structure.
+
+    Args:
+        url: The URL to quote
+
+    Returns:
+        str: The properly quoted URL
+    """
+    # Only unquote if needed to avoid double-unquoting
+    if '%' in url:
+        url = unquote_plus(url)
+
+    url_parts = url.split("//", maxsplit=1)
     # If there is no "//" in the URL, then it is a relative URL
-    if len(url) == 1:
-        return quote_plus(url[0], '/?&=#')
-    url[1] = url[1].split("/", maxsplit=1)
+    if len(url_parts) == 1:
+        return quote_plus(url_parts[0], '/?&=#')
+
+    protocol, rest = url_parts
+    domain_parts = rest.split("/", maxsplit=1)
     # If there is no "/" after the domain, then it is a domain URL
-    if len(url[1]) == 1:
-        return url[0] + "//" + url[1][0]
-    return url[0] + "//" + url[1][0] + "/" + quote_plus(url[1][1], '/?&=#')
+    if len(domain_parts) == 1:
+        return f"{protocol}//{domain_parts[0]}"
+
+    domain, path = domain_parts
+    return f"{protocol}//{domain}/{quote_plus(path, '/?&=#')}"

 def quote_title(title: str) -> str:
-    if title:
-        return " ".join(title.split())
-    return ""
+    """
+    Normalize whitespace in a title.
+
+    Args:
+        title: The title to normalize
+
+    Returns:
+        str: The title with normalized whitespace
+    """
+    return " ".join(title.split()) if title else ""

-def format_link(url: str, title: str = None) -> str:
+def format_link(url: str, title: Optional[str] = None) -> str:
+    """
+    Format a URL and title as a markdown link.
+
+    Args:
+        url: The URL to link to
+        title: The title to display. If None, extracts from URL
+
+    Returns:
+        str: The formatted markdown link
+    """
     if title is None:
-        title = unquote_plus(url.split("//", maxsplit=1)[1].split("?")[0].replace("www.", ""))
+        try:
+            title = unquote_plus(url.split("//", maxsplit=1)[1].split("?")[0].replace("www.", ""))
+        except IndexError:
+            title = url
     return f"[{quote_title(title)}]({quote_url(url)})"

-def format_image(image: str, alt: str, preview: str = None) -> str:
+def format_image(image: str, alt: str, preview: Optional[str] = None) -> str:
     """
     Formats the given image as a markdown string.

     Args:
         image: The image to format.
-        alt (str): The alt for the image.
-        preview (str, optional): The preview URL format. Defaults to "{image}?w=200&h=200".
+        alt: The alt text for the image.
+        preview: The preview URL format. Defaults to the original image.

     Returns:
         str: The formatted markdown string.
     """
-    return f"[![{alt}]({quote_url(preview.replace('{image}', image) if preview else image)})]({quote_url(image)})"
+    preview_url = preview.replace('{image}', image) if preview else image
+    return f"[![{alt}]({quote_url(preview_url)})]({quote_url(image)})"

-def format_images_markdown(images: Union[str, list], alt: str, preview: Union[str, list] = None) -> str:
+def format_images_markdown(images: Union[str, List[str]], alt: str,
+                           preview: Union[str, List[str]] = None) -> str:
     """
     Formats the given images as a markdown string.

     Args:
-        images: The images to format.
-        alt (str): The alt for the images.
-        preview (str, optional): The preview URL format. Defaults to "{image}?w=200&h=200".
+        images: The image or list of images to format.
+        alt: The alt text for the images.
+        preview: The preview URL format or list of preview URLs.
+            If not provided, original images are used.

     Returns:
         str: The formatted markdown string.
     """
     if isinstance(images, list) and len(images) == 1:
         images = images[0]

     if isinstance(images, str):
         result = format_image(images, alt, preview)
     else:
         result = "\n".join(
-            format_image(image, f"#{idx+1} {alt}", preview[idx] if isinstance(preview, list) else preview)
+            format_image(
+                image,
+                f"#{idx+1} {alt}",
+                preview[idx] if isinstance(preview, list) and idx < len(preview) else preview
+            )
             for idx, image in enumerate(images)
         )

     start_flag = "<!-- generated images start -->\n"
     end_flag = "<!-- generated images end -->\n"
     return f"\n{start_flag}{result}\n{end_flag}\n"
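A usage sketch for the helpers above, grounded in the code as shown (`example.com` and `img.host` are placeholder hosts):

```python
from g4f.providers.response import format_link, format_images_markdown

print(format_link("http://example.com/some path?q=1"))
# -> [example.com/some path](http://example.com/some+path?q=1)
#    quote_url() escapes the path while keeping /?&=# intact.

md = format_images_markdown(
    ["http://img.host/a.png", "http://img.host/b.png"],
    alt="cat",
)
# Produces one "[![alt](preview)](image)" link per image, numbered #1, #2, ...,
# wrapped in the "<!-- generated images start/end -->" flags shown above.
```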
@@ -70,21 +114,25 @@ def format_images_markdown(images: Union[str, list], alt: str, preview: Union[st
 class ResponseType:
     @abstractmethod
     def __str__(self) -> str:
+        """Convert the response to a string representation."""
         raise NotImplementedError

 class JsonMixin:
     def __init__(self, **kwargs) -> None:
+        """Initialize with keyword arguments as attributes."""
         for key, value in kwargs.items():
             setattr(self, key, value)

-    def get_dict(self):
+    def get_dict(self) -> Dict:
+        """Return a dictionary of non-private attributes."""
         return {
             key: value
             for key, value in self.__dict__.items()
             if not key.startswith("__")
         }

-    def reset(self):
+    def reset(self) -> None:
+        """Reset all attributes."""
         self.__dict__ = {}

 class RawResponse(ResponseType, JsonMixin):
@@ -92,17 +140,21 @@ class RawResponse(ResponseType, JsonMixin):

 class HiddenResponse(ResponseType):
     def __str__(self) -> str:
+        """Hidden responses return an empty string."""
         return ""

 class FinishReason(JsonMixin, HiddenResponse):
     def __init__(self, reason: str) -> None:
+        """Initialize with a reason."""
         self.reason = reason

 class ToolCalls(HiddenResponse):
-    def __init__(self, list: list):
+    def __init__(self, list: List) -> None:
+        """Initialize with a list of tool calls."""
         self.list = list

-    def get_list(self) -> list:
+    def get_list(self) -> List:
+        """Return the list of tool calls."""
         return self.list

 class Usage(JsonMixin, HiddenResponse):
@@ -113,24 +165,28 @@ class AuthResult(JsonMixin, HiddenResponse):

 class TitleGeneration(HiddenResponse):
     def __init__(self, title: str) -> None:
+        """Initialize with a title."""
         self.title = title

 class DebugResponse(HiddenResponse):
     def __init__(self, log: str) -> None:
+        """Initialize with a log message."""
         self.log = log

 class Reasoning(ResponseType):
     def __init__(
         self,
-        token: str = None,
-        status: str = None,
-        is_thinking: str = None
+        token: Optional[str] = None,
+        status: Optional[str] = None,
+        is_thinking: Optional[str] = None
     ) -> None:
+        """Initialize with token, status, and thinking state."""
         self.token = token
         self.status = status
         self.is_thinking = is_thinking

     def __str__(self) -> str:
+        """Return string representation based on available attributes."""
         if self.is_thinking is not None:
             return self.is_thinking
         if self.token is not None:
@@ -139,20 +195,23 @@ class Reasoning(ResponseType):
             return f"{self.status}\n"
         return ""

-    def get_dict(self):
+    def get_dict(self) -> Dict:
+        """Return a dictionary representation of the reasoning."""
         if self.is_thinking is None:
             if self.status is None:
                 return {"token": self.token}
-            {"token": self.token, "status": self.status}
+            return {"token": self.token, "status": self.status}
         return {"token": self.token, "status": self.status, "is_thinking": self.is_thinking}

 class Sources(ResponseType):
-    def __init__(self, sources: list[dict[str, str]]) -> None:
+    def __init__(self, sources: List[Dict[str, str]]) -> None:
+        """Initialize with a list of source dictionaries."""
         self.list = []
         for source in sources:
             self.add_source(source)

-    def add_source(self, source: dict[str, str]):
+    def add_source(self, source: Union[Dict[str, str], str]) -> None:
+        """Add a source to the list, cleaning the URL if necessary."""
         source = source if isinstance(source, dict) else {"url": source}
         url = source.get("url", source.get("link", None))
         if url is not None:
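The `get_dict` change above is a real bug fix, not just a type hint: the old version built the `{"token", "status"}` dict and discarded it (a statement with no effect), so that branch silently fell through. A quick demonstration against the fixed code:

```python
from g4f.providers.response import Reasoning

r = Reasoning(token="partial thought", status="thinking")
# With the added `return`, both fields are now reported.
assert r.get_dict() == {"token": "partial thought", "status": "thinking"}
```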
@@ -161,86 +220,98 @@ class Sources(ResponseType):
         self.list.append(source)

     def __str__(self) -> str:
+        """Return formatted sources as a string."""
         if not self.list:
             return ""
         return "\n\n\n\n" + ("\n>\n".join([
             f"> [{idx}] {format_link(link['url'], link.get('title', None))}"
             for idx, link in enumerate(self.list)
         ]))

 class YouTube(ResponseType):
-    def __init__(self, ids: list[str]) -> None:
+    def __init__(self, ids: List[str]) -> None:
+        """Initialize with a list of YouTube IDs."""
         self.ids = ids

     def __str__(self) -> str:
+        """Return YouTube embeds as a string."""
         if not self.ids:
             return ""
         return "\n\n" + ("\n".join([
             f'<iframe type="text/html" src="https://www.youtube.com/embed/{id}"></iframe>'
             for id in self.ids
         ]))

 class Audio(HiddenResponse):
     def __init__(self, data: bytes) -> None:
         self.data = data

     def to_string(self) -> str:
         data_base64 = base64.b64encode(self.data).decode()
         return f"data:audio/mpeg;base64,{data_base64}"

 class BaseConversation(ResponseType):
     def __str__(self) -> str:
+        """Return an empty string by default."""
         return ""

 class JsonConversation(BaseConversation, JsonMixin):
     pass

 class SynthesizeData(HiddenResponse, JsonMixin):
-    def __init__(self, provider: str, data: dict):
+    def __init__(self, provider: str, data: Dict) -> None:
+        """Initialize with provider and data."""
         self.provider = provider
         self.data = data

 class RequestLogin(HiddenResponse):
     def __init__(self, label: str, login_url: str) -> None:
+        """Initialize with label and login URL."""
         self.label = label
         self.login_url = login_url

     def to_string(self) -> str:
+        """Return formatted login link as a string."""
         return format_link(self.login_url, f"[Login to {self.label}]") + "\n\n"

 class ImageResponse(ResponseType):
     def __init__(
         self,
-        images: Union[str, list],
+        images: Union[str, List[str]],
         alt: str,
-        options: dict = {}
-    ):
+        options: Dict = {}
+    ) -> None:
+        """Initialize with images, alt text, and options."""
         self.images = images
         self.alt = alt
         self.options = options

     def __str__(self) -> str:
+        """Return images as markdown."""
         return format_images_markdown(self.images, self.alt, self.get("preview"))

-    def get(self, key: str):
+    def get(self, key: str) -> any:
+        """Get an option value by key."""
         return self.options.get(key)

-    def get_list(self) -> list[str]:
+    def get_list(self) -> List[str]:
+        """Return images as a list."""
         return [self.images] if isinstance(self.images, str) else self.images

 class ImagePreview(ImageResponse):
-    def __str__(self):
+    def __str__(self) -> str:
+        """Return an empty string for preview."""
         return ""

-    def to_string(self):
+    def to_string(self) -> str:
+        """Return images as markdown."""
         return super().__str__()

 class PreviewResponse(HiddenResponse):
-    def __init__(self, data: str):
+    def __init__(self, data: str) -> None:
+        """Initialize with data."""
         self.data = data

-    def to_string(self):
+    def to_string(self) -> str:
+        """Return data as a string."""
         return self.data

 class Parameters(ResponseType, JsonMixin):
-    def __str__(self):
+    def __str__(self) -> str:
+        """Return an empty string."""
         return ""

 class ProviderInfo(JsonMixin, HiddenResponse):
     pass
@@ -5,7 +5,7 @@ import json
 import asyncio
 import time
 from pathlib import Path
-from typing import Optional, Callable, AsyncIterator
+from typing import Optional, Callable, AsyncIterator, Dict, Any, Tuple, List, Union

 from ..typing import Messages
 from ..providers.helper import filter_none
@@ -17,176 +17,289 @@ from .web_search import do_search, get_search_message
 from .files import read_bucket, get_bucket_dir
 from .. import debug

+# Constants
 BUCKET_INSTRUCTIONS = """
Instruction: Make sure to add the sources of cites using [[domain]](Url) notation after the reference. Example: [[a-z0-9.]](http://example.com)
"""

-def validate_arguments(data: dict) -> dict:
-    if "arguments" in data:
-        if isinstance(data["arguments"], str):
-            data["arguments"] = json.loads(data["arguments"])
-        if not isinstance(data["arguments"], dict):
-            raise ValueError("Tool function arguments must be a dictionary or a json string")
-        else:
-            return filter_none(**data["arguments"])
-    else:
-        return {}
-
-def get_api_key_file(cls) -> Path:
-    return Path(get_cookies_dir()) / f"api_key_{cls.parent if hasattr(cls, 'parent') else cls.__name__}.json"
-
-async def async_iter_run_tools(provider: ProviderType, model: str, messages, tool_calls: Optional[list] = None, **kwargs):
-    # Handle web_search from kwargs
-    web_search = kwargs.get('web_search')
-    sources = None
-    if web_search:
-        try:
-            messages = messages.copy()
-            web_search = web_search if isinstance(web_search, str) and web_search != "true" else None
-            messages[-1]["content"], sources = await do_search(messages[-1]["content"], web_search)
-        except Exception as e:
-            debug.error(f"Couldn't do web search: {e.__class__.__name__}: {e}")
-            # Keep web_search in kwargs for provider native support
-            pass
-
-    # Read api_key from config file
-    if getattr(provider, "needs_auth", False) and "api_key" not in kwargs:
-        auth_file = get_api_key_file(provider)
-        if auth_file.exists():
-            with auth_file.open("r") as f:
-                auth_result = json.load(f)
-            if "api_key" in auth_result:
-                kwargs["api_key"] = auth_result["api_key"]
-
-    if tool_calls is not None:
-        for tool in tool_calls:
-            if tool.get("type") == "function":
-                if tool.get("function", {}).get("name") == "search_tool":
-                    tool["function"]["arguments"] = validate_arguments(tool["function"])
-                    messages = messages.copy()
-                    messages[-1]["content"] = await do_search(
-                        messages[-1]["content"],
-                        **tool["function"]["arguments"]
-                    )
-                elif tool.get("function", {}).get("name") == "continue":
-                    last_line = messages[-1]["content"].strip().splitlines()[-1]
-                    content = f"Carry on from this point:\n{last_line}"
-                    messages.append({"role": "user", "content": content})
-                elif tool.get("function", {}).get("name") == "bucket_tool":
-                    def on_bucket(match):
-                        return "".join(read_bucket(get_bucket_dir(match.group(1))))
-                    has_bucket = False
-                    for message in messages:
-                        if "content" in message and isinstance(message["content"], str):
-                            new_message_content = re.sub(r'{"bucket_id":"([^"]*)"}', on_bucket, message["content"])
-                            if new_message_content != message["content"]:
-                                has_bucket = True
-                                message["content"] = new_message_content
-                    if has_bucket and isinstance(messages[-1]["content"], str):
-                        if "\nSource: " in messages[-1]["content"]:
-                            messages[-1]["content"] += BUCKET_INSTRUCTIONS
-
-    create_function = provider.get_async_create_function()
-    response = to_async_iterator(create_function(model=model, messages=messages, **kwargs))
-    async for chunk in response:
-        yield chunk
-    if sources is not None:
-        yield sources
-
-def process_thinking_chunk(chunk: str, start_time: float = 0) -> tuple[float, list]:
-    """Process a thinking chunk and return timing and results."""
-    results = []
-
-    # Handle non-thinking chunk
-    if not start_time and "<think>" not in chunk:
-        return 0, [chunk]
-
-    # Handle thinking start
-    if "<think>" in chunk and not "`<think>`" in chunk:
-        before_think, *after = chunk.split("<think>", 1)
-
-        if before_think:
-            results.append(before_think)
-
-        results.append(Reasoning(status="🤔 Is thinking...", is_thinking="<think>"))
-
-        if after and after[0]:
-            results.append(Reasoning(after[0]))
-
-        return time.time(), results
-
-    # Handle thinking end
-    if "</think>" in chunk:
-        before_end, *after = chunk.split("</think>", 1)
-
-        if before_end:
-            results.append(Reasoning(before_end))
-
-        thinking_duration = time.time() - start_time if start_time > 0 else 0
-
-        status = f"Thought for {thinking_duration:.2f}s" if thinking_duration > 1 else "Finished"
-        results.append(Reasoning(status=status, is_thinking="</think>"))
-
-        if after and after[0]:
-            results.append(after[0])
-
-        return 0, results
-
-    # Handle ongoing thinking
-    if start_time:
-        return start_time, [Reasoning(chunk)]
-
-    return start_time, [chunk]
+TOOL_NAMES = {
+    "SEARCH": "search_tool",
+    "CONTINUE": "continue_tool",
+    "BUCKET": "bucket_tool"
+}
+
+class ToolHandler:
+    """Handles processing of different tool types"""
+
+    @staticmethod
+    def validate_arguments(data: dict) -> dict:
+        """Validate and parse tool arguments"""
+        if "arguments" in data:
+            if isinstance(data["arguments"], str):
+                data["arguments"] = json.loads(data["arguments"])
+            if not isinstance(data["arguments"], dict):
+                raise ValueError("Tool function arguments must be a dictionary or a json string")
+            else:
+                return filter_none(**data["arguments"])
+        else:
+            return {}
+
+    @staticmethod
+    async def process_search_tool(messages: Messages, tool: dict) -> Messages:
+        """Process search tool requests"""
+        messages = messages.copy()
+        args = ToolHandler.validate_arguments(tool["function"])
+        messages[-1]["content"] = await do_search(
+            messages[-1]["content"],
+            **args
+        )
+        return messages
+
+    @staticmethod
+    def process_continue_tool(messages: Messages, tool: dict, provider: Any) -> Tuple[Messages, Dict[str, Any]]:
+        """Process continue tool requests"""
+        kwargs = {}
+        if provider not in ("OpenaiAccount", "HuggingFace"):
+            messages = messages.copy()
+            last_line = messages[-1]["content"].strip().splitlines()[-1]
+            content = f"Carry on from this point:\n{last_line}"
+            messages.append({"role": "user", "content": content})
+        else:
+            # Enable provider native continue
+            kwargs["action"] = "continue"
+        return messages, kwargs
+
+    @staticmethod
+    def process_bucket_tool(messages: Messages, tool: dict) -> Messages:
+        """Process bucket tool requests"""
+        messages = messages.copy()
+
+        def on_bucket(match):
+            return "".join(read_bucket(get_bucket_dir(match.group(1))))
+
+        has_bucket = False
+        for message in messages:
+            if "content" in message and isinstance(message["content"], str):
+                new_message_content = re.sub(r'{"bucket_id":"([^"]*)"}', on_bucket, message["content"])
+                if new_message_content != message["content"]:
+                    has_bucket = True
+                    message["content"] = new_message_content
+
+        if has_bucket and isinstance(messages[-1]["content"], str):
+            if "\nSource: " in messages[-1]["content"]:
+                if isinstance(messages[-1]["content"], dict):
+                    messages[-1]["content"]["content"] += BUCKET_INSTRUCTIONS
+                else:
+                    messages[-1]["content"] += BUCKET_INSTRUCTIONS
+
+        return messages
+
+    @staticmethod
+    async def process_tools(messages: Messages, tool_calls: List[dict], provider: Any) -> Tuple[Messages, Dict[str, Any]]:
+        """Process all tool calls and return updated messages and kwargs"""
+        if not tool_calls:
+            return messages, {}
+
+        extra_kwargs = {}
+        messages = messages.copy()
+
+        for tool in tool_calls:
+            if tool.get("type") != "function":
+                continue
+
+            function_name = tool.get("function", {}).get("name")
+
+            if function_name == TOOL_NAMES["SEARCH"]:
+                messages = await ToolHandler.process_search_tool(messages, tool)
+
+            elif function_name == TOOL_NAMES["CONTINUE"]:
+                messages, kwargs = ToolHandler.process_continue_tool(messages, tool, provider)
+                extra_kwargs.update(kwargs)
+
+            elif function_name == TOOL_NAMES["BUCKET"]:
+                messages = ToolHandler.process_bucket_tool(messages, tool)
+
+        return messages, extra_kwargs
+
+class AuthManager:
+    """Handles API key management"""
+
+    @staticmethod
+    def get_api_key_file(cls) -> Path:
+        """Get the path to the API key file for a provider"""
+        return Path(get_cookies_dir()) / f"api_key_{cls.parent if hasattr(cls, 'parent') else cls.__name__}.json"
+
+    @staticmethod
+    def load_api_key(provider: Any) -> Optional[str]:
+        """Load API key from config file if needed"""
+        if not getattr(provider, "needs_auth", False):
+            return None
+
+        auth_file = AuthManager.get_api_key_file(provider)
+        try:
+            if auth_file.exists():
+                with auth_file.open("r") as f:
+                    auth_result = json.load(f)
+                return auth_result.get("api_key")
+        except (json.JSONDecodeError, PermissionError, FileNotFoundError) as e:
+            debug.error(f"Failed to load API key: {e.__class__.__name__}: {e}")
+        return None
+
+class ThinkingProcessor:
+    """Processes thinking chunks"""
+
+    @staticmethod
+    def process_thinking_chunk(chunk: str, start_time: float = 0) -> Tuple[float, List[Union[str, Reasoning]]]:
+        """Process a thinking chunk and return timing and results."""
+        results = []
+
+        # Handle non-thinking chunk
+        if not start_time and "<think>" not in chunk:
+            return 0, [chunk]
+
+        # Handle thinking start
+        if "<think>" in chunk and "`<think>`" not in chunk:
+            before_think, *after = chunk.split("<think>", 1)
+
+            if before_think:
+                results.append(before_think)
+
+            results.append(Reasoning(status="🤔 Is thinking...", is_thinking="<think>"))
+
+            if after and after[0]:
+                results.append(Reasoning(after[0]))
+
+            return time.time(), results
+
+        # Handle thinking end
+        if "</think>" in chunk:
+            before_end, *after = chunk.split("</think>", 1)
+
+            if before_end:
+                results.append(Reasoning(before_end))
+
+            thinking_duration = time.time() - start_time if start_time > 0 else 0
+
+            status = f"Thought for {thinking_duration:.2f}s" if thinking_duration > 1 else "Finished"
+            results.append(Reasoning(status=status, is_thinking="</think>"))
+
+            if after and after[0]:
+                results.append(after[0])
+
+            return 0, results
+
+        # Handle ongoing thinking
+        if start_time:
+            return start_time, [Reasoning(chunk)]
+
+        return start_time, [chunk]
+
+async def perform_web_search(messages: Messages, web_search_param: Any) -> Tuple[Messages, Optional[Sources]]:
+    """Perform web search and return updated messages and sources"""
+    messages = messages.copy()
+    sources = None
+
+    if not web_search_param:
+        return messages, sources
+
+    try:
+        search_query = web_search_param if isinstance(web_search_param, str) and web_search_param != "true" else None
+        messages[-1]["content"], sources = await do_search(messages[-1]["content"], search_query)
+    except Exception as e:
+        debug.error(f"Couldn't do web search: {e.__class__.__name__}: {e}")
+
+    return messages, sources
+
+async def async_iter_run_tools(
+    provider: ProviderType,
+    model: str,
+    messages: Messages,
+    tool_calls: Optional[List[dict]] = None,
+    **kwargs
+) -> AsyncIterator:
+    """Asynchronously run tools and yield results"""
+    # Process web search
+    sources = None
+    web_search = kwargs.get('web_search')
+    if web_search:
+        messages, sources = await perform_web_search(messages, web_search)
+
+    # Get API key if needed
+    api_key = AuthManager.load_api_key(provider)
+    if api_key and "api_key" not in kwargs:
+        kwargs["api_key"] = api_key
+
+    # Process tool calls
+    if tool_calls:
+        messages, extra_kwargs = await ToolHandler.process_tools(messages, tool_calls, provider)
+        kwargs.update(extra_kwargs)
+
+    # Generate response
+    create_function = provider.get_async_create_function()
+    response = to_async_iterator(create_function(model=model, messages=messages, **kwargs))

+    async for chunk in response:
+        yield chunk
+
+    # Yield sources if available
+    if sources:
+        yield sources

 def iter_run_tools(
     iter_callback: Callable,
     model: str,
     messages: Messages,
     provider: Optional[str] = None,
-    tool_calls: Optional[list] = None,
+    tool_calls: Optional[List[dict]] = None,
     **kwargs
 ) -> AsyncIterator:
-    # Handle web_search from kwargs
+    """Run tools synchronously and yield results"""
+    # Process web search
     web_search = kwargs.get('web_search')
     sources = None

     if web_search:
         try:
             messages = messages.copy()
-            web_search = web_search if isinstance(web_search, str) and web_search != "true" else None
-            messages[-1]["content"], sources = asyncio.run(do_search(messages[-1]["content"], web_search))
+            search_query = web_search if isinstance(web_search, str) and web_search != "true" else None
+            # Note: Using asyncio.run inside sync function is not ideal, but maintaining original pattern
+            messages[-1]["content"], sources = asyncio.run(do_search(messages[-1]["content"], search_query))
         except Exception as e:
             debug.error(f"Couldn't do web search: {e.__class__.__name__}: {e}")
             # Keep web_search in kwargs for provider native support
             pass

-    # Read api_key from config file
-    if provider is not None and provider.needs_auth and "api_key" not in kwargs:
-        auth_file = get_api_key_file(provider)
-        if auth_file.exists():
-            with auth_file.open("r") as f:
-                auth_result = json.load(f)
-            if "api_key" in auth_result:
-                kwargs["api_key"] = auth_result["api_key"]
-
-    if tool_calls is not None:
+    # Get API key if needed
+    if provider is not None and getattr(provider, "needs_auth", False) and "api_key" not in kwargs:
+        api_key = AuthManager.load_api_key(provider)
+        if api_key:
+            kwargs["api_key"] = api_key
+
+    # Process tool calls
+    if tool_calls:
         for tool in tool_calls:
             if tool.get("type") == "function":
-                if tool.get("function", {}).get("name") == "search_tool":
-                    tool["function"]["arguments"] = validate_arguments(tool["function"])
+                function_name = tool.get("function", {}).get("name")
+
+                if function_name == TOOL_NAMES["SEARCH"]:
+                    tool["function"]["arguments"] = ToolHandler.validate_arguments(tool["function"])
                     messages[-1]["content"] = get_search_message(
                         messages[-1]["content"],
                         raise_search_exceptions=True,
                         **tool["function"]["arguments"]
                     )
-                elif tool.get("function", {}).get("name") == "continue_tool":
+                elif function_name == TOOL_NAMES["CONTINUE"]:
                     if provider not in ("OpenaiAccount", "HuggingFace"):
                         last_line = messages[-1]["content"].strip().splitlines()[-1]
                         content = f"Carry on from this point:\n{last_line}"
                         messages.append({"role": "user", "content": content})
                     else:
                         # Enable provider native continue
-                        if "action" not in kwargs:
-                            kwargs["action"] = "continue"
-                elif tool.get("function", {}).get("name") == "bucket_tool":
+                        kwargs["action"] = "continue"
+                elif function_name == TOOL_NAMES["BUCKET"]:
                     def on_bucket(match):
                         return "".join(read_bucket(get_bucket_dir(match.group(1))))
                     has_bucket = False
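A small driver for the `<think>` protocol handled above (assuming the module is importable as `g4f.tools.run_tools`, per this commit's file layout). It shows how streamed chunks are split into plain text and `Reasoning` objects:

```python
from g4f.tools.run_tools import ThinkingProcessor

start = 0
for chunk in ["Hello <think>step 1", " step 2", "</think> done"]:
    # Returns the (possibly updated) thinking start time plus the results
    # to yield: plain strings outside <think>, Reasoning objects inside.
    start, results = ThinkingProcessor.process_thinking_chunk(chunk, start)
    for result in results:
        print(type(result).__name__, repr(str(result)))
```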
@@ -199,8 +312,11 @@ def iter_run_tools(
                     if has_bucket and isinstance(messages[-1]["content"], str):
                         if "\nSource: " in messages[-1]["content"]:
                             messages[-1]["content"] = messages[-1]["content"] + BUCKET_INSTRUCTIONS

+    # Process response chunks
     thinking_start_time = 0
+    processor = ThinkingProcessor()

     for chunk in iter_callback(model=model, messages=messages, provider=provider, **kwargs):
         if isinstance(chunk, FinishReason):
             if sources is not None:
@@ -214,10 +330,10 @@ def iter_run_tools(
             yield chunk
             continue

-        thinking_start_time, results = process_thinking_chunk(chunk, thinking_start_time)
+        thinking_start_time, results = processor.process_thinking_chunk(chunk, thinking_start_time)

         for result in results:
             yield result

     if sources is not None:
         yield sources
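For orientation, a hedged sketch of what a `tool_calls` payload for the bucket tool looks like (the bucket id `"my-upload"` is hypothetical). The message text embeds a `{"bucket_id": "..."}` marker, which `process_bucket_tool` expands by reading the referenced bucket from disk via `read_bucket(get_bucket_dir(...))`:

```python
tool_calls = [{
    "type": "function",
    "function": {"name": "bucket_tool"},   # TOOL_NAMES["BUCKET"]
}]
messages = [{
    "role": "user",
    "content": 'Summarize this file: {"bucket_id":"my-upload"}',
}]
# These would then be passed to
# async_iter_run_tools(provider, model, messages, tool_calls=tool_calls)
# or the synchronous iter_run_tools equivalent shown above.
```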