Update (docs/providers-and-models.md g4f/models.py g4f/gui/client/index.html g4f/Provider/Cloudflare.py)

Author: kqlio67
Date:   2024-11-06 13:56:33 +02:00
Commit: 8e1a544d55 (parent 3da7a14a72)
4 changed files with 52 additions and 110 deletions

diff --git a/docs/providers-and-models.md b/docs/providers-and-models.md
--- a/docs/providers-and-models.md
+++ b/docs/providers-and-models.md
@@ -38,7 +38,7 @@ This document provides an overview of various AI providers and models, including
 |[chatgpt4o.one](https://chatgpt4o.one)|`g4f.Provider.Chatgpt4o`|✔|❌|❌|❌|![Disabled](https://img.shields.io/badge/Disabled-red)![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
 |[chatgptfree.ai](https://chatgptfree.ai)|`g4f.Provider.ChatgptFree`|`gpt-4o-mini`|❌|❌|?|![Disabled](https://img.shields.io/badge/Disabled-red)![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
 |[chatify-ai.vercel.app](https://chatify-ai.vercel.app)|`g4f.Provider.ChatifyAI`|`llama-3.1-8b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
-|[playground.ai.cloudflare.com](https://playground.ai.cloudflare.com)|`g4f.Provider.Cloudflare`|`german-7b, gemma-7b, llama-2-7b, llama-3-8b, llama-3.1-8b, llama-3.2-11b, llama-3.2-1b, llama-3.2-3b, mistral-7b, openchat-3.5, phi-2, qwen-1.5-0.5b, qwen-1.5-1.8b, qwen-1.5-14b, qwen-1.5-7b, tinyllama-1.1b, cybertron-7b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
+|[playground.ai.cloudflare.com](https://playground.ai.cloudflare.com)|`g4f.Provider.Cloudflare`|`gemma-7b, llama-2-7b, llama-3-8b, llama-3.1-8b, llama-3.2-1b, phi-2, qwen-1.5-0-5b, qwen-1.5-8b, qwen-1.5-14b, qwen-1.5-7b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
 |[aiuncensored.info](https://www.aiuncensored.info)|`g4f.Provider.DarkAI`|`gpt-4o, gpt-3.5-turbo, llama-3-70b, llama-3-405b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
 |[duckduckgo.com](https://duckduckgo.com/duckchat/v1/chat)|`g4f.Provider.DDG`|`gpt-4o-mini, claude-3-haiku, llama-3.1-70b, mixtral-8x7b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
 |[deepinfra.com](https://deepinfra.com)|`g4f.Provider.DeepInfra`|✔|❌|❌|✔|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔|
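Review note: the new model list in this row should stay in sync with `model_aliases` in the provider diff below. A minimal sketch for cross-checking, using only attributes visible in this commit (`default_model`, `models`, `model_aliases`):

```python
# Minimal sketch: cross-check the docs row against the provider's own lists.
from g4f.Provider.Cloudflare import Cloudflare

print(Cloudflare.default_model)          # '@cf/meta/llama-3.1-8b-instruct-awq'
print(sorted(Cloudflare.model_aliases))  # short names, as listed in the table row
print(len(Cloudflare.models))            # full '@cf/...' / '@hf/...' identifiers
```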

diff --git a/g4f/Provider/Cloudflare.py b/g4f/Provider/Cloudflare.py
--- a/g4f/Provider/Cloudflare.py
+++ b/g4f/Provider/Cloudflare.py
@@ -5,11 +5,14 @@ import json
 import uuid
 import cloudscraper
 from typing import AsyncGenerator

 from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import format_prompt

 class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
+    label = "Cloudflare AI"
     url = "https://playground.ai.cloudflare.com"
     api_endpoint = "https://playground.ai.cloudflare.com/api/inference"
     working = True
@@ -17,97 +20,62 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
     supports_system_message = True
     supports_message_history = True
-    default_model = '@cf/meta/llama-3.1-8b-instruct'
+    default_model = '@cf/meta/llama-3.1-8b-instruct-awq'
     models = [
-        '@cf/deepseek-ai/deepseek-math-7b-instruct', # Specific answer
-        '@cf/thebloke/discolm-german-7b-v1-awq',
         '@cf/tiiuae/falcon-7b-instruct', # Specific answer
         '@hf/google/gemma-7b-it',
         '@cf/meta/llama-2-7b-chat-fp16',
         '@cf/meta/llama-2-7b-chat-int8',
         '@cf/meta/llama-3-8b-instruct',
         '@cf/meta/llama-3-8b-instruct-awq',
-        default_model,
         '@hf/meta-llama/meta-llama-3-8b-instruct',
-        '@cf/meta/llama-3.1-8b-instruct-awq',
+        default_model,
         '@cf/meta/llama-3.1-8b-instruct-fp8',
-        '@cf/meta/llama-3.2-11b-vision-instruct',
         '@cf/meta/llama-3.2-1b-instruct',
-        '@cf/meta/llama-3.2-3b-instruct',
-        '@cf/mistral/mistral-7b-instruct-v0.1',
         '@hf/mistral/mistral-7b-instruct-v0.2',
-        '@cf/openchat/openchat-3.5-0106',
         '@cf/microsoft/phi-2',
         '@cf/qwen/qwen1.5-0.5b-chat',
         '@cf/qwen/qwen1.5-1.8b-chat',
         '@cf/qwen/qwen1.5-14b-chat-awq',
         '@cf/qwen/qwen1.5-7b-chat-awq',
-        '@cf/defog/sqlcoder-7b-2', # Specific answer
-        '@cf/tinyllama/tinyllama-1.1b-chat-v1.0',
-        '@cf/fblgit/una-cybertron-7b-v2-bf16',
+        '@cf/defog/sqlcoder-7b-2',
     ]

     model_aliases = {
-        "german-7b-v1": "@cf/thebloke/discolm-german-7b-v1-awq",
+        #"falcon-7b": "@cf/tiiuae/falcon-7b-instruct",
         "gemma-7b": "@hf/google/gemma-7b-it",
         "llama-2-7b": "@cf/meta/llama-2-7b-chat-fp16",
         "llama-2-7b": "@cf/meta/llama-2-7b-chat-int8",
         "llama-3-8b": "@cf/meta/llama-3-8b-instruct",
         "llama-3-8b": "@cf/meta/llama-3-8b-instruct-awq",
-        "llama-3-8b": "@cf/meta/llama-3.1-8b-instruct",
         "llama-3-8b": "@hf/meta-llama/meta-llama-3-8b-instruct",
         "llama-3.1-8b": "@cf/meta/llama-3.1-8b-instruct-awq",
         "llama-3.1-8b": "@cf/meta/llama-3.1-8b-instruct-fp8",
-        "llama-3.1-8b": "@cf/meta/llama-3.1-8b-instruct-fp8",
-        "llama-3.2-11b": "@cf/meta/llama-3.2-11b-vision-instruct",
         "llama-3.2-1b": "@cf/meta/llama-3.2-1b-instruct",
-        "llama-3.2-3b": "@cf/meta/llama-3.2-3b-instruct",
-        "mistral-7b": "@cf/mistral/mistral-7b-instruct-v0.1",
-        "mistral-7b": "@hf/mistral/mistral-7b-instruct-v0.2",
-        "openchat-3.5": "@cf/openchat/openchat-3.5-0106",
         "phi-2": "@cf/microsoft/phi-2",
-        "qwen-1.5-0.5b": "@cf/qwen/qwen1.5-0.5b-chat",
-        "qwen-1.5-1.8b": "@cf/qwen/qwen1.5-1.8b-chat",
+        "qwen-1.5-0-5b": "@cf/qwen/qwen1.5-0.5b-chat",
+        "qwen-1.5-1-8b": "@cf/qwen/qwen1.5-1.8b-chat",
         "qwen-1.5-14b": "@cf/qwen/qwen1.5-14b-chat-awq",
         "qwen-1.5-7b": "@cf/qwen/qwen1.5-7b-chat-awq",
-        "tinyllama-1.1b": "@cf/tinyllama/tinyllama-1.1b-chat-v1.0",
-        "cybertron-7b": "@cf/fblgit/una-cybertron-7b-v2-bf16",
+        #"sqlcoder-7b": "@cf/defog/sqlcoder-7b-2",
     }

     @classmethod
@@ -125,8 +93,6 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
         model: str,
         messages: Messages,
         proxy: str = None,
-        max_tokens: str = 2048,
-        stream: bool = True,
         **kwargs
     ) -> AsyncResult:
         model = cls.get_model(model)
@@ -154,19 +120,17 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
         scraper = cloudscraper.create_scraper()

-        prompt = format_prompt(messages)
         data = {
             "messages": [
-                {"role": "system", "content": "You are a helpful assistant"},
-                {"role": "user", "content": prompt}
+                {"role": "user", "content": format_prompt(messages)}
             ],
             "lora": None,
             "model": model,
-            "max_tokens": max_tokens,
-            "stream": stream
+            "max_tokens": 2048,
+            "stream": True
         }

-        max_retries = 3
+        max_retries = 5
         for attempt in range(max_retries):
             try:
                 response = scraper.post(
@@ -174,8 +138,7 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
                     headers=headers,
                     cookies=cookies,
                     json=data,
-                    stream=True,
-                    proxies={'http': proxy, 'https': proxy} if proxy else None
+                    stream=True
                 )

                 if response.status_code == 403:
@@ -184,29 +147,23 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
                 response.raise_for_status()

+                skip_tokens = ["</s>", "<s>", "[DONE]", "<|endoftext|>", "<|end|>"]
+                filtered_response = ""
+
                 for line in response.iter_lines():
                     if line.startswith(b'data: '):
                         if line == b'data: [DONE]':
                             break
                         try:
-                            content = json.loads(line[6:].decode('utf-8'))['response']
-                            yield content
+                            content = json.loads(line[6:].decode('utf-8'))
+                            response_text = content['response']
+                            if not any(token in response_text for token in skip_tokens):
+                                filtered_response += response_text
                         except Exception:
                             continue
+
+                yield filtered_response.strip()
                 break
             except Exception as e:
                 if attempt == max_retries - 1:
                     raise
-
-    @classmethod
-    async def create_async(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> str:
-        full_response = ""
-        async for response in cls.create_async_generator(model, messages, proxy, **kwargs):
-            full_response += response
-        return full_response
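Review note: with `create_async` removed, the provider is driven entirely through `create_async_generator`, which after this change buffers the SSE stream and yields one filtered string per request rather than incremental chunks. A minimal sketch of a direct call, assuming the package layout shown in this diff:

```python
import asyncio

from g4f.Provider.Cloudflare import Cloudflare

async def main():
    # After this commit the generator buffers the stream internally and
    # yields a single filtered string instead of incremental chunks.
    async for text in Cloudflare.create_async_generator(
        model="llama-3.1-8b",  # alias resolves to '@cf/meta/llama-3.1-8b-instruct-awq'
        messages=[{"role": "user", "content": "Say hello"}],
    ):
        print(text)

asyncio.run(main())
```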

diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html
--- a/g4f/gui/client/index.html
+++ b/g4f/gui/client/index.html
@@ -249,7 +249,6 @@
                 <option value="MetaAI">Meta AI</option>
                 <option value="DeepInfraChat">DeepInfraChat</option>
                 <option value="Blackbox">Blackbox</option>
-                <option value="HuggingChat">HuggingChat</option>
                 <option value="DDG">DDG</option>
                 <option value="Pizzagpt">Pizzagpt</option>
                 <option value="">----</option>

diff --git a/g4f/models.py b/g4f/models.py
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -238,13 +238,13 @@ llama_3_2_1b = Model(
 llama_3_2_3b = Model(
     name = "llama-3.2-3b",
     base_provider = "Meta Llama",
-    best_provider = IterListProvider([Cloudflare, Airforce])
+    best_provider = IterListProvider([Airforce])
 )

 llama_3_2_11b = Model(
     name = "llama-3.2-11b",
     base_provider = "Meta Llama",
-    best_provider = IterListProvider([Cloudflare, HuggingChat, Airforce, HuggingFace])
+    best_provider = IterListProvider([HuggingChat, Airforce, HuggingFace])
 )

 llama_3_2_90b = Model(
@@ -284,7 +284,7 @@ llamaguard_3_11b = Model(
 mistral_7b = Model(
     name = "mistral-7b",
     base_provider = "Mistral",
-    best_provider = IterListProvider([DeepInfraChat, Cloudflare, Airforce, DeepInfra])
+    best_provider = IterListProvider([DeepInfraChat, Airforce, DeepInfra])
 )

 mixtral_8x7b = Model(
@@ -479,9 +479,9 @@ sparkdesk_v1_1 = Model(
 ### Qwen ###
-# qwen 1
-qwen_1_5_0_5b = Model(
-    name = 'qwen-1.5-0.5b',
+# qwen 1_5
+qwen_1_5_5b = Model(
+    name = 'qwen-1.5-5b',
     base_provider = 'Qwen',
     best_provider = Cloudflare
 )
@@ -489,13 +489,19 @@ qwen_1_5_0_5b = Model(
 qwen_1_5_7b = Model(
     name = 'qwen-1.5-7b',
     base_provider = 'Qwen',
-    best_provider = IterListProvider([Cloudflare])
+    best_provider = Cloudflare
+)
+
+qwen_1_5_8b = Model(
+    name = 'qwen-1.5-8b',
+    base_provider = 'Qwen',
+    best_provider = Cloudflare
 )

 qwen_1_5_14b = Model(
     name = 'qwen-1.5-14b',
     base_provider = 'Qwen',
-    best_provider = IterListProvider([FreeChatgpt, Cloudflare])
+    best_provider = IterListProvider([Cloudflare, FreeChatgpt])
 )

 # qwen 2
@@ -617,12 +623,6 @@ lzlv_70b = Model(
 ### OpenChat ###
-openchat_3_5 = Model(
-    name = 'openchat-3.5',
-    base_provider = 'OpenChat',
-    best_provider = IterListProvider([Cloudflare])
-)
-
 openchat_3_6_8b = Model(
     name = 'openchat-3.6-8b',
     base_provider = 'OpenChat',
@@ -673,22 +673,6 @@ sonar_chat = Model(
     best_provider = PerplexityLabs
 )

-### TheBloke ###
-german_7b = Model(
-    name = 'german-7b',
-    base_provider = 'TheBloke',
-    best_provider = Cloudflare
-)
-
-### Fblgit ###
-cybertron_7b = Model(
-    name = 'cybertron-7b',
-    base_provider = 'Fblgit',
-    best_provider = Cloudflare
-)
-
 ### Nvidia ###
 nemotron_70b = Model(
     name = 'nemotron-70b',
@@ -1024,10 +1008,17 @@ class ModelUtils:
         ### Qwen ###
         'qwen': qwen,
-        'qwen-1.5-0.5b': qwen_1_5_0_5b,
+
+        # qwen-1.5
+        'qwen-1.5-5b': qwen_1_5_5b,
         'qwen-1.5-7b': qwen_1_5_7b,
+        'qwen-1.5-8b': qwen_1_5_8b,
         'qwen-1.5-14b': qwen_1_5_14b,
+
+        # qwen-2
         'qwen-2-72b': qwen_2_72b,
+
+        # qwen-2-5
         'qwen-2-5-7b': qwen_2_5_7b,
         'qwen-2-5-72b': qwen_2_5_72b,
@@ -1073,7 +1064,6 @@ class ModelUtils:
         ### OpenChat ###
-        'openchat-3.5': openchat_3_5,
         'openchat-3.6-8b': openchat_3_6_8b,
@@ -1097,10 +1087,6 @@ class ModelUtils:
-        ### TheBloke ###
-        'german-7b': german_7b,
-
-        ### Fblgit ###
-        'cybertron-7b': cybertron_7b,
-
         ### Nvidia ###
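Review note: the renamed Qwen registry keys can be spot-checked against the model registry. A minimal sketch, assuming `ModelUtils` exposes its string-to-`Model` mapping as a `convert` dict (as elsewhere in g4f); that attribute name is an assumption, since it is not shown in these hunks:

```python
from g4f.models import ModelUtils

# Assumption: ModelUtils stores the mapping shown above as `convert`.
model = ModelUtils.convert['qwen-1.5-8b']   # key added by this commit
print(model.name, model.base_provider)      # qwen-1.5-8b Qwen
print(model.best_provider)                  # the Cloudflare provider class
assert 'qwen-1.5-0.5b' not in ModelUtils.convert  # old key was renamed
```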