Update (docs/providers-and-models.md g4f/models.py g4f/gui/client/index.html g4f/Provider/Cloudflare.py)

This commit is contained in:
kqlio67 2024-11-06 13:56:33 +02:00
parent 3da7a14a72
commit 8e1a544d55
4 changed files with 52 additions and 110 deletions

View file

@@ -38,7 +38,7 @@ This document provides an overview of various AI providers and models, including
|[chatgpt4o.one](https://chatgpt4o.one)|`g4f.Provider.Chatgpt4o`|✔|❌|❌|❌|![Disabled](https://img.shields.io/badge/Disabled-red)![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[chatgptfree.ai](https://chatgptfree.ai)|`g4f.Provider.ChatgptFree`|`gpt-4o-mini`|❌|❌|?|![Disabled](https://img.shields.io/badge/Disabled-red)![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌|
|[chatify-ai.vercel.app](https://chatify-ai.vercel.app)|`g4f.Provider.ChatifyAI`|`llama-3.1-8b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[playground.ai.cloudflare.com](https://playground.ai.cloudflare.com)|`g4f.Provider.Cloudflare`|`german-7b, gemma-7b, llama-2-7b, llama-3-8b, llama-3.1-8b, llama-3.2-11b, llama-3.2-1b, llama-3.2-3b, mistral-7b, openchat-3.5, phi-2, qwen-1.5-0.5b, qwen-1.5-1.8b, qwen-1.5-14b, qwen-1.5-7b, tinyllama-1.1b, cybertron-7b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[playground.ai.cloudflare.com](https://playground.ai.cloudflare.com)|`g4f.Provider.Cloudflare`|`gemma-7b, llama-2-7b, llama-3-8b, llama-3.1-8b, llama-3.2-1b, phi-2, qwen-1.5-0-5b, qwen-1.5-8b, qwen-1.5-14b, qwen-1.5-7b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[aiuncensored.info](https://www.aiuncensored.info)|`g4f.Provider.DarkAI`|`gpt-4o, gpt-3.5-turbo, llama-3-70b, llama-3-405b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[duckduckgo.com](https://duckduckgo.com/duckchat/v1/chat)|`g4f.Provider.DDG`|`gpt-4o-mini, claude-3-haiku, llama-3.1-70b, mixtral-8x7b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[deepinfra.com](https://deepinfra.com)|`g4f.Provider.DeepInfra`|✔|❌|❌|✔|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔|

View file

@@ -5,11 +5,14 @@ import json
import uuid
import cloudscraper
from typing import AsyncGenerator
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
label = "Cloudflare AI"
url = "https://playground.ai.cloudflare.com"
api_endpoint = "https://playground.ai.cloudflare.com/api/inference"
working = True
@@ -17,39 +20,27 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
supports_system_message = True
supports_message_history = True
default_model = '@cf/meta/llama-3.1-8b-instruct'
default_model = '@cf/meta/llama-3.1-8b-instruct-awq'
models = [
'@cf/deepseek-ai/deepseek-math-7b-instruct', # Specific answer
'@cf/thebloke/discolm-german-7b-v1-awq',
'@cf/tiiuae/falcon-7b-instruct', # Specific answer
'@hf/google/gemma-7b-it',
'@cf/meta/llama-2-7b-chat-fp16',
'@cf/meta/llama-2-7b-chat-int8',
'@cf/meta/llama-3-8b-instruct',
'@cf/meta/llama-3-8b-instruct-awq',
default_model,
'@hf/meta-llama/meta-llama-3-8b-instruct',
'@cf/meta/llama-3.1-8b-instruct-awq',
default_model,
'@cf/meta/llama-3.1-8b-instruct-fp8',
'@cf/meta/llama-3.2-11b-vision-instruct',
'@cf/meta/llama-3.2-1b-instruct',
'@cf/meta/llama-3.2-3b-instruct',
'@cf/mistral/mistral-7b-instruct-v0.1',
'@hf/mistral/mistral-7b-instruct-v0.2',
'@cf/openchat/openchat-3.5-0106',
'@cf/microsoft/phi-2',
'@cf/qwen/qwen1.5-0.5b-chat',
@@ -57,57 +48,34 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
'@cf/qwen/qwen1.5-14b-chat-awq',
'@cf/qwen/qwen1.5-7b-chat-awq',
'@cf/defog/sqlcoder-7b-2', # Specific answer
'@cf/tinyllama/tinyllama-1.1b-chat-v1.0',
'@cf/fblgit/una-cybertron-7b-v2-bf16',
'@cf/defog/sqlcoder-7b-2',
]
model_aliases = {
"german-7b-v1": "@cf/thebloke/discolm-german-7b-v1-awq",
#"falcon-7b": "@cf/tiiuae/falcon-7b-instruct",
"gemma-7b": "@hf/google/gemma-7b-it",
"llama-2-7b": "@cf/meta/llama-2-7b-chat-fp16",
"llama-2-7b": "@cf/meta/llama-2-7b-chat-int8",
"llama-3-8b": "@cf/meta/llama-3-8b-instruct",
"llama-3-8b": "@cf/meta/llama-3-8b-instruct-awq",
"llama-3-8b": "@cf/meta/llama-3.1-8b-instruct",
"llama-3-8b": "@hf/meta-llama/meta-llama-3-8b-instruct",
"llama-3.1-8b": "@cf/meta/llama-3.1-8b-instruct-awq",
"llama-3.1-8b": "@cf/meta/llama-3.1-8b-instruct-fp8",
"llama-3.1-8b": "@cf/meta/llama-3.1-8b-instruct-fp8",
"llama-3.2-11b": "@cf/meta/llama-3.2-11b-vision-instruct",
"llama-3.2-1b": "@cf/meta/llama-3.2-1b-instruct",
"llama-3.2-3b": "@cf/meta/llama-3.2-3b-instruct",
"mistral-7b": "@cf/mistral/mistral-7b-instruct-v0.1",
"mistral-7b": "@hf/mistral/mistral-7b-instruct-v0.2",
"openchat-3.5": "@cf/openchat/openchat-3.5-0106",
"phi-2": "@cf/microsoft/phi-2",
"qwen-1.5-0.5b": "@cf/qwen/qwen1.5-0.5b-chat",
"qwen-1.5-1.8b": "@cf/qwen/qwen1.5-1.8b-chat",
"qwen-1.5-0-5b": "@cf/qwen/qwen1.5-0.5b-chat",
"qwen-1.5-1-8b": "@cf/qwen/qwen1.5-1.8b-chat",
"qwen-1.5-14b": "@cf/qwen/qwen1.5-14b-chat-awq",
"qwen-1.5-7b": "@cf/qwen/qwen1.5-7b-chat-awq",
"tinyllama-1.1b": "@cf/tinyllama/tinyllama-1.1b-chat-v1.0",
"cybertron-7b": "@cf/fblgit/una-cybertron-7b-v2-bf16",
#"sqlcoder-7b": "@cf/defog/sqlcoder-7b-2",
}
@classmethod
@@ -125,8 +93,6 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
model: str,
messages: Messages,
proxy: str = None,
max_tokens: str = 2048,
stream: bool = True,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
@@ -154,19 +120,17 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
scraper = cloudscraper.create_scraper()
prompt = format_prompt(messages)
data = {
"messages": [
{"role": "system", "content": "You are a helpful assistant"},
{"role": "user", "content": prompt}
{"role": "user", "content": format_prompt(messages)}
],
"lora": None,
"model": model,
"max_tokens": max_tokens,
"stream": stream
"max_tokens": 2048,
"stream": True
}
max_retries = 3
max_retries = 5
for attempt in range(max_retries):
try:
response = scraper.post(
@@ -174,8 +138,7 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
headers=headers,
cookies=cookies,
json=data,
stream=True,
proxies={'http': proxy, 'https': proxy} if proxy else None
stream=True
)
if response.status_code == 403:
@@ -184,29 +147,23 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
response.raise_for_status()
skip_tokens = ["</s>", "<s>", "[DONE]", "<|endoftext|>", "<|end|>"]
filtered_response = ""
for line in response.iter_lines():
if line.startswith(b'data: '):
if line == b'data: [DONE]':
break
try:
content = json.loads(line[6:].decode('utf-8'))['response']
yield content
content = json.loads(line[6:].decode('utf-8'))
response_text = content['response']
if not any(token in response_text for token in skip_tokens):
filtered_response += response_text
except Exception:
continue
yield filtered_response.strip()
break
except Exception as e:
if attempt == max_retries - 1:
raise
@classmethod
async def create_async(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> str:
full_response = ""
async for response in cls.create_async_generator(model, messages, proxy, **kwargs):
full_response += response
return full_response

View file

@@ -249,7 +249,6 @@
<option value="MetaAI">Meta AI</option>
<option value="DeepInfraChat">DeepInfraChat</option>
<option value="Blackbox">Blackbox</option>
<option value="HuggingChat">HuggingChat</option>
<option value="DDG">DDG</option>
<option value="Pizzagpt">Pizzagpt</option>
<option value="">----</option>

View file

@@ -238,13 +238,13 @@ llama_3_2_1b = Model(
llama_3_2_3b = Model(
name = "llama-3.2-3b",
base_provider = "Meta Llama",
best_provider = IterListProvider([Cloudflare, Airforce])
best_provider = IterListProvider([Airforce])
)
llama_3_2_11b = Model(
name = "llama-3.2-11b",
base_provider = "Meta Llama",
best_provider = IterListProvider([Cloudflare, HuggingChat, Airforce, HuggingFace])
best_provider = IterListProvider([HuggingChat, Airforce, HuggingFace])
)
llama_3_2_90b = Model(
@@ -284,7 +284,7 @@ llamaguard_3_11b = Model(
mistral_7b = Model(
name = "mistral-7b",
base_provider = "Mistral",
best_provider = IterListProvider([DeepInfraChat, Cloudflare, Airforce, DeepInfra])
best_provider = IterListProvider([DeepInfraChat, Airforce, DeepInfra])
)
mixtral_8x7b = Model(
@@ -479,9 +479,9 @@ sparkdesk_v1_1 = Model(
### Qwen ###
# qwen 1
qwen_1_5_0_5b = Model(
name = 'qwen-1.5-0.5b',
# qwen 1_5
qwen_1_5_5b = Model(
name = 'qwen-1.5-5b',
base_provider = 'Qwen',
best_provider = Cloudflare
)
@@ -489,13 +489,19 @@ qwen_1_5_0_5b = Model(
qwen_1_5_7b = Model(
name = 'qwen-1.5-7b',
base_provider = 'Qwen',
best_provider = IterListProvider([Cloudflare])
best_provider = Cloudflare
)
qwen_1_5_8b = Model(
name = 'qwen-1.5-8b',
base_provider = 'Qwen',
best_provider = Cloudflare
)
qwen_1_5_14b = Model(
name = 'qwen-1.5-14b',
base_provider = 'Qwen',
best_provider = IterListProvider([FreeChatgpt, Cloudflare])
best_provider = IterListProvider([Cloudflare, FreeChatgpt])
)
# qwen 2
@@ -617,12 +623,6 @@ lzlv_70b = Model(
### OpenChat ###
openchat_3_5 = Model(
name = 'openchat-3.5',
base_provider = 'OpenChat',
best_provider = IterListProvider([Cloudflare])
)
openchat_3_6_8b = Model(
name = 'openchat-3.6-8b',
base_provider = 'OpenChat',
@@ -673,22 +673,6 @@ sonar_chat = Model(
best_provider = PerplexityLabs
)
### TheBloke ###
german_7b = Model(
name = 'german-7b',
base_provider = 'TheBloke',
best_provider = Cloudflare
)
### Fblgit ###
cybertron_7b = Model(
name = 'cybertron-7b',
base_provider = 'Fblgit',
best_provider = Cloudflare
)
### Nvidia ###
nemotron_70b = Model(
name = 'nemotron-70b',
@@ -1024,10 +1008,17 @@ class ModelUtils:
### Qwen ###
'qwen': qwen,
'qwen-1.5-0.5b': qwen_1_5_0_5b,
# qwen-1.5
'qwen-1.5-5b': qwen_1_5_5b,
'qwen-1.5-7b': qwen_1_5_7b,
'qwen-1.5-8b': qwen_1_5_8b,
'qwen-1.5-14b': qwen_1_5_14b,
# qwen-2
'qwen-2-72b': qwen_2_72b,
# qwen-2-5
'qwen-2-5-7b': qwen_2_5_7b,
'qwen-2-5-72b': qwen_2_5_72b,
@@ -1073,7 +1064,6 @@ class ModelUtils:
### OpenChat ###
'openchat-3.5': openchat_3_5,
'openchat-3.6-8b': openchat_3_6_8b,
@@ -1099,10 +1089,6 @@ class ModelUtils:
'german-7b': german_7b,
### Fblgit ###
'cybertron-7b': cybertron_7b,
### Nvidia ###
'nemotron-70b': nemotron_70b,