Add Llama2 and NoowAi Provider

Heiner Lohaus 2023-10-15 19:10:25 +02:00
parent 9ae3bc4de8
commit c1adfbee8e
8 changed files with 158 additions and 10 deletions


@@ -325,12 +325,12 @@ asyncio.run(run_all())

 ##### Proxy Support:

-All providers support specifying a proxy in the create function.
+All providers support specifying a proxy in the create functions.

 ```py
 import g4f

-response = await g4f.ChatCompletion.create(
+response = g4f.ChatCompletion.create(
     model=g4f.models.default,
     messages=[{"role": "user", "content": "Hello"}],
     proxy="http://host:port",
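The hunk ends before the call closes. For reference, a complete call with a proxy would look roughly like this (a minimal sketch of the pattern, not part of the commit):

```py
import g4f

response = g4f.ChatCompletion.create(
    model=g4f.models.default,
    messages=[{"role": "user", "content": "Hello"}],
    proxy="http://host:port",  # placeholder; substitute your proxy's host and port
)
print(response)
```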


@@ -44,7 +44,7 @@ class ChatForAi(AsyncGeneratorProvider):
                 **kwargs
             },
             "botSettings": {},
-            "prompt": prompt,
+            "prompt": prompt,
             "messages": messages,
             "timestamp": timestamp,
             "sign": generate_signature(timestamp, prompt, conversation_id)

g4f/Provider/Llama2.py Normal file (76 lines added)

@@ -0,0 +1,76 @@
from __future__ import annotations

from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider

# Replicate version hashes for the models served by llama2.ai
models = {
    "7B": {"name": "Llama 2 7B", "version": "d24902e3fa9b698cc208b5e63136c4e26e828659a9f09827ca6ec5bb83014381", "shortened": "7B"},
    "13B": {"name": "Llama 2 13B", "version": "9dff94b1bed5af738655d4a7cbcdcde2bd503aa85c94334fe1f42af7f3dd5ee3", "shortened": "13B"},
    "70B": {"name": "Llama 2 70B", "version": "2796ee9483c3fd7aa2e171d38f4ca12251a30609463dcfd4cd76703f22e96cdf", "shortened": "70B"},
    "Llava": {"name": "Llava 13B", "version": "6bc1c7bb0d2a34e413301fee8f7cc728d2d4e75bfab186aa995f63292bda92fc", "shortened": "Llava"}
}

class Llama2(AsyncGeneratorProvider):
    url = "https://www.llama2.ai"
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        if not model:
            model = "70B"
        if model not in models:
            raise ValueError(f"Model is not supported: {model}")
        version = models[model]["version"]
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
            "Accept": "*/*",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Referer": f"{cls.url}/",
            "Content-Type": "text/plain;charset=UTF-8",
            "Origin": cls.url,
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Pragma": "no-cache",
            "Cache-Control": "no-cache",
            "TE": "trailers"
        }
        async with ClientSession(headers=headers) as session:
            prompt = format_prompt(messages)
            data = {
                "prompt": prompt,
                "version": version,
                "systemPrompt": kwargs.get("system_message", "You are a helpful assistant."),
                "temperature": kwargs.get("temperature", 0.75),
                "topP": kwargs.get("top_p", 0.9),
                "maxTokens": kwargs.get("max_tokens", 1024),
                "image": None
            }
            started = False
            async with session.post(f"{cls.url}/api", json=data, proxy=proxy) as response:
                response.raise_for_status()
                async for chunk in response.content.iter_any():
                    # The response starts with whitespace; strip it from the first chunk only.
                    if not started:
                        chunk = chunk.lstrip()
                        started = True
                    yield chunk.decode()

def format_prompt(messages: Messages) -> str:
    # Wrap user turns in Llama 2 instruction tags; pass other roles through as-is.
    messages = [
        f"[INST]{message['content']}[/INST]"
        if message["role"] == "user"
        else message["content"]
        for message in messages
    ]
    return "\n".join(messages)

g4f/Provider/NoowAi.py Normal file (66 lines added)

@@ -0,0 +1,66 @@
from __future__ import annotations

import random, string, json

from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider

class NoowAi(AsyncGeneratorProvider):
    url = "https://noowai.com"
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
            "Accept": "*/*",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Referer": f"{cls.url}/",
            "Content-Type": "application/json",
            "Origin": cls.url,
            "Alt-Used": "noowai.com",
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Pragma": "no-cache",
            "Cache-Control": "no-cache",
            "TE": "trailers"
        }
        async with ClientSession(headers=headers) as session:
            data = {
                "botId": "default",
                "customId": "d49bc3670c3d858458576d75c8ea0f5d",
                "session": "N/A",
                "chatId": random_string(),
                "contextId": 25,
                "messages": messages,
                "newMessage": messages[-1]["content"],
                "stream": True
            }
            async with session.post(f"{cls.url}/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
                response.raise_for_status()
                async for line in response.content:
                    # Server-sent events: payload lines are prefixed with "data: ".
                    if line.startswith(b"data: "):
                        try:
                            event = json.loads(line[6:])
                            assert "type" in event
                        except (json.JSONDecodeError, AssertionError):
                            # Keep the raw bytes so the error shows the offending line.
                            raise RuntimeError(f"Broken line: {line.decode()}")
                        if event["type"] == "live":
                            yield event["data"]
                        elif event["type"] == "end":
                            break

def random_string(length: int = 10) -> str:
    return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(length))
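The parser above expects an SSE-style stream. A self-contained sketch with hypothetical payloads (the field names `type` and `data` match what the code reads; the exact JSON noowai.com emits may carry more keys):

```py
import json

# Hypothetical wire format: "live" events carry text chunks, "end" closes the stream.
stream = [
    b'data: {"type": "live", "data": "Hel"}\n',
    b'data: {"type": "live", "data": "lo!"}\n',
    b'data: {"type": "end", "data": ""}\n',
]

for line in stream:
    if line.startswith(b"data: "):
        event = json.loads(line[len(b"data: "):])
        if event["type"] == "live":
            print(event["data"], end="")
        elif event["type"] == "end":
            break
```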


@@ -23,9 +23,10 @@ from .GptChatly import GptChatly
 from .GptForLove import GptForLove
 from .GptGo import GptGo
 from .GptGod import GptGod
-from .H2o import H2o
 from .Liaobots import Liaobots
+from .Llama2 import Llama2
 from .Myshell import Myshell
+from .NoowAi import NoowAi
 from .Opchatgpts import Opchatgpts
 from .Phind import Phind
 from .Vercel import Vercel
@@ -82,9 +83,11 @@ class ProviderUtils:
         'HuggingChat': HuggingChat,
         'Komo': Komo,
         'Liaobots': Liaobots,
+        'Llama2': Llama2,
         'Lockchat': Lockchat,
         'MikuChat': MikuChat,
         'Myshell': Myshell,
+        'NoowAi': NoowAi,
         'Opchatgpts': Opchatgpts,
         'OpenAssistant': OpenAssistant,
         'OpenaiChat': OpenaiChat,
@@ -148,8 +151,10 @@ __all__ = [
     'H2o',
     'HuggingChat',
     'Liaobots',
+    'Llama2',
     'Lockchat',
     'Myshell',
+    'NoowAi',
     'Opchatgpts',
     'Raycast',
     'OpenaiChat',


@@ -5,13 +5,12 @@ import uuid
 from aiohttp import ClientSession

-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, format_prompt

 class H2o(AsyncGeneratorProvider):
     url = "https://gpt-gm.h2o.ai"
     working = False
     model = "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1"

     @classmethod
@@ -86,7 +85,6 @@ class H2o(AsyncGeneratorProvider):
         async with session.delete(
             f"{cls.url}/conversation/{conversationId}",
             proxy=proxy,
-            json=data
         ) as response:
             response.raise_for_status()


@@ -11,4 +11,5 @@ from .Wuguokai import Wuguokai
 from .V50 import V50
 from .FastGpt import FastGpt
 from .Aivvm import Aivvm
-from .Vitalentum import Vitalentum
\ No newline at end of file
+from .Vitalentum import Vitalentum
+from .H2o import H2o


@@ -16,6 +16,7 @@ from .Provider import (
     Yqcloud,
     Myshell,
     FreeGpt,
+    NoowAi,
     Vercel,
     Aichat,
     GPTalk,
@@ -51,8 +52,9 @@ gpt_35_long = Model(
     name = 'gpt-3.5-turbo',
     base_provider = 'openai',
     best_provider = RetryProvider([
-        AiAsk, Aichat, ChatgptDemo, FreeGpt, GptGo, Liaobots, You,
+        AiAsk, Aichat, ChatgptDemo, FreeGpt, Liaobots, You,
         GPTalk, ChatgptLogin, GptChatly, GptForLove, Opchatgpts,
+        NoowAi,
     ])
 )
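With NoowAi in the pool, `gpt_35_long` gains one more fallback: `RetryProvider` tries the listed providers until one returns a response, so a single dead provider does not break the call. A minimal usage sketch, assuming `g4f.ChatCompletion.create` accepts a `Model` instance as in the README example:

```py
import g4f
from g4f.models import gpt_35_long

# The retry pool (NoowAi now included) is consulted until a provider succeeds.
response = g4f.ChatCompletion.create(
    model=gpt_35_long,
    messages=[{"role": "user", "content": "Hello"}],
)
print(response)
```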