Restored provider (g4f/Provider/nexra/NexraBing.py)

This commit is contained in:
kqlio67 2024-10-21 20:01:10 +03:00
parent 238ecf4856
commit 6a3684a7b2
2 changed files with 73 additions and 76 deletions

View file

@@ -1,95 +1,91 @@
from __future__ import annotations
from aiohttp import ClientSession
from aiohttp.client_exceptions import ContentTypeError
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt
import json
import requests
from ...typing import CreateResult, Messages
from ..base_provider import ProviderModelMixin, AbstractProvider
from ..helper import format_prompt
class NexraBing(AbstractProvider, ProviderModelMixin):
    """Nexra-hosted Bing chat provider (blocking, requests-based).

    NOTE(review): reconstructed from a mangled diff view that interleaved the
    removed async variant (working = False, 'Bing (…)' model names) with the
    restored one; only the restored attribute values are kept here.
    """
    label = "Nexra Bing"
    url = "https://nexra.aryahcr.cc/documentation/bing/en"
    api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
    working = True
    supports_stream = True

    # Bing conversation styles double as the provider's model names.
    default_model = 'Balanced'
    models = [default_model, 'Creative', 'Precise']
# Alias table mapping public model names to Bing conversation styles.
# The original literal repeated the key "gpt-4" for every style; Python
# keeps only the last duplicate, so the effective mapping was "Precise".
# Make that single surviving entry explicit instead of relying on
# duplicate-key shadowing.
model_aliases = {
    "gpt-4": "Precise",
}
@classmethod
def get_model(cls, model: str) -> str:
    """Resolve a requested model name to one of the provider's models.

    Known names pass through unchanged, aliases (e.g. "gpt-4") are
    translated via ``cls.model_aliases``, and anything unrecognized
    falls back to ``cls.default_model``.
    """
    if model in cls.models:
        return model
    # dict.get collapses the original elif/else chain; unknown names
    # map to the default model.
    return cls.model_aliases.get(model, cls.default_model)
@classmethod
def create_completion(
    cls,
    model: str,
    messages: Messages,
    proxy: str = None,
    stream: bool = False,
    markdown: bool = False,
    **kwargs
) -> CreateResult:
    """Request a Bing completion from the Nexra API.

    Args:
        model: Requested model name; resolved through ``cls.get_model``.
        messages: Chat history, flattened into one prompt by format_prompt.
        proxy: Optional proxy URL. Fix: this parameter was previously
            accepted but silently ignored; it is now forwarded to requests.
        stream: Passed through to the API's "stream" field.
        markdown: Passed through to the API's "markdown" field.

    Returns:
        A generator of text chunks (delegates to ``cls.process_response``).
    """
    model = cls.get_model(model)

    headers = {
        'Content-Type': 'application/json'
    }

    data = {
        "messages": [
            {
                "role": "user",
                "content": format_prompt(messages)
            }
        ],
        # The resolved model name doubles as Bing's conversation style
        # (Balanced / Creative / Precise); the upstream model is "Bing".
        "conversation_style": model,
        "markdown": markdown,
        "stream": stream,
        "model": "Bing"
    }

    proxies = {"http": proxy, "https": proxy} if proxy else None
    response = requests.post(
        cls.api_endpoint, headers=headers, json=data,
        stream=True, proxies=proxies
    )
    return cls.process_response(response)
@classmethod
def process_response(cls, response):
    """Translate a streaming Nexra response into incremental text chunks.

    The service emits JSON records separated by the ASCII Record Separator
    (0x1e). Each record carries the *cumulative* message so far, so only
    the newly appended suffix is yielded. A record with ``finish: true``
    ends the stream.

    Fix: records can be split across transport chunks. The original code
    decoded each chunk independently, so any record straddling a chunk
    boundary raised json.JSONDecodeError and was silently dropped. A byte
    buffer now carries partial records over to the next chunk.
    """
    if response.status_code != 200:
        # Preserve the provider convention of yielding the error as text.
        yield f"Error: {response.status_code}"
        return

    def iter_records():
        # Re-assemble 0x1e-delimited records across chunk boundaries.
        buffer = b""
        for chunk in response.iter_content(chunk_size=None):
            if chunk:
                buffer += chunk
                # The last element may be a partial record; keep it
                # buffered for the next chunk.
                *complete, buffer = buffer.split(b"\x1e")
                yield from complete
        if buffer:
            # Trailing data without a terminator may still be a record.
            yield buffer

    full_message = ""
    for record in iter_records():
        if not record.strip():
            continue
        try:
            json_data = json.loads(record)
        except (json.JSONDecodeError, UnicodeDecodeError):
            # Malformed record (e.g. keep-alive noise): skip it.
            continue
        if json_data.get('finish', False):
            return
        current_message = json_data.get('message', '')
        if current_message:
            # Messages are cumulative; emit only the new suffix.
            new_content = current_message[len(full_message):]
            if new_content:
                yield new_content
            full_message = current_message

    if not full_message:
        yield "No message received"

View file

@@ -39,6 +39,7 @@ from .Provider import (
Liaobots,
MagickPen,
MetaAI,
NexraBing,
NexraBlackbox,
NexraChatGPT,
NexraChatGPT4o,
@@ -150,7 +151,7 @@ gpt_4_turbo = Model(
# Post-commit version. The mangled diff kept both the old and new
# best_provider= lines, which would be a duplicate-keyword SyntaxError;
# only the updated provider list (which adds NexraBing) is retained.
gpt_4 = Model(
    name = 'gpt-4',
    base_provider = 'OpenAI',
    best_provider = IterListProvider([Chatgpt4Online, Ai4Chat, NexraBing, NexraChatGPT, NexraChatGptV2, NexraChatGptWeb, Airforce, Bing, OpenaiChat, gpt_4_turbo.best_provider, gpt_4o.best_provider, gpt_4o_mini.best_provider])
)
# o1