Mirror of https://github.com/xtekky/gpt4free.git (synced 2025-12-26 11:53:07 -08:00)
Restored provider (g4f/Provider/nexra/NexraBing.py)

commit 6a3684a7b2, parent 238ecf4856
2 changed files with 73 additions and 76 deletions
g4f/Provider/nexra/NexraBing.py
@@ -1,95 +1,91 @@
 from __future__ import annotations

-from aiohttp import ClientSession
-from aiohttp.client_exceptions import ContentTypeError
-
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..helper import format_prompt
 import json
+import requests
+
+from ...typing import CreateResult, Messages
+from ..base_provider import ProviderModelMixin, AbstractProvider
+from ..helper import format_prompt

-class NexraBing(AsyncGeneratorProvider, ProviderModelMixin):
+class NexraBing(AbstractProvider, ProviderModelMixin):
     label = "Nexra Bing"
     url = "https://nexra.aryahcr.cc/documentation/bing/en"
     api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
-    working = False
-    supports_stream = False
+    working = True
+    supports_stream = True

-    default_model = 'Bing (Balanced)'
-    models = ['Bing (Balanced)', 'Bing (Creative)', 'Bing (Precise)']
+    default_model = 'Balanced'
+    models = [default_model, 'Creative', 'Precise']

     model_aliases = {
-        "gpt-4": "Bing (Balanced)",
-        "gpt-4": "Bing (Creative)",
-        "gpt-4": "Bing (Precise)",
+        "gpt-4": "Balanced",
+        "gpt-4": "Creative",
+        "gpt-4": "Precise",
     }

     @classmethod
-    def get_model_and_style(cls, model: str) -> tuple[str, str]:
-        # Default to the default model if not found
-        model = cls.model_aliases.get(model, model)
-        if model not in cls.models:
-            model = cls.default_model
-
-        # Extract the base model and conversation style
-        base_model, conversation_style = model.split(' (')
-        conversation_style = conversation_style.rstrip(')')
-        return base_model, conversation_style
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model in cls.model_aliases:
+            return cls.model_aliases[model]
+        else:
+            return cls.default_model

     @classmethod
-    async def create_async_generator(
+    def create_completion(
         cls,
         model: str,
         messages: Messages,
-        proxy: str = None,
-        stream: bool = False,
-        markdown: bool = False,
+        stream: bool,
         **kwargs
-    ) -> AsyncResult:
-        base_model, conversation_style = cls.get_model_and_style(model)
-
-        headers = {
-            "Content-Type": "application/json",
-            "origin": cls.url,
-            "referer": f"{cls.url}/chat",
-        }
-        async with ClientSession(headers=headers) as session:
-            prompt = format_prompt(messages)
-            data = {
-                "messages": [
-                    {
-                        "role": "user",
-                        "content": prompt
-                    }
-                ],
-                "conversation_style": conversation_style,
-                "markdown": markdown,
-                "stream": stream,
-                "model": base_model
-            }
-            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
-                response.raise_for_status()
-                try:
-                    # Read the entire response text
-                    text_response = await response.text()
-                    # Split the response on the separator character
-                    segments = text_response.split('\x1e')
-
-                    complete_message = ""
-                    for segment in segments:
-                        if not segment.strip():
-                            continue
-                        try:
-                            response_data = json.loads(segment)
-                            if response_data.get('message'):
-                                complete_message = response_data['message']
-                            if response_data.get('finish'):
-                                break
-                        except json.JSONDecodeError:
-                            raise Exception(f"Failed to parse segment: {segment}")
-
-                    # Yield the complete message
-                    yield complete_message
-                except ContentTypeError:
-                    raise Exception("Failed to parse response content type.")
+    ) -> CreateResult:
+        model = cls.get_model(model)
+
+        headers = {
+            'Content-Type': 'application/json'
+        }
+
+        data = {
+            "messages": [
+                {
+                    "role": "user",
+                    "content": format_prompt(messages)
+                }
+            ],
+            "conversation_style": model,
+            "markdown": False,
+            "stream": stream,
+            "model": "Bing"
+        }
+
+        response = requests.post(cls.api_endpoint, headers=headers, json=data, stream=True)
+
+        return cls.process_response(response)
+
+    @classmethod
+    def process_response(cls, response):
+        if response.status_code != 200:
+            yield f"Error: {response.status_code}"
+            return
+
+        full_message = ""
+        for chunk in response.iter_content(chunk_size=None):
+            if chunk:
+                messages = chunk.decode('utf-8').split('\x1e')
+                for message in messages:
+                    try:
+                        json_data = json.loads(message)
+                        if json_data.get('finish', False):
+                            return
+                        current_message = json_data.get('message', '')
+                        if current_message:
+                            new_content = current_message[len(full_message):]
+                            if new_content:
+                                yield new_content
+                                full_message = current_message
+                    except json.JSONDecodeError:
+                        continue
+
+        if not full_message:
+            yield "No message received"
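The rewrite swaps the async aiohttp generator for a blocking requests call and moves streaming into process_response: the Nexra endpoint returns JSON records separated by the ASCII record separator (\x1e), and each record carries the cumulative message so far, so only the unseen suffix is yielded. (Note that model_aliases repeats the "gpt-4" key, so at runtime only the last entry, "Precise", survives.) Below is a minimal standalone sketch of that parsing scheme; the raw payload is made up for illustration, not captured from the live API.

import json

# Two \x1e-separated records; each 'message' is cumulative, and 'finish'
# marks the final record (this mirrors what process_response expects).
raw = '{"message": "Hel", "finish": false}\x1e{"message": "Hello!", "finish": true}'

full_message = ""
for record in raw.split('\x1e'):
    data = json.loads(record)
    # Yield only the part of the cumulative message we have not seen yet.
    delta = data.get('message', '')[len(full_message):]
    if delta:
        print(delta, end='')  # prints "Hel", then "lo!"
        full_message = data['message']
    if data.get('finish', False):
        break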
@@ -39,6 +39,7 @@ from .Provider import (
     Liaobots,
     MagickPen,
     MetaAI,
+    NexraBing,
     NexraBlackbox,
     NexraChatGPT,
     NexraChatGPT4o,

@@ -150,7 +151,7 @@ gpt_4_turbo = Model(
 gpt_4 = Model(
     name = 'gpt-4',
     base_provider = 'OpenAI',
-    best_provider = IterListProvider([Chatgpt4Online, Ai4Chat, NexraChatGPT, NexraChatGptV2, NexraChatGptWeb, Airforce, Bing, OpenaiChat, gpt_4_turbo.best_provider, gpt_4o.best_provider, gpt_4o_mini.best_provider])
+    best_provider = IterListProvider([Chatgpt4Online, Ai4Chat, NexraBing, NexraChatGPT, NexraChatGptV2, NexraChatGptWeb, Airforce, Bing, OpenaiChat, gpt_4_turbo.best_provider, gpt_4o.best_provider, gpt_4o_mini.best_provider])
 )

 # o1
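The second file registers NexraBing in the provider imports and inserts it into the gpt-4 best_provider chain, so it participates in automatic provider selection. It can also be requested explicitly; the following is a sketch assuming gpt4free's usual g4f.ChatCompletion.create entry point and a reachable endpoint, not code from this commit.

import g4f
from g4f.Provider import NexraBing

# Stream through the restored provider; 'Balanced' is its default model,
# with 'Creative' and 'Precise' as the other conversation styles.
for chunk in g4f.ChatCompletion.create(
    model="Balanced",
    messages=[{"role": "user", "content": "Hello"}],
    provider=NexraBing,
    stream=True,
):
    print(chunk, end="")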