Refactor GradientNetwork and ItalyGPT providers; update BAAI_Ling for improved functionality and model handling

hlohaus committed 2025-11-30 11:20:29 +01:00
parent ed84c2dc6b
commit 1fd9b8d116
4 changed files with 41 additions and 41 deletions

g4f/Provider/GradientNetwork.py

@@ -3,7 +3,7 @@ from __future__ import annotations
 import json
 from ..typing import AsyncResult, Messages
-from ..providers.response import Reasoning
+from ..providers.response import Reasoning, JsonResponse
 from ..requests import StreamSession
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
@@ -23,7 +23,7 @@ class GradientNetwork(AsyncGeneratorProvider, ProviderModelMixin):
     supports_system_message = True
     supports_message_history = True
-    default_model = "Qwen3 235B"
+    default_model = "GPT OSS 120B"
     models = [
         default_model,
         "GPT OSS 120B",
@@ -40,9 +40,7 @@ class GradientNetwork(AsyncGeneratorProvider, ProviderModelMixin):
         model: str,
         messages: Messages,
         proxy: str = None,
-        temperature: float = None,
-        max_tokens: int = None,
-        enable_thinking: bool = False,
+        enable_thinking: bool = True,
         **kwargs
     ) -> AsyncResult:
         """
@@ -52,8 +50,6 @@ class GradientNetwork(AsyncGeneratorProvider, ProviderModelMixin):
            model: The model name to use
            messages: List of message dictionaries
            proxy: Optional proxy URL
-           temperature: Optional temperature parameter
-           max_tokens: Optional max tokens parameter
            enable_thinking: Enable the thinking/analysis channel (maps to enableThinking in API)
            **kwargs: Additional arguments
@@ -66,24 +62,18 @@ class GradientNetwork(AsyncGeneratorProvider, ProviderModelMixin):
         headers = {
             "Accept": "application/x-ndjson",
             "Content-Type": "application/json",
-            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
             "Origin": cls.url,
             "Referer": f"{cls.url}/",
         }
         payload = {
+            "clusterMode": "nvidia" if "GPT OSS" in model else "hybrid",
             "model": model,
             "messages": messages,
         }
-        if temperature is not None:
-            payload["temperature"] = temperature
-        if max_tokens is not None:
-            payload["max_tokens"] = max_tokens
         if enable_thinking:
             payload["enableThinking"] = enable_thinking
-        async with StreamSession(headers=headers, proxy=proxy) as session:
+        async with StreamSession(headers=headers, proxy=proxy, impersonate="chrome") as session:
             async with session.post(
                 cls.api_endpoint,
                 json=payload,
@@ -96,6 +86,7 @@ class GradientNetwork(AsyncGeneratorProvider, ProviderModelMixin):
                 try:
                     data = json.loads(line)
+                    yield JsonResponse.from_dict(data)
                     msg_type = data.get("type")
                     if msg_type == "reply":
@@ -113,4 +104,4 @@ class GradientNetwork(AsyncGeneratorProvider, ProviderModelMixin):
                 except json.JSONDecodeError:
                     # Skip non-JSON lines (may be partial data or empty)
-                    continue
+                    raise
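For context, after this change the provider drops its client-side temperature/max_tokens handling, yields every parsed NDJSON event as a JsonResponse, and raises on malformed lines instead of silently skipping them. A minimal usage sketch, assuming g4f is installed at this revision; the model name and the enable_thinking kwarg come from the diff above, and kwargs are forwarded to the provider by the standard g4f client:

# Sketch, not the canonical interface: provider selection and kwarg
# forwarding follow the usual g4f client conventions.
from g4f.client import Client
from g4f.Provider import GradientNetwork

client = Client(provider=GradientNetwork)
response = client.chat.completions.create(
    model="GPT OSS 120B",  # the new default_model from this commit
    messages=[{"role": "user", "content": "Hello"}],
    enable_thinking=True,  # forwarded as enableThinking in the request payload
)
print(response.choices[0].message.content)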

g4f/Provider/ItalyGPT.py

@@ -1,5 +1,6 @@
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..typing import AsyncResult, Messages
+from ..requests import DEFAULT_HEADERS
 from aiohttp import ClientSession

 class ItalyGPT(AsyncGeneratorProvider, ProviderModelMixin):
@@ -23,10 +24,10 @@ class ItalyGPT(AsyncGeneratorProvider, ProviderModelMixin):
     ) -> AsyncResult:
         model = cls.get_model(model)
         headers = {
+            **DEFAULT_HEADERS,
             "content-type": "application/json",
             "origin": "https://italygpt.it",
             "referer": "https://italygpt.it/",
-            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"
         }
         payload = {
             "messages": messages,
@@ -34,12 +35,12 @@ class ItalyGPT(AsyncGeneratorProvider, ProviderModelMixin):
         }
         async with ClientSession() as session:
             async with session.post(
-                f"{cls.url}/api/chat/",
+                f"{cls.url}/api/chat",
                 json=payload,
                 headers=headers,
                 proxy=proxy,
             ) as resp:
                 resp.raise_for_status()
-                async for chunk in resp.content:
+                async for chunk in resp.content.iter_any():
                     if chunk:
                         yield chunk.decode()
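The switch from iterating resp.content to resp.content.iter_any() matters for streaming: iterating an aiohttp StreamReader directly yields newline-delimited lines, so a response that never emits a newline would sit buffered until the connection closes, while iter_any() hands over each chunk as soon as it arrives. A standalone sketch of the pattern; the URL is a placeholder:

import asyncio
from aiohttp import ClientSession

async def stream(url: str) -> None:
    async with ClientSession() as session:
        async with session.get(url) as resp:
            resp.raise_for_status()
            # iter_any() forwards raw chunks immediately, without waiting
            # for newlines the way `async for line in resp.content` does.
            async for chunk in resp.content.iter_any():
                if chunk:
                    print(chunk.decode(errors="replace"), end="", flush=True)

asyncio.run(stream("https://example.com"))  # placeholder URL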

g4f/Provider/__init__.py

@@ -49,6 +49,7 @@ from .DeepInfra import DeepInfra
 from .EasyChat import EasyChat
 from .GLM import GLM
 from .GradientNetwork import GradientNetwork
+from .ItalyGPT import ItalyGPT
 from .LambdaChat import LambdaChat
 from .Mintlify import Mintlify
 from .OIVSCodeSer import OIVSCodeSer2, OIVSCodeSer0501
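Registering the module in g4f/Provider/__init__.py is what exposes the provider at the package level. A quick sanity check, assuming the layout above:

# Verifies the re-export added in this commit (requires g4f at this revision).
from g4f.Provider import ItalyGPT

# Class attributes are available without instantiation; the url value is
# implied by the origin/referer headers in the diff above.
print(ItalyGPT.url)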

g4f/Provider/hf_space/BAAI_Ling.py

@@ -8,12 +8,12 @@ from ...typing import AsyncResult, Messages
 from ...providers.response import JsonConversation
 from ...requests.raise_for_status import raise_for_status
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..helper import format_prompt, get_last_user_message
+from ..helper import format_prompt, get_last_user_message, get_system_prompt
 from ... import debug

 class BAAI_Ling(AsyncGeneratorProvider, ProviderModelMixin):
-    label = "BAAI Ling"
-    url = "https://instspace-ling-playground.hf.space"
+    label = "Ling & Ring Playground"
+    url = "https://cafe3310-ling-playground.hf.space"
     api_endpoint = f"{url}/gradio_api/queue/join"
     working = True
@@ -25,7 +25,7 @@ class BAAI_Ling(AsyncGeneratorProvider, ProviderModelMixin):
     model_aliases = {
         "ling": default_model,
     }
-    models = [default_model]
+    models = ['ling-mini-2.0', 'ling-1t', 'ling-flash-2.0', 'ring-1t', 'ring-flash-2.0', 'ring-mini-2.0']

     @classmethod
     async def create_async_generator(
@@ -40,6 +40,7 @@ class BAAI_Ling(AsyncGeneratorProvider, ProviderModelMixin):
         if is_new_conversation:
             conversation = JsonConversation(session_hash=str(uuid.uuid4()).replace('-', '')[:12])
+        model = cls.get_model(model)
         prompt = format_prompt(messages) if is_new_conversation else get_last_user_message(messages)

         headers = {
@@ -52,10 +53,21 @@ class BAAI_Ling(AsyncGeneratorProvider, ProviderModelMixin):
         }

         payload = {
-            "data": [prompt],
+            "data": [
+                prompt,
+                [
+                    [
+                        None,
+                        "Hello! I'm Ling. Try selecting a scenario and a message example below to get started."
+                    ]
+                ],
+                get_system_prompt(messages),
+                1,
+                model
+            ],
             "event_data": None,
-            "fn_index": 0,
-            "trigger_id": 5,
+            "fn_index": 11,
+            "trigger_id": 14,
             "session_hash": conversation.session_hash
         }
@@ -79,27 +91,22 @@ class BAAI_Ling(AsyncGeneratorProvider, ProviderModelMixin):
                    if decoded_line.startswith('data: '):
                        try:
                            json_data = json.loads(decoded_line[6:])
                            if json_data.get('msg') == 'process_generating':
                                if 'output' in json_data and 'data' in json_data['output']:
                                    output_data = json_data['output']['data']
                                    if output_data and len(output_data) > 0:
-                                        text = output_data[0]
-                                        if isinstance(text, str) and text.startswith(full_response):
-                                            yield text[len(full_response):]
-                                            full_response = text
-                                        elif isinstance(text, str):
-                                            yield text
-                                            full_response = text
+                                        parts = output_data[0][0]
+                                        if len(parts) == 2:
+                                            new_text = output_data[0][1].pop()
+                                            full_response += new_text
+                                            yield new_text
+                                        if len(parts) > 2:
+                                            new_text = parts[2]
+                                            full_response += new_text
+                                            yield new_text
                            elif json_data.get('msg') == 'process_completed':
-                                if 'output' in json_data and 'data' in json_data['output']:
-                                    output_data = json_data['output']['data']
-                                    if output_data and len(output_data) > 0:
-                                        final_text = output_data[0]
-                                        if isinstance(final_text, str) and len(final_text) > len(full_response):
-                                            yield final_text[len(full_response):]
-                                break
+                                break
                        except json.JSONDecodeError:
                            debug.log("Could not parse JSON:", decoded_line)
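The BAAI_Ling changes retarget the provider at a different Hugging Face Space and a different Gradio function (fn_index 11, trigger_id 14), with a richer data array: the prompt, a seeded chat history, the system prompt, a numeric slot, and the model id. For orientation, a minimal sketch of the two-step Gradio queue protocol the provider speaks. Only the /gradio_api/queue/join endpoint appears in the diff; the /gradio_api/queue/data SSE path and the simplified data payload are assumptions based on how Gradio Spaces are commonly driven:

import json
import uuid
import asyncio
from aiohttp import ClientSession

SPACE = "https://cafe3310-ling-playground.hf.space"

async def ask(prompt: str, model: str = "ling-mini-2.0") -> None:
    session_hash = uuid.uuid4().hex[:12]
    payload = {
        # History, system prompt, and the numeric slot are simplified here.
        "data": [prompt, [], "", 1, model],
        "event_data": None,
        "fn_index": 11,   # values from the diff above
        "trigger_id": 14,
        "session_hash": session_hash,
    }
    async with ClientSession() as session:
        # Step 1: enqueue the job on the Space.
        async with session.post(f"{SPACE}/gradio_api/queue/join", json=payload) as resp:
            resp.raise_for_status()
        # Step 2: read server-sent events for this session (assumed endpoint).
        async with session.get(
            f"{SPACE}/gradio_api/queue/data?session_hash={session_hash}"
        ) as resp:
            async for raw in resp.content:
                line = raw.decode().strip()
                if not line.startswith("data: "):
                    continue
                event = json.loads(line[6:])
                print(event.get("msg"))  # e.g. process_generating, process_completed
                if event.get("msg") == "process_completed":
                    break

asyncio.run(ask("Hello"))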