Mirror of https://github.com/xtekky/gpt4free.git (synced 2025-12-05 18:20:35 -08:00)
Add BAAI_Ling provider for Ling-1T model

Co-authored-by: hlohaus <983577+hlohaus@users.noreply.github.com>
parent 07883bc9f0 · commit c364425250
2 changed files with 106 additions and 0 deletions
g4f/Provider/hf_space/BAAI_Ling.py (new file, 104 lines)
@@ -0,0 +1,104 @@
from __future__ import annotations

import aiohttp
import json
import uuid

from ...typing import AsyncResult, Messages
from ...providers.response import JsonConversation
from ...requests.raise_for_status import raise_for_status
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt, get_last_user_message
from ... import debug

class BAAI_Ling(AsyncGeneratorProvider, ProviderModelMixin):
    label = "BAAI Ling"
    url = "https://instspace-ling-playground.hf.space"
    api_endpoint = f"{url}/gradio_api/queue/join"

    working = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = False

    default_model = "ling-1t"
    models = [default_model]
    model_aliases = {
        "ling-1t": default_model,
        "ling": default_model,
    }

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        conversation: JsonConversation = None,
        **kwargs
    ) -> AsyncResult:
        # First call: create a fresh Gradio session and send the full
        # formatted history. Follow-up calls reuse the session, which already
        # has context, so only the last user message is sent.
        if conversation is None or not hasattr(conversation, 'session_hash'):
            conversation = JsonConversation(session_hash=str(uuid.uuid4()).replace('-', '')[:12])
            prompt = format_prompt(messages)
        else:
            prompt = get_last_user_message(messages)

        headers = {
            'accept': '*/*',
            'accept-language': 'en-US,en;q=0.9',
            'content-type': 'application/json',
            'origin': cls.url,
            'referer': f'{cls.url}/',
            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36'
        }

        # Gradio queue-join payload: fn_index and trigger_id address the chat
        # handler inside the Space's UI graph.
        payload = {
            "data": [prompt],
            "event_data": None,
            "fn_index": 0,
            "trigger_id": 5,
            "session_hash": conversation.session_hash
        }

        async with aiohttp.ClientSession() as session:
            # Join the queue. The JSON body of this response only acknowledges
            # the event; the generated text arrives on the SSE stream below.
            async with session.post(cls.api_endpoint, headers=headers, json=payload, proxy=proxy) as response:
                await raise_for_status(response)
                await response.json()

            data_url = f'{cls.url}/gradio_api/queue/data?session_hash={conversation.session_hash}'
            headers_data = {
                'accept': 'text/event-stream',
                'referer': f'{cls.url}/',
                'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36'
            }

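            # Each SSE line from queue/data has the shape
            #   data: {"msg": "...", "output": {...}, ...}
            # An illustrative (not captured from a live session) generating
            # event would look roughly like:
            #   data: {"msg": "process_generating", "output": {"data": ["partial text"]}}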
            async with session.get(data_url, headers=headers_data, proxy=proxy) as response:
                full_response = ""
                async for line in response.content:
                    decoded_line = line.decode('utf-8')
                    if decoded_line.startswith('data: '):
                        try:
                            json_data = json.loads(decoded_line[6:])

                            if json_data.get('msg') == 'process_generating':
                                if 'output' in json_data and 'data' in json_data['output']:
                                    output_data = json_data['output']['data']
                                    if output_data and len(output_data) > 0:
                                        text = output_data[0]
                                        # Gradio resends the accumulated text on
                                        # every event; yield only the new suffix.
                                        if isinstance(text, str) and text.startswith(full_response):
                                            yield text[len(full_response):]
                                            full_response = text
                                        elif isinstance(text, str):
                                            yield text
                                            full_response = text

                            elif json_data.get('msg') == 'process_completed':
                                if 'output' in json_data and 'data' in json_data['output']:
                                    output_data = json_data['output']['data']
                                    if output_data and len(output_data) > 0:
                                        final_text = output_data[0]
                                        # Flush any tail the streaming events missed.
                                        if isinstance(final_text, str) and len(final_text) > len(full_response):
                                            yield final_text[len(full_response):]
                                break

                        except json.JSONDecodeError:
                            debug.log(f"Could not parse JSON: {decoded_line}")
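A minimal sketch of how the new provider can be exercised directly, assuming the g4f package is importable; the call pattern follows g4f's async-generator provider interface, and the example message content is a placeholder:

import asyncio

from g4f.Provider.hf_space.BAAI_Ling import BAAI_Ling

async def main():
    # Standard g4f message format; the prompt text is illustrative.
    messages = [{"role": "user", "content": "Give a one-line summary of Ling-1T."}]
    async for chunk in BAAI_Ling.create_async_generator(model="ling-1t", messages=messages):
        print(chunk, end="", flush=True)

asyncio.run(main())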
g4f/Provider/hf_space/__init__.py
@@ -6,6 +6,7 @@ from ...typing import AsyncResult, Messages, MediaListType
 from ...errors import ResponseError
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 
+from .BAAI_Ling import BAAI_Ling
 from .BlackForestLabs_Flux1Dev import BlackForestLabs_Flux1Dev
 from .BlackForestLabs_Flux1KontextDev import BlackForestLabs_Flux1KontextDev
 from .CohereForAI_C4AI_Command import CohereForAI_C4AI_Command
@@ -27,6 +28,7 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
     default_image_model = BlackForestLabs_Flux1Dev.default_model
     default_vision_model = Microsoft_Phi_4_Multimodal.default_model
     providers = [
+        BAAI_Ling,
         BlackForestLabs_Flux1Dev,
         BlackForestLabs_Flux1KontextDev,
         CohereForAI_C4AI_Command,
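Listing the class in providers is what makes the model reachable through the aggregate HuggingSpace provider. A hedged sketch of the typical client-side call, assuming g4f's AsyncClient API at the time of this commit (exact signatures may differ by version):

import asyncio

from g4f.client import AsyncClient
from g4f.Provider.hf_space import HuggingSpace

async def main():
    # "ling" also works here, resolving to "ling-1t" via model_aliases.
    client = AsyncClient(provider=HuggingSpace)
    response = await client.chat.completions.create(
        model="ling-1t",
        messages=[{"role": "user", "content": "Hello, Ling!"}],
    )
    print(response.choices[0].message.content)

asyncio.run(main())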