Mirror of https://github.com/xtekky/gpt4free.git, synced 2025-12-06 02:30:41 -08:00
* docs(docs/providers-and-models.md): Update provider listings and model information
* feat(g4f/models.py): update model configurations and expand provider support
* fix(g4f/gui/client/static/js/chat.v1.js): correct provider checkbox initialization logic
* feat(g4f/Provider/Blackbox.py): update model configurations and premium handling
* feat(g4f/Provider/ChatGLM.py): add finish reason handling and update default model
* chore(g4f/Provider/DDG.py): Reorder model entries for consistency
* feat(g4f/Provider/ImageLabs.py): Update default image model to sdxl-turbo
* feat(g4f/Provider/Liaobots.py): update supported model configurations and aliases
* feat(g4f/Provider/OIVSCode.py): Update API endpoint and expand model support
* fix(g4f/Provider/needs_auth/CablyAI.py): Enforce authentication requirement
* Removed the provider (g4f/Provider/BlackboxAPI.py)
* fix(g4f/providers/base_provider.py): improve cache validation in AsyncAuthedProvider
* Update g4f/models.py
* fix(g4f/Provider/Liaobots.py): remove deprecated Gemini model aliases
* chore(g4f/models.py): Remove Grok-2 and update Gemini provider configurations
* chore(docs/providers-and-models.md): Remove deprecated Grok models from provider listings
* New provider added (g4f/Provider/AllenAI.py)
* feat(g4f/models.py): Add Ai2 models and update provider references
* feat(docs/providers-and-models.md): update providers and models documentation
* fix(g4f/models.py): update experimental model provider configuration
* fix(g4f/Provider/PollinationsImage.py): Initialize image_models list and update label
* fix(g4f/Provider/PollinationsAI.py): Resolve model initialization and alias conflicts
* refactor(g4f/Provider/PollinationsAI.py): improve model initialization and error handling
* refactor(g4f/Provider/PollinationsImage.py): Improve model synchronization and initialization
* Update g4f/Provider/AllenAI.py

---------

Co-authored-by: kqlio67 <>
97 lines · 3.7 KiB · Python
from __future__ import annotations

import uuid
import json

from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from ..requests.raise_for_status import raise_for_status
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..providers.response import FinishReason

class ChatGLM(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://chatglm.cn"
    api_endpoint = "https://chatglm.cn/chatglm/mainchat-api/guest/stream"

    working = True
    supports_stream = True
    supports_system_message = False
    supports_message_history = False

    default_model = "glm-4"
    models = [default_model]

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        # Random per-request device id, sent via the X-Device-Id header.
        device_id = str(uuid.uuid4()).replace('-', '')

        headers = {
            'Accept-Language': 'en-US,en;q=0.9',
            'App-Name': 'chatglm',
            'Authorization': 'undefined',
            'Content-Type': 'application/json',
            'Origin': 'https://chatglm.cn',
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
            'X-App-Platform': 'pc',
            'X-App-Version': '0.0.1',
            'X-Device-Id': device_id,
            'Accept': 'text/event-stream'
        }

        async with ClientSession(headers=headers) as session:
            data = {
                "assistant_id": "65940acff94777010aa6b796",
                "conversation_id": "",
                "meta_data": {
                    "if_plus_model": False,
                    "is_test": False,
                    "input_question_type": "xxxx",
                    "channel": "",
                    "draft_id": "",
                    "quote_log_id": "",
                    "platform": "pc"
                },
                # Wrap each message in the nested content schema the API expects.
                "messages": [
                    {
                        "role": message["role"],
                        "content": [
                            {
                                "type": "text",
                                "text": message["content"]
                            }
                        ]
                    }
                    for message in messages
                ]
            }

            yield_text = 0
            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                await raise_for_status(response)
                async for chunk in response.content:
                    if chunk:
                        decoded_chunk = chunk.decode('utf-8')
                        if decoded_chunk.startswith('data: '):
                            try:
                                json_data = json.loads(decoded_chunk[6:])
                                parts = json_data.get('parts', [])
                                if parts:
                                    content = parts[0].get('content', [])
                                    if content:
                                        text_content = content[0].get('text', '')
                                        # The stream carries the cumulative text; slice off
                                        # what was already yielded to emit only the delta.
                                        text = text_content[yield_text:]
                                        if text:
                                            yield text
                                            yield_text += len(text)
                                # Yield FinishReason when status is 'finish'
                                if json_data.get('status') == 'finish':
                                    yield FinishReason("stop")
                            except json.JSONDecodeError:
                                pass
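
A minimal usage sketch, assuming the package layout of this repository (ChatGLM exported from g4f.Provider and FinishReason from g4f.providers.response, matching the relative imports above); the generator yields plain text deltas and finally a FinishReason, so a caller can filter on type:

import asyncio

from g4f.Provider import ChatGLM
from g4f.providers.response import FinishReason

async def main():
    # Stream a single-turn chat through the guest endpoint.
    async for chunk in ChatGLM.create_async_generator(
        model="glm-4",
        messages=[{"role": "user", "content": "Hello, how are you?"}],
    ):
        if isinstance(chunk, FinishReason):
            break  # the stream reported status == 'finish'
        print(chunk, end="", flush=True)

asyncio.run(main())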