Mirror of https://github.com/xtekky/gpt4free.git
Synced 2025-12-06 02:30:41 -08:00
Fix response parsing: use type "reply" with data.content/reasoningContent, update models
Co-authored-by: hlohaus <983577+hlohaus@users.noreply.github.com>
parent f57663cbe8
commit 098b2401ea
1 changed file with 15 additions and 23 deletions
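For context, the stream frames this commit targets changed shape: the old parser handled separate "text" and "thinking" frames with a flat string in "data", while the new endpoint sends a single "reply" frame whose "data" object carries "content" and "reasoningContent". A minimal sketch of both shapes, inferred from the parser changes in the diff below (the field names match what the code reads; the payload values are illustrative):

import json

# Old frames (handled by the removed "text"/"thinking" branches):
old_text = '{"type": "text", "data": "Hello"}'
old_think = '{"type": "thinking", "data": "Let me think..."}'

# New frame (handled by the "reply" branch added in this commit);
# "data" is now an object with optional content/reasoningContent fields.
new_reply = '{"type": "reply", "data": {"content": "Hello", "reasoningContent": "Let me think..."}}'

frame = json.loads(new_reply)
assert frame["type"] == "reply"
print(frame["data"].get("content"))           # -> Hello
print(frame["data"].get("reasoningContent"))  # -> Let me think...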
@@ -11,7 +11,7 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 class GradientNetwork(AsyncGeneratorProvider, ProviderModelMixin):
     """
     Provider for chat.gradient.network
-    Supports streaming text generation with various Qwen models.
+    Supports streaming text generation with Qwen and GPT OSS models.
     """
     label = "Gradient Network"
     url = "https://chat.gradient.network"
@@ -23,18 +23,15 @@ class GradientNetwork(AsyncGeneratorProvider, ProviderModelMixin):
     supports_system_message = True
     supports_message_history = True
 
-    default_model = "qwen3-235b"
+    default_model = "Qwen3 235B"
     models = [
         default_model,
-        "qwen3-32b",
-        "deepseek-r1-0528",
-        "deepseek-v3-0324",
-        "llama-4-maverick",
+        "GPT OSS 120B",
     ]
     model_aliases = {
-        "qwen-3-235b": "qwen3-235b",
-        "deepseek-r1": "deepseek-r1-0528",
-        "deepseek-v3": "deepseek-v3-0324",
+        "qwen-3-235b": "Qwen3 235B",
+        "qwen3-235b": "Qwen3 235B",
+        "gpt-oss-120b": "GPT OSS 120B",
     }
 
     @classmethod
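As a hedged illustration of why both the old lowercase ids and the new display names resolve, assuming ProviderModelMixin.get_model() falls back to model_aliases as the cls.get_model(model) call in the next hunk suggests (resolve below is a hypothetical stand-in, not the library's code):

# Hypothetical stand-in for alias resolution; mirrors the updated mapping above.
MODELS = ["Qwen3 235B", "GPT OSS 120B"]
MODEL_ALIASES = {
    "qwen-3-235b": "Qwen3 235B",
    "qwen3-235b": "Qwen3 235B",
    "gpt-oss-120b": "GPT OSS 120B",
}

def resolve(name: str) -> str:
    # Known model names pass through; everything else tries the alias table.
    return name if name in MODELS else MODEL_ALIASES.get(name, name)

assert resolve("qwen3-235b") == "Qwen3 235B"      # old id keeps working
assert resolve("gpt-oss-120b") == "GPT OSS 120B"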
@@ -62,7 +59,7 @@ class GradientNetwork(AsyncGeneratorProvider, ProviderModelMixin):
 
         Yields:
             str: Content chunks from the response
-            Reasoning: Thinking content when enable_thinking is True
+            Reasoning: Reasoning content when enable_thinking is True
         """
         model = cls.get_model(model)
 
@@ -101,22 +98,17 @@
                         data = json.loads(line)
                         msg_type = data.get("type")
 
-                        if msg_type == "text":
-                            # Regular text content
-                            content = data.get("data")
+                        if msg_type == "reply":
+                            # Response chunks with content or reasoningContent
+                            reply_data = data.get("data", {})
+                            content = reply_data.get("content")
+                            reasoning_content = reply_data.get("reasoningContent")
+
+                            if reasoning_content:
+                                yield Reasoning(reasoning_content)
                             if content:
                                 yield content
 
-                        elif msg_type == "thinking":
-                            # Thinking/reasoning content
-                            content = data.get("data")
-                            if content:
-                                yield Reasoning(content)
-
-                        elif msg_type == "done":
-                            # Stream complete
-                            break
-
                         elif msg_type in ("clusterInfo", "blockUpdate"):
                             # Skip GPU cluster visualization messages
                             continue
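Putting the new dispatch together, a minimal, self-contained sketch of the updated parsing loop; Reasoning is stubbed as a plain dataclass stand-in for g4f's response type, and parse_stream is a hypothetical helper for illustration, not the provider's actual method:

import json
from dataclasses import dataclass
from typing import Iterator, Union

@dataclass
class Reasoning:
    # Stand-in for g4f's Reasoning response type, used only for this sketch.
    content: str

def parse_stream(lines: Iterator[str]) -> Iterator[Union[str, Reasoning]]:
    """Replay the new 'reply' dispatch over already-decoded stream lines."""
    for line in lines:
        data = json.loads(line)
        msg_type = data.get("type")
        if msg_type == "reply":
            reply_data = data.get("data", {})
            reasoning_content = reply_data.get("reasoningContent")
            if reasoning_content:
                yield Reasoning(reasoning_content)
            content = reply_data.get("content")
            if content:
                yield content
        elif msg_type in ("clusterInfo", "blockUpdate"):
            continue  # GPU cluster visualization frames carry no text

demo = [
    '{"type": "reply", "data": {"reasoningContent": "Let me think..."}}',
    '{"type": "reply", "data": {"content": "Answer."}}',
    '{"type": "clusterInfo", "data": {}}',
]
for chunk in parse_stream(demo):
    print(chunk)

Yielding reasoningContent before content mirrors the branch order in the diff, so thinking tokens surface ahead of the answer text within each frame.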