Mirror of https://github.com/xtekky/gpt4free.git (synced 2025-12-06 10:40:43 -08:00)
Fix LMArena provider

This commit is contained in:
parent 5bacb669b2
commit 213e04bae7

5 changed files with 103 additions and 1054 deletions
@@ -4,16 +4,18 @@ from typing import Optional
 from functools import partial
 from dataclasses import dataclass, field
 
-from pydantic_ai.models import Model, KnownModelName, infer_model
-from pydantic_ai.models.openai import OpenAIModel, OpenAISystemPromptRole
+from pydantic_ai import ModelResponsePart, ThinkingPart, ToolCallPart
+from pydantic_ai.models import Model, ModelResponse, KnownModelName, infer_model
+from pydantic_ai.models.openai import OpenAIChatModel, UnexpectedModelBehavior
+from pydantic_ai.models.openai import OpenAISystemPromptRole, _CHAT_FINISH_REASON_MAP, _map_usage, _now_utc, number_to_datetime, split_content_into_text_and_thinking, replace
 
 import pydantic_ai.models.openai
 pydantic_ai.models.openai.NOT_GIVEN = None
 
-from ..client import AsyncClient
+from ..client import AsyncClient, ChatCompletion
 
 @dataclass(init=False)
-class AIModel(OpenAIModel):
+class AIModel(OpenAIChatModel):
     """A model that uses the G4F API."""
 
     client: AsyncClient = field(repr=False)
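The import block above tracks pydantic-ai's rename of OpenAIModel to OpenAIChatModel, plus the private helpers the new _process_response relies on. A minimal compatibility sketch, not part of the commit, for code that must run against both old and new pydantic-ai releases:

# Illustrative fallback, assuming only the class name changed between releases.
try:
    from pydantic_ai.models.openai import OpenAIChatModel
except ImportError:  # older pydantic-ai still exports the pre-rename name
    from pydantic_ai.models.openai import OpenAIModel as OpenAIChatModel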
@@ -53,6 +55,61 @@ class AIModel(OpenAIModel):
         if self._provider:
             return f'g4f:{self._provider}:{self._model_name}'
         return f'g4f:{self._model_name}'
 
+    def _process_response(self, response: ChatCompletion | str) -> ModelResponse:
+        """Process a non-streamed response, and prepare a message to return."""
+        # Although the OpenAI SDK claims to return a Pydantic model (`ChatCompletion`) from the chat completions function:
+        # * it hasn't actually performed validation (presumably they're creating the model with `model_construct` or something?!)
+        # * if the endpoint returns plain text, the return type is a string
+        # Thus we validate it fully here.
+        if not isinstance(response, ChatCompletion):
+            raise UnexpectedModelBehavior('Invalid response from OpenAI chat completions endpoint, expected JSON data')
+
+        if response.created:
+            timestamp = number_to_datetime(response.created)
+        else:
+            timestamp = _now_utc()
+            response.created = int(timestamp.timestamp())
+
+        # Workaround for local Ollama which sometimes returns a `None` finish reason.
+        if response.choices and (choice := response.choices[0]) and choice.finish_reason is None:  # pyright: ignore[reportUnnecessaryComparison]
+            choice.finish_reason = 'stop'
+
+        choice = response.choices[0]
+        items: list[ModelResponsePart] = []
+
+        # The `reasoning` field is only present in gpt-oss via Ollama and OpenRouter.
+        # - https://cookbook.openai.com/articles/gpt-oss/handle-raw-cot#chat-completions-api
+        # - https://openrouter.ai/docs/use-cases/reasoning-tokens#basic-usage-with-reasoning-tokens
+        if reasoning := getattr(choice.message, 'reasoning', None):
+            items.append(ThinkingPart(id='reasoning', content=reasoning, provider_name=self.system))
+
+        # NOTE: We don't currently handle OpenRouter `reasoning_details`:
+        # - https://openrouter.ai/docs/use-cases/reasoning-tokens#preserving-reasoning-blocks
+        # If you need this, please file an issue.
+
+        if choice.message.content:
+            items.extend(
+                (replace(part, id='content', provider_name=self.system) if isinstance(part, ThinkingPart) else part)
+                for part in split_content_into_text_and_thinking(choice.message.content, self.profile.thinking_tags)
+            )
+        if choice.message.tool_calls is not None:
+            for c in choice.message.tool_calls:
+                items.append(ToolCallPart(c.get("function").get("name"), c.get("function").get("arguments"), tool_call_id=c.get("id")))
+
+        raw_finish_reason = choice.finish_reason
+        finish_reason = _CHAT_FINISH_REASON_MAP.get(raw_finish_reason)
+
+        return ModelResponse(
+            parts=items,
+            usage=_map_usage(response, self._provider, "", self._model_name),
+            model_name=response.model,
+            timestamp=timestamp,
+            provider_details=None,
+            provider_response_id=response.id,
+            provider_name=self._provider,
+            finish_reason=finish_reason,
+        )
+
 def new_infer_model(model: Model | KnownModelName, api_key: str = None) -> Model:
     if isinstance(model, Model):
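The tool-call branch added above reads each entry with chained .get() calls, which assumes g4f hands back plain dicts rather than typed OpenAI SDK objects. A standalone sketch of that mapping, with an invented payload for illustration:

from pydantic_ai import ToolCallPart

# Hypothetical g4f-style tool call: a plain dict, not a typed SDK object.
raw_call = {
    "id": "call_0",
    "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'},
}

part = ToolCallPart(
    raw_call.get("function").get("name"),       # tool name
    raw_call.get("function").get("arguments"),  # JSON-encoded arguments string
    tool_call_id=raw_call.get("id"),
)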
@@ -69,4 +126,4 @@ def patch_infer_model(api_key: str | None = None):
     import pydantic_ai.models
 
     pydantic_ai.models.infer_model = partial(new_infer_model, api_key=api_key)
-    pydantic_ai.models.AIModel = AIModel
+    pydantic_ai.models.OpenAIChatModel = AIModel
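For context, a hedged usage sketch of this patch: it assumes the module lives at g4f.integration.pydantic_ai as in this repo, and that new_infer_model resolves the g4f: name format shown in the name property above; the model and provider names are examples only.

from pydantic_ai import Agent
from g4f.integration.pydantic_ai import patch_infer_model

patch_infer_model()  # swap pydantic_ai.models.infer_model for new_infer_model

agent = Agent('g4f:gpt-4o-mini')       # let g4f choose a provider
# agent = Agent('g4f:LMArena:gpt-4o')  # or pin one, mirroring the name format
result = agent.run_sync('Say hello in one word.')
print(result.output)  # older pydantic-ai releases expose this as result.data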