gpt4free/g4f/api/stubs.py
Marcos Vinícius Claudiano 405868c576
feat: add tool_call emulation for OpenAI API (#3352)
* feat: add tool_call emulation for OpenAI API

Avoid forcing PollinationsAI when tools are present, and add an opt-in tool_emulation mode (or G4F_TOOL_EMULATION=1) to emit OpenAI-compatible tool_calls for providers that ignore tools.

* fix: avoid duplicate stream kwarg in tool emulation

Tool emulation calls the upstream provider with stream=False; remove stream/stream_timeout from forwarded kwargs to prevent conflicts.

* fix: prefer non-auth providers when api_key missing

When routing via AnyProvider without an api_key, try providers with needs_auth=false first to reduce MissingAuthError for tool-enabled clients like MarksCode.

* test: cover tool call emulation

Route tool_emulation through ToolSupportProvider (avoid circular imports) and add unittest coverage for multi-tool JSON plans and run_tools integration.
2026-02-09 12:01:52 +01:00

169 lines
4.8 KiB
Python

from __future__ import annotations

from typing import Optional, Union

from pydantic import AliasChoices, BaseModel, Field, model_validator

from ..typing import Messages
class RequestConfig(BaseModel):
    """Options shared by the chat-completions and responses endpoints.

    All fields are optional overrides forwarded to the selected provider;
    ``None`` means "use the provider's default".
    """

    model: str = Field(default="")
    provider: Optional[str] = None
    # Attached media as (type-or-name, url-or-data) pairs — TODO confirm tuple order.
    media: Optional[list[tuple[str, str]]] = None
    # Requested output modalities (e.g. text / audio).
    modalities: Optional[list[str]] = None
    temperature: Optional[float] = None
    presence_penalty: Optional[float] = None
    frequency_penalty: Optional[float] = None
    top_p: Optional[float] = None
    max_tokens: Optional[int] = None
    stop: Union[list[str], str, None] = None
    # Either a single key or a per-provider mapping of keys.
    api_key: Optional[Union[str, dict[str, str]]] = None
    base_url: Optional[str] = None
    web_search: Optional[bool] = None
    proxy: Optional[str] = None
    conversation: Optional[dict] = None
    timeout: Optional[int] = None
    stream_timeout: Optional[int] = None
    # Use default_factory so each request gets its own list instance.
    tool_calls: list = Field(
        default_factory=list,
        examples=[
            [
                {
                    "function": {
                        "arguments": {
                            "query": "search query",
                            "max_results": 5,
                            "max_words": 2500,
                            "backend": "auto",
                            "add_text": True,
                            "timeout": 5,
                        },
                        "name": "search_tool",
                    },
                    "type": "function",
                }
            ]
        ],
    )
    reasoning_effort: Optional[str] = None
    logit_bias: Optional[dict] = None
    # NOTE: a duplicate `modalities` declaration was removed here; the field
    # is declared once above.
    audio: Optional[dict] = None
    response_format: Optional[dict] = None
    download_media: bool = False
    raw: bool = False
    extra_body: Optional[dict] = None
    # When set (or when env G4F_TOOL_EMULATION=1), the server will attempt to
    # emulate OpenAI tool_calls for providers that don't support tools natively.
    tool_emulation: Optional[bool] = None
class ChatCompletionsConfig(RequestConfig):
    """Request body for the OpenAI-compatible /chat/completions endpoint."""

    messages: Messages = Field(
        examples=[[{"role": "system", "content": ""}, {"role": "user", "content": ""}]]
    )
    stream: bool = False
    # Single-image fields alongside a multi-image list of (name, data) pairs —
    # TODO confirm tuple order against the handler that consumes `images`.
    image: Optional[str] = None
    image_name: Optional[str] = None
    images: Optional[list[tuple[str, str]]] = None
    # OpenAI-style tool definitions; see `tool_emulation` on RequestConfig.
    tools: Optional[list] = None
    parallel_tool_calls: Optional[bool] = None
    tool_choice: Optional[str] = None
    conversation_id: Optional[str] = None
class ResponsesConfig(RequestConfig):
    """Request body for the /responses endpoint: messages or a plain string."""

    input: Union[Messages, str]
class ImageGenerationConfig(BaseModel):
    """Request body for the image-generation endpoint.

    Dimensions may be given either as explicit ``width``/``height`` or as an
    OpenAI-style ``size`` string (``"1024x768"``), which is split into the
    two fields before validation.
    """

    prompt: str
    model: Optional[str] = None
    provider: Optional[str] = None
    response_format: Optional[str] = None
    api_key: Optional[str] = None
    proxy: Optional[str] = None
    width: Optional[int] = None
    height: Optional[int] = None
    num_inference_steps: Optional[int] = None
    seed: Optional[int] = None
    guidance_scale: Optional[int] = None
    aspect_ratio: Optional[str] = None
    n: Optional[int] = None
    negative_prompt: Optional[str] = None
    resolution: Optional[str] = None
    audio: Optional[dict] = None
    download_media: bool = True

    @model_validator(mode="before")
    @classmethod
    def parse_size(cls, values):
        """Derive ``width``/``height`` from a ``"WIDTHxHEIGHT"`` size string.

        Explicitly supplied dimensions are never overwritten, and a
        malformed ``size`` is ignored rather than rejected.
        """
        # mode="before" can receive non-dict input; let pydantic validate it.
        if not isinstance(values, dict):
            return values
        size = values.get("size")
        if size:
            try:
                width, height = map(int, size.split("x"))
            except (ValueError, AttributeError):
                # If the format is incorrect, we simply ignore it.
                return values
            # Only fill in dimensions the caller did not set explicitly.
            if values.get("width") is None:
                values["width"] = width
            if values.get("height") is None:
                values["height"] = height
        return values
class ProviderResponseModel(BaseModel):
    """Summary entry for a provider as returned by the listing endpoint."""

    id: str
    object: str = "provider"
    created: int
    # Declared without defaults: required in the payload but may be null.
    url: Optional[str]
    label: Optional[str]
class ProviderResponseDetailModel(ProviderResponseModel):
    """Provider entry extended with its model lists and accepted parameters."""

    models: list[str]
    image_models: list[str]
    vision_models: list[str]
    params: list[str]
class ModelResponseModel(BaseModel):
    """OpenAI-compatible entry for the model-listing endpoint."""

    id: str
    object: str = "model"
    created: int
    # Required in the payload but may be null.
    owned_by: Optional[str]
class UploadResponseModel(BaseModel):
    """Response for a successful file upload: the bucket id and its URL."""

    bucket_id: str
    url: str
class ErrorResponseModel(BaseModel):
    """Top-level error envelope returned by the API.

    ``ErrorResponseMessageModel`` is declared later in this module; the
    forward reference resolves lazily because of
    ``from __future__ import annotations``.
    """

    error: ErrorResponseMessageModel
    model: Optional[str] = None
    provider: Optional[str] = None
class ErrorResponseMessageModel(BaseModel):
    """Human-readable error message nested inside ErrorResponseModel."""

    message: str
class FileResponseModel(BaseModel):
    """Response carrying the name of a stored file."""

    filename: str
class TranscriptionResponseModel(BaseModel):
    """Audio-transcription result plus the model/provider that produced it."""

    text: str
    model: str
    provider: str
class AudioSpeechConfig(BaseModel):
    """Request body for the audio/speech (text-to-speech) endpoint."""

    # Text to synthesize.
    input: str
    model: Optional[str] = None
    provider: Optional[str] = None
    voice: Optional[str] = None
    # The field name was historically misspelled; keep "instrcutions" for
    # backward compatibility while also accepting the correct spelling from
    # clients via a validation alias.
    instrcutions: str = Field(
        default="Speech this text in a natural way.",
        validation_alias=AliasChoices("instrcutions", "instructions"),
    )
    response_format: Optional[str] = None
    language: Optional[str] = None
    download_media: bool = True