- **Docs**
  - `docs/file.md`: update the upload instructions to use inline `bucket` content parts instead of `tool_calls/bucket_tool`.
  - `docs/media.md`: add an asynchronous audio transcription example with a detailed explanation and notes (sketched below).
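
  The transcription example follows roughly this shape (a hedged sketch, not the exact docs text; the `media` keyword for attaching files and the default provider selection are assumptions):

  ```python
  import asyncio
  from g4f.client import AsyncClient

  async def main():
      client = AsyncClient()
      # Assumption: the client forwards a `media` list of (file, filename)
      # pairs to providers that accept audio input.
      with open("audio.wav", "rb") as audio_file:
          response = await client.chat.completions.create(
              messages=[{"role": "user", "content": "Transcribe this audio"}],
              media=[[audio_file, "audio.wav"]],
          )
      print(response.choices[0].message.content)

  asyncio.run(main())
  ```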
- **New audio provider**
  - Add `g4f/Provider/audio/EdgeTTS.py` implementing Edge Text-to-Speech (`EdgeTTS`); a usage sketch follows this list.
  - Create `g4f/Provider/audio/__init__.py` for provider export.
  - Register the provider in `g4f/Provider/__init__.py`.
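
  A minimal usage sketch, assuming the provider is exported as `EdgeTTS`; the model id, voice name, and `audio` options dict are assumptions, not the provider's confirmed API:

  ```python
  from g4f.client import Client
  from g4f.Provider import EdgeTTS

  client = Client(provider=EdgeTTS)
  response = client.chat.completions.create(
      model="edge-tts",  # assumed model id
      messages=[{"role": "user", "content": "Hello from Edge text-to-speech!"}],
      audio={"voice": "en-US-AriaNeural"},  # assumed option shape
  )
  # save() is implemented by ChatCompletionMessage in the stubs below.
  response.choices[0].message.save("hello.mp3")
  ```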
- **Refactor image → media**
  - Introduce a `generated_media/` directory and a `get_media_dir()` helper in `g4f/image/copy_images.py`; add `ensure_media_dir()`; keep back-compat with the legacy `generated_images/` (see the sketch after this list).
  - Replace `images_dir` references with `get_media_dir()` across:
    - `g4f/api/__init__.py`
    - `g4f/client/stubs.py`
    - `g4f/gui/server/api.py`
    - `g4f/gui/server/backend_api.py`
    - `g4f/image/copy_images.py`
  - Rename the CLI/API config field/flag from `image_provider` to `media_provider` (`g4f/cli.py`, `g4f/api/__init__.py`, `g4f/client/__init__.py`).
  - Extend `g4f/image/__init__.py`:
    - add `MEDIA_TYPE_MAP` and `get_extension()` (sketched below)
    - revise `is_allowed_extension()` and `to_input_audio()` to support a wider range of media types.
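
  A plausible shape for the new helpers (a sketch reconstructed from the bullets above, not the actual implementation; the directory locations, fallback rule, and map entries are assumptions):

  ```python
  import os

  def get_media_dir() -> str:
      # Assumed locations; the real helper derives these from g4f's own config dirs.
      legacy_dir = os.path.join(os.getcwd(), "generated_images")
      media_dir = os.path.join(os.getcwd(), "generated_media")
      # Back-compat: keep using an existing legacy directory if no new one exists yet.
      if os.path.isdir(legacy_dir) and not os.path.isdir(media_dir):
          return legacy_dir
      return media_dir

  def ensure_media_dir() -> None:
      os.makedirs(get_media_dir(), exist_ok=True)

  # Assumed entries; the real map covers more image/audio/video types.
  MEDIA_TYPE_MAP = {
      "image/png": "png",
      "image/jpeg": "jpg",
      "audio/mpeg": "mp3",
      "audio/wav": "wav",
  }

  def get_extension(media_type: str):
      return MEDIA_TYPE_MAP.get(media_type)
  ```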
- **Provider adjustments**
  - `g4f/Provider/ARTA.py`: swap `raise_error()` parameter order.
  - `g4f/Provider/Cloudflare.py`: drop unused `MissingRequirementsError` import; move `get_args_from_nodriver()` inside try; handle `FileNotFoundError`.
- **Core enhancements**
  - `g4f/providers/any_provider.py`: use `default_model` instead of the literal `"default"`; broaden model/provider matching; update model list cleanup.
  - `g4f/models.py`: safeguard the provider count logic when the model name is falsy.
  - `g4f/providers/base_provider.py`: catch `json.JSONDecodeError` when reading the auth cache and delete the corrupted file (sketched below).
  - `g4f/providers/response.py`: allow `AudioResponse` to accept extra kwargs.
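
  The auth-cache hardening boils down to this pattern (a sketch under assumed names, not the actual code from `base_provider.py`):

  ```python
  import json
  import os

  def read_auth_cache(cache_file: str) -> dict:
      # Hypothetical helper illustrating the fix: a corrupted cache file is
      # deleted instead of crashing the provider with a JSONDecodeError.
      if os.path.isfile(cache_file):
          try:
              with open(cache_file, "r") as f:
                  return json.load(f)
          except json.JSONDecodeError:
              os.remove(cache_file)  # drop the corrupted file; caller re-authenticates
      return {}
  ```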
- **Misc**
  - Remove obsolete `g4f/image.py`.
  - `g4f/Provider/Cloudflare.py`, `g4f/client/types.py`: minor whitespace and import tidy-ups.
235 lines · No EOL · 7.2 KiB · Python
from __future__ import annotations

import os
from typing import Optional, List
from time import time

from ..image import extract_data_uri
from ..image.copy_images import get_media_dir
from ..client.helper import filter_markdown
from .helper import filter_none
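
# Minimal stand-ins so these response stubs keep working without pydantic installed.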
try:
    from pydantic import BaseModel, field_serializer
except ImportError:
    class BaseModel():
        @classmethod
        def model_construct(cls, **data):
            new = cls()
            for key, value in data.items():
                setattr(new, key, value)
            return new
    class field_serializer():
        def __init__(self, field_name):
            self.field_name = field_name
        def __call__(self, *args, **kwargs):
            return args[0]

class BaseModel(BaseModel):
    @classmethod
    def model_construct(cls, **data):
        if hasattr(super(), "model_construct"):
            return super().model_construct(**data)
        return cls.construct(**data)
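
# The stub classes below mirror the OpenAI chat-completions response schema.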
class TokenDetails(BaseModel):
    cached_tokens: int

class UsageModel(BaseModel):
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int
    prompt_tokens_details: TokenDetails
    completion_tokens_details: TokenDetails

    @classmethod
    def model_construct(cls, prompt_tokens=0, completion_tokens=0, total_tokens=0, prompt_tokens_details=None, completion_tokens_details=None, **kwargs):
        return super().model_construct(
            prompt_tokens=prompt_tokens,
            completion_tokens=completion_tokens,
            total_tokens=total_tokens,
            prompt_tokens_details=TokenDetails.model_construct(**(prompt_tokens_details if prompt_tokens_details else {"cached_tokens": 0})),
            completion_tokens_details=TokenDetails.model_construct(**(completion_tokens_details if completion_tokens_details else {})),
            **kwargs
        )

class ToolFunctionModel(BaseModel):
    name: str
    arguments: str

class ToolCallModel(BaseModel):
    id: str
    type: str
    function: ToolFunctionModel

    @classmethod
    def model_construct(cls, function=None, **kwargs):
        return super().model_construct(
            **kwargs,
            function=ToolFunctionModel.model_construct(**function),
        )

class ChatCompletionChunk(BaseModel):
    id: str
    object: str
    created: int
    model: str
    provider: Optional[str]
    choices: List[ChatCompletionDeltaChoice]
    usage: UsageModel
    conversation: dict

    @classmethod
    def model_construct(
        cls,
        content: str,
        finish_reason: str,
        completion_id: str = None,
        created: int = None,
        usage: UsageModel = None,
        conversation: dict = None
    ):
        return super().model_construct(
            id=f"chatcmpl-{completion_id}" if completion_id else None,
            object="chat.completion.chunk",
            created=created,
            model=None,
            provider=None,
            choices=[ChatCompletionDeltaChoice.model_construct(
                ChatCompletionDelta.model_construct(content),
                finish_reason
            )],
            **filter_none(usage=usage, conversation=conversation)
        )

    @field_serializer('conversation')
    def serialize_conversation(self, conversation: dict):
        if hasattr(conversation, "get_dict"):
            return conversation.get_dict()
        return conversation

class ChatCompletionMessage(BaseModel):
    role: str
    content: str
    tool_calls: list[ToolCallModel] = None

    @classmethod
    def model_construct(cls, content: str, tool_calls: list = None):
        return super().model_construct(role="assistant", content=content, **filter_none(tool_calls=tool_calls))

    @field_serializer('content')
    def serialize_content(self, content: str):
        return str(content)
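
    # Persist the message content: generated media files are moved out of the
    # media directory, data: URIs are decoded to bytes, and anything else is
    # run through filter_markdown() before being written as text.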
    def save(self, filepath: str, allowed_types=None):
        if hasattr(self.content, "data"):
            os.rename(self.content.data.replace("/media", get_media_dir()), filepath)
            return
        if self.content.startswith("data:"):
            with open(filepath, "wb") as f:
                f.write(extract_data_uri(self.content))
            return
        content = filter_markdown(self.content, allowed_types)
        if content is not None:
            with open(filepath, "w") as f:
                f.write(content)
class ChatCompletionChoice(BaseModel):
    index: int
    message: ChatCompletionMessage
    finish_reason: str

    @classmethod
    def model_construct(cls, message: ChatCompletionMessage, finish_reason: str):
        return super().model_construct(index=0, message=message, finish_reason=finish_reason)

class ChatCompletion(BaseModel):
    id: str
    object: str
    created: int
    model: str
    provider: Optional[str]
    choices: list[ChatCompletionChoice]
    usage: UsageModel
    conversation: dict

    @classmethod
    def model_construct(
        cls,
        content: str,
        finish_reason: str,
        completion_id: str = None,
        created: int = None,
        tool_calls: list[ToolCallModel] = None,
        usage: UsageModel = None,
        conversation: dict = None
    ):
        return super().model_construct(
            id=f"chatcmpl-{completion_id}" if completion_id else None,
            object="chat.completion",
            created=created,
            model=None,
            provider=None,
            choices=[ChatCompletionChoice.model_construct(
                ChatCompletionMessage.model_construct(content, tool_calls),
                finish_reason,
            )],
            **filter_none(usage=usage, conversation=conversation)
        )

    @field_serializer('conversation')
    def serialize_conversation(self, conversation: dict):
        if hasattr(conversation, "get_dict"):
            return conversation.get_dict()
        return conversation

class ChatCompletionDelta(BaseModel):
    role: str
    content: str

    @classmethod
    def model_construct(cls, content: Optional[str]):
        return super().model_construct(role="assistant", content=content)

    @field_serializer('content')
    def serialize_content(self, content: str):
        return str(content)

class ChatCompletionDeltaChoice(BaseModel):
    index: int
    delta: ChatCompletionDelta
    finish_reason: Optional[str]

    @classmethod
    def model_construct(cls, delta: ChatCompletionDelta, finish_reason: Optional[str]):
        return super().model_construct(index=0, delta=delta, finish_reason=finish_reason)

class Image(BaseModel):
    url: Optional[str]
    b64_json: Optional[str]
    revised_prompt: Optional[str]

    @classmethod
    def model_construct(cls, url: str = None, b64_json: str = None, revised_prompt: str = None):
        return super().model_construct(**filter_none(
            url=url,
            b64_json=b64_json,
            revised_prompt=revised_prompt
        ))

class ImagesResponse(BaseModel):
    data: List[Image]
    model: str
    provider: str
    created: int

    @classmethod
    def model_construct(cls, data: List[Image], created: int = None, model: str = None, provider: str = None):
        if created is None:
            created = int(time())
        return super().model_construct(
            data=data,
            model=model,
            provider=provider,
            created=created
        )