mirror of
https://github.com/xtekky/gpt4free.git
synced 2025-12-06 02:30:41 -08:00
- Modified g4f/providers/response.py to ensure format_images_markdown returns the result directly without additional flags in the 'format_images_markdown' function.
- Updated g4f/gui/server/api.py to add 'tempfiles' parameter with default empty list to '_create_response_stream' method.
- Changed or added code in API response handling to iterate over 'tempfiles' and attempt to remove each file after response completion, with exception handling (try-except block with logger.exception).
- Adjusted g4f/tools/files.py to fix tempfile creation: corrected the 'suffix' parameter in 'get_tempfile' to use 'suffix' directly instead of splitting.
- In g4f/tools/media.py, changed 'render_part' function to handle 'text' key properly, checking 'part.get("text")' and returning a dictionary with 'type': 'text' and 'text': value, if present.
63 lines
No EOL
2.1 KiB
Python
from __future__ import annotations

import os
import asyncio
from typing import Any

# markitdown is an optional dependency; the provider advertises availability
# through `has_markitdown` (consumed by the class's `working` flag below).
try:
    # Aliased to avoid clashing with the provider class of the same name.
    from ...integration.markitdown import MarkItDown as MaItDo, StreamInfo
    has_markitdown = True
except ImportError:
    has_markitdown = False

from ...typing import AsyncResult, Messages, MediaListType
from ...tools.files import get_tempfile
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
class MarkItDown(AsyncGeneratorProvider, ProviderModelMixin):
    """Convert uploaded media files to Markdown text via the `markitdown` library.

    Yields one Markdown string per media item. Only usable when the optional
    `markitdown` dependency is installed (`working` mirrors its availability).
    """
    working = has_markitdown

    @staticmethod
    async def _read_text_content(result) -> Any:
        """Return `result.text_content`, awaiting it first if markitdown
        produced a coroutine. May return None if the conversion yielded no text.
        """
        text = result.text_content
        if asyncio.iscoroutine(text):
            text = await text
        return text

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        media: MediaListType = None,
        llm_client: Any = None,
        **kwargs
    ) -> AsyncResult:
        """Convert each (file, filename) pair in `media` and yield its Markdown text.

        Args:
            model: Model name forwarded to markitdown as `llm_model`.
            messages: Unused; present for provider-interface compatibility.
            media: Required list of (file, filename) pairs to convert.
            llm_client: Optional LLM client forwarded to markitdown.

        Raises:
            ValueError: If `media` is None.
            ImportError: If the `markitdown` package is not installed.
        """
        if media is None:
            raise ValueError("MarkItDown requires media to be provided.")
        if not has_markitdown:
            raise ImportError("MarkItDown is not installed. Please install it with `pip install markitdown`.")
        md = MaItDo()
        for file, filename in media:
            text = None
            try:
                result = md.convert(
                    file,
                    stream_info=StreamInfo(filename=filename) if filename else None,
                    llm_client=llm_client,
                    llm_model=model
                )
                text = await cls._read_text_content(result)
            except TypeError:
                # markitdown rejected the stream as-is (e.g. an object it cannot
                # read directly); retry with a temporary on-disk copy, and always
                # remove that copy afterwards.
                copyfile = get_tempfile(file, filename)
                try:
                    result = md.convert(
                        copyfile,
                        llm_client=llm_client,
                        llm_model=model
                    )
                    text = await cls._read_text_content(result)
                finally:
                    os.remove(copyfile)
            if text:
                # Guarded: `text` may be None/empty, and calling .split on None
                # would raise AttributeError. Audio conversions prefix a
                # transcript header; keep only the part after it.
                text = text.split("### Audio Transcript:\n")[-1]
            if text:
                yield text