diff --git a/etc/tool/md2html.py b/etc/tool/md2html.py
index eb57f032..988b4282 100644
--- a/etc/tool/md2html.py
+++ b/etc/tool/md2html.py
@@ -44,6 +44,7 @@ License: MIT
 License: MIT
 """
 import os
+import re
 import sys
 import requests
 import time
@@ -411,14 +412,19 @@ def process_single_file_with_output(file_path: Path, template: str, output_path:
 
     # Create output directory if needed
     output_path.parent.mkdir(parents=True, exist_ok=True)
-
-    # Generate final HTML
-    final_html = template.replace("{{ article }}", html).replace("{{ title }}", title)
-
+
+    # If output file exists, read and replace title and content
+    if output_path.is_file():
+        output = output_path.read_text()
+        output = re.sub(r"<title>([\S\s]+?)</title>", f"<title>{title}</title>", output)
+        output = re.sub(r'itemprop="text">[\S\s]+?</div>', f'itemprop="text">{html}</div>', output)
+    else:
+        # If output file does not exist, create it with template
+        output = template.replace("{{ article }}", html).replace("{{ title }}", title)
     # Write output file
     with open(output_path, 'w', encoding='utf-8') as f:
-        f.write(final_html)
-
+        f.write(output)
+
     print(f"✓ Created: {output_path}")
     return True
 
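
The md2html.py change above stops regenerating the whole page when the output already exists and instead patches the `<title>` and the `itemprop="text"` container in place, so anything else in the existing file survives. A minimal, runnable sketch of that strategy; the sample HTML is invented and only assumed to match the real template's structure:

```python
import re

# Invented stand-in for a previously generated page.
existing = (
    '<html><head><title>Old Title</title></head>'
    '<body><div itemprop="text"><p>old article</p></div></body></html>'
)

title, html = "New Title", "<p>new article</p>"

# Same two substitutions as the hunk: swap the title and the article body,
# leave everything else in the file untouched.
updated = re.sub(r"<title>([\S\s]+?)</title>", f"<title>{title}</title>", existing)
updated = re.sub(r'itemprop="text">[\S\s]+?</div>', f'itemprop="text">{html}</div>', updated)
print(updated)
```

Note that the lazy `[\S\s]+?` stops at the first closing tag, so an article body that itself contains `</div>` would be cut short, and `re.sub` treats backslashes in the replacement specially; both are tolerable for generated docs pages but worth knowing.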
diff --git a/g4f/Provider/Kimi.py b/g4f/Provider/Kimi.py
index e98ca79c..da4c9f5b 100644
--- a/g4f/Provider/Kimi.py
+++ b/g4f/Provider/Kimi.py
@@ -5,7 +5,7 @@ from typing import AsyncIterator
 
 from .base_provider import AsyncAuthedProvider, ProviderModelMixin
 from ..providers.helper import get_last_user_message
-from ..requests import StreamSession, see_stream, raise_for_status
+from ..requests import StreamSession, sse_stream, raise_for_status
 from ..providers.response import AuthResult, TitleGeneration, JsonConversation, FinishReason
 from ..typing import AsyncResult, Messages
 
@@ -90,7 +90,7 @@ class Kimi(AsyncAuthedProvider, ProviderModelMixin):
                 json=data
             ) as response:
                 await raise_for_status(response)
-                async for line in see_stream(response):
+                async for line in sse_stream(response):
                     if line.get("event") == "cmpl":
                         yield line.get("text")
                     elif line.get("event") == "rename":
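
`see_stream` was a typo for SSE (server-sent events); the helper is renamed to `sse_stream` here and at every call site below. A self-contained sketch of the dispatch the Kimi loop performs: the `cmpl` and `rename` event names come from the hunk, while the sample SSE lines are invented:

```python
import json

# Invented sample of Kimi's event stream in SSE "data: ..." framing.
raw_lines = [
    b'data: {"event": "cmpl", "text": "Hello "}',
    b'data: {"event": "cmpl", "text": "world"}',
    b'data: {"event": "rename", "text": "Greeting chat"}',
]

for raw in raw_lines:
    if not raw.startswith(b"data:"):
        continue  # SSE comments and keep-alives carry no payload
    line = json.loads(raw[5:].strip())
    if line.get("event") == "cmpl":
        print("text chunk:", line.get("text"))
    elif line.get("event") == "rename":
        print("chat title:", line.get("text"))
```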
diff --git a/g4f/Provider/hf_space/BlackForestLabs_Flux1KontextDev.py b/g4f/Provider/hf_space/BlackForestLabs_Flux1KontextDev.py
index 9a0a58f5..a741ee6b 100644
--- a/g4f/Provider/hf_space/BlackForestLabs_Flux1KontextDev.py
+++ b/g4f/Provider/hf_space/BlackForestLabs_Flux1KontextDev.py
@@ -1,11 +1,10 @@
 from __future__ import annotations
 
-import json
 import uuid
 
 from ...typing import AsyncResult, Messages, MediaListType
-from ...providers.response import ImageResponse, ImagePreview, JsonConversation, Reasoning
-from ...requests import StreamSession, FormData, see_stream
+from ...providers.response import ImageResponse, JsonConversation, Reasoning
+from ...requests import StreamSession, FormData, sse_stream
 from ...tools.media import merge_media
 from ...image import to_bytes, is_accepted_format
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
@@ -131,7 +130,7 @@ class BlackForestLabs_Flux1KontextDev(AsyncGeneratorProvider, ProviderModelMixin
             # GET the event stream to receive updates and results asynchronously
             async with cls.run("get", session, conversation) as event_response:
                 await raise_for_status(event_response)
-                async for chunk in see_stream(event_response.iter_lines()):
+                async for chunk in sse_stream(event_response):
                     if chunk.get("msg") == "process_starts":
                         yield Reasoning(label="Processing started")
                     elif chunk.get("msg") == "progress":
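
Besides the rename, this call site now hands the response object straight to `sse_stream`, which unwraps `.content` or `.iter_lines()` itself (see the g4f/requests hunk at the end). A sketch of the Gradio queue events the loop dispatches on; the `msg` values come from the hunk, the payload shapes are assumptions about Gradio's protocol:

```python
# Invented, already-decoded sample of a Gradio queue event stream.
events = [
    {"msg": "process_starts"},
    {"msg": "progress", "progress_data": [{"index": 7, "length": 28}]},
    {"msg": "process_completed",
     "output": {"data": [[{"image": {"url": "https://example.com/out.webp"}}]]}},
]

for chunk in events:
    if chunk.get("msg") == "process_starts":
        print("reasoning: processing started")
    elif chunk.get("msg") == "progress":
        print("reasoning: step", chunk["progress_data"][0]["index"])
    elif chunk.get("msg") == "process_completed":
        print("image:", chunk["output"]["data"][0][0]["image"]["url"])
```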
diff --git a/g4f/Provider/needs_auth/GeminiPro.py b/g4f/Provider/needs_auth/GeminiPro.py
index 9d93e870..9140fd5f 100644
--- a/g4f/Provider/needs_auth/GeminiPro.py
+++ b/g4f/Provider/needs_auth/GeminiPro.py
@@ -28,14 +28,13 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
     supports_system_message = True
     needs_auth = True
 
-    default_model = "gemini-2.5-flash-preview-04-17"
+    default_model = "gemini-2.5-flash"
     default_vision_model = default_model
     fallback_models = [
         "gemini-2.0-flash",
         "gemini-2.0-flash-lite",
         "gemini-2.0-flash-thinking-exp",
         "gemini-2.5-flash",
-        "gemini-2.5-flash-preview-04-17",
         "gemma-3-1b-it",
         "gemma-3-12b-it",
         "gemma-3-27b-it",
diff --git a/g4f/Provider/needs_auth/PuterJS.py b/g4f/Provider/needs_auth/PuterJS.py
index e4157a10..09930e6d 100644
--- a/g4f/Provider/needs_auth/PuterJS.py
+++ b/g4f/Provider/needs_auth/PuterJS.py
@@ -9,7 +9,7 @@ from ...typing import AsyncResult, Messages, MediaListType
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ...providers.response import FinishReason, Usage, Reasoning, ToolCalls
 from ...tools.media import render_messages
-from ...requests import see_stream, raise_for_status
+from ...requests import sse_stream, raise_for_status
 from ...errors import ResponseError, ModelNotFoundError, MissingAuthError
 from ..helper import format_media_prompt
 from .. import debug
@@ -405,7 +405,7 @@ class PuterJS(AsyncGeneratorProvider, ProviderModelMixin):
                     return
                 elif mime_type.startswith("text/event-stream"):
                     reasoning = False
-                    async for result in see_stream(response.content):
+                    async for result in sse_stream(response.content):
                         if "error" in result:
                             raise ResponseError(result["error"].get("message", result["error"]))
                         choices = result.get("choices", [{}])
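
PuterJS consumes OpenAI-style streaming chunks, and the code following this hunk accumulates text from `choices[0]["delta"]`. A self-contained sketch of that accumulation with invented chunks:

```python
# Invented sample chunks in OpenAI streaming format; a real stream may also
# carry an "error" key, which the provider turns into ResponseError.
chunks = [
    {"choices": [{"delta": {"content": "Hel"}}]},
    {"choices": [{"delta": {"content": "lo!"}}]},
    {"choices": [{"delta": {}, "finish_reason": "stop"}]},
]

text = ""
for result in chunks:
    choices = result.get("choices", [{}])
    delta = choices[0].get("delta", {})
    if delta.get("content"):
        text += delta["content"]
print(text)  # -> Hello!
```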
diff --git a/g4f/Provider/template/OpenaiTemplate.py b/g4f/Provider/template/OpenaiTemplate.py
index 21ac3380..b7480a7f 100644
--- a/g4f/Provider/template/OpenaiTemplate.py
+++ b/g4f/Provider/template/OpenaiTemplate.py
@@ -5,12 +5,12 @@ import requests
 from ..helper import filter_none, format_media_prompt
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin
 from ...typing import Union, AsyncResult, Messages, MediaListType
-from ...requests import StreamSession, StreamResponse, raise_for_status, see_stream
+from ...requests import StreamSession, StreamResponse, raise_for_status, sse_stream
 from ...image import use_aspect_ratio
 from ...image.copy_images import save_response_media
 from ...providers.response import FinishReason, ToolCalls, Usage, ImageResponse, ProviderInfo, AudioResponse, Reasoning
 from ...tools.media import render_messages
-from ...errors import MissingAuthError, ResponseError
+from ...errors import MissingAuthError
 from ... import debug
 
 class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin):
@@ -178,7 +178,7 @@ async def read_response(response: StreamResponse, stream: bool, prompt: str, pro
     reasoning = False
     first = True
     model_returned = False
-    async for data in see_stream(response):
+    async for data in sse_stream(response):
         OpenaiTemplate.raise_error(data)
         model = data.get("model")
         if not model_returned and model:
diff --git a/g4f/requests/__init__.py b/g4f/requests/__init__.py
index d14d0578..9faf8c18 100644
--- a/g4f/requests/__init__.py
+++ b/g4f/requests/__init__.py
@@ -215,7 +215,7 @@ async def get_nodriver(
     BrowserConfig.stop_browser = on_stop
     return browser, on_stop
 
-async def see_stream(iter_lines: Iterator[bytes]) -> AsyncIterator[dict]:
+async def sse_stream(iter_lines: Iterator[bytes]) -> AsyncIterator[dict]:
     if hasattr(iter_lines, "content"):
         iter_lines = iter_lines.content
     elif hasattr(iter_lines, "iter_lines"):
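
For reference, a hypothetical, self-contained reimplementation of the renamed helper as the visible lines suggest it behaves: duck-type the source down to a line iterator, then yield each decoded `data:` payload. Everything past the shown lines, including the `[DONE]` sentinel, is an assumption:

```python
import asyncio
import json
from typing import AsyncIterator

async def sse_stream_sketch(source) -> AsyncIterator[dict]:
    if hasattr(source, "content"):       # aiohttp-style response object
        source = source.content
    elif hasattr(source, "iter_lines"):  # requests/httpx-style response
        source = source.iter_lines()
    async for line in source:
        if not line.startswith(b"data:"):
            continue                     # skip SSE comments and keep-alives
        data = line[5:].strip()
        if data == b"[DONE]":            # assumed OpenAI-style terminator
            break
        yield json.loads(data)

async def demo() -> None:
    async def fake_lines():
        yield b'data: {"event": "cmpl", "text": "hi"}'
        yield b"data: [DONE]"
    async for event in sse_stream_sketch(fake_lines()):
        print(event)  # -> {'event': 'cmpl', 'text': 'hi'}

asyncio.run(demo())
```

This duck-typing is what lets BlackForestLabs_Flux1KontextDev above pass the response object directly instead of calling `.iter_lines()` at the call site.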