Mirror of https://github.com/xtekky/gpt4free.git, synced 2025-12-05 18:20:35 -08:00
refactor: replace see_stream with sse_stream and update md2html output logic
- Replaced all imports and usages of `see_stream` with `sse_stream` across:
  - `g4f/Provider/Kimi.py`
  - `g4f/Provider/hf_space/BlackForestLabs_Flux1KontextDev.py`
  - `g4f/Provider/needs_auth/PuterJS.py`
  - `g4f/Provider/template/OpenaiTemplate.py`
  - `g4f/requests/__init__.py` (renamed the function `see_stream` to `sse_stream`)
- Modified `g4f/Provider/needs_auth/GeminiPro.py`:
  - Updated `default_model` from `gemini-2.5-flash-preview-04-17` to `gemini-2.5-flash`
  - Removed `gemini-2.5-flash-preview-04-17` from `fallback_models`
- Updated `etc/tool/md2html.py`:
  - Added `re` import
  - Changed `process_single_file_with_output` to check whether the output file exists:
    - If it exists, regex substitutions update the `<title>` and `itemprop="text">` content in place instead of rewriting the full template
    - If not, the HTML is generated from the template as before
Parent: f83c92446e
Commit: 499dcc0154

7 changed files with 24 additions and 20 deletions
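
Before the per-file diffs, here is a minimal consumption sketch of the renamed helper, mirroring the call sites touched below (the endpoint and payload are illustrative placeholders, not taken from the diff):

```python
# Minimal sketch of how the renamed helper is consumed, based on the
# call sites in this commit. URL and payload are placeholders.
from g4f.requests import StreamSession, raise_for_status, sse_stream

async def stream_completion(url: str, payload: dict):
    async with StreamSession() as session:
        async with session.post(url, json=payload) as response:
            await raise_for_status(response)
            # each server-sent event arrives as a parsed dict
            async for event in sse_stream(response):
                yield event
```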
`etc/tool/md2html.py`

```diff
@@ -44,6 +44,7 @@ License: MIT
 """
 import os
+import re
 import sys
 import requests
 import time
@@ -411,14 +412,19 @@ def process_single_file_with_output(file_path: Path, template: str, output_path:
 
     # Create output directory if needed
     output_path.parent.mkdir(parents=True, exist_ok=True)
 
-    # Generate final HTML
-    final_html = template.replace("{{ article }}", html).replace("{{ title }}", title)
+    # If output file exists, read and replace title and content
+    if output_path.is_file():
+        output = output_path.read_text()
+        output = re.sub(r"<title>([\S\s]+?)</title>", f"<title>{title}</title>", output)
+        output = re.sub(r'itemprop="text">[\S\s]+?</article>', f'itemprop="text">{html}</article>', output)
+    else:
+        # If output file does not exist, create it with template
+        output = template.replace("{{ article }}", html).replace("{{ title }}", title)
 
     # Write output file
     with open(output_path, 'w', encoding='utf-8') as f:
-        f.write(final_html)
+        f.write(output)
 
     print(f"✓ Created: {output_path}")
     return True
```
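
The non-greedy `[\S\s]+?` in both patterns matches across newlines, so only the `<title>` and the article body are swapped while the rest of the page is preserved. A standalone snippet exercising the same two substitutions on a stub page (the stub markup is illustrative; the patterns are taken verbatim from the diff):

```python
import re

title, html = "New Title", "<p>Updated article</p>"

# Stub of a previously generated page (illustrative markup only).
output = """<html><head><title>Old Title</title></head>
<body><article itemprop="text"><p>Old article</p></article></body></html>"""

# The same two substitutions the new update-in-place branch applies:
output = re.sub(r"<title>([\S\s]+?)</title>", f"<title>{title}</title>", output)
output = re.sub(r'itemprop="text">[\S\s]+?</article>', f'itemprop="text">{html}</article>', output)

print(output)
# <html><head><title>New Title</title></head>
# <body><article itemprop="text"><p>Updated article</p></article></body></html>
```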
`g4f/Provider/Kimi.py`

```diff
@@ -5,7 +5,7 @@ from typing import AsyncIterator
 
 from .base_provider import AsyncAuthedProvider, ProviderModelMixin
 from ..providers.helper import get_last_user_message
-from ..requests import StreamSession, see_stream, raise_for_status
+from ..requests import StreamSession, sse_stream, raise_for_status
 from ..providers.response import AuthResult, TitleGeneration, JsonConversation, FinishReason
 from ..typing import AsyncResult, Messages
@@ -90,7 +90,7 @@ class Kimi(AsyncAuthedProvider, ProviderModelMixin):
                 json=data
             ) as response:
                 await raise_for_status(response)
-                async for line in see_stream(response):
+                async for line in sse_stream(response):
                     if line.get("event") == "cmpl":
                         yield line.get("text")
                     elif line.get("event") == "rename":
```
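
For orientation, the handlers above imply each parsed event is a dict keyed by `event`; a text delta would look roughly like this (shape inferred from the `line.get(...)` calls in the diff, not from any documented Kimi API):

```python
# Hypothetical event shape, inferred from the handlers above:
# "cmpl" events carry a text delta; "rename" events presumably carry
# a new chat title (the hunk is truncated at that branch).
event = {"event": "cmpl", "text": "Hello"}
if event.get("event") == "cmpl":
    print(event.get("text"))  # -> Hello
```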
`g4f/Provider/hf_space/BlackForestLabs_Flux1KontextDev.py`

```diff
@@ -1,11 +1,10 @@
 from __future__ import annotations
 
 import json
 import uuid
 
 from ...typing import AsyncResult, Messages, MediaListType
-from ...providers.response import ImageResponse, ImagePreview, JsonConversation, Reasoning
-from ...requests import StreamSession, FormData, see_stream
+from ...providers.response import ImageResponse, JsonConversation, Reasoning
+from ...requests import StreamSession, FormData, sse_stream
 from ...tools.media import merge_media
 from ...image import to_bytes, is_accepted_format
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
@@ -131,7 +130,7 @@ class BlackForestLabs_Flux1KontextDev(AsyncGeneratorProvider, ProviderModelMixin
             # GET the event stream to receive updates and results asynchronously
             async with cls.run("get", session, conversation) as event_response:
                 await raise_for_status(event_response)
-                async for chunk in see_stream(event_response.iter_lines()):
+                async for chunk in sse_stream(event_response):
                     if chunk.get("msg") == "process_starts":
                         yield Reasoning(label="Processing started")
                     elif chunk.get("msg") == "progress":
```
`g4f/Provider/needs_auth/GeminiPro.py`

```diff
@@ -28,14 +28,13 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
     supports_system_message = True
     needs_auth = True
 
-    default_model = "gemini-2.5-flash-preview-04-17"
+    default_model = "gemini-2.5-flash"
     default_vision_model = default_model
     fallback_models = [
         "gemini-2.0-flash",
         "gemini-2.0-flash-lite",
         "gemini-2.0-flash-thinking-exp",
         "gemini-2.5-flash",
-        "gemini-2.5-flash-preview-04-17",
         "gemma-3-1b-it",
         "gemma-3-12b-it",
         "gemma-3-27b-it",
```
`g4f/Provider/needs_auth/PuterJS.py`

```diff
@@ -9,7 +9,7 @@ from ...typing import AsyncResult, Messages, MediaListType
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ...providers.response import FinishReason, Usage, Reasoning, ToolCalls
 from ...tools.media import render_messages
-from ...requests import see_stream, raise_for_status
+from ...requests import sse_stream, raise_for_status
 from ...errors import ResponseError, ModelNotFoundError, MissingAuthError
 from ..helper import format_media_prompt
 from .. import debug
@@ -405,7 +405,7 @@ class PuterJS(AsyncGeneratorProvider, ProviderModelMixin):
                         return
                     elif mime_type.startswith("text/event-stream"):
                         reasoning = False
-                        async for result in see_stream(response.content):
+                        async for result in sse_stream(response.content):
                             if "error" in result:
                                 raise ResponseError(result["error"].get("message", result["error"]))
                             choices = result.get("choices", [{}])
```
`g4f/Provider/template/OpenaiTemplate.py`

```diff
@@ -5,12 +5,12 @@ import requests
 from ..helper import filter_none, format_media_prompt
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin
 from ...typing import Union, AsyncResult, Messages, MediaListType
-from ...requests import StreamSession, StreamResponse, raise_for_status, see_stream
+from ...requests import StreamSession, StreamResponse, raise_for_status, sse_stream
 from ...image import use_aspect_ratio
 from ...image.copy_images import save_response_media
 from ...providers.response import FinishReason, ToolCalls, Usage, ImageResponse, ProviderInfo, AudioResponse, Reasoning
 from ...tools.media import render_messages
-from ...errors import MissingAuthError, ResponseError
+from ...errors import MissingAuthError
 from ... import debug
 
 class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin):
@@ -178,7 +178,7 @@ async def read_response(response: StreamResponse, stream: bool, prompt: str, pro
     reasoning = False
     first = True
    model_returned = False
-    async for data in see_stream(response):
+    async for data in sse_stream(response):
        OpenaiTemplate.raise_error(data)
        model = data.get("model")
        if not model_returned and model:
```
`g4f/requests/__init__.py`

```diff
@@ -215,7 +215,7 @@ async def get_nodriver(
     BrowserConfig.stop_browser = on_stop
     return browser, on_stop
 
-async def see_stream(iter_lines: Iterator[bytes]) -> AsyncIterator[dict]:
+async def sse_stream(iter_lines: Iterator[bytes]) -> AsyncIterator[dict]:
     if hasattr(iter_lines, "content"):
         iter_lines = iter_lines.content
     elif hasattr(iter_lines, "iter_lines"):
```
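
The hunk cuts off after the input normalization, but those `hasattr` checks are why call sites can now pass the response object directly (compare the BlackForestLabs hunk above, which drops `.iter_lines()`). For reference, a hedged sketch of how a minimal SSE parser along these lines would continue — not the actual remainder of `sse_stream`:

```python
import json
from typing import AsyncIterator

async def sse_stream_sketch(iter_lines) -> AsyncIterator[dict]:
    # Accept a response object, a raw body, or a line iterator,
    # mirroring the hasattr() normalization shown in the diff.
    if hasattr(iter_lines, "content"):
        iter_lines = iter_lines.content
    elif hasattr(iter_lines, "iter_lines"):
        iter_lines = iter_lines.iter_lines()
    async for line in iter_lines:
        # Standard SSE framing: payload lines are prefixed with "data:".
        if line.startswith(b"data:"):
            chunk = line[len(b"data:"):].strip()
            if chunk == b"[DONE]":  # common OpenAI-style stream terminator
                break
            try:
                yield json.loads(chunk)
            except json.JSONDecodeError:
                continue  # skip keep-alives and non-JSON payloads
```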