diff --git a/g4f/Provider/PollinationsAI.py b/g4f/Provider/PollinationsAI.py index d5f2af4f..0584b54d 100644 --- a/g4f/Provider/PollinationsAI.py +++ b/g4f/Provider/PollinationsAI.py @@ -105,9 +105,12 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin): cls.audio_models = { model.get("name"): model.get("voices") for model in models - if "output_modalities" in model and "audio" in model["output_modalities"] + if "output_modalities" in model and "audio" in model["output_modalities"] and model.get("name") != "gemini" } + if cls.default_audio_model in cls.audio_models: + cls.audio_models = {**cls.audio_models, **{voice: {} for voice in cls.audio_models[cls.default_audio_model]}} + # Create a set of unique text models starting with default model unique_text_models = cls.text_models.copy() @@ -120,6 +123,9 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin): if model_name and "input_modalities" in model and "text" in model["input_modalities"]: unique_text_models.append(model_name) + if cls.default_audio_model in cls.audio_models: + unique_text_models.extend([voice for voice in cls.audio_models[cls.default_audio_model]]) + # Convert to list and update text_models cls.text_models = list(dict.fromkeys(unique_text_models)) @@ -207,6 +213,11 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin): "role": "user", "content": prompt }] + if model and model in cls.audio_models[cls.default_audio_model]: + kwargs["audio"] = { + "voice": model, + } + model = cls.default_audio_model async for result in cls._generate_text( model=model, messages=messages, @@ -359,6 +370,6 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin): if finish_reason: yield FinishReason(finish_reason) else: - async for chunk in save_response_media(response, format_image_prompt(messages), [model]): + async for chunk in save_response_media(response, format_image_prompt(messages), [model, extra_parameters.get("audio", {}).get("voice")]): yield chunk 
return diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py index 107e25f4..05abd9ad 100644 --- a/g4f/Provider/needs_auth/OpenaiChat.py +++ b/g4f/Provider/needs_auth/OpenaiChat.py @@ -254,16 +254,25 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin): return messages @classmethod - async def get_generated_image(cls, session: StreamSession, auth_result: AuthResult, element: dict, prompt: str = None) -> ImageResponse: + async def get_generated_image(cls, session: StreamSession, auth_result: AuthResult, element: dict, prompt: str, conversation_id: str) -> ImageResponse: try: prompt = element["metadata"]["dalle"]["prompt"] - file_id = element["asset_pointer"].split("file-service://", 1)[1] + except IndexError: + pass + try: + file_id = element["asset_pointer"] + if "file-service://" in file_id: + file_id = file_id.split("file-service://", 1)[-1] + url = f"{cls.url}/backend-api/files/{file_id}/download" + else: + file_id = file_id.split("sediment://")[-1] + url = f"{cls.url}/backend-api/conversation/{conversation_id}/attachment/{file_id}/download" except TypeError: return except Exception as e: - raise RuntimeError(f"No Image: {e.__class__.__name__}: {e}") + raise RuntimeError(f"No Image: {element} - {e}") try: - async with session.get(f"{cls.url}/backend-api/files/{file_id}/download", headers=auth_result.headers) as response: + async with session.get(url, headers=auth_result.headers) as response: cls._update_request_args(auth_result, session) await raise_for_status(response) download_url = (await response.json())["download_url"] @@ -285,6 +294,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin): media: MediaListType = None, return_conversation: bool = False, web_search: bool = False, + prompt: str = None, **kwargs ) -> AsyncResult: """ @@ -403,10 +413,11 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin): if conversation.conversation_id is not None: data["conversation_id"] = 
conversation.conversation_id debug.log(f"OpenaiChat: Use conversation: {conversation.conversation_id}") + prompt = get_last_user_message(messages) if prompt is None else prompt if action != "continue": data["parent_message_id"] = getattr(conversation, "parent_message_id", conversation.message_id) conversation.parent_message_id = None - messages = messages if conversation.conversation_id is None else [{"role": "user", "content": get_last_user_message(messages)}] + messages = messages if conversation.conversation_id is None else [{"role": "user", "content": prompt}] data["messages"] = cls.create_messages(messages, image_requests, ["search"] if web_search else None) headers = { **cls._headers, @@ -433,7 +444,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin): await raise_for_status(response) buffer = u"" async for line in response.iter_lines(): - async for chunk in cls.iter_messages_line(session, auth_result, line, conversation, sources): + async for chunk in cls.iter_messages_line(session, auth_result, line, conversation, sources, prompt): if isinstance(chunk, str): chunk = chunk.replace("\ue203", "").replace("\ue204", "").replace("\ue206", "") buffer += chunk @@ -475,7 +486,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin): yield FinishReason(conversation.finish_reason) @classmethod - async def iter_messages_line(cls, session: StreamSession, auth_result: AuthResult, line: bytes, fields: Conversation, sources: Sources) -> AsyncIterator: + async def iter_messages_line(cls, session: StreamSession, auth_result: AuthResult, line: bytes, fields: Conversation, sources: Sources, prompt: str) -> AsyncIterator: if not line.startswith(b"data: "): return elif line.startswith(b"data: [DONE]"): @@ -490,7 +501,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin): if line["type"] == "title_generation": yield TitleGeneration(line["title"]) fields.p = line.get("p", fields.p) - if fields.p.startswith("/message/content/thoughts"): + if fields.p is not 
None and fields.p.startswith("/message/content/thoughts"): if fields.p.endswith("/content"): if fields.thoughts_summary: yield Reasoning(token="", status=fields.thoughts_summary) @@ -539,7 +550,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin): generated_images = [] for element in c.get("parts"): if isinstance(element, dict) and element.get("content_type") == "image_asset_pointer": - image = cls.get_generated_image(session, auth_result, element) + image = cls.get_generated_image(session, auth_result, element, prompt, fields.conversation_id) generated_images.append(image) for image_response in await asyncio.gather(*generated_images): if image_response is not None: diff --git a/g4f/gui/server/backend_api.py b/g4f/gui/server/backend_api.py index a8e082ba..15fb7db1 100644 --- a/g4f/gui/server/backend_api.py +++ b/g4f/gui/server/backend_api.py @@ -360,7 +360,7 @@ class Backend_Api(Api): if seed not in ["true", "True", "1"]: random.seed(seed) return redirect(f"/media/{random.choice(match_files)}"), 302 - return redirect(f"/media/{match_files[int(request.args.get('skip', 0))]}", 302) + return redirect(f"/media/{match_files[int(request.args.get('skip') or 0)]}", 302) @app.route('/backend-api/v2/upload_cookies', methods=['POST']) def upload_cookies(): diff --git a/g4f/gui/server/website.py b/g4f/gui/server/website.py index 6b530976..1d056a54 100644 --- a/g4f/gui/server/website.py +++ b/g4f/gui/server/website.py @@ -3,28 +3,36 @@ from __future__ import annotations import os import requests from datetime import datetime -from flask import send_from_directory, redirect +from flask import send_from_directory, redirect, request from ...image.copy_images import secure_filename, get_media_dir, ensure_media_dir from ...errors import VersionNotFoundError from ... 
import version GPT4FREE_URL = "https://gpt4free.github.io" +DIST_DIR = "./gpt4free.github.io/dist" def redirect_home(): return redirect('/chat') -def render(filename = "chat"): +def render(filename = "chat", add_origion = True): + if request.args.get("live"): + add_origion = False + if os.path.exists(DIST_DIR): + path = os.path.abspath(os.path.join(os.path.dirname(DIST_DIR), (filename + ("" if "." in filename else ".html")))) + print( f"Debug mode: {path}") + return send_from_directory(os.path.dirname(path), os.path.basename(path)) try: latest_version = version.utils.latest_version except VersionNotFoundError: latest_version = version.utils.current_version today = datetime.today().strftime('%Y-%m-%d') - cache_file = os.path.join(get_media_dir(), f"{today}.{secure_filename(filename)}.{version.utils.current_version}-{latest_version}.html") + cache_file = os.path.join(get_media_dir(), f"{today}.{secure_filename(filename)}.{version.utils.current_version}-{latest_version}{'.live' if add_origion else ''}.html") if not os.path.exists(cache_file): ensure_media_dir() html = requests.get(f"{GPT4FREE_URL}/{filename}.html").text - html = html.replace("../dist/", f"{GPT4FREE_URL}/dist/") - html = html.replace('"dist/', f"\"{GPT4FREE_URL}/dist/") + if add_origion: + html = html.replace("../dist/", f"dist/") + html = html.replace("\"dist/", f"\"{GPT4FREE_URL}/dist/") with open(cache_file, 'w', encoding='utf-8') as f: f.write(html) return send_from_directory(os.path.abspath(get_media_dir()), os.path.basename(cache_file)) @@ -57,6 +65,10 @@ class Website: 'function': redirect_home, 'methods': ['GET', 'POST'] }, + '/dist/': { + 'function': self._dist, + 'methods': ['GET'] + }, } def _index(self, filename = "index"): @@ -64,10 +76,13 @@ class Website: def _qrcode(self, filename = "qrcode"): return render(filename) - + def _background(self, filename = "background"): return render(filename) - + def _chat(self, filename = "chat"): filename = "chat/index" if filename == 'chat' else 
secure_filename(filename) return render(filename) + + def _dist(self, name: str): + return send_from_directory(os.path.abspath(DIST_DIR), name) \ No newline at end of file