diff --git a/etc/tool/commit.py b/etc/tool/commit.py index 2bf024b8..17bb7a15 100755 --- a/etc/tool/commit.py +++ b/etc/tool/commit.py @@ -100,6 +100,7 @@ from typing import Optional, Any, List from g4f.client import Client from g4f.models import ModelUtils +from g4f.cookies import read_cookie_files from g4f import debug debug.logging = True @@ -295,6 +296,8 @@ def generate_commit_message(diff_text: str, model: str = DEFAULT_MODEL, max_retr if not diff_text or diff_text.strip() == "": return "No changes staged for commit" + read_cookie_files() # Load cookies for g4f client + # Filter sensitive data filtered_diff = filter_sensitive_data(diff_text) diff --git a/g4f/Provider/Copilot.py b/g4f/Provider/Copilot.py index 990ae7e0..b28e666b 100644 --- a/g4f/Provider/Copilot.py +++ b/g4f/Provider/Copilot.py @@ -46,11 +46,12 @@ class Copilot(AsyncAuthedProvider, ProviderModelMixin): active_by_default = True default_model = "Copilot" - models = [default_model, "Think Deeper"] + models = [default_model, "Think Deeper", "Smart (GPT-5)"] model_aliases = { "o1": "Think Deeper", "gpt-4": default_model, "gpt-4o": default_model, + "gpt-5": "Smart (GPT-5)", } websocket_url = "wss://copilot.microsoft.com/c/api/chat?api-version=2" @@ -172,6 +173,12 @@ class Copilot(AsyncAuthedProvider, ProviderModelMixin): uploaded_images.append({"type":"image", "url": media}) wss = await session.ws_connect(cls.websocket_url, timeout=3) + if "Think" in model: + mode = "reasoning" + elif model.startswith("gpt-5") or "GPT-5" in model: + mode = "smart" + else: + mode = "chat" await wss.send(json.dumps({ "event": "send", "conversationId": conversation_id, @@ -179,7 +186,7 @@ class Copilot(AsyncAuthedProvider, ProviderModelMixin): "type": "text", "text": prompt, }], - "mode": "reasoning" if "Think" in model else "chat", + "mode": mode, }).encode(), CurlWsFlag.TEXT) done = False diff --git a/g4f/Provider/EasyChat.py b/g4f/Provider/EasyChat.py index 47708745..5114c05b 100644 --- a/g4f/Provider/EasyChat.py +++ 
b/g4f/Provider/EasyChat.py @@ -29,6 +29,15 @@ class EasyChat(OpenaiTemplate, AuthFileMixin): captchaToken: dict = None + @classmethod + def get_models(cls, **kwargs) -> list[str]: + if not cls.models: + models = super().get_models(**kwargs) + models = {m.replace("-free", ""): m for m in models if m.endswith("-free")} + cls.model_aliases.update(models) + cls.models = list(models) + return cls.models + @classmethod async def create_async_generator( cls, @@ -38,6 +47,7 @@ class EasyChat(OpenaiTemplate, AuthFileMixin): extra_body: dict = None, **kwargs ) -> AsyncResult: + model = cls.get_model(model) args = None auth_file = cls.get_cache_file() if auth_file.exists(): @@ -47,6 +57,7 @@ class EasyChat(OpenaiTemplate, AuthFileMixin): if cls.captchaToken: debug.log("EasyChat: Using cached captchaToken.") async def callback(page): + cls.captchaToken = None def on_request(event: nodriver.cdp.network.RequestWillBeSent, page=None): if event.request.url != cls.api_endpoint: return @@ -81,30 +92,33 @@ class EasyChat(OpenaiTemplate, AuthFileMixin): if cls.captchaToken: break await asyncio.sleep(3) - if not args: - args = await get_args_from_nodriver(cls.url, proxy=proxy, callback=callback) - if extra_body is None: - extra_body = {} - extra_body.setdefault("captchaToken", cls.captchaToken) - try: - last_chunk = None - async for chunk in super().create_async_generator( - model=model, - messages=messages, - extra_body=extra_body, - **args - ): - # Remove provided by - if last_chunk == "\n" and chunk == "\n": - break - last_chunk = chunk - yield chunk - except Exception as e: - if "CLEAR-CAPTCHA-TOKEN" in str(e): - auth_file.unlink(missing_ok=True) - cls.captchaToken = None - debug.log("EasyChat: Captcha token cleared, please try again.") - raise e + for _ in range(2): + if not args: + args = await get_args_from_nodriver(cls.url, proxy=proxy, callback=callback) + if extra_body is None: + extra_body = {} + extra_body["captchaToken"] = cls.captchaToken + try: + last_chunk = 
None + async for chunk in super().create_async_generator( + model=model, + messages=messages, + extra_body=extra_body, + **args + ): + # Remove provided by + if last_chunk == "\n" and chunk == "\n": + break + last_chunk = chunk + yield chunk + except Exception as e: + if "CLEAR-CAPTCHA-TOKEN" in str(e): + debug.log("EasyChat: Captcha token expired, clearing auth file.") + auth_file.unlink(missing_ok=True) + args = None + continue + raise e + break with auth_file.open("w") as f: json.dump({**args, "captchaToken": cls.captchaToken}, f) - \ No newline at end of file + \ No newline at end of file diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py index 410dbb9e..aa53382a 100644 --- a/g4f/Provider/needs_auth/OpenaiChat.py +++ b/g4f/Provider/needs_auth/OpenaiChat.py @@ -362,6 +362,10 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin): model = cls.get_model(model) except ModelNotFoundError: pass + image_model = False + if model in cls.image_models: + image_model = True + model = cls.default_model if conversation is None: conversation = Conversation(None, str(uuid.uuid4()), getattr(auth_result, "cookies", {}).get("oai-did")) else: @@ -378,15 +382,17 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin): if cls._api_key is not None: data = { "action": "next", - "fork_from_shared_post":False, + "fork_from_shared_post": False, "parent_message_id": conversation.message_id, "model": model, - "timezone_offset_min":-120, - "timezone":"Europe/Berlin", - "conversation_mode":{"kind":"primary_assistant"}, - "system_hints":[], - "supports_buffering":True, - "supported_encodings":["v1"] + "timezone_offset_min": -120, + "timezone": "Europe/Berlin", + "conversation_mode": {"kind": "primary_assistant"}, + "system_hints": [ + "picture_v2" + ] if image_model else [], + "supports_buffering": True, + "supported_encodings": ["v1"] } async with session.post( prepare_url, @@ -835,16 +841,14 @@ class OpenaiChat(AsyncAuthedProvider, 
ProviderModelMixin): page.add_handler(nodriver.cdp.network.RequestWillBeSent, on_request) await page.reload() user_agent = await page.evaluate("window.navigator.userAgent", return_by_value=True) - textarea = None - while not textarea: - try: - textarea = await page.evaluate("document.getElementById('prompt-textarea')?.id") - except: - pass - await asyncio.sleep(1) - while not await page.evaluate("document.querySelector('[data-testid=\"send-button\"]')?.type"): - await asyncio.sleep(1) - await page.evaluate("document.querySelector('[data-testid=\"send-button\"]').click()") + if cls.needs_auth: + await page.select('[data-testid="accounts-profile-button"]', 300) + textarea = await page.select("#prompt-textarea", 300) + await textarea.send_keys("Hello") + await asyncio.sleep(1) + button = await page.select("[data-testid=\"send-button\"]") + if button: + await button.click() while True: body = await page.evaluate("JSON.stringify(window.__remixContext)", return_by_value=True) if hasattr(body, "value"): diff --git a/g4f/Provider/openai/models.py b/g4f/Provider/openai/models.py index 2f3baabc..d7aa8a78 100644 --- a/g4f/Provider/openai/models.py +++ b/g4f/Provider/openai/models.py @@ -1,11 +1,12 @@ default_model = "auto" -default_image_model = "dall-e-3" +default_image_model = "gpt-image" image_models = [default_image_model] -text_models = [default_model, "gpt-4", "gpt-4.1", "gpt-4.1-mini", "gpt-4.5", "gpt-4o", "gpt-4o-mini", "o1", "o1-mini", "o3-mini", "o3-mini-high", "o4-mini", "o4-mini-high"] +text_models = [default_model, "gpt-5", "gpt-5-thinking", "gpt-4", "gpt-4.1", "gpt-4.1-mini", "gpt-4.5", "gpt-4o", "gpt-4o-mini", "o1", "o1-mini", "o3-mini", "o3-mini-high", "o4-mini", "o4-mini-high"] vision_models = text_models models = text_models + image_models model_aliases = { "gpt-4.1": "gpt-4-1", "gpt-4.1-mini": "gpt-4-1-mini", "gpt-4.5": "gpt-4-5", + "dall-e-3": default_image_model, } \ No newline at end of file