feat: add GPT-5 support and improve captcha handling

- **g4f/Provider/Copilot.py**
  - Added `"Smart (GPT-5)"` to `models` list.
  - Added `"gpt-5"` alias mapping to `"GPT-5"` in `model_aliases`.
  - Introduced `mode` selection logic to support `"smart"` mode for GPT-5 models alongside existing `"reasoning"` and `"chat"` modes.
- **g4f/Provider/EasyChat.py**
  - Added a `get_models` class method that maps upstream `-free` models to short aliases and stores the short names in `cls.models` (illustrated after the EasyChat diff below).
  - Resolved the model via `cls.get_model(model)` at the start of `create_async_generator`.
  - Reset `cls.captchaToken` to `None` at the beginning of `callback`.
  - Wrapped the main generator logic in a loop that retries once if a `CLEAR-CAPTCHA-TOKEN` error occurs, clearing the auth file and resetting the request args before retrying.
- **g4f/Provider/needs_auth/OpenaiChat.py**
  - Added handling for image models: set an `image_model` flag when the requested model is an image model, send `default_model` in the request instead, and include `"picture_v2"` in `system_hints` in that case (a reduced sketch follows the OpenaiChat diff below).
  - Replaced the textarea/button polling in the page-load sequence with `nodriver` `select` calls, typed "Hello" into the textarea before clicking the send button, and added selection of the profile button when the class needs auth.
- **g4f/Provider/openai/models.py**
  - Changed `default_image_model` from `"dall-e-3"` to `"gpt-image"`.
  - Added `"gpt-5"` and `"gpt-5-thinking"` to `text_models` list.
  - Added alias mapping for `"dall-e-3"` pointing to new `default_image_model`.
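
Taken together, the Copilot and `openai/models.py` changes make the new model reachable through the public client. A minimal usage sketch, assuming the standard `g4f` client API; the Copilot provider may additionally require authenticated cookies/HAR depending on your setup:

```python
from g4f.client import Client
from g4f.Provider import Copilot

# "gpt-5" resolves through the new model_aliases entry added in this commit.
client = Client(provider=Copilot)
response = client.chat.completions.create(
    model="gpt-5",
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)
```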
hlohaus 2025-08-09 01:33:56 +02:00
parent ba1f9bb3c3
commit f3923f8e50
5 changed files with 75 additions and 46 deletions

View file

@@ -100,6 +100,7 @@ from typing import Optional, Any, List
from g4f.client import Client
from g4f.models import ModelUtils
from g4f.cookies import read_cookie_files
from g4f import debug
debug.logging = True
@@ -295,6 +296,8 @@ def generate_commit_message(diff_text: str, model: str = DEFAULT_MODEL, max_retr
if not diff_text or diff_text.strip() == "":
return "No changes staged for commit"
read_cookie_files() # Load cookies for g4f client
# Filter sensitive data
filtered_diff = filter_sensitive_data(diff_text)

View file: g4f/Provider/Copilot.py

@@ -46,11 +46,12 @@ class Copilot(AsyncAuthedProvider, ProviderModelMixin):
active_by_default = True
default_model = "Copilot"
models = [default_model, "Think Deeper"]
models = [default_model, "Think Deeper", "Smart (GPT-5)"]
model_aliases = {
"o1": "Think Deeper",
"gpt-4": default_model,
"gpt-4o": default_model,
"gpt-5": "GPT-5",
}
websocket_url = "wss://copilot.microsoft.com/c/api/chat?api-version=2"
@@ -172,6 +173,12 @@ class Copilot(AsyncAuthedProvider, ProviderModelMixin):
uploaded_images.append({"type":"image", "url": media})
wss = await session.ws_connect(cls.websocket_url, timeout=3)
if "Think" in model:
mode = "reasoning"
elif model.startswith("gpt-5") or "GPT-5" in model:
mode = "smart"
else:
mode = "chat"
await wss.send(json.dumps({
"event": "send",
"conversationId": conversation_id,
@@ -179,7 +186,7 @@ class Copilot(AsyncAuthedProvider, ProviderModelMixin):
"type": "text",
"text": prompt,
}],
"mode": "reasoning" if "Think" in model else "chat",
"mode": mode,
}).encode(), CurlWsFlag.TEXT)
done = False
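
The mode selection added above, pulled out as a standalone helper for clarity — the function name is illustrative only; the strings are the ones used in the diff:

```python
def pick_mode(model: str) -> str:
    # mirrors the branch added in Copilot's generator
    if "Think" in model:
        return "reasoning"
    if model.startswith("gpt-5") or "GPT-5" in model:
        return "smart"
    return "chat"

assert pick_mode("Think Deeper") == "reasoning"
assert pick_mode("Smart (GPT-5)") == "smart"
assert pick_mode("Copilot") == "chat"
```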

View file: g4f/Provider/EasyChat.py

@@ -29,6 +29,15 @@ class EasyChat(OpenaiTemplate, AuthFileMixin):
captchaToken: dict = None
@classmethod
def get_models(cls, **kwargs) -> list[str]:
if not cls.models:
models = super().get_models(**kwargs)
models = {m.replace("-free", ""): m for m in models if m.endswith("-free")}
cls.model_aliases.update(models)
cls.models = list(models)
return cls.models
@classmethod
async def create_async_generator(
cls,
@@ -38,6 +47,7 @@ class EasyChat(OpenaiTemplate, AuthFileMixin):
extra_body: dict = None,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
args = None
auth_file = cls.get_cache_file()
if auth_file.exists():
@@ -47,6 +57,7 @@ class EasyChat(OpenaiTemplate, AuthFileMixin):
if cls.captchaToken:
debug.log("EasyChat: Using cached captchaToken.")
async def callback(page):
cls.captchaToken = None
def on_request(event: nodriver.cdp.network.RequestWillBeSent, page=None):
if event.request.url != cls.api_endpoint:
return
@@ -81,30 +92,33 @@ class EasyChat(OpenaiTemplate, AuthFileMixin):
if cls.captchaToken:
break
await asyncio.sleep(3)
if not args:
args = await get_args_from_nodriver(cls.url, proxy=proxy, callback=callback)
if extra_body is None:
extra_body = {}
extra_body.setdefault("captchaToken", cls.captchaToken)
try:
last_chunk = None
async for chunk in super().create_async_generator(
model=model,
messages=messages,
extra_body=extra_body,
**args
):
# Remove provided by
if last_chunk == "\n" and chunk == "\n":
break
last_chunk = chunk
yield chunk
except Exception as e:
if "CLEAR-CAPTCHA-TOKEN" in str(e):
auth_file.unlink(missing_ok=True)
cls.captchaToken = None
debug.log("EasyChat: Captcha token cleared, please try again.")
raise e
for _ in range(2):
if not args:
args = await get_args_from_nodriver(cls.url, proxy=proxy, callback=callback)
if extra_body is None:
extra_body = {}
extra_body.setdefault("captchaToken", cls.captchaToken)
try:
last_chunk = None
async for chunk in super().create_async_generator(
model=model,
messages=messages,
extra_body=extra_body,
**args
):
# Remove provided by
if last_chunk == "\n" and chunk == "\n":
break
last_chunk = chunk
yield chunk
except Exception as e:
if "CLEAR-CAPTCHA-TOKEN" in str(e):
debug.log("EasyChat: Captcha token expired, clearing auth file.")
auth_file.unlink(missing_ok=True)
args = None
continue
raise e
break
with auth_file.open("w") as f:
json.dump({**args, "captchaToken": cls.captchaToken}, f)
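
To make the `-free` mapping in the new `get_models` concrete, a small illustrative check — the upstream model names below are hypothetical, not taken from the provider:

```python
upstream = ["gpt-4o-free", "gpt-4o-mini-free", "deepseek-r1"]  # hypothetical upstream list
aliases = {m.replace("-free", ""): m for m in upstream if m.endswith("-free")}

assert aliases == {"gpt-4o": "gpt-4o-free", "gpt-4o-mini": "gpt-4o-mini-free"}
assert list(aliases) == ["gpt-4o", "gpt-4o-mini"]  # what ends up in cls.models
```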

View file: g4f/Provider/needs_auth/OpenaiChat.py

@@ -362,6 +362,10 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
model = cls.get_model(model)
except ModelNotFoundError:
pass
image_model = False
if model in cls.image_models:
image_model = True
model = cls.default_model
if conversation is None:
conversation = Conversation(None, str(uuid.uuid4()), getattr(auth_result, "cookies", {}).get("oai-did"))
else:
@@ -378,15 +382,17 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
if cls._api_key is not None:
data = {
"action": "next",
"fork_from_shared_post":False,
"fork_from_shared_post": False,
"parent_message_id": conversation.message_id,
"model": model,
"timezone_offset_min":-120,
"timezone":"Europe/Berlin",
"conversation_mode":{"kind":"primary_assistant"},
"system_hints":[],
"supports_buffering":True,
"supported_encodings":["v1"]
"timezone_offset_min": -120,
"timezone": "Europe/Berlin",
"conversation_mode": {"kind": "primary_assistant"},
"system_hints": [
"picture_v2"
] if image_model else [],
"supports_buffering": True,
"supported_encodings": ["v1"]
}
async with session.post(
prepare_url,
@@ -835,16 +841,14 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
page.add_handler(nodriver.cdp.network.RequestWillBeSent, on_request)
await page.reload()
user_agent = await page.evaluate("window.navigator.userAgent", return_by_value=True)
textarea = None
while not textarea:
try:
textarea = await page.evaluate("document.getElementById('prompt-textarea')?.id")
except:
pass
await asyncio.sleep(1)
while not await page.evaluate("document.querySelector('[data-testid=\"send-button\"]')?.type"):
await asyncio.sleep(1)
await page.evaluate("document.querySelector('[data-testid=\"send-button\"]').click()")
if cls.needs_auth:
await page.select('[data-testid="accounts-profile-button"]', 300)
textarea = await page.select("#prompt-textarea", 300)
await textarea.send_keys("Hello")
await asyncio.sleep(1)
button = await page.select("[data-testid=\"send-button\"]")
if button:
await button.click()
while True:
body = await page.evaluate("JSON.stringify(window.__remixContext)", return_by_value=True)
if hasattr(body, "value"):
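
The image-model handling added at the top of this file reduces to a small mapping: an image model is swapped for the default text model and signalled via `system_hints`. A reduced sketch — the standalone function is illustrative; the concrete values mirror `g4f/Provider/openai/models.py` after this commit:

```python
def resolve_image_request(model: str, image_models: list[str], default_model: str):
    # image models are sent as the default text model plus a "picture_v2" hint
    image_model = model in image_models
    if image_model:
        model = default_model
    return model, (["picture_v2"] if image_model else [])

assert resolve_image_request("gpt-image", ["gpt-image"], "auto") == ("auto", ["picture_v2"])
assert resolve_image_request("gpt-4o", ["gpt-image"], "auto") == ("gpt-4o", [])
```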

View file: g4f/Provider/openai/models.py

@@ -1,11 +1,12 @@
default_model = "auto"
default_image_model = "dall-e-3"
default_image_model = "gpt-image"
image_models = [default_image_model]
text_models = [default_model, "gpt-4", "gpt-4.1", "gpt-4.1-mini", "gpt-4.5", "gpt-4o", "gpt-4o-mini", "o1", "o1-mini", "o3-mini", "o3-mini-high", "o4-mini", "o4-mini-high"]
text_models = [default_model, "gpt-5", "gpt-5-thinking", "gpt-4", "gpt-4.1", "gpt-4.1-mini", "gpt-4.5", "gpt-4o", "gpt-4o-mini", "o1", "o1-mini", "o3-mini", "o3-mini-high", "o4-mini", "o4-mini-high"]
vision_models = text_models
models = text_models + image_models
model_aliases = {
"gpt-4.1": "gpt-4-1",
"gpt-4.1-mini": "gpt-4-1-mini",
"gpt-4.5": "gpt-4-5",
"dall-e-3": default_image_model,
}