gpt4free/g4f/Provider/needs_auth/Grok.py
kqlio67 f4cd4890d3
feat: enhance provider support and add PuterJS provider (#2999)
* feat: enhance provider support and add PuterJS provider

- Add new PuterJS provider with extensive model support and authentication handling
- Add three new OIVSCode providers (OIVSCodeSer2, OIVSCodeSer5, OIVSCodeSer0501)
- Fix Blackbox provider with improved model handling and session generation
- Update model aliases across multiple providers for consistency
- Mark DDG provider as not working
- Move TypeGPT to not_working directory
- Fix model name formatting in DeepInfraChat and other providers (qwen3 → qwen-3)
- Add get_model method to LambdaChat and other providers for better model alias handling
- Add ModelNotFoundError import to providers that need it
- Update model definitions in models.py with new providers and aliases
- Fix client/stubs.py to allow arbitrary types in ChatCompletionMessage

* Fix conflicts in g4f/Provider/needs_auth/Grok.py

* fix: update Blackbox provider default settings

- Changed  parameter to use only the passed value without fallback to 1024
- Set  to  instead of  in request payload

* feat: add WeWordle provider with gpt-4 support

- Created new WeWordle.py provider file implementing AsyncGeneratorProvider
- Added WeWordle class with API endpoint at wewordle.org/gptapi/v1/web/turbo
- Set provider properties: working=True, needs_auth=False, supports_stream=True
- Configured default_model as 'gpt-4' with retry mechanism for API requests
- Implemented URL sanitization logic to handle malformed URLs
- Added response parsing for different JSON response formats
- Added WeWordle to Provider/__init__.py imports
- Added WeWordle to default model providers list in models.py
- Added WeWordle to gpt_4 best_provider list in models.py

* feat: add DocsBot provider with GPT-4o support

- Added new DocsBot.py provider file implementing AsyncGeneratorProvider and ProviderModelMixin
- Created Conversation class extending JsonConversation to track conversation state
- Implemented create_async_generator method with support for:
  - Streaming and non-streaming responses
  - System messages
  - Message history
  - Image handling via data URIs
  - Conversation tracking
- Added DocsBot to Provider/__init__.py imports
- Added DocsBot to default and default_vision model providers in models.py
- Added DocsBot as a provider for gpt_4o model in models.py
- Set default_model and vision support to 'gpt-4o'
- Implemented API endpoint communication with docsbot.ai

* feat: add OpenAIFM provider and update audio model references

- Added new OpenAIFM provider in g4f/Provider/audio/OpenAIFM.py for text-to-speech functionality
- Updated PollinationsAI.py to rename "gpt-4o-audio" to "gpt-4o-mini-audio"
- Added OpenAIFM to audio provider imports in g4f/Provider/audio/__init__.py
- Modified save_response_media() in g4f/image/copy_images.py to handle source_url separately from media_url
- Added new gpt_4o_mini_tts AudioModel in g4f/models.py with OpenAIFM as best provider
- Updated ModelUtils dictionary in models.py to include both gpt_4o_mini_audio and gpt_4o_mini_tts

* fix: improve PuterJS provider and add Gemini to best providers

- Changed client_id generation in PuterJS from time-based to UUID format
- Fixed duplicate json import in PuterJS.py
- Added uuid module import in PuterJS.py
- Changed host header from "api.puter.com" to "puter.com"
- Modified error handling to use Exception instead of RateLimitError
- Added Gemini to best_provider list for gemini-2.5-flash model
- Added Gemini to best_provider list for gemini-2.5-pro model
- Fixed missing newline at end of Gemini.py file

---------

Co-authored-by: kqlio67 <kqlio67.noreply.github.com>
2025-05-19 11:44:15 +02:00

from __future__ import annotations

import os
import json
import time
import asyncio
from typing import Dict, Any, AsyncIterator
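
# nodriver is optional at import time; it is only required for the
# browser-based login flow in Grok.on_auth_async below.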
try:
    import nodriver
except ImportError:
    pass

from ...typing import Messages, AsyncResult
from ...providers.response import JsonConversation, Reasoning, ImagePreview, ImageResponse, TitleGeneration, AuthResult, RequestLogin
from ...requests import StreamSession, get_nodriver, DEFAULT_HEADERS
from ...requests.raise_for_status import raise_for_status
from ..base_provider import AsyncAuthedProvider, ProviderModelMixin
from ..helper import format_prompt, get_last_user_message

class Conversation(JsonConversation):
    def __init__(self,
        conversation_id: str
    ) -> None:
        self.conversation_id = conversation_id

class Grok(AsyncAuthedProvider, ProviderModelMixin):
    label = "Grok AI"
    url = "https://grok.com"
    cookie_domain = ".grok.com"
    assets_url = "https://assets.grok.com"
    conversation_url = "https://grok.com/rest/app-chat/conversations"

    needs_auth = True
    working = True

    default_model = "grok-3"
    models = [default_model, "grok-3-thinking", "grok-2"]
    model_aliases = {"grok-3-r1": "grok-3-thinking"}
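
    # Authenticate by driving a real browser with nodriver: load grok.com, wait for the
    # web app to call the conversation endpoint, then reuse that request's headers and
    # the browser cookies for subsequent API calls.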
    @classmethod
    async def on_auth_async(cls, proxy: str = None, **kwargs) -> AsyncIterator:
        auth_result = AuthResult(headers=DEFAULT_HEADERS, impersonate="chrome")
        auth_result.headers["referer"] = cls.url + "/"
        browser, stop_browser = await get_nodriver(proxy=proxy)
        yield RequestLogin(cls.__name__, os.environ.get("G4F_LOGIN_URL") or "")
        try:
            page = browser.main_tab
            has_headers = False

            def on_request(event: nodriver.cdp.network.RequestWillBeSent, page=None):
                nonlocal has_headers
                if event.request.url.startswith(cls.conversation_url + "/new"):
                    for key, value in event.request.headers.items():
                        auth_result.headers[key.lower()] = value
                    has_headers = True

            await page.send(nodriver.cdp.network.enable())
            page.add_handler(nodriver.cdp.network.RequestWillBeSent, on_request)
            page = await browser.get(cls.url)
            auth_result.headers["user-agent"] = await page.evaluate("window.navigator.userAgent", return_by_value=True)
            while not has_headers:
                await asyncio.sleep(1)
            auth_result.cookies = {}
            for c in await page.send(nodriver.cdp.network.get_cookies([cls.url])):
                auth_result.cookies[c.name] = c.value
            await page.close()
        finally:
            stop_browser()
        yield auth_result
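
    # Build the request body for a chat message; "grok-2" is sent as "grok-latest",
    # and model names ending in "-thinking" or "-r1" enable reasoning mode.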
    @classmethod
    async def _prepare_payload(cls, model: str, message: str) -> Dict[str, Any]:
        return {
            "temporary": True,
            "modelName": "grok-latest" if model == "grok-2" else "grok-3",
            "message": message,
            "fileAttachments": [],
            "imageAttachments": [],
            "disableSearch": False,
            "enableImageGeneration": True,
            "returnImageBytes": False,
            "returnRawGrokInXaiRequest": False,
            "enableImageStreaming": True,
            "imageGenerationCount": 2,
            "forceConcise": False,
            "toolOverrides": {},
            "enableSideBySide": True,
            "isPreset": False,
            "sendFinalMetadata": True,
            "customInstructions": "",
            "deepsearchPreset": "",
            "isReasoning": model.endswith("-thinking") or model.endswith("-r1"),
        }
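
    # Post the prompt and stream the reply: the endpoint emits newline-delimited JSON
    # that can carry text tokens, reasoning status, streamed image previews, final
    # generated image URLs, and a generated conversation title.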
    @classmethod
    async def create_authed(
        cls,
        model: str,
        messages: Messages,
        auth_result: AuthResult,
        conversation: Conversation = None,
        **kwargs
    ) -> AsyncResult:
        conversation_id = None if conversation is None else conversation.conversation_id
        prompt = format_prompt(messages) if conversation_id is None else get_last_user_message(messages)
        async with StreamSession(
            **auth_result.get_dict()
        ) as session:
            payload = await cls._prepare_payload(model, prompt)
            if conversation_id is None:
                url = f"{cls.conversation_url}/new"
            else:
                url = f"{cls.conversation_url}/{conversation_id}/responses"
            async with session.post(url, json=payload) as response:
                await raise_for_status(response)
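                # Each line of the stream is a standalone JSON object; lines that
                # fail to parse are skipped instead of aborting the stream.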
                thinking_duration = None
                async for line in response.iter_lines():
                    if line:
                        try:
                            json_data = json.loads(line)
                            result = json_data.get("result", {})
                            if conversation_id is None:
                                conversation_id = result.get("conversation", {}).get("conversationId")
                            response_data = result.get("response", {})
                            image = response_data.get("streamingImageGenerationResponse", None)
                            if image is not None:
                                yield ImagePreview(f'{cls.assets_url}/{image["imageUrl"]}', "", {"cookies": auth_result.cookies, "headers": auth_result.headers})
                            token = response_data.get("token", result.get("token"))
                            is_thinking = response_data.get("isThinking", result.get("isThinking"))
                            if token:
                                if is_thinking:
                                    if thinking_duration is None:
                                        thinking_duration = time.time()
                                        yield Reasoning(status="🤔 Is thinking...")
                                    yield Reasoning(token)
                                else:
                                    if thinking_duration is not None:
                                        thinking_duration = time.time() - thinking_duration
                                        status = f"Thought for {thinking_duration:.2f}s" if thinking_duration > 1 else "Finished"
                                        thinking_duration = None
                                        yield Reasoning(status=status)
                                    yield token
                            generated_images = response_data.get("modelResponse", {}).get("generatedImageUrls", None)
                            if generated_images:
                                yield ImageResponse([f'{cls.assets_url}/{image}' for image in generated_images], "", {"cookies": auth_result.cookies, "headers": auth_result.headers})
                            title = result.get("title", {}).get("newTitle", "")
                            if title:
                                yield TitleGeneration(title)
                        except json.JSONDecodeError:
                            continue
                # if conversation_id is not None:
                #     yield Conversation(conversation_id)