Fix links in Readme, update OpenaiChat provider

Heiner Lohaus 2024-12-07 05:06:24 +01:00
parent fc4fe21199
commit 7f8e5181f2
5 changed files with 49 additions and 36 deletions

View file

@@ -5,4 +5,8 @@ from .OpenaiChat import OpenaiChat
class OpenaiAccount(OpenaiChat):
needs_auth = True
parent = "OpenaiChat"
image_models = ["dall-e"]
image_models = ["dall-e-3", "gpt-4", "gpt-4o"]
default_vision_model = "gpt-4o"
default_image_model = "dall-e-3"
models = [*OpenaiChat.fallback_models, default_image_model]
model_aliases = {default_image_model: default_vision_model}
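The alias added above routes image requests to the vision model. A minimal sketch of how it is assumed to resolve (assuming ProviderModelMixin.get_model applies model_aliases, which this diff does not show):

# Minimal sketch, not the actual ProviderModelMixin.get_model implementation.
default_vision_model = "gpt-4o"
default_image_model = "dall-e-3"
model_aliases = {default_image_model: default_vision_model}

def resolve_model(model: str, default_model: str = "auto") -> str:
    # An empty model falls back to the default, then aliases are applied.
    model = model or default_model
    return model_aliases.get(model, model)

assert resolve_model("dall-e-3") == "gpt-4o"   # image requests run on the vision model
assert resolve_model("o1-mini") == "o1-mini"   # other models pass through unchanged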

View file

@@ -7,6 +7,7 @@ import json
import base64
import time
import requests
import random
from copy import copy
try:
@@ -77,11 +78,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
supports_message_history = True
supports_system_message = True
default_model = "auto"
default_vision_model = "gpt-4o"
default_image_model = "dall-e-3"
fallback_models = [default_model, "gpt-4", "gpt-4o", "gpt-4o-mini", "gpt-4o-canmore", "o1-preview", "o1-mini", default_image_model]
fallback_models = [default_model, "gpt-4", "gpt-4o", "gpt-4o-mini", "gpt-4o-canmore", "o1-preview", "o1-mini"]
vision_models = fallback_models
image_models = fallback_models
synthesize_content_type = "audio/mpeg"
_api_key: str = None
@@ -97,7 +95,6 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
response.raise_for_status()
data = response.json()
cls.models = [model.get("slug") for model in data.get("models")]
cls.models.append(cls.default_image_model)
except Exception:
cls.models = cls.fallback_models
return cls.models
@@ -296,27 +293,28 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
"""
if cls.needs_auth:
await cls.login(proxy)
async with StreamSession(
proxy=proxy,
impersonate="chrome",
timeout=timeout
) as session:
image_request = None
if not cls.needs_auth:
cls._create_request_args(cookies)
RequestConfig.proof_token = get_config(cls._headers.get("user-agent"))
async with session.get(cls.url, headers=cls._headers) as response:
cls._update_request_args(session)
await raise_for_status(response)
try:
image_request = await cls.upload_image(session, cls._headers, image, image_name) if image else None
except Exception as e:
image_request = None
debug.log("OpenaiChat: Upload image failed")
debug.log(f"{e.__class__.__name__}: {e}")
if cls._headers is None:
cls._create_request_args(cookies)
async with session.get(cls.url, headers=INIT_HEADERS) as response:
cls._update_request_args(session)
await raise_for_status(response)
else:
async with session.get(cls.url, headers=cls._headers) as response:
cls._update_request_args(session)
await raise_for_status(response)
try:
image_request = await cls.upload_image(session, cls._headers, image, image_name) if image else None
except Exception as e:
debug.log("OpenaiChat: Upload image failed")
debug.log(f"{e.__class__.__name__}: {e}")
model = cls.get_model(model)
if model == cls.default_image_model:
model = cls.default_vision_model
if conversation is None:
conversation = Conversation(conversation_id, str(uuid.uuid4()) if parent_id is None else parent_id)
else:
@@ -347,6 +345,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
raise MissingAuthError("No arkose token found in .har file")
if "proofofwork" in chat_requirements:
if RequestConfig.proof_token is None:
RequestConfig.proof_token = get_config(cls._headers.get("user-agent"))
proofofwork = generate_proof_token(
**chat_requirements["proofofwork"],
user_agent=cls._headers.get("user-agent"),
@@ -364,7 +364,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
"model": model,
"timezone_offset_min":-60,
"timezone":"Europe/Berlin",
"history_and_training_disabled": history_disabled and not auto_continue and not return_conversation,
"history_and_training_disabled": history_disabled and not auto_continue and not return_conversation or not cls.needs_auth,
"conversation_mode":{"kind":"primary_assistant","plugin_ids":None},
"force_paragen":False,
"force_paragen_model_slug":"",
@@ -374,7 +374,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
"system_hints": ["search"] if web_search else None,
"supported_encodings":["v1"],
"conversation_origin":None,
"client_contextual_info":{"is_dark_mode":False,"time_since_loaded":14,"page_height":578,"page_width":1850,"pixel_ratio":1,"screen_height":1080,"screen_width":1920},
"client_contextual_info":{"is_dark_mode":False,"time_since_loaded":random.randint(20, 500),"page_height":578,"page_width":1850,"pixel_ratio":1,"screen_height":1080,"screen_width":1920},
"paragen_stream_type_override":None,
"paragen_cot_summary_display_override":"allow",
"supports_buffering":True
@@ -501,8 +501,6 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
await get_request_config(proxy)
cls._create_request_args(RequestConfig.cookies, RequestConfig.headers)
cls._set_api_key(RequestConfig.access_token)
if RequestConfig.proof_token is None:
RequestConfig.proof_token = get_config(cls._headers.get("user-agent"))
except NoValidHarFileError:
if has_nodriver:
if RequestConfig.access_token is None:
@@ -516,7 +514,6 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
page = browser.main_tab
def on_request(event: nodriver.cdp.network.RequestWillBeSent):
if event.request.url == start_url or event.request.url.startswith(conversation_url):
RequestConfig.access_request_id = event.request_id
RequestConfig.headers = event.request.headers
elif event.request.url in (backend_url, backend_anon_url):
if "OpenAI-Sentinel-Proof-Token" in event.request.headers:
@@ -538,20 +535,25 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
await page.send(nodriver.cdp.network.enable())
page.add_handler(nodriver.cdp.network.RequestWillBeSent, on_request)
page = await browser.get(cls.url)
await asyncio.sleep(1)
body = await page.evaluate("JSON.stringify(window.__remixContext)")
if body:
match = re.search(r'"accessToken":"(.*?)"', body)
if match:
RequestConfig.access_token = match.group(1)
for c in await page.send(nodriver.cdp.network.get_cookies([cls.url])):
RequestConfig.cookies[c.name] = c.value
user_agent = await page.evaluate("window.navigator.userAgent")
await page.select("#prompt-textarea", 240)
while True:
if RequestConfig.access_token:
break
body = await page.evaluate("JSON.stringify(window.__remixContext)")
if body:
match = re.search(r'"accessToken":"(.*?)"', body)
if match:
RequestConfig.access_token = match.group(1)
break
await asyncio.sleep(1)
while True:
if RequestConfig.proof_token:
break
await asyncio.sleep(1)
RequestConfig.data_build = await page.evaluate("document.documentElement.getAttribute('data-build')")
for c in await page.send(nodriver.cdp.network.get_cookies([cls.url])):
RequestConfig.cookies[c.name] = c.value
await page.close()
cls._create_request_args(RequestConfig.cookies, RequestConfig.headers, user_agent=user_agent)
cls._set_api_key(RequestConfig.access_token)
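The browser login above waits in `while True` loops until the RequestWillBeSent handler has captured an access token and a proof-of-work token. A minimal sketch of that wait pattern, with an overall timeout added here as an assumption (not part of this diff):

import asyncio
import time

class RequestConfig:
    # Populated elsewhere by the nodriver RequestWillBeSent handler (see diff above).
    access_token: str = None
    proof_token: list = None

async def wait_for_tokens(timeout: float = 120.0) -> None:
    # Poll once per second until both tokens have been captured, or give up.
    deadline = time.monotonic() + timeout
    while RequestConfig.access_token is None or RequestConfig.proof_token is None:
        if time.monotonic() >= deadline:
            raise TimeoutError("no access/proof token captured from the browser session")
        await asyncio.sleep(1)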

View file

@@ -25,7 +25,6 @@ conversation_url = "https://chatgpt.com/c/"
class RequestConfig:
cookies: dict = None
headers: dict = None
access_request_id: str = None
access_token: str = None
proof_token: list = None
turnstile_token: str = None
@@ -33,6 +32,7 @@ class RequestConfig:
arkose_token: str = None
headers: dict = {}
cookies: dict = {}
data_build: str = "prod-697873d7e78bb14df6e13af3a91fa237cc4db415"
class arkReq:
def __init__(self, arkURL, arkBx, arkHeader, arkBody, arkCookies, userAgent):

View file

@@ -14,6 +14,8 @@ from datetime import (
timezone
)
from .har_file import RequestConfig
cores = [16, 24, 32]
screens = [3000, 4000, 6000]
maxAttempts = 500000
@@ -386,7 +388,7 @@ def get_config(user_agent):
random.random(),
user_agent,
None,
"prod-0b673b9a04fb6983c1417b587f2f31173eafa605", #document.documentElement.getAttribute("data-build"),
RequestConfig.data_build, #document.documentElement.getAttribute("data-build"),
"en-US",
"en-US,es-US,en,es",
0,
@@ -396,7 +398,8 @@ def get_config(user_agent):
time.perf_counter(),
str(uuid.uuid4()),
"",
8
8,
int(time.time()),
]
return config
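The list returned by get_config is the input to the sentinel proof-of-work: a nonce slot is varied and the JSON/base64-encoded config is hashed against the server-supplied seed until the hash prefix satisfies the difficulty. A simplified sketch of that scheme (the nonce index and the "gAAAAAB" answer prefix are assumptions based on the known ChatGPT sentinel flow, not quoted from generate_proof_token):

import base64
import hashlib
import json

def solve_proof_of_work(config: list, seed: str, difficulty: str, max_attempts: int = 500000) -> str:
    # Vary the nonce slot until SHA3-512(seed + base64(config)) starts at or below the difficulty.
    for attempt in range(max_attempts):
        config[3] = attempt  # nonce slot (index assumed)
        encoded = base64.b64encode(json.dumps(config).encode()).decode()
        digest = hashlib.sha3_512((seed + encoded).encode()).hexdigest()
        if digest[:len(difficulty)] <= difficulty:
            return "gAAAAAB" + encoded
    raise RuntimeError("proof-of-work not solved within max_attempts")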

View file

@@ -8,6 +8,10 @@ here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, 'README.md'), encoding='utf-8') as fh:
long_description = '\n' + fh.read()
long_description = long_description.replace("[!NOTE]", "")
long_description = long_description.replace("(docs/", "(https://github.com/xtekky/gpt4free/blob/main/docs/")
long_description = long_description.replace("(docs/images/", "(https://raw.githubusercontent.com/xtekky/gpt4free/refs/heads/main/docs/images/")
INSTALL_REQUIRE = [
"requests",
"aiohttp",