Mirror of https://github.com/xtekky/gpt4free.git
Fix links in Readme, update OpenaiChat provider

Commit 7f8e5181f2 (parent fc4fe21199)
5 changed files with 49 additions and 36 deletions
OpenaiAccount.py

@@ -5,4 +5,8 @@ from .OpenaiChat import OpenaiChat
 class OpenaiAccount(OpenaiChat):
     needs_auth = True
     parent = "OpenaiChat"
-    image_models = ["dall-e"]
+    image_models = ["dall-e-3", "gpt-4", "gpt-4o"]
+    default_vision_model = "gpt-4o"
+    default_image_model = "dall-e-3"
+    models = [*OpenaiChat.fallback_models, default_image_model]
+    model_aliases = {default_image_model: default_vision_model}
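The alias map is what keeps "dall-e-3" usable as a model name while the request is actually served by the vision model. A minimal sketch of that resolution under stubbed class names (the real lookup lives in g4f's ProviderModelMixin and is not reproduced here):

```python
class OpenaiChatStub:
    fallback_models = ["auto", "gpt-4", "gpt-4o", "gpt-4o-mini"]

class OpenaiAccountStub(OpenaiChatStub):
    default_vision_model = "gpt-4o"
    default_image_model = "dall-e-3"
    models = [*OpenaiChatStub.fallback_models, default_image_model]
    model_aliases = {default_image_model: default_vision_model}

    @classmethod
    def resolve(cls, model: str) -> str:
        # The advertised alias ("dall-e-3") maps to the model that actually
        # serves the request ("gpt-4o"); unknown names pass through unchanged.
        return cls.model_aliases.get(model, model)

assert OpenaiAccountStub.resolve("dall-e-3") == "gpt-4o"
assert OpenaiAccountStub.resolve("gpt-4") == "gpt-4"
```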
OpenaiChat.py

@@ -7,6 +7,7 @@ import json
 import base64
 import time
 import requests
+import random
 from copy import copy

 try:
@@ -77,11 +78,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
     supports_message_history = True
     supports_system_message = True
     default_model = "auto"
-    default_vision_model = "gpt-4o"
-    default_image_model = "dall-e-3"
-    fallback_models = [default_model, "gpt-4", "gpt-4o", "gpt-4o-mini", "gpt-4o-canmore", "o1-preview", "o1-mini", default_image_model]
+    fallback_models = [default_model, "gpt-4", "gpt-4o", "gpt-4o-mini", "gpt-4o-canmore", "o1-preview", "o1-mini"]
     vision_models = fallback_models
-    image_models = fallback_models
     synthesize_content_type = "audio/mpeg"

     _api_key: str = None
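With the image and vision defaults moved into OpenaiAccount, the anonymous OpenaiChat stops advertising image capability altogether. A rough sketch of the resulting split, with stub classes standing in for the real providers:

```python
# Hedged sketch of the capability split this commit establishes; class names
# are stand-ins, not the real g4f providers.
class AnonChat:
    fallback_models = ["auto", "gpt-4", "gpt-4o", "gpt-4o-mini"]
    image_models: list = []          # no image generation without login

class AccountChat(AnonChat):
    default_image_model = "dall-e-3"
    image_models = [default_image_model, "gpt-4", "gpt-4o"]

assert not AnonChat.image_models
assert "dall-e-3" in AccountChat.image_models
```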
@@ -97,7 +95,6 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             response.raise_for_status()
             data = response.json()
             cls.models = [model.get("slug") for model in data.get("models")]
-            cls.models.append(cls.default_image_model)
         except Exception:
             cls.models = cls.fallback_models
         return cls.models
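Consistent with the class-attribute hunk above, dall-e-3 is no longer appended to the fetched model list. The surrounding method has a fetch-with-fallback shape; a reduced, runnable sketch of that shape (the `fetch` parameter is illustrative, not g4f's API):

```python
def get_models(fetch, fallback_models: list) -> list:
    try:
        data = fetch()  # stands in for the HTTP call returning parsed JSON
        return [model.get("slug") for model in data.get("models")]
    except Exception:
        # Any failure (network, schema) falls back to the static list.
        return fallback_models

def failing_fetch():
    raise RuntimeError("backend unreachable")

print(get_models(failing_fetch, ["auto", "gpt-4o"]))  # ['auto', 'gpt-4o']
```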
@@ -296,27 +293,28 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         """
         if cls.needs_auth:
             await cls.login(proxy)

         async with StreamSession(
             proxy=proxy,
             impersonate="chrome",
             timeout=timeout
         ) as session:
+            image_request = None
             if not cls.needs_auth:
-                cls._create_request_args(cookies)
-                RequestConfig.proof_token = get_config(cls._headers.get("user-agent"))
-            async with session.get(cls.url, headers=cls._headers) as response:
-                cls._update_request_args(session)
-                await raise_for_status(response)
-            try:
-                image_request = await cls.upload_image(session, cls._headers, image, image_name) if image else None
-            except Exception as e:
-                image_request = None
-                debug.log("OpenaiChat: Upload image failed")
-                debug.log(f"{e.__class__.__name__}: {e}")
+                if cls._headers is None:
+                    cls._create_request_args(cookies)
+                    async with session.get(cls.url, headers=INIT_HEADERS) as response:
+                        cls._update_request_args(session)
+                        await raise_for_status(response)
+            else:
+                async with session.get(cls.url, headers=cls._headers) as response:
+                    cls._update_request_args(session)
+                    await raise_for_status(response)
+                try:
+                    image_request = await cls.upload_image(session, cls._headers, image, image_name) if image else None
+                except Exception as e:
+                    debug.log("OpenaiChat: Upload image failed")
+                    debug.log(f"{e.__class__.__name__}: {e}")
             model = cls.get_model(model)
-            if model == cls.default_image_model:
-                model = cls.default_vision_model
             if conversation is None:
                 conversation = Conversation(conversation_id, str(uuid.uuid4()) if parent_id is None else parent_id)
             else:
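The rewritten block splits session startup by auth mode: the anonymous path builds request args at most once (guarded by `cls._headers is None`) and bootstraps with the generic INIT_HEADERS, while the authenticated path reuses cached headers and is now the only place that attempts an image upload. The deleted default_image_model remap at the bottom is superseded by OpenaiAccount's model_aliases. A compact sketch of the same control flow with stand-in names (not the provider's real signatures):

```python
import asyncio

class ProviderSketch:
    needs_auth = False
    _headers = None

    @classmethod
    async def open_session(cls, image=None):
        image_request = None
        if not cls.needs_auth:
            if cls._headers is None:   # bootstrap exactly once per process
                cls._headers = {"user-agent": "chrome"}  # stands in for INIT_HEADERS
        else:
            # Authenticated path: cached headers are reused, and only here is
            # an image upload attempted; failures are logged, not raised.
            if image is not None:
                try:
                    image_request = {"file_name": image}  # stands in for upload_image
                except Exception as e:
                    print(f"{e.__class__.__name__}: {e}")
        return image_request

print(asyncio.run(ProviderSketch.open_session()))  # None
```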
@@ -347,6 +345,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                     raise MissingAuthError("No arkose token found in .har file")

             if "proofofwork" in chat_requirements:
+                if RequestConfig.proof_token is None:
+                    RequestConfig.proof_token = get_config(cls._headers.get("user-agent"))
                 proofofwork = generate_proof_token(
                     **chat_requirements["proofofwork"],
                     user_agent=cls._headers.get("user-agent"),
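Together with the matching deletions in the login hunk further down and in the anonymous path above, this makes the proof token lazy: the fingerprint is computed only when the server's chat requirements actually demand proof-of-work, and only the first time. A small sketch of the pattern, with stubs in place of RequestConfig and get_config:

```python
class ConfigSketch:
    proof_token = None  # stands in for RequestConfig.proof_token

def build_fingerprint(user_agent: str) -> list:
    # Stand-in for proofofwork.get_config: the browser-fingerprint array
    # that gets hashed during proof-of-work.
    return [user_agent, "..."]

def ensure_proof_token(chat_requirements: dict, user_agent: str) -> None:
    # Compute the expensive value only on demand, and cache it.
    if "proofofwork" in chat_requirements and ConfigSketch.proof_token is None:
        ConfigSketch.proof_token = build_fingerprint(user_agent)

ensure_proof_token({"proofofwork": {"seed": "0.42"}}, "Mozilla/5.0")
assert ConfigSketch.proof_token is not None
```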
@@ -364,7 +364,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 "model": model,
                 "timezone_offset_min":-60,
                 "timezone":"Europe/Berlin",
-                "history_and_training_disabled": history_disabled and not auto_continue and not return_conversation,
+                "history_and_training_disabled": history_disabled and not auto_continue and not return_conversation or not cls.needs_auth,
                 "conversation_mode":{"kind":"primary_assistant","plugin_ids":None},
                 "force_paragen":False,
                 "force_paragen_model_slug":"",
@@ -374,7 +374,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 "system_hints": ["search"] if web_search else None,
                 "supported_encodings":["v1"],
                 "conversation_origin":None,
-                "client_contextual_info":{"is_dark_mode":False,"time_since_loaded":14,"page_height":578,"page_width":1850,"pixel_ratio":1,"screen_height":1080,"screen_width":1920},
+                "client_contextual_info":{"is_dark_mode":False,"time_since_loaded":random.randint(20, 500),"page_height":578,"page_width":1850,"pixel_ratio":1,"screen_height":1080,"screen_width":1920},
                 "paragen_stream_type_override":None,
                 "paragen_cot_summary_display_override":"allow",
                 "supports_buffering":True
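This is what the new `import random` is for: a constant time_since_loaded of 14 made every request report an identically aged page, whereas drawing it per request mimics a tab that has been open for a varying number of seconds. A minimal reproduction of the changed field:

```python
import random

def client_contextual_info() -> dict:
    # Only time_since_loaded varies per request; the other fields stay
    # constant, exactly as in the commit.
    return {
        "is_dark_mode": False,
        "time_since_loaded": random.randint(20, 500),
        "page_height": 578, "page_width": 1850, "pixel_ratio": 1,
        "screen_height": 1080, "screen_width": 1920,
    }

print(client_contextual_info()["time_since_loaded"])  # e.g. 237
```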
@@ -501,8 +501,6 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 await get_request_config(proxy)
                 cls._create_request_args(RequestConfig.cookies, RequestConfig.headers)
                 cls._set_api_key(RequestConfig.access_token)
-                if RequestConfig.proof_token is None:
-                    RequestConfig.proof_token = get_config(cls._headers.get("user-agent"))
             except NoValidHarFileError:
                 if has_nodriver:
                     if RequestConfig.access_token is None:
@@ -516,7 +514,6 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                         page = browser.main_tab
                         def on_request(event: nodriver.cdp.network.RequestWillBeSent):
                             if event.request.url == start_url or event.request.url.startswith(conversation_url):
-                                RequestConfig.access_request_id = event.request_id
                                 RequestConfig.headers = event.request.headers
                             elif event.request.url in (backend_url, backend_anon_url):
                                 if "OpenAI-Sentinel-Proof-Token" in event.request.headers:
@@ -538,20 +535,25 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                         await page.send(nodriver.cdp.network.enable())
                         page.add_handler(nodriver.cdp.network.RequestWillBeSent, on_request)
                         page = await browser.get(cls.url)
-                        await asyncio.sleep(1)
-                        body = await page.evaluate("JSON.stringify(window.__remixContext)")
-                        if body:
-                            match = re.search(r'"accessToken":"(.*?)"', body)
-                            if match:
-                                RequestConfig.access_token = match.group(1)
-                        for c in await page.send(nodriver.cdp.network.get_cookies([cls.url])):
-                            RequestConfig.cookies[c.name] = c.value
                         user_agent = await page.evaluate("window.navigator.userAgent")
                         await page.select("#prompt-textarea", 240)
                         while True:
                             if RequestConfig.access_token:
                                 break
+                            body = await page.evaluate("JSON.stringify(window.__remixContext)")
+                            if body:
+                                match = re.search(r'"accessToken":"(.*?)"', body)
+                                if match:
+                                    RequestConfig.access_token = match.group(1)
+                                    break
                             await asyncio.sleep(1)
+                        while True:
+                            if RequestConfig.proof_token:
+                                break
+                            await asyncio.sleep(1)
+                        RequestConfig.data_build = await page.evaluate("document.documentElement.getAttribute('data-build')")
+                        for c in await page.send(nodriver.cdp.network.get_cookies([cls.url])):
+                            RequestConfig.cookies[c.name] = c.value
                         await page.close()
                         cls._create_request_args(RequestConfig.cookies, RequestConfig.headers, user_agent=user_agent)
                         cls._set_api_key(RequestConfig.access_token)
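The one-shot token scrape becomes two polling loops: keep evaluating window.__remixContext until an access token turns up, then wait until the CDP handler has captured a proof token, and only then read cookies and the live data-build attribute. A hypothetical helper expressing the same wait-until pattern (the commit inlines the loops and, unlike this sketch, polls without any timeout):

```python
import asyncio

async def wait_for(getter, interval: float = 1.0, timeout: float = 120.0):
    """Poll getter() until it returns a truthy value or the timeout expires."""
    loop = asyncio.get_running_loop()
    deadline = loop.time() + timeout
    while True:
        value = getter()
        if value:
            return value
        if loop.time() >= deadline:
            raise TimeoutError("value was not captured in time")
        await asyncio.sleep(interval)

# Usage in the commit's terms (RequestConfig fields filled by the handler):
#     await wait_for(lambda: RequestConfig.access_token)
#     await wait_for(lambda: RequestConfig.proof_token)
```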
har_file.py

@@ -25,7 +25,6 @@ conversation_url = "https://chatgpt.com/c/"
 class RequestConfig:
     cookies: dict = None
     headers: dict = None
-    access_request_id: str = None
     access_token: str = None
     proof_token: list = None
     turnstile_token: str = None
@@ -33,6 +32,7 @@ class RequestConfig:
     arkose_token: str = None
     headers: dict = {}
     cookies: dict = {}
+    data_build: str = "prod-697873d7e78bb14df6e13af3a91fa237cc4db415"

 class arkReq:
     def __init__(self, arkURL, arkBx, arkHeader, arkBody, arkCookies, userAgent):
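The checked-in build hash now serves only as a fallback: the nodriver login above overwrites RequestConfig.data_build with the page's live data-build attribute, so proof-of-work configs no longer go stale when chatgpt.com ships a new frontend build. A sketch of that fallback-then-override behavior with a stub class:

```python
class RequestConfigSketch:
    # The checked-in default acts only as a fallback.
    data_build: str = "prod-697873d7e78bb14df6e13af3a91fa237cc4db415"

def refresh_data_build(live_value):
    # live_value stands in for
    # page.evaluate("document.documentElement.getAttribute('data-build')")
    if live_value:
        RequestConfigSketch.data_build = live_value

refresh_data_build(None)   # nothing captured: the fallback stays in place
refresh_data_build("prod-0123456789abcdef")
assert RequestConfigSketch.data_build == "prod-0123456789abcdef"
```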
proofofwork.py

@@ -14,6 +14,8 @@ from datetime import (
     timezone
 )

+from .har_file import RequestConfig
+
 cores = [16, 24, 32]
 screens = [3000, 4000, 6000]
 maxAttempts = 500000
@@ -386,7 +388,7 @@ def get_config(user_agent):
         random.random(),
         user_agent,
         None,
-        "prod-0b673b9a04fb6983c1417b587f2f31173eafa605", #document.documentElement.getAttribute("data-build"),
+        RequestConfig.data_build, #document.documentElement.getAttribute("data-build"),
         "en-US",
         "en-US,es-US,en,es",
         0,
@@ -396,7 +398,8 @@ def get_config(user_agent):
         time.perf_counter(),
         str(uuid.uuid4()),
         "",
-        8
+        8,
+        int(time.time()),
     ]

     return config
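Besides reading the build hash from RequestConfig, the config array gains a trailing unix timestamp (and the bare 8 picks up the comma it now needs). A reduced, runnable sketch of the array's tail; the full get_config assembles many more fingerprint fields:

```python
import time
import uuid

def config_tail() -> list:
    return [
        time.perf_counter(),   # high-resolution timer, as before
        str(uuid.uuid4()),
        "",
        8,
        int(time.time()),      # new: whole-second unix timestamp
    ]

print(config_tail())
```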
setup.py

@@ -8,6 +8,10 @@ here = os.path.abspath(os.path.dirname(__file__))
 with codecs.open(os.path.join(here, 'README.md'), encoding='utf-8') as fh:
     long_description = '\n' + fh.read()

+long_description = long_description.replace("[!NOTE]", "")
+long_description = long_description.replace("(docs/", "(https://github.com/xtekky/gpt4free/blob/main/docs/")
+long_description = long_description.replace("(docs/images/", "(https://raw.githubusercontent.com/xtekky/gpt4free/refs/heads/main/docs/images/")
+
 INSTALL_REQUIRE = [
     "requests",
     "aiohttp",
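This is the "fix links in Readme" half of the commit: relative docs/ links break on PyPI, so they are rewritten to absolute GitHub URLs before the README is used as the package's long description. Note the ordering as committed: the "(docs/" replacement runs first and already rewrites "(docs/images/" prefixes, so the third replace can never match anything. If image links are meant to resolve via raw.githubusercontent.com, the more specific prefix has to be handled first, e.g.:

```python
# Illustration of the ordering issue with plain str.replace:
long_description = "See [guide](docs/guide.md) and ![logo](docs/images/logo.png)"

# Most specific prefix first, so image links get the raw.githubusercontent host:
long_description = long_description.replace(
    "(docs/images/",
    "(https://raw.githubusercontent.com/xtekky/gpt4free/refs/heads/main/docs/images/")
long_description = long_description.replace(
    "(docs/", "(https://github.com/xtekky/gpt4free/blob/main/docs/")

print(long_description)
```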