Mirror of https://github.com/xtekky/gpt4free.git (synced 2025-12-06 02:30:41 -08:00)
refactor: streamline auth handling and CLI structure
- Added `fallback_model = "deepseek"` to `PollinationsAI` class in `PollinationsAI.py`
- Modified `PollinationsAI._agenerate` to safely call `get_model` only if `model` is not None
- Removed unused login loop in `OpenaiChat.synthesize` method in `OpenaiChat.py`
- Replaced full CLI parser and main function implementation in `__main__.py` with import from `.main`
- Added `get_auth_result` method to `AsyncAuthedProvider` in `base_provider.py` for reusable auth retrieval
- Replaced repeated auth loading logic in `create_completion` and `create_streaming_completion` with call to `get_auth_result` in `base_provider.py`
commit 78c0d67d54 (parent dcbeebd662)
4 changed files with 21 additions and 104 deletions
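The auth-handling part of this change replaces two inline copies of the cache-file reading logic with a single classmethod. Below is a minimal, self-contained sketch of that pattern; the names here (`CachedAuthProvider`, `cache_dir`, the simplified `AuthResult`) are illustrative stand-ins rather than the actual g4f types, and the real implementation is in the `base_provider.py` hunks further down.

# Minimal sketch (illustrative names, not the real g4f classes): a provider keeps
# its auth state in a JSON cache file and exposes one classmethod that either
# returns the cached result or raises, so every completion path can reuse it.
import json
from dataclasses import dataclass
from pathlib import Path


class MissingAuthError(Exception):
    """Raised when no usable cached auth is available."""


@dataclass
class AuthResult:
    api_key: str = ""


class CachedAuthProvider:
    cache_dir = Path(".")  # illustrative location

    @classmethod
    def get_cache_file(cls) -> Path:
        return cls.cache_dir / f"{cls.__name__}.json"

    @classmethod
    def get_auth_result(cls) -> AuthResult:
        """Load the cached auth result, removing the cache file if it is corrupt."""
        cache_file = cls.get_cache_file()
        if cache_file.exists():
            try:
                with cache_file.open("r") as f:
                    return AuthResult(**json.load(f))
            except json.JSONDecodeError:
                cache_file.unlink()
                raise MissingAuthError(f"Invalid auth file: {cache_file}")
        raise MissingAuthError("No cached auth result")

    @classmethod
    def create_completion(cls, prompt: str) -> str:
        # Both the sync and streaming paths now start with this one call
        # instead of duplicating the file-handling logic inline.
        try:
            auth = cls.get_auth_result()
        except MissingAuthError:
            auth = AuthResult(api_key="fresh-login")  # stand-in for a re-auth branch
        return f"completion for {prompt!r} using key {auth.api_key!r}"


if __name__ == "__main__":
    # With no cache file present, the provider falls through to the re-auth branch.
    print(CachedAuthProvider.create_completion("hello"))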
PollinationsAI.py

@@ -78,6 +78,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
     # Models configuration
     default_model = "openai"
+    fallback_model = "deepseek"
     default_image_model = "flux"
     default_vision_model = default_model
     default_audio_model = "openai-audio"

@@ -288,7 +289,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
                     break
         model = cls.default_audio_model if has_audio else model
         try:
-            model = cls.get_model(model)
+            model = cls.get_model(model) if model else None
         except ModelNotFoundError:
             pass
         if model in cls.image_models:
OpenaiChat.py

@@ -603,8 +603,6 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
 
     @classmethod
     async def synthesize(cls, params: dict) -> AsyncIterator[bytes]:
-        async for _ in cls.login():
-            pass
         async with StreamSession(
             impersonate="chrome",
             timeout=0
__main__.py

@@ -1,88 +1,6 @@
 from __future__ import annotations
 
-import argparse
-from argparse import ArgumentParser
-from .client import get_parser, run_client_args
-
-from g4f import Provider
-from g4f.gui.run import gui_parser, run_gui_args
-import g4f.cookies
-
-def get_api_parser():
-    api_parser = ArgumentParser(description="Run the API and GUI")
-    api_parser.add_argument("--bind", default=None, help="The bind string. (Default: 0.0.0.0:1337)")
-    api_parser.add_argument("--port", "-p", default=None, help="Change the port of the server.")
-    api_parser.add_argument("--debug", "-d", action="store_true", help="Enable verbose logging.")
-    api_parser.add_argument("--gui", "-g", default=None, action="store_true", help="Start also the gui.")
-    api_parser.add_argument("--model", default=None, help="Default model for chat completion. (incompatible with --reload and --workers)")
-    api_parser.add_argument("--provider", choices=[provider.__name__ for provider in Provider.__providers__ if provider.working],
-                            default=None, help="Default provider for chat completion. (incompatible with --reload and --workers)")
-    api_parser.add_argument("--media-provider", choices=[provider.__name__ for provider in Provider.__providers__ if provider.working and bool(getattr(provider, "image_models", False))],
-                            default=None, help="Default provider for image generation. (incompatible with --reload and --workers)"),
-    api_parser.add_argument("--proxy", default=None, help="Default used proxy. (incompatible with --reload and --workers)")
-    api_parser.add_argument("--workers", type=int, default=None, help="Number of workers.")
-    api_parser.add_argument("--disable-colors", action="store_true", help="Don't use colors.")
-    api_parser.add_argument("--ignore-cookie-files", action="store_true", help="Don't read .har and cookie files. (incompatible with --reload and --workers)")
-    api_parser.add_argument("--g4f-api-key", type=str, default=None, help="Sets an authentication key for your API. (incompatible with --reload and --workers)")
-    api_parser.add_argument("--ignored-providers", nargs="+", choices=[provider.__name__ for provider in Provider.__providers__ if provider.working],
-                            default=[], help="List of providers to ignore when processing request. (incompatible with --reload and --workers)")
-    api_parser.add_argument("--cookie-browsers", nargs="+", choices=[browser.__name__ for browser in g4f.cookies.browsers],
-                            default=[], help="List of browsers to access or retrieve cookies from. (incompatible with --reload and --workers)")
-    api_parser.add_argument("--reload", action="store_true", help="Enable reloading.")
-    api_parser.add_argument("--demo", action="store_true", help="Enable demo mode.")
-    api_parser.add_argument("--timeout", type=int, default=600, help="Default timeout for requests in seconds. (incompatible with --reload and --workers)")
-    api_parser.add_argument("--ssl-keyfile", type=str, default=None, help="Path to SSL key file for HTTPS.")
-    api_parser.add_argument("--ssl-certfile", type=str, default=None, help="Path to SSL certificate file for HTTPS.")
-    api_parser.add_argument("--log-config", type=str, default=None, help="Custom log config.")
-
-    return api_parser
-
-def main():
-    parser = argparse.ArgumentParser(description="Run gpt4free")
-    subparsers = parser.add_subparsers(dest="mode", help="Mode to run the g4f in.")
-    subparsers.add_parser("api", parents=[get_api_parser()], add_help=False)
-    subparsers.add_parser("gui", parents=[gui_parser()], add_help=False)
-    subparsers.add_parser("client", parents=[get_parser()], add_help=False)
-
-    args = parser.parse_args()
-    if args.mode == "api":
-        run_api_args(args)
-    elif args.mode == "gui":
-        run_gui_args(args)
-    elif args.mode == "client":
-        run_client_args(args)
-    else:
-        parser.print_help()
-        exit(1)
-
-def run_api_args(args):
-    from g4f.api import AppConfig, run_api
-
-    AppConfig.set_config(
-        ignore_cookie_files=args.ignore_cookie_files,
-        ignored_providers=args.ignored_providers,
-        g4f_api_key=args.g4f_api_key,
-        provider=args.provider,
-        media_provider=args.media_provider,
-        proxy=args.proxy,
-        model=args.model,
-        gui=args.gui,
-        demo=args.demo,
-        timeout=args.timeout,
-    )
-    if args.cookie_browsers:
-        g4f.cookies.browsers = [g4f.cookies[browser] for browser in args.cookie_browsers]
-    run_api(
-        bind=args.bind,
-        port=args.port,
-        debug=args.debug,
-        workers=args.workers,
-        use_colors=not args.disable_colors,
-        reload=args.reload,
-        ssl_keyfile=args.ssl_keyfile,
-        ssl_certfile=args.ssl_certfile,
-        log_config=args.log_config,
-    )
+from . import main
 
 if __name__ == "__main__":
     main()
base_provider.py

@@ -448,6 +448,22 @@ class AsyncAuthedProvider(AsyncGeneratorProvider, AuthFileMixin):
         elif cache_file.exists():
             cache_file.unlink()
 
+    @classmethod
+    def get_auth_result(cls) -> AuthResult:
+        """
+        Retrieves the authentication result from cache.
+        """
+        cache_file = cls.get_cache_file()
+        if cache_file.exists():
+            try:
+                with cache_file.open("r") as f:
+                    return AuthResult(**json.load(f))
+            except json.JSONDecodeError:
+                cache_file.unlink()
+                raise MissingAuthError(f"Invalid auth file: {cache_file}")
+        else:
+            raise MissingAuthError
+
     @classmethod
     def create_completion(
         cls,
@@ -458,15 +474,7 @@ class AsyncAuthedProvider(AsyncGeneratorProvider, AuthFileMixin):
         auth_result: AuthResult = None
-        cache_file = cls.get_cache_file()
         try:
-            if cache_file.exists():
-                try:
-                    with cache_file.open("r") as f:
-                        auth_result = AuthResult(**json.load(f))
-                except json.JSONDecodeError:
-                    cache_file.unlink()
-                    raise MissingAuthError(f"Invalid auth file: {cache_file}")
-            else:
-                raise MissingAuthError
+            auth_result = cls.get_auth_result()
             yield from to_sync_generator(cls.create_authed(model, messages, auth_result, **kwargs))
         except (MissingAuthError, NoValidHarFileError):
             response = cls.on_auth(**kwargs)
@@ -491,15 +499,7 @@ class AsyncAuthedProvider(AsyncGeneratorProvider, AuthFileMixin):
         auth_result: AuthResult = None
-        cache_file = cls.get_cache_file()
         try:
-            if cache_file.exists():
-                try:
-                    with cache_file.open("r") as f:
-                        auth_result = AuthResult(**json.load(f))
-                except json.JSONDecodeError:
-                    cache_file.unlink()
-                    raise MissingAuthError(f"Invalid auth file: {cache_file}")
-            else:
-                raise MissingAuthError
+            auth_result = cls.get_auth_result()
             response = to_async_iterator(cls.create_authed(model, messages, **kwargs, auth_result=auth_result))
             async for chunk in response:
                 yield chunk