Mirror of https://github.com/xtekky/gpt4free.git (synced 2026-02-04 23:01:46 -08:00)
* feat: add repository path support and new md2html converter tool
- Add `--repo` argument to commit.py for specifying git repository path with validation
- Add `validate_git_repository()` function to check repository existence and git status (a sketch follows this list)
- Add `get_repository_info()` function to extract branch and remote information
- Update `get_git_diff()` and `make_commit()` functions to accept repository path parameter
- Add Path import and repository validation in main workflow
- Enhance error messages with repository-specific guidance and context
- Update argument parser description and help text for new repository functionality
- Expand module docstring with comprehensive usage examples and feature descriptions
- Add new md2html.py tool for converting Markdown files to HTML using GitHub API
- Add template.html file with GitHub-styled CSS and responsive design
- Implement batch processing, retry logic, and rate limit handling in md2html converter
- Add comprehensive command-line interface with directory processing and custom output options
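The helper bodies are not included in this log; the following is a minimal sketch of what a `validate_git_repository()` along these lines could look like. Only the function name comes from the bullets above; the use of plain `subprocess` and `git rev-parse` is an assumption for illustration.

```python
# Hypothetical sketch of the validation helper named above; the real commit.py
# may differ. Assumes plain git is available on PATH.
import subprocess
from pathlib import Path

def validate_git_repository(repo_path: str) -> Path:
    """Return the resolved path if it exists and is a git working tree, else raise."""
    path = Path(repo_path).expanduser().resolve()
    if not path.is_dir():
        raise ValueError(f"Repository path does not exist: {path}")
    result = subprocess.run(
        ["git", "-C", str(path), "rev-parse", "--is-inside-work-tree"],
        capture_output=True, text=True,
    )
    if result.returncode != 0 or result.stdout.strip() != "true":
        raise ValueError(f"Not a git repository: {path}")
    return path
```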
* refactor: Update provider configurations and model handling
- Removed Dynaspark provider entirely by deleting `g4f/Provider/Dynaspark.py`
- Deprecated DDG provider by moving to `not_working` directory and updating imports
- Restructured HuggingFace and MiniMax providers into `needs_auth` subpackage:
  - Moved all HuggingFace provider files to `needs_auth/hf/`
  - Moved MiniMax providers to `needs_auth/mini_max/`
- Updated ARTA provider:
  - Expanded `model_aliases` with new tattoo styles and additional aliases
  - Added `get_model()` method for model resolution with list support (a sketch follows this list)
- Simplified Blackbox provider:
  - Removed openrouter models and agentMode configurations
  - Reduced model lists to core GPT variants
  - Set session/subscriptionCache to None in payload
- Added model resolution to Gemini providers:
  - Implemented `get_model()` in Gemini.py and GeminiPro.py
  - Added alias handling with list support
- Updated model definitions in `g4f/models.py`:
  - Removed references to Dynaspark and DDG providers
  - Added new SDXL image models with ARTA provider
  - Adjusted best_provider assignments across multiple models
- Removed Dynaspark/DDG references from provider imports and AnyProvider
- Added DDG to not_working providers in `__init__.py`
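The alias resolution mentioned for ARTA and the Gemini providers can be summarized with a hedged sketch: an alias value may be a single model name or a list of candidates, one of which is picked at random. The class and attribute names below follow the usual g4f provider shape but are illustrative, not the actual provider code.

```python
# Illustrative sketch of alias resolution with list support; not the exact
# provider implementation. `models`, `default_model`, and `model_aliases`
# mirror attribute names used by g4f providers; the values are placeholders.
import random

class ExampleProvider:
    default_model = "example-model"
    models = ["example-model", "example-model-2"]
    model_aliases = {
        "alias": "example-model",
        "multi-alias": ["example-model", "example-model-2"],  # list: pick one at random
    }

    @classmethod
    def get_model(cls, model: str, **kwargs) -> str:
        if not model:
            return cls.default_model
        if model in cls.models:
            return model
        alias = cls.model_aliases.get(model)
        if isinstance(alias, list):
            return random.choice(alias)  # several concrete models share this alias
        if alias:
            return alias
        return cls.default_model
```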
* feat: Add new models to DeepInfraChat, LambdaChat, and models
- Add 'deepseek-ai/DeepSeek-R1-0528' model to DeepInfraChat provider's models list
- Include alias 'deepseek-r1-0528' for DeepSeek-R1-0528 in DeepInfraChat's model_aliases
- Add 'apriel-5b-instruct' model to LambdaChat provider's models list
- Define new 'deepseek-r1-0528' model in models.py with DeepSeek base provider and DeepInfraChat as best provider
* refactor: simplify model registry and add validation
- Remove unused imports: sys, inspect, Set, Type
- Remove ModelRegistry._discovered flag and automatic discovery mechanism
- Add ModelRegistry.clear() method for resetting registry state
- Implement ModelRegistry.list_models_by_provider() for provider-based filtering
- Add ModelRegistry.validate_all_models() for configuration checks (these registry helpers are sketched after this list)
- Remove Model._registered field and simplify registration logic
- Fix gemma_3_12b model name from empty string to 'gemma-3-12b'
- Add image model section header in model definitions
- Replace ModelUtils.convert dict with dynamic property
- Remove ModelUtils.refresh() method
- Register 'gemini' alias directly in ModelRegistry after model creation
- Remove module-level model discovery and ModelUtils.convert initialization
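The registry methods named in these bullets are not shown in the log; the sketch below assumes a simple name-to-model dict (`_models`) and illustrative model attributes (`name`, `best_provider`), so it indicates the shape of the API rather than the actual implementation.

```python
# Rough sketch of the registry helpers named above; `_models` and the model
# attributes used here are assumptions for illustration.
class ModelRegistry:
    _models: dict = {}

    @classmethod
    def register(cls, model) -> None:
        cls._models[model.name] = model

    @classmethod
    def all_models(cls) -> dict:
        return dict(cls._models)

    @classmethod
    def clear(cls) -> None:
        # Reset registry state, e.g. between test runs
        cls._models.clear()

    @classmethod
    def list_models_by_provider(cls, provider_name: str) -> list:
        return [
            m for m in cls._models.values()
            if getattr(m.best_provider, "__name__", str(m.best_provider)) == provider_name
        ]

    @classmethod
    def validate_all_models(cls) -> list[str]:
        # Return a list of human-readable configuration problems
        issues = []
        for name, model in cls._models.items():
            if not model.name:
                issues.append(f"{name}: empty model name")
            if model.best_provider is None:
                issues.append(f"{name}: no best_provider configured")
        return issues
```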
* refactor: Replace ModelUtils.convert property with class variable
- Add class variable `convert` to `ModelUtils` initialized as empty dictionary
- Replace `@property convert` method with `refresh()` class method that updates `convert` (the pattern is sketched after this list)
- Remove dynamic property returning `ModelRegistry.all_models()`
- Add module-level assignment to initialize `ModelUtils.convert` with `ModelRegistry.all_models()`
- Add a comment clarifying how the `convert` dictionary is filled
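For contrast with the dynamic property it replaces, here is a minimal sketch of the class-variable plus `refresh()` pattern this commit describes. It reuses the assumed `ModelRegistry.all_models()` accessor from the previous sketch and is not the literal g4f code.

```python
# Minimal sketch of the pattern described above; assumes the ModelRegistry
# sketch shown earlier. Not the literal g4f implementation.
class ModelUtils:
    # Mapping of model name -> model instance, filled from the registry
    convert: dict = {}

    @classmethod
    def refresh(cls) -> None:
        cls.convert = ModelRegistry.all_models()

# Fill the convert dictionary once at import time
ModelUtils.convert = ModelRegistry.all_models()
```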
* refactor: Reorganize providers and update model configuration
- Removed unused providers from `g4f/Provider/__init__.py`: ChatGpt, Pi, Pizzagpt, PuterJS, You
- Moved LMArenaBeta provider to `needs_auth` directory with updated relative imports
- Moved Pi provider to `needs_auth` directory with updated relative imports
- Moved PuterJS provider to `needs_auth` directory with updated relative imports
- Moved You provider to `needs_auth` directory with updated relative imports
- Added LMArenaBeta, Pi, PuterJS, You to `needs_auth/__init__.py`
- Moved ChatGpt provider to `not_working` directory with updated relative imports
- Moved Pizzagpt provider to `not_working` directory with updated relative imports
- Added ChatGpt, Pizzagpt to `not_working/__init__.py`
- Updated `g4f/models.py` to remove Reka import and change reka_core model provider
- Changed reka_core model's best_provider from IterListProvider to LegacyLMArena in `g4f/models.py`
* feat: add Together provider and update model handling
- Add new provider `Together` in `g4f/Provider/Together.py` with model aliases and configuration
- Implement `get_activation_key` and `get_models` methods in `Together` provider
- Add `get_model` method to resolve aliases in `Together` and `DeepInfraChat`
- Update `DeepInfraChat` model mappings to support multiple versions
  - Change "deepseek-v3" to a list with two model options
  - Change "deepseek-r1" to a list with two model options
  - Remove duplicate "deepseek-v3" entry
  - Remove "mistral-small" alias
- Remove "midjourney" from `PollinationsAI.extra_image_models`
- Register `Together` provider in `g4f/Provider/__init__.py`
- Update `g4f/models.py` with new providers and models
  - Add `Together` to default and default_vision provider lists
  - Add `Together` as provider for multiple existing models
  - Add new vision model `qwen_2_vl_72b`
  - Add new text models: `qwen_2_5_7b`, `deepseek_r1_distill_qwen_1_5b`, `deepseek_r1_distill_qwen_14b`
  - Add new image models: `flux_redux`, `flux_depth`, `flux_canny`, `flux_kontext_max`, `flux_dev_lora`, `flux_kontext_pro`
  - Remove `pi` model definition
  - Update provider assignments for multiple models to include `Together`
* refactor: Remove LegacyLMArena provider and update model best_providers
- Remove LegacyLMArena import from Provider list in models.py
- Delete LegacyLMArena from default model's best_provider IterListProvider
- Remove multiple obsolete model definitions (gpt_3_5_turbo, gpt_4_turbo, phi_3_small, etc.) that exclusively used LegacyLMArena
- Update best_provider for all remaining models to remove LegacyLMArena from IterListProvider arguments
- Replace LegacyLMArena with alternative providers in model definitions (e.g., OpenaiChat, Together, DeepInfraChat)
- Simplify model definitions by removing redundant IterListProvider wrappers for single providers
- Expand provider imports in any_provider.py to include Blackboxapi, OIVSCodeSer2, etc.
- Extend provider list in AnyProvider with additional working providers for fallback support
* refactor: Remove Blackboxapi provider
- Deleted Blackboxapi provider implementation file
- Removed Blackboxapi import from provider __init__ file
- Updated default model configuration to exclude Blackboxapi provider
- Removed Blackboxapi from llama-3.1-70b model's best_provider
- Updated any_provider to exclude Blackboxapi from provider list
* fix: add missing parameters to Together.get_model method signature
- Add api_key and api_base parameters to get_model method in Together class
- Import random module at the top of the file
- Add inline import comment for random module inside get_model method
* fix: remove broken providers and update model configurations
- Remove non-working providers: ChatGLM, DocsBot, GizAI, OIVSCodeSer5
- Fix Blackbox provider by removing userSelectedModel logic
- Update DeepInfraChat default model to 'deepseek-ai/DeepSeek-V3-0324'
- Add random model selection for DeepInfraChat aliases
- Update LambdaChat default model to 'deepseek-v3-0324' and expand model list
- Fix LegacyLMArena model loading with better error handling and caching
- Add retry logic and timeouts to LegacyLMArena streaming responses (a generic sketch of the pattern follows this list)
- Improve LegacyLMArena response parsing to handle various data formats
- Update model references across g4f/models.py to remove deleted providers
- Fix AnyProvider model categorization logic for better grouping
- Add LegacyLMArena and ARTA to special provider handling in AnyProvider
- Update provider imports in __init__.py to exclude removed providers
- Add needs_auth flag to You.com and HailuoAI providers
- Fix GeminiPro get_model method signature to accept kwargs
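The retry-and-timeout change for streaming is only named above, so the sketch below shows the generic pattern with `asyncio.wait_for` and a bounded retry loop. `stream_response`, the exception types, and the constants are placeholders, not LegacyLMArena's real internals.

```python
# Generic retry-with-timeout sketch for a streaming request; stream_response()
# and the constants are placeholders, not the provider's actual code.
import asyncio

MAX_RETRIES = 3
READ_TIMEOUT = 30  # seconds to wait for the next chunk

async def stream_with_retries(stream_response, *args, **kwargs):
    last_error = None
    for attempt in range(MAX_RETRIES):
        yielded = False
        try:
            iterator = stream_response(*args, **kwargs).__aiter__()
            while True:
                try:
                    chunk = await asyncio.wait_for(iterator.__anext__(), READ_TIMEOUT)
                except StopAsyncIteration:
                    return
                yielded = True
                yield chunk
        except (asyncio.TimeoutError, ConnectionError) as error:
            if yielded:
                raise  # don't retry mid-stream; that would duplicate output
            last_error = error
            await asyncio.sleep(2 ** attempt)  # simple backoff before reconnecting
    raise last_error
```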
* fix (g4f/Provider/LambdaChat.py)
* refactor: format models list in LMArenaBeta provider
- Convert single-line models array to multi-line format
- Add 11 new models (hunyuan, flux-kontext-pro, cobalt variants, etc.)
- Remove 6 models (bagel, goldmane, redsword, etc.)
- Update stephen model ID
---------
Co-authored-by: kqlio67 <kqlio67.noreply.github.com>
from __future__ import annotations

import asyncio
import hashlib
import json
from urllib.parse import quote

from ....providers.response import JsonMixin
from ....requests import Tab

API_PATH = "/v4/api/chat/msg"

class CallbackResults(JsonMixin):
    """Values collected from the logged-in browser session."""
    def __init__(self):
        self.token: str = None
        self.path_and_query: str = None
        self.timestamp: int = None

def hash_function(base_string: str) -> str:
    """
    Mimics the hashFunction using MD5.
    """
    return hashlib.md5(base_string.encode()).hexdigest()

def generate_yy_header(has_search_params_path: str, body_to_yy: dict, time: int) -> str:
    """
    Python equivalent of the generateYYHeader function.
    """
    # print("Encoded Path:", quote(has_search_params_path, ""))
    # print("Stringified Body:", s)
    # print("Hashed Time:", hash_function(str(time)))

    encoded_path = quote(has_search_params_path, "")
    time_hash = hash_function(str(time))
    combined_string = f"{encoded_path}_{body_to_yy}{time_hash}ooui"

    # print("Combined String:", combined_string)
    # print("Hashed Combined String:", hash_function(combined_string))
    return hash_function(combined_string)

def get_body_to_yy(l):
    """Concatenate MD5 hashes of the message fields (mirrors the site's bodyToYY helper)."""
    L = l["msgContent"].replace("\r\n", "").replace("\n", "").replace("\r", "")
    M = hash_function(l["characterID"]) + hash_function(L) + hash_function(l["chatID"])
    M += hash_function("")  # Mimics hashFunction(undefined) in JS

    # print("bodyToYY:", M)
    return M

def get_body_json(s):
    """Serialize a payload deterministically: sorted keys, ASCII-escaped."""
    return json.dumps(s, ensure_ascii=True, sort_keys=True)

async def get_browser_callback(auth_result: CallbackResults):
    """Return a page callback that waits for the auth token and collects the
    device/browser parameters used to build the signed request path."""
    async def callback(page: Tab):
        while not auth_result.token:
            auth_result.token = await page.evaluate("localStorage.getItem('_token')")
            if not auth_result.token:
                await asyncio.sleep(1)
        (auth_result.path_and_query, auth_result.timestamp) = await page.evaluate("""
            const device_id = localStorage.getItem("USER_HARD_WARE_INFO");
            const uuid = localStorage.getItem("UNIQUE_USER_ID");
            const os_name = navigator.userAgentData?.platform || navigator.platform || "Unknown";
            const browser_name = (() => {
                const userAgent = navigator.userAgent.toLowerCase();
                if (userAgent.includes("chrome") && !userAgent.includes("edg")) return "chrome";
                if (userAgent.includes("edg")) return "edge";
                if (userAgent.includes("firefox")) return "firefox";
                if (userAgent.includes("safari") && !userAgent.includes("chrome")) return "safari";
                return "unknown";
            })();
            const cpu_core_num = navigator.hardwareConcurrency || 8;
            const browser_language = navigator.language || "unknown";
            const browser_platform = `${navigator.platform || "unknown"}`;
            const screen_width = window.screen.width || "unknown";
            const screen_height = window.screen.height || "unknown";
            const unix = Date.now(); // Current Unix timestamp in milliseconds
            const params = {
                device_platform: "web",
                biz_id: 2,
                app_id: 3001,
                version_code: 22201,
                lang: "en",
                uuid,
                device_id,
                os_name,
                browser_name,
                cpu_core_num,
                browser_language,
                browser_platform,
                screen_width,
                screen_height,
                unix
            };
            [new URLSearchParams(params).toString(), unix]
        """)
        auth_result.path_and_query = f"{API_PATH}?{auth_result.path_and_query}"
    return callback
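For orientation, here is a hypothetical usage of the helpers above. The payload keys match what `get_body_to_yy()` reads, but the values and the query string are placeholders, not a real MiniMax request.

```python
# Hypothetical usage sketch; field values and the query string are placeholders.
import time

payload = {
    "characterID": "0",
    "msgContent": "Hello",
    "chatID": "12345",
}
timestamp = int(time.time() * 1000)
# In the provider, path_and_query is collected by get_browser_callback();
# here it is stubbed with a shortened query string.
path_and_query = f"{API_PATH}?device_platform=web&app_id=3001&unix={timestamp}"

yy_header = generate_yy_header(path_and_query, get_body_to_yy(payload), timestamp)
body = get_body_json(payload)  # JSON body that would accompany the yy header
```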