Mirror of https://github.com/xtekky/gpt4free.git (synced 2025-12-05 18:20:35 -08:00)

Compare commits: 3 commits, 5bacb669b2 ... 7234402211

| Author | SHA1 | Date |
|---|---|---|
| | 7234402211 | |
| | 1dac52a191 | |
| | 213e04bae7 | |

10 changed files with 86 additions and 1068 deletions
Dockerfile (path not shown in the capture) — the standalone `pip` self-upgrade step is dropped from the RUN chain:

```diff
@@ -18,8 +18,7 @@ RUN apt-get update && apt-get upgrade -y \
     && useradd -rm -G sudo -u $G4F_USER_ID -g $G4F_USER_ID $G4F_USER \
     && mkdir -p /var/log/supervisor \
     && chown "${G4F_USER_ID}:${G4F_USER_ID}" /var/log/supervisor \
-    && echo "${G4F_USER}:${G4F_USER}" | chpasswd \
-    && python -m pip install --upgrade pip
+    && echo "${G4F_USER}:${G4F_USER}" | chpasswd

 USER $G4F_USER_ID
 WORKDIR $G4F_DIR
```
A second Dockerfile (likely the slim variant) — the same `pip` upgrade is removed from its RUN chain:

```diff
@@ -17,7 +17,6 @@ RUN apt-get update && apt-get upgrade -y \
     && groupadd -g $G4F_USER_ID $G4F_USER \
     && useradd -rm -G sudo -u $G4F_USER_ID -g $G4F_USER_ID $G4F_USER \
     && echo "${G4F_USER}:${G4F_USER}" | chpasswd \
-    && python -m pip install --upgrade pip \
     && apt-get clean \
     && rm --recursive --force /var/lib/apt/lists/* /tmp/* /var/tmp/*
```
Cloudflare provider (inferred from the class name) — the provider is switched off unconditionally instead of being gated on the optional `curl_cffi` dependency:

```diff
@@ -36,7 +36,7 @@ def clean_name(name: str) -> str:
 class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
     label = "Cloudflare AI"
     url = "https://playground.ai.cloudflare.com"
-    working = has_curl_cffi
+    working = False
     use_nodriver = True
     active_by_default = True
     api_endpoint = "https://playground.ai.cloudflare.com/api/inference"
```
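For context, `has_curl_cffi` no longer gates availability here. A minimal sketch of how such a capability flag is typically derived in this codebase (the defining module is not part of this diff, so take this as an assumption):

```python
# Sketch: how a flag like has_curl_cffi is usually set via an optional import.
try:
    import curl_cffi  # optional dependency for impersonated TLS requests
    has_curl_cffi = True
except ImportError:
    has_curl_cffi = False
```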
PollinationsAI — when media is attached but none of it is audio, fall back to the default text model instead of keeping the caller's (possibly empty) model value:

```diff
@@ -206,7 +206,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
                 if is_data_an_audio(media_data, filename):
                     has_audio = True
                     break
-            model = cls.default_audio_model if has_audio else model
+            model = cls.default_audio_model if has_audio else cls.default_model
         elif cls._models_loaded or cls.get_models():
             if model in cls.model_aliases:
                 model = cls.model_aliases[model]
```
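The one-line change matters when the caller passes no model and the attached media is not audio. A minimal sketch of the corrected fallback; the class attributes are stand-ins, not PollinationsAI's real defaults:

```python
class Cls:
    # Stand-in defaults; the provider's real values are not part of this hunk.
    default_model = "default-text-model"
    default_audio_model = "default-audio-model"

model = None          # caller did not request a model
has_audio = False     # attached media is not audio
model = Cls.default_audio_model if has_audio else Cls.default_model
assert model == "default-text-model"  # previously this stayed None
```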
LMArena — `uuid.uuid7()` (only available on newer Python versions) is replaced by a local `uuid7()` helper: `time` and `secrets` are imported for it, a duplicated `import json` is dropped, and the old uuid shim import is removed:

```diff
@@ -1,11 +1,12 @@
 from __future__ import annotations

 import uuid
-import json
 import asyncio
 import os
 import requests
 import json
+import time
+import secrets

 try:
     import curl_cffi
@@ -26,11 +27,26 @@ from ...requests import StreamSession, get_args_from_nodriver, raise_for_status,
 from ...errors import ModelNotFoundError, CloudflareError, MissingAuthError, MissingRequirementsError
 from ...providers.response import FinishReason, Usage, JsonConversation, ImageResponse, Reasoning, PlainTextResponse, JsonRequest
 from ...tools.media import merge_media
-from ...integration import uuid
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin
 from ..helper import get_last_user_message
 from ... import debug

+def uuid7():
+    """
+    Generate a UUIDv7 using Unix epoch (milliseconds since 1970-01-01)
+    matching the browser's implementation.
+    """
+    timestamp_ms = int(time.time() * 1000)
+    rand_a = secrets.randbits(12)
+    rand_b = secrets.randbits(62)
+
+    uuid_int = timestamp_ms << 80
+    uuid_int |= (0x7000 | rand_a) << 64
+    uuid_int |= (0x8000000000000000 | rand_b)
+
+    hex_str = f"{uuid_int:032x}"
+    return f"{hex_str[0:8]}-{hex_str[8:12]}-{hex_str[12:16]}-{hex_str[16:20]}-{hex_str[20:32]}"
+
 models = [
     {'id': '812c93cc-5f88-4cff-b9ca-c11a26599b0e', 'publicName': 'qwen3-max-preview',
      'capabilities': {'inputCapabilities': {'text': True}, 'outputCapabilities': {'text': True}},
```
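The helper packs a 48-bit millisecond timestamp, the version nibble `7`, two variant bits, and 74 random bits into the canonical 128-bit UUID layout. A quick standalone sanity check; the function body is copied verbatim from the diff above:

```python
import time
import secrets

def uuid7():
    # Copied from the diff above.
    timestamp_ms = int(time.time() * 1000)
    rand_a = secrets.randbits(12)
    rand_b = secrets.randbits(62)

    uuid_int = timestamp_ms << 80
    uuid_int |= (0x7000 | rand_a) << 64
    uuid_int |= (0x8000000000000000 | rand_b)

    hex_str = f"{uuid_int:032x}"
    return f"{hex_str[0:8]}-{hex_str[8:12]}-{hex_str[12:16]}-{hex_str[16:20]}-{hex_str[20:32]}"

value = uuid7()
groups = value.split("-")
assert groups[2][0] == "7"        # version nibble is 7
assert groups[3][0] in "89ab"     # RFC 4122 variant bits
ts_ms = int(value.replace("-", "")[:12], 16)  # first 48 bits are the timestamp
assert abs(ts_ms - time.time() * 1000) < 1000
print(value)
```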
Continuing in LMArena — a second endpoint is added for follow-up turns in an existing evaluation session, the request payload is reduced to a single `userMessage`, and the session id is handed back to the caller as a `JsonConversation`:

```diff
@@ -485,7 +501,8 @@ class LMArena(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
     label = "LMArena"
     url = "https://lmarena.ai"
     share_url = None
-    api_endpoint = "https://lmarena.ai/nextjs-api/stream/create-evaluation"
+    create_evaluation = "https://lmarena.ai/nextjs-api/stream/create-evaluation"
+    post_to_evaluation = "https://lmarena.ai/nextjs-api/stream/post-to-evaluation/{id}"
     working = True
     active_by_default = True
     use_stream_timeout = False
@@ -637,21 +654,23 @@ class LMArena(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
         else:
             raise ModelNotFoundError(f"Model '{model}' is not supported by LMArena provider.")

-        evaluationSessionId = str(uuid.uuid7())
-        userMessageId = str(uuid.uuid7())
-        modelAMessageId = str(uuid.uuid7())
+        if conversation and getattr(conversation, "evaluationSessionId", None):
+            url = cls.post_to_evaluation.format(id=conversation.evaluationSessionId)
+            evaluationSessionId = conversation.evaluationSessionId
+        else:
+            url = cls.create_evaluation
+            evaluationSessionId = str(uuid7())
+        userMessageId = str(uuid7())
+        modelAMessageId = str(uuid7())
         data = {
             "id": evaluationSessionId,
             "mode": "direct",
             "modelAId": model_id,
             "userMessageId": userMessageId,
             "modelAMessageId": modelAMessageId,
-            "messages": [
-                {
-                    "id": userMessageId,
-                    "role": "user",
-                    "content": prompt,
-                    "experimental_attachments": [
+            "userMessage": {
+                "content": prompt,
+                "experimental_attachments": [
                     {
                         "name": name or os.path.basename(url),
                         "contentType": get_content_type(url),
@@ -660,33 +679,14 @@ class LMArena(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
                     for url, name in list(merge_media(media, messages))
                     if isinstance(url, str) and url.startswith("https://")
                 ],
-                    "parentMessageIds": [] if conversation is None else conversation.message_ids,
-                    "participantPosition": "a",
-                    "modelId": None,
-                    "evaluationSessionId": evaluationSessionId,
-                    "status": "pending",
-                    "failureReason": None
-                },
-                {
-                    "id": modelAMessageId,
-                    "role": "assistant",
-                    "content": "",
-                    "experimental_attachments": [],
-                    "parentMessageIds": [userMessageId],
-                    "participantPosition": "a",
-                    "modelId": model,
-                    "evaluationSessionId": evaluationSessionId,
-                    "status": "pending",
-                    "failureReason": None
-                }
-            ],
-            "modality": "image" if is_image_model else "chat"
+            },
+            "modality": "image" if is_image_model else "chat",
         }
         yield JsonRequest.from_dict(data)
         try:
             async with StreamSession(**args, timeout=timeout) as session:
                 async with session.post(
-                    cls.api_endpoint,
+                    url,
                     json=data,
                     proxy=proxy
                 ) as response:
@@ -695,9 +695,7 @@ class LMArena(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
                     async for chunk in response.iter_lines():
                         line = chunk.decode()
                         yield PlainTextResponse(line)
-                        if line.startswith("af:"):
-                            yield JsonConversation(message_ids=[modelAMessageId])
-                        elif line.startswith("a0:"):
+                        if line.startswith("a0:"):
                             chunk = json.loads(line[3:])
                             if chunk == "hasArenaError":
                                 raise ModelNotFoundError("LMArena Beta encountered an error: hasArenaError")
@@ -708,6 +706,7 @@ class LMArena(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
                         elif line.startswith("a2:"):
                             yield ImageResponse([image.get("image") for image in json.loads(line[3:])], prompt)
                         elif line.startswith("ad:"):
+                            yield JsonConversation(evaluationSessionId=evaluationSessionId)
                             finish = json.loads(line[3:])
                             if "finishReason" in finish:
                                 yield FinishReason(finish["finishReason"])
```
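Taken together: the first call yields a `JsonConversation` carrying the `evaluationSessionId`, and passing it back routes the next turn to `post_to_evaluation`. A minimal usage sketch, assuming the standard g4f provider entry point; the model name is taken from the models list above, everything else follows the diff:

```python
import asyncio

from g4f.Provider import LMArena
from g4f.providers.response import JsonConversation

async def chat() -> None:
    conversation = None
    for prompt in ("Hello!", "And a follow-up question."):
        async for chunk in LMArena.create_async_generator(
            "qwen3-max-preview",                       # model from the diff's list
            [{"role": "user", "content": prompt}],
            conversation=conversation,
        ):
            if isinstance(chunk, JsonConversation):
                conversation = chunk                   # carries evaluationSessionId
            elif isinstance(chunk, str):
                print(chunk, end="")
        print()

asyncio.run(chat())
```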
OpenAI HAR helper (inferred from `get_cookies_dir` and `NoValidHarFileError`) — the `.crypt` import becomes optional so the module still loads when its dependencies are missing:

```diff
@@ -9,8 +9,10 @@ import uuid
 import random
 from urllib.parse import unquote
 from copy import deepcopy
-
-from .crypt import decrypt, encrypt
+try:
+    from .crypt import decrypt, encrypt
+except ImportError:
+    pass
 from ...requests import StreamSession
 from ...cookies import get_cookies_dir
 from ...errors import NoValidHarFileError
```
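This mirrors the usual optional-dependency guard, though `pass` leaves `decrypt`/`encrypt` unbound on failure, so call sites would raise `NameError` rather than `ImportError`. A generic sketch of the pattern with a hypothetical module name:

```python
# Hypothetical stand-in for `.crypt`; any missing module triggers the fallback.
try:
    import optional_crypto_backend  # not a real package, used for illustration
    has_crypt = True
except ImportError:
    optional_crypto_backend = None  # unlike `pass`, this keeps the name bound
    has_crypt = False

print("crypt helpers available:", has_crypt)
```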
pydantic-ai integration — track the current pydantic-ai API (`OpenAIChatModel`, `RequestUsage`) and add a `_process_response` override that maps g4f chat completions, including reasoning content and tool calls, onto pydantic-ai response parts:

```diff
@@ -4,16 +4,19 @@ from typing import Optional
 from functools import partial
 from dataclasses import dataclass, field

-from pydantic_ai.models import Model, KnownModelName, infer_model
-from pydantic_ai.models.openai import OpenAIModel, OpenAISystemPromptRole
+from pydantic_ai import ModelResponsePart, ThinkingPart, ToolCallPart
+from pydantic_ai.models import Model, ModelResponse, KnownModelName, infer_model
+from pydantic_ai.usage import RequestUsage
+from pydantic_ai.models.openai import OpenAIChatModel
+from pydantic_ai.models.openai import OpenAISystemPromptRole, _now_utc, split_content_into_text_and_thinking, replace

 import pydantic_ai.models.openai
 pydantic_ai.models.openai.NOT_GIVEN = None

-from ..client import AsyncClient
+from ..client import AsyncClient, ChatCompletion

 @dataclass(init=False)
-class AIModel(OpenAIModel):
+class AIModel(OpenAIChatModel):
     """A model that uses the G4F API."""

     client: AsyncClient = field(repr=False)
@@ -29,7 +32,7 @@ class AIModel(OpenAIModel):
         provider: str | None = None,
         *,
         system_prompt_role: OpenAISystemPromptRole | None = None,
-        system: str | None = 'openai',
+        system: str | None = 'g4f',
         **kwargs
     ):
         """Initialize an AI model.
@@ -44,7 +47,7 @@ class AIModel(OpenAIModel):
         customize the `base_url` and `api_key` to use a different provider.
         """
         self._model_name = model_name
-        self._provider = provider
+        self._provider = getattr(provider, '__name__', provider)
         self.client = AsyncClient(provider=provider, **kwargs)
         self.system_prompt_role = system_prompt_role
         self._system = system
@@ -53,6 +56,38 @@ class AIModel(OpenAIModel):
         if self._provider:
             return f'g4f:{self._provider}:{self._model_name}'
         return f'g4f:{self._model_name}'

+    def _process_response(self, response: ChatCompletion | str) -> ModelResponse:
+        """Process a non-streamed response, and prepare a message to return."""
+        choice = response.choices[0]
+        items: list[ModelResponsePart] = []
+
+        if reasoning := getattr(choice.message, 'reasoning', None):
+            items.append(ThinkingPart(id='reasoning', content=reasoning, provider_name=self.system))
+
+        if choice.message.content:
+            items.extend(
+                (replace(part, id='content', provider_name=self.system) if isinstance(part, ThinkingPart) else part)
+                for part in split_content_into_text_and_thinking(choice.message.content, self.profile.thinking_tags)
+            )
+        if choice.message.tool_calls is not None:
+            for c in choice.message.tool_calls:
+                items.append(ToolCallPart(c.function.name, c.function.arguments, tool_call_id=c.id))
+        usage = RequestUsage(
+            input_tokens=response.usage.prompt_tokens,
+            output_tokens=response.usage.completion_tokens,
+        )
+
+        return ModelResponse(
+            parts=items,
+            usage=usage,
+            model_name=response.model,
+            timestamp=_now_utc(),
+            provider_details=None,
+            provider_response_id=response.id,
+            provider_name=self._provider,
+            finish_reason=choice.finish_reason,
+        )
+
 def new_infer_model(model: Model | KnownModelName, api_key: str = None) -> Model:
     if isinstance(model, Model):
@@ -68,5 +103,4 @@ def new_infer_model(model: Model | KnownModelName, api_key: str = None) -> Model
 def patch_infer_model(api_key: str | None = None):
     import pydantic_ai.models

-    pydantic_ai.models.infer_model = partial(new_infer_model, api_key=api_key)
-    pydantic_ai.models.AIModel = AIModel
+    pydantic_ai.models.infer_model = partial(new_infer_model, api_key=api_key)
```
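A minimal usage sketch, assuming `pydantic-ai` is installed. The import path is inferred from the relative imports above (not confirmed by this diff), the model name is a placeholder, and the result attribute is `.output` on recent pydantic-ai releases (older ones expose `.data`):

```python
import asyncio

from pydantic_ai import Agent

from g4f.integration.pydantic_ai import AIModel  # path inferred, not confirmed

model = AIModel("gpt-4o-mini")  # placeholder model name
agent = Agent(model)

result = asyncio.run(agent.run("Say hello in one word."))
print(result.output)  # `.output` on recent pydantic-ai; older versions use `.data`
```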
One file's diff was suppressed because it is too large.
g4f/models.py (inferred from `default = Model(` and the `.Provider` imports) — each hunk below removes a single line from the provider imports and the default provider lists. The capture does not preserve which line in each hunk carries the deletion marker, so the affected regions are shown without +/- markers:

```diff
@@ -6,7 +6,6 @@ from typing import Dict, List, Optional
 from .Provider import IterListProvider, ProviderType
 from .Provider import (
     ### No Auth Required ###
     Blackbox,
     Chatai,
     Cloudflare,
     Copilot,
@@ -17,7 +16,6 @@ from .Provider import (
     GLM,
     Kimi,
     LambdaChat,
     Mintlify,
     OIVSCodeSer2,
     OIVSCodeSer0501,
     OperaAria,
@@ -27,7 +25,6 @@ from .Provider import (
     PollinationsAI,
     PollinationsImage,
     Qwen,
     StringableInference,
     TeachAnything,
     Together,
     WeWordle,
@@ -155,7 +152,6 @@ default = Model(
     name = "",
     base_provider = "",
     best_provider = IterListProvider([
         StringableInference,
         OIVSCodeSer0501,
         OIVSCodeSer2,
         Copilot,
@@ -168,7 +164,6 @@ default = Model(
         Together,
         Chatai,
         WeWordle,
         Mintlify,
         TeachAnything,
         OpenaiChat,
         Cloudflare,
@@ -179,7 +174,6 @@ default_vision = VisionModel(
     name = "",
     base_provider = "",
     best_provider = IterListProvider([
         StringableInference,
         DeepInfra,
         OIVSCodeSer0501,
         OIVSCodeSer2,
```
AnyProvider — route to PollinationsAI only when the caller actually passes tools, not merely when the `tools` key is present:

```diff
@@ -348,7 +348,7 @@ class AnyProvider(AsyncGeneratorProvider, AnyModelProviderMixin):
                         has_audio = True
                         break
                     has_image = True
-        if "tools" in kwargs:
+        if kwargs.get("tools", None):
            providers = [PollinationsAI]
        elif "audio" in kwargs or "audio" in kwargs.get("modalities", []):
            if kwargs.get("audio", {}).get("language") is None:
```
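The old membership test fired even for an explicit empty list; the new truthiness check does not. A quick demonstration of the difference:

```python
kwargs = {"tools": []}  # tools passed explicitly, but the list is empty

old_condition = "tools" in kwargs          # True: would have forced PollinationsAI
new_condition = bool(kwargs.get("tools"))  # False: an empty list no longer routes

assert old_condition and not new_condition
```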