Add Kimi provider, add vision support to LMArenaBeta

This commit is contained in:
hlohaus 2025-07-25 16:43:06 +02:00
parent 91b658dbb1
commit 8892b00ac1
8 changed files with 134 additions and 5 deletions

3
.gitignore vendored
View file

@ -36,4 +36,5 @@ projects/windows/
*.bak
*.backup
.env
g4f.dev/

View file

@ -19,7 +19,7 @@ RUN if [ "$G4F_VERSION" = "" ] ; then \
RUN apt-get -qqy update \
&& apt-get -qqy upgrade \
&& apt-get -qyy autoremove \
&& apt-get -qqy install python3 python-is-python3 pip ffmpeg flac \
&& apt-get -qqy install python3 python-is-python3 pip ffmpeg flac libavcodec-extra \
&& apt-get -qyy remove openjdk-11-jre-headless \
&& apt-get -qyy autoremove \
&& apt-get -qyy clean \

@ -1 +0,0 @@
Subproject commit b3a9831dd9b10e90f17bcf6524ff48863ac8112d

104
g4f/Provider/Kimi.py Normal file
View file

@ -0,0 +1,104 @@
from __future__ import annotations
import random
from typing import AsyncIterator
from .base_provider import AsyncAuthedProvider, ProviderModelMixin
from ..providers.helper import get_last_user_message
from ..requests import StreamSession, see_stream
from ..providers.response import AuthResult, TitleGeneration, JsonConversation, FinishReason
from ..typing import AsyncResult, Messages
class Kimi(AsyncAuthedProvider, ProviderModelMixin):
    """Provider for kimi.com (Moonshot AI) chat completions.

    Authenticates by registering an anonymous device id to obtain an access
    token, then streams completion events from the chat SSE endpoint.
    """
    url = "https://www.kimi.com"
    working = True
    active_by_default = True
    default_model = "kimi-k2"
    models = [default_model]

    @classmethod
    async def on_auth_async(cls, proxy: str = None, **kwargs) -> AsyncIterator:
        """Register a pseudo-random device id and yield an AuthResult.

        The same random 16-digit id is sent as both the device id and the
        traffic id, mirroring what the web client does.

        Raises:
            Exception: if device registration fails or no access token
                is present in the response.
        """
        device_id = str(random.randint(1000000000000000, 9999999999999999))
        async with StreamSession(proxy=proxy, impersonate="chrome") as session:
            async with session.post(
                "https://www.kimi.com/api/device/register",
                json={},
                headers={
                    "x-msh-device-id": device_id,
                    "x-msh-platform": "web",
                    "x-traffic-id": device_id
                }
            ) as response:
                if response.status != 200:
                    raise Exception("Failed to register device")
                data = await response.json()
                if not data.get("access_token"):
                    raise Exception("No access token received")
                yield AuthResult(
                    api_key=data.get("access_token"),
                    device_id=device_id,
                )

    @classmethod
    async def create_authed(
        cls,
        model: str,
        messages: Messages,
        auth_result: AuthResult,
        proxy: str = None,
        conversation: JsonConversation = None,
        web_search: bool = False,
        **kwargs
    ) -> AsyncResult:
        """Stream a chat completion from Kimi.

        Creates a new chat when no conversation is supplied, then posts the
        last user message to the completion stream endpoint. Yields text
        chunks for "cmpl" events, a TitleGeneration for "rename" events, and
        a FinishReason("stop") when the stream signals "all_done".

        Raises:
            Exception: if chat creation or the completion request fails.
        """
        # FIX: removed a leftover dead `pass` statement that preceded
        # the session setup in the original implementation.
        async with StreamSession(
            proxy=proxy,
            impersonate="chrome",
            headers={
                "Authorization": f"Bearer {auth_result.api_key}",
            }
        ) as session:
            if conversation is None:
                # Create a fresh chat; the payload mirrors the web client
                # ("未命名会话" is the default "untitled chat" name).
                async with session.post("https://www.kimi.com/api/chat", json={
                    "name":"未命名会话",
                    "born_from":"home",
                    "kimiplus_id":"kimi",
                    "is_example":False,
                    "source":"web",
                    "tags":[]
                }) as response:
                    if response.status != 200:
                        raise Exception("Failed to create chat")
                    chat_data = await response.json()
                    conversation = JsonConversation(chat_id=chat_data.get("id"))
            # Only the last user message is sent; Kimi keeps server-side
            # history per chat_id.
            data = {
                "kimiplus_id": "kimi",
                "extend": {"sidebar": True},
                "model": model,
                "use_search": web_search,
                "messages": [
                    {
                        "role": "user",
                        "content": get_last_user_message(messages)
                    }
                ],
                "refs": [],
                "history": [],
                "scene_labels": [],
                "use_semantic_memory": False,
                "use_deep_research": False
            }
            async with session.post(
                f"https://www.kimi.com/api/chat/{conversation.chat_id}/completion/stream",
                json=data
            ) as response:
                if response.status != 200:
                    raise Exception("Failed to start chat completion")
                async for line in see_stream(response):
                    if line.get("event") == "cmpl":
                        yield line.get("text")
                    elif line.get("event") == "rename":
                        yield TitleGeneration(line.get("text"))
                    elif line.get("event") == "all_done":
                        yield FinishReason("stop")
                        break

View file

@ -41,6 +41,7 @@ from .DeepInfraChat import DeepInfraChat
from .DuckDuckGo import DuckDuckGo
from .Free2GPT import Free2GPT
from .ImageLabs import ImageLabs
from .Kimi import Kimi
from .LambdaChat import LambdaChat
from .LegacyLMArena import LegacyLMArena
from .OIVSCodeSer2 import OIVSCodeSer2

View file

@ -4,12 +4,14 @@ import time
import uuid
import json
import asyncio
import os
from ...typing import AsyncResult, Messages
from ...typing import AsyncResult, Messages, MediaListType
from ...requests import StreamSession, get_args_from_nodriver, raise_for_status, merge_cookies
from ...requests import DEFAULT_HEADERS, has_nodriver
from ...errors import ModelNotFoundError
from ...providers.response import FinishReason, Usage, JsonConversation, ImageResponse
from ...tools.media import merge_media
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin,AuthFileMixin
from ..helper import get_last_user_message
from ... import debug
@ -114,6 +116,7 @@ models = [
]
text_models = {model["publicName"]: model["id"] for model in models if "text" in model["capabilities"]["outputCapabilities"]}
image_models = {model["publicName"]: model["id"] for model in models if "image" in model["capabilities"]["outputCapabilities"]}
vision_models = [model["publicName"] for model in models if "image" in model["capabilities"]["inputCapabilities"]]
class LMArenaBeta(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
label = "LMArena (New)"
@ -124,6 +127,7 @@ class LMArenaBeta(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
default_model = list(text_models.keys())[0]
models = list(text_models) + list(image_models)
image_models = list(image_models)
vision_models = vision_models
@classmethod
async def create_async_generator(
@ -131,6 +135,7 @@ class LMArenaBeta(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
model: str,
messages: Messages,
conversation: JsonConversation = None,
media: MediaListType = None,
proxy: str = None,
timeout: int = None,
**kwargs
@ -181,7 +186,15 @@ class LMArenaBeta(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
"id": userMessageId,
"role": "user",
"content": prompt,
"experimental_attachments": [],
"experimental_attachments": [
{
"name": name or os.path.basename(url),
"contentType": get_content_type(url),
"url": url
}
for url, name in list(merge_media(media, messages))
if url.startswith("https://")
],
"parentMessageIds": [] if conversation is None else conversation.message_ids,
"participantPosition": "a",
"modelId": None,
@ -233,3 +246,13 @@ class LMArenaBeta(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
# Save the args to cache file
with cache_file.open("w") as f:
json.dump(args, f)
def get_content_type(url: str) -> str:
    """Guess the MIME type of a media URL from its file extension.

    Matching is case-insensitive, so e.g. ".PNG" and ".Jpg" are recognized.
    Unknown extensions fall back to the generic binary type.

    Args:
        url: URL or path of the attachment.

    Returns:
        A MIME type string such as "image/png", or
        "application/octet-stream" when the extension is not recognized.
    """
    lowered = url.lower()
    if lowered.endswith(".webp"):
        return "image/webp"
    if lowered.endswith(".png"):
        return "image/png"
    # One endswith call covers both spellings of the JPEG extension.
    if lowered.endswith((".jpg", ".jpeg")):
        return "image/jpeg"
    return "application/octet-stream"

View file

@ -129,6 +129,7 @@ class AuthManager:
"GeminiPro": "Gemini",
"PollinationsAI": "Pollinations",
"OpenaiAPI": "Openai",
"PuterJS": "Puter",
}
@classmethod