Mirror of https://github.com/xtekky/gpt4free.git, synced 2025-12-06 02:30:41 -08:00.
Add Kimi provider, add vision support to LMArenaBeta
This commit is contained in:
parent
91b658dbb1
commit
8892b00ac1
8 changed files with 134 additions and 5 deletions
3
.gitignore
vendored
3
.gitignore
vendored
|
|
@ -36,4 +36,5 @@ projects/windows/
|
||||||
|
|
||||||
*.bak
|
*.bak
|
||||||
*.backup
|
*.backup
|
||||||
.env
|
.env
|
||||||
|
g4f.dev/
|
||||||
|
|
@ -19,7 +19,7 @@ RUN if [ "$G4F_VERSION" = "" ] ; then \
|
||||||
RUN apt-get -qqy update \
|
RUN apt-get -qqy update \
|
||||||
&& apt-get -qqy upgrade \
|
&& apt-get -qqy upgrade \
|
||||||
&& apt-get -qyy autoremove \
|
&& apt-get -qyy autoremove \
|
||||||
&& apt-get -qqy install python3 python-is-python3 pip ffmpeg flac \
|
&& apt-get -qqy install python3 python-is-python3 pip ffmpeg flac libavcodec-extra \
|
||||||
&& apt-get -qyy remove openjdk-11-jre-headless \
|
&& apt-get -qyy remove openjdk-11-jre-headless \
|
||||||
&& apt-get -qyy autoremove \
|
&& apt-get -qyy autoremove \
|
||||||
&& apt-get -qyy clean \
|
&& apt-get -qyy clean \
|
||||||
|
|
|
||||||
1
g4f.dev
1
g4f.dev
|
|
@ -1 +0,0 @@
|
||||||
Subproject commit b3a9831dd9b10e90f17bcf6524ff48863ac8112d
|
|
||||||
104
g4f/Provider/Kimi.py
Normal file
104
g4f/Provider/Kimi.py
Normal file
|
|
@ -0,0 +1,104 @@
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import random
|
||||||
|
from typing import AsyncIterator
|
||||||
|
|
||||||
|
from .base_provider import AsyncAuthedProvider, ProviderModelMixin
|
||||||
|
from ..providers.helper import get_last_user_message
|
||||||
|
from ..requests import StreamSession, see_stream
|
||||||
|
from ..providers.response import AuthResult, TitleGeneration, JsonConversation, FinishReason
|
||||||
|
from ..typing import AsyncResult, Messages
|
||||||
|
|
||||||
|
class Kimi(AsyncAuthedProvider, ProviderModelMixin):
    """Provider for the Kimi (kimi.com) web chat API.

    Authenticates by registering an anonymous device to obtain a bearer
    token, then streams completions over the chat SSE endpoint.
    """
    url = "https://www.kimi.com"
    working = True
    active_by_default = True
    default_model = "kimi-k2"
    models = [default_model]

    @classmethod
    async def on_auth_async(cls, proxy: str = None, **kwargs) -> AsyncIterator:
        """Register a fresh pseudo-device and yield an AuthResult with the access token.

        Raises:
            Exception: if device registration fails or no access token is returned.
        """
        # A random 16-digit id is accepted as an anonymous device identifier.
        device_id = str(random.randint(1000000000000000, 9999999999999999))
        async with StreamSession(proxy=proxy, impersonate="chrome") as session:
            async with session.post(
                "https://www.kimi.com/api/device/register",
                json={},
                headers={
                    "x-msh-device-id": device_id,
                    "x-msh-platform": "web",
                    "x-traffic-id": device_id
                }
            ) as response:
                if response.status != 200:
                    raise Exception("Failed to register device")
                data = await response.json()
                if not data.get("access_token"):
                    raise Exception("No access token received")
                # device_id is kept in the auth result so later calls reuse it.
                yield AuthResult(
                    api_key=data.get("access_token"),
                    device_id=device_id,
                )

    @classmethod
    async def create_authed(
        cls,
        model: str,
        messages: Messages,
        auth_result: AuthResult,
        proxy: str = None,
        conversation: JsonConversation = None,
        web_search: bool = False,
        **kwargs
    ) -> AsyncResult:
        """Stream a chat completion for the last user message.

        Creates a new chat session when no conversation is supplied, then
        posts to the streaming completion endpoint and yields text chunks,
        a TitleGeneration on rename events, and a FinishReason at the end.

        Raises:
            Exception: if chat creation or the completion request fails.
        """
        # NOTE: a stray dead `pass` statement that preceded this body was removed.
        async with StreamSession(
            proxy=proxy,
            impersonate="chrome",
            headers={
                "Authorization": f"Bearer {auth_result.api_key}",
            }
        ) as session:
            if conversation is None:
                # Create a new chat session; the name is the service's
                # default "untitled conversation" placeholder.
                async with session.post("https://www.kimi.com/api/chat", json={
                    "name":"未命名会话",
                    "born_from":"home",
                    "kimiplus_id":"kimi",
                    "is_example":False,
                    "source":"web",
                    "tags":[]
                }) as response:
                    if response.status != 200:
                        raise Exception("Failed to create chat")
                    chat_data = await response.json()
                    conversation = JsonConversation(chat_id=chat_data.get("id"))
            data = {
                "kimiplus_id": "kimi",
                "extend": {"sidebar": True},
                "model": model,
                "use_search": web_search,
                "messages": [
                    {
                        "role": "user",
                        # Only the last user message is sent; the service keeps
                        # its own history per chat_id.
                        "content": get_last_user_message(messages)
                    }
                ],
                "refs": [],
                "history": [],
                "scene_labels": [],
                "use_semantic_memory": False,
                "use_deep_research": False
            }
            async with session.post(
                f"https://www.kimi.com/api/chat/{conversation.chat_id}/completion/stream",
                json=data
            ) as response:
                if response.status != 200:
                    raise Exception("Failed to start chat completion")
                async for line in see_stream(response):
                    # "cmpl" events carry incremental completion text.
                    if line.get("event") == "cmpl":
                        yield line.get("text")
                    # "rename" events carry the auto-generated chat title.
                    elif line.get("event") == "rename":
                        yield TitleGeneration(line.get("text"))
                    # "all_done" terminates the stream.
                    elif line.get("event") == "all_done":
                        yield FinishReason("stop")
                        break
||||||
|
|
@ -41,6 +41,7 @@ from .DeepInfraChat import DeepInfraChat
|
||||||
from .DuckDuckGo import DuckDuckGo
|
from .DuckDuckGo import DuckDuckGo
|
||||||
from .Free2GPT import Free2GPT
|
from .Free2GPT import Free2GPT
|
||||||
from .ImageLabs import ImageLabs
|
from .ImageLabs import ImageLabs
|
||||||
|
from .Kimi import Kimi
|
||||||
from .LambdaChat import LambdaChat
|
from .LambdaChat import LambdaChat
|
||||||
from .LegacyLMArena import LegacyLMArena
|
from .LegacyLMArena import LegacyLMArena
|
||||||
from .OIVSCodeSer2 import OIVSCodeSer2
|
from .OIVSCodeSer2 import OIVSCodeSer2
|
||||||
|
|
|
||||||
|
|
@ -4,12 +4,14 @@ import time
|
||||||
import uuid
|
import uuid
|
||||||
import json
|
import json
|
||||||
import asyncio
|
import asyncio
|
||||||
|
import os
|
||||||
|
|
||||||
from ...typing import AsyncResult, Messages
|
from ...typing import AsyncResult, Messages, MediaListType
|
||||||
from ...requests import StreamSession, get_args_from_nodriver, raise_for_status, merge_cookies
|
from ...requests import StreamSession, get_args_from_nodriver, raise_for_status, merge_cookies
|
||||||
from ...requests import DEFAULT_HEADERS, has_nodriver
|
from ...requests import DEFAULT_HEADERS, has_nodriver
|
||||||
from ...errors import ModelNotFoundError
|
from ...errors import ModelNotFoundError
|
||||||
from ...providers.response import FinishReason, Usage, JsonConversation, ImageResponse
|
from ...providers.response import FinishReason, Usage, JsonConversation, ImageResponse
|
||||||
|
from ...tools.media import merge_media
|
||||||
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin,AuthFileMixin
|
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin,AuthFileMixin
|
||||||
from ..helper import get_last_user_message
|
from ..helper import get_last_user_message
|
||||||
from ... import debug
|
from ... import debug
|
||||||
|
|
@ -114,6 +116,7 @@ models = [
|
||||||
]
|
]
|
||||||
text_models = {model["publicName"]: model["id"] for model in models if "text" in model["capabilities"]["outputCapabilities"]}
|
text_models = {model["publicName"]: model["id"] for model in models if "text" in model["capabilities"]["outputCapabilities"]}
|
||||||
image_models = {model["publicName"]: model["id"] for model in models if "image" in model["capabilities"]["outputCapabilities"]}
|
image_models = {model["publicName"]: model["id"] for model in models if "image" in model["capabilities"]["outputCapabilities"]}
|
||||||
|
vision_models = [model["publicName"] for model in models if "image" in model["capabilities"]["inputCapabilities"]]
|
||||||
|
|
||||||
class LMArenaBeta(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
|
class LMArenaBeta(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
|
||||||
label = "LMArena (New)"
|
label = "LMArena (New)"
|
||||||
|
|
@ -124,6 +127,7 @@ class LMArenaBeta(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
|
||||||
default_model = list(text_models.keys())[0]
|
default_model = list(text_models.keys())[0]
|
||||||
models = list(text_models) + list(image_models)
|
models = list(text_models) + list(image_models)
|
||||||
image_models = list(image_models)
|
image_models = list(image_models)
|
||||||
|
vision_models = vision_models
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
async def create_async_generator(
|
async def create_async_generator(
|
||||||
|
|
@ -131,6 +135,7 @@ class LMArenaBeta(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
|
||||||
model: str,
|
model: str,
|
||||||
messages: Messages,
|
messages: Messages,
|
||||||
conversation: JsonConversation = None,
|
conversation: JsonConversation = None,
|
||||||
|
media: MediaListType = None,
|
||||||
proxy: str = None,
|
proxy: str = None,
|
||||||
timeout: int = None,
|
timeout: int = None,
|
||||||
**kwargs
|
**kwargs
|
||||||
|
|
@ -181,7 +186,15 @@ class LMArenaBeta(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
|
||||||
"id": userMessageId,
|
"id": userMessageId,
|
||||||
"role": "user",
|
"role": "user",
|
||||||
"content": prompt,
|
"content": prompt,
|
||||||
"experimental_attachments": [],
|
"experimental_attachments": [
|
||||||
|
{
|
||||||
|
"name": name or os.path.basename(url),
|
||||||
|
"contentType": get_content_type(url),
|
||||||
|
"url": url
|
||||||
|
}
|
||||||
|
for url, name in list(merge_media(media, messages))
|
||||||
|
if url.startswith("https://")
|
||||||
|
],
|
||||||
"parentMessageIds": [] if conversation is None else conversation.message_ids,
|
"parentMessageIds": [] if conversation is None else conversation.message_ids,
|
||||||
"participantPosition": "a",
|
"participantPosition": "a",
|
||||||
"modelId": None,
|
"modelId": None,
|
||||||
|
|
@ -233,3 +246,13 @@ class LMArenaBeta(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
|
||||||
# Save the args to cache file
|
# Save the args to cache file
|
||||||
with cache_file.open("w") as f:
|
with cache_file.open("w") as f:
|
||||||
json.dump(args, f)
|
json.dump(args, f)
|
||||||
|
|
||||||
|
def get_content_type(url: str) -> str:
    """Guess the MIME content type of *url* from its file extension.

    The comparison is case-insensitive (fixes e.g. ``PHOTO.PNG`` previously
    falling through to the generic fallback). Unknown extensions map to
    ``application/octet-stream``.

    Args:
        url: The attachment URL or file name.

    Returns:
        The MIME type string for the recognized image extensions,
        otherwise ``"application/octet-stream"``.
    """
    lowered = url.lower()
    if lowered.endswith(".webp"):
        return "image/webp"
    if lowered.endswith(".png"):
        return "image/png"
    # .jpg and .jpeg share one MIME type; endswith accepts a tuple.
    if lowered.endswith((".jpg", ".jpeg")):
        return "image/jpeg"
    return "application/octet-stream"
|
||||||
|
|
@ -129,6 +129,7 @@ class AuthManager:
|
||||||
"GeminiPro": "Gemini",
|
"GeminiPro": "Gemini",
|
||||||
"PollinationsAI": "Pollinations",
|
"PollinationsAI": "Pollinations",
|
||||||
"OpenaiAPI": "Openai",
|
"OpenaiAPI": "Openai",
|
||||||
|
"PuterJS": "Puter",
|
||||||
}
|
}
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue