mirror of https://github.com/xtekky/gpt4free.git
Add Claude provider with authentication handling and update Nvidia provider
parent f8c10a41ad
commit d7375d4c6c
6 changed files with 62 additions and 9 deletions
@@ -8,7 +8,7 @@ from ...typing import Union, AsyncResult, Messages, MediaListType
 from ...requests import StreamSession, StreamResponse, raise_for_status, sse_stream
 from ...image import use_aspect_ratio
 from ...image.copy_images import save_response_media
-from ...providers.response import FinishReason, ToolCalls, Usage, ImageResponse, ProviderInfo, AudioResponse, Reasoning
+from ...providers.response import FinishReason, ToolCalls, Usage, ImageResponse, ProviderInfo, AudioResponse, Reasoning, JsonConversation
 from ...tools.media import render_messages
 from ...tools.run_tools import AuthManager
 from ...errors import MissingAuthError
@@ -29,7 +29,6 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin):
     add_user = True
     use_image_size = False
     max_tokens: int = None
-    supports_text_plain = True # Whether the provider can handle text/plain responses
 
     @classmethod
     def get_models(cls, api_key: str = None, api_base: str = None) -> list[str]:
@@ -75,6 +74,7 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin):
         messages: Messages,
         proxy: str = None,
         timeout: int = 120,
+        conversation: JsonConversation = None,
         media: MediaListType = None,
         api_key: str = None,
         api_endpoint: str = None,
@@ -139,6 +139,7 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin):
                 stop=stop,
                 stream="audio" not in extra_parameters if stream is None else stream,
                 user=user if cls.add_user else None,
+                conversation=conversation.get_dict() if conversation else None,
                 **extra_parameters,
                 **extra_body
             )
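Hunks three and four act as a pair: the conversation object accepted by create_async_generator is serialized into the request body with get_dict(), and a provider can later hand back an updated copy. A minimal sketch of that round-trip, using a simplified stand-in for JsonConversation (the field names and payload shape below are illustrative, not the provider's actual wire format):

# Minimal sketch of the conversation round-trip; SimpleConversation is an
# illustrative stand-in for g4f's JsonConversation, not the real class.
from dataclasses import dataclass, field

@dataclass
class SimpleConversation:
    data: dict = field(default_factory=dict)  # opaque per-provider state

    def get_dict(self) -> dict:
        # mirrors conversation.get_dict() in the request payload above
        return self.data

    @classmethod
    def from_dict(cls, data: dict) -> "SimpleConversation":
        # mirrors JsonConversation.from_dict on the response side
        return cls(data=data)

conversation = None  # the first request carries no prior state
payload = {
    "model": "claude-3",  # illustrative model id
    "conversation": conversation.get_dict() if conversation else None,
}
# a response may echo updated state, which the next request carries forward:
response_json = {"conversation": {"session_id": "abc123"}}  # illustrative shape
conversation = SimpleConversation.from_dict(response_json["conversation"])
assert conversation.get_dict() == {"session_id": "abc123"}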
@@ -148,7 +149,7 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin):
             if api_endpoint is None:
                 api_endpoint = cls.api_endpoint
             async with session.post(api_endpoint, json=data, ssl=cls.ssl) as response:
-                async for chunk in read_response(response, stream, prompt, cls.get_dict(), download_media, cls.supports_text_plain):
+                async for chunk in read_response(response, stream, prompt, cls.get_dict(), download_media):
                     yield chunk
 
     @classmethod
@@ -163,11 +164,9 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin):
             **({} if headers is None else headers)
         }
 
-async def read_response(response: StreamResponse, stream: bool, prompt: str, provider_info: dict, download_media: bool, supports_text_plain: bool = True) -> AsyncResult:
+async def read_response(response: StreamResponse, stream: bool, prompt: str, provider_info: dict, download_media: bool) -> AsyncResult:
     content_type = response.headers.get("content-type", "text/event-stream" if stream else "application/json")
-    if supports_text_plain and content_type.startswith("text/plain"):
-        yield await response.text()
-    elif content_type.startswith("application/json"):
+    if content_type.startswith("application/json"):
         data = await response.json()
         OpenaiTemplate.raise_error(data, response.status)
         await raise_for_status(response)
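With the supports_text_plain flag gone, read_response decides what to do from the Content-Type header alone. A condensed sketch of the dispatch that remains, assuming non-JSON responses are consumed as an event stream (the response object and its iter_sse method are illustrative, not g4f's StreamResponse API):

from typing import AsyncIterator

async def dispatch_response(response, stream: bool) -> AsyncIterator[object]:
    # default mirrors the diff: assume SSE when streaming, JSON otherwise
    content_type = response.headers.get(
        "content-type", "text/event-stream" if stream else "application/json"
    )
    if content_type.startswith("application/json"):
        # one complete body, parsed once; error handling would go here
        yield await response.json()
    else:
        # streamed body, one event at a time (illustrative iterator name)
        async for event in response.iter_sse():
            yield event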
@@ -176,6 +175,8 @@ async def read_response(response: StreamResponse, stream: bool, prompt: str, provider_info: dict, download_media: bool, supports_text_plain: bool = True) -> AsyncResult:
             yield ProviderInfo(**provider_info, model=model)
         if "usage" in data:
             yield Usage(**data["usage"])
+        if "conversation" in data:
+            yield JsonConversation.from_dict(data["conversation"])
         if "choices" in data:
             choice = next(iter(data["choices"]), None)
             message = choice.get("message", {})
@@ -229,6 +230,8 @@ async def read_response(response: StreamResponse, stream: bool, prompt: str, provider_info: dict, download_media: bool, supports_text_plain: bool = True) -> AsyncResult:
                 yield Reasoning(reasoning_content)
             if "usage" in data and data["usage"]:
                 yield Usage(**data["usage"])
+            if "conversation" in data and data["conversation"]:
+                yield JsonConversation.from_dict(data["conversation"])
             if choice and choice.get("finish_reason") is not None:
                 yield FinishReason(choice["finish_reason"])
     else:
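For a caller, the last two hunks mean that both the non-streaming and the streaming paths can now yield a JsonConversation chunk, which can be passed back on the next call. A hypothetical two-turn loop (the provider object, model id, and message shape are assumptions; the parameter names and chunk types follow the diff):

# Hypothetical driver; JsonConversation resolves to the same class the diff
# imports via the relative path ...providers.response.
from g4f.providers.response import JsonConversation

async def chat_twice(provider, first_turn: list, second_turn: list) -> None:
    conversation = None
    for messages in (first_turn, second_turn):
        async for chunk in provider.create_async_generator(
            model="claude-3",           # illustrative model id
            messages=messages,
            conversation=conversation,  # None on the first turn
        ):
            if isinstance(chunk, JsonConversation):
                conversation = chunk    # carry echoed state into the next turn
            elif isinstance(chunk, str):
                print(chunk, end="")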