gpt4free/g4f/Provider/GptOss.py
hlohaus 9563f8df3a feat: Update environment variables and modify model mappings
- Added `OPENROUTER_API_KEY` and `AZURE_API_KEYS` to `example.env`.
- Updated `AZURE_DEFAULT_MODEL` to "model-router" in `example.env`.
- Added `AZURE_ROUTES` with multiple model URLs in `example.env`.
- Changed the mapping for `"phi-4-multimodal"` in `DeepInfraChat.py` to `"microsoft/Phi-4-multimodal-instruct"`.
- Added a `media` parameter to `GptOss.create_async_generator` and raised a `ValueError` when `media` is provided.
- Updated `model_aliases` in `any_model_map.py` to include new mappings for various models.
- Removed several model aliases from `PollinationsAI` in `any_model_map.py`.
- Added new models and updated existing models in `model_map` across various files, including `any_model_map.py` and `__init__.py`.
- Refactored `AnyModelProviderMixin` to include `model_aliases` and updated the alias-handling logic (a sketch follows below).
2025-08-07 01:21:22 +02:00
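
A hypothetical sketch of the alias-handling pattern described in the last bullet (not the actual `AnyModelProviderMixin` implementation; only `model_aliases` and `get_model` appear in the code below, everything else is assumed):

class AnyModelProviderMixin:
    model_aliases: dict = {}

    @classmethod
    def get_model(cls, model: str) -> str:
        # Resolve a public alias (e.g. the configured default model)
        # to the provider's concrete model id before dispatching.
        return cls.model_aliases.get(model, model)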


from __future__ import annotations

from ..typing import AsyncResult, Messages, MediaListType
from ..providers.response import JsonConversation, Reasoning, TitleGeneration
from ..requests import StreamSession, raise_for_status
from ..config import DEFAULT_MODEL
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_last_user_message


class GptOss(AsyncGeneratorProvider, ProviderModelMixin):
    label = "gpt-oss (playground)"
    url = "https://gpt-oss.com"
    api_endpoint = "https://api.gpt-oss.com/chatkit"
    working = True
    active_by_default = True

    default_model = "gpt-oss-120b"
    models = [default_model, "gpt-oss-20b"]
    model_aliases = {
        DEFAULT_MODEL: default_model,
    }
    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        media: MediaListType = None,
        conversation: JsonConversation = None,
        reasoning_effort: str = "high",
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        # The gpt-oss playground only accepts text input.
        if media:
            raise ValueError("Media is not supported by gpt-oss")
        model = cls.get_model(model)
        # Only the last user message is sent; earlier turns live in the
        # server-side thread referenced by `conversation`.
        user_message = get_last_user_message(messages)
        cookies = {}
        if conversation is None:
            # First turn: create a new thread on the server.
            data = {
                "op": "threads.create",
                "params": {
                    "input": {
                        "text": user_message,
                        "content": [{"type": "input_text", "text": user_message}],
                        "quoted_text": "",
                        "attachments": []
                    }
                }
            }
        else:
            # Follow-up turn: append the message to the existing thread and
            # re-send the session cookie that identifies the user.
            data = {
                "op": "threads.addMessage",
                "params": {
                    "input": {
                        "text": user_message,
                        "content": [{"type": "input_text", "text": user_message}],
                        "quoted_text": "",
                        "attachments": []
                    },
                    "threadId": conversation.id
                }
            }
            cookies["user_id"] = conversation.user_id
        headers = {
            "accept": "text/event-stream",
            "x-reasoning-effort": reasoning_effort,
            "x-selected-model": model,
            "x-show-reasoning": "true"
        }
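        # The playground is driven entirely by these headers plus the JSON
        # "op" payload built above; roughly equivalent to the following
        # sketch (cookie handling omitted, payload abbreviated):
        #   curl -N https://api.gpt-oss.com/chatkit \
        #     -H "accept: text/event-stream" \
        #     -H "x-selected-model: gpt-oss-120b" \
        #     -H "x-reasoning-effort: high" \
        #     -H "x-show-reasoning: true" \
        #     -d '{"op": "threads.create", "params": {...}}'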
        async with StreamSession(
            headers=headers,
            cookies=cookies,
            proxy=proxy,
        ) as session:
            async with session.post(
                cls.api_endpoint,
                json=data
            ) as response:
                await raise_for_status(response)
                async for chunk in response.sse():
                    if chunk.get("type") == "thread.created":
                        # Hand the new thread id and user cookie back to the
                        # caller so follow-up requests can reuse them.
                        yield JsonConversation(id=chunk["thread"]["id"], user_id=response.cookies.get("user_id"))
                    elif chunk.get("type") == "thread.item_updated":
                        entry = chunk.get("update", {}).get("entry", chunk.get("update", {}))
                        if entry.get("type") == "thought":
                            yield Reasoning(entry.get("content"))
                        elif entry.get("type") == "recap":
                            pass  # yield Reasoning(status=entry.get("summary"))
                        elif entry.get("type") == "assistant_message.content_part.text_delta":
                            yield entry.get("delta")
                    elif chunk.get("type") == "thread.updated":
                        yield TitleGeneration(chunk["thread"]["title"])
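

# A minimal usage sketch (not part of the provider): stream a completion
# directly from this class. Assumes g4f is installed and gpt-oss.com is
# reachable; text deltas arrive as plain strings, while JsonConversation,
# Reasoning and TitleGeneration arrive as response objects.
if __name__ == "__main__":
    import asyncio

    async def demo():
        async for chunk in GptOss.create_async_generator(
            model=GptOss.default_model,
            messages=[{"role": "user", "content": "Hello!"}],
        ):
            if isinstance(chunk, str):
                print(chunk, end="", flush=True)

    asyncio.run(demo())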