Refactor OIVSCodeSer2 and OIVSCodeSer0501 classes; consolidate Perplexity provider and update OpenaiTemplate for API key handling

hlohaus 2025-10-02 00:55:01 +02:00
parent 9931e874ee
commit 7d184411e2
5 changed files with 28 additions and 66 deletions

View file

@@ -5,23 +5,16 @@ import string
from .template import OpenaiTemplate

class OIVSCodeSer0501(OpenaiTemplate):
    label = "OI VSCode Server 0501"
    url = "https://oi-vscode-server-0501.onrender.com"
    api_base = "https://oi-vscode-server-0501.onrender.com/v1"
    api_endpoint = "https://oi-vscode-server-0501.onrender.com/v1/chat/completions"
class OIVSCodeSer2(OpenaiTemplate):
    label = "OI VSCode Server 2"
    url = "https://oi-vscode-server-2.onrender.com"
    api_base = "https://oi-vscode-server-2.onrender.com/v1"
    working = True
    needs_auth = False
    supports_stream = True
    supports_system_message = True
    supports_message_history = True
    default_model = "gpt-4.1-mini"
    default_vision_model = default_model
    default_model = "*"
    default_vision_model = "gpt-4o-mini"
    vision_models = [default_vision_model]
    models = vision_models

    @classmethod
    def get_headers(cls, stream: bool, api_key: str = None, headers: dict = None) -> dict:
        # Generate a random user ID similar to the JavaScript code
@@ -37,3 +30,12 @@ class OIVSCodeSer0501(OpenaiTemplate):
            ),
            **({} if headers is None else headers)
        }

class OIVSCodeSer0501(OIVSCodeSer2):
    label = "OI VSCode Server 0501"
    url = "https://oi-vscode-server-0501.onrender.com"
    api_base = "https://oi-vscode-server-0501.onrender.com/v1"
    default_model = "gpt-4.1-mini"
    default_vision_model = default_model
    vision_models = [default_vision_model]
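
Note on the hunk above: after the consolidation, OIVSCodeSer0501 is a thin subclass of OIVSCodeSer2 that overrides only the attributes that differ, so get_headers() and the capability flags live in one place. A minimal standalone sketch of that pattern (OpenaiTemplate is stubbed out here so the snippet runs on its own; everything else is taken from the diff):

class OpenaiTemplate:
    # Stand-in for the real base class, just to make the sketch self-contained.
    pass

class OIVSCodeSer2(OpenaiTemplate):
    label = "OI VSCode Server 2"
    url = "https://oi-vscode-server-2.onrender.com"
    api_base = "https://oi-vscode-server-2.onrender.com/v1"
    working = True
    needs_auth = False
    default_model = "*"
    default_vision_model = "gpt-4o-mini"

class OIVSCodeSer0501(OIVSCodeSer2):
    # Only the differing attributes are overridden; everything else,
    # including get_headers() in the real provider, is inherited.
    label = "OI VSCode Server 0501"
    url = "https://oi-vscode-server-0501.onrender.com"
    api_base = "https://oi-vscode-server-0501.onrender.com/v1"
    default_model = "gpt-4.1-mini"
    default_vision_model = default_model

print(OIVSCodeSer0501.working)        # True  (inherited)
print(OIVSCodeSer0501.default_model)  # gpt-4.1-mini  (overridden)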

View file

@@ -1,39 +0,0 @@
from __future__ import annotations

import secrets
import string

from .template import OpenaiTemplate

class OIVSCodeSer2(OpenaiTemplate):
    label = "OI VSCode Server 2"
    url = "https://oi-vscode-server-2.onrender.com"
    api_base = "https://oi-vscode-server-2.onrender.com/v1"
    api_endpoint = "https://oi-vscode-server-2.onrender.com/v1/chat/completions"
    working = True
    needs_auth = False
    supports_stream = True
    supports_system_message = True
    supports_message_history = True
    default_model = "gpt-4o-mini"
    default_vision_model = default_model
    vision_models = [default_vision_model]
    models = vision_models

    @classmethod
    def get_headers(cls, stream: bool, api_key: str = None, headers: dict = None) -> dict:
        # Generate a random user ID similar to the JavaScript code
        userid = ''.join(secrets.choice(string.ascii_letters + string.digits) for _ in range(21))
        return {
            "Accept": "text/event-stream" if stream else "application/json",
            "Content-Type": "application/json",
            "userid": userid,
            **(
                {"Authorization": f"Bearer {api_key}"}
                if api_key else {}
            ),
            **({} if headers is None else headers)
        }
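
This file is deleted outright; its get_headers() logic survives verbatim in the consolidated class above. Pulled out as a standalone function for illustration (the name make_headers is hypothetical), the header construction looks like this:

import secrets
import string

def make_headers(stream: bool, api_key: str = None, headers: dict = None) -> dict:
    # Same construction as the provider's get_headers(): a random
    # 21-character alphanumeric user id plus content negotiation and
    # an optional Bearer token / extra headers.
    userid = ''.join(secrets.choice(string.ascii_letters + string.digits) for _ in range(21))
    return {
        "Accept": "text/event-stream" if stream else "application/json",
        "Content-Type": "application/json",
        "userid": userid,
        **({"Authorization": f"Bearer {api_key}"} if api_key else {}),
        **({} if headers is None else headers),
    }

print(make_headers(stream=True))
# {'Accept': 'text/event-stream', 'Content-Type': 'application/json', 'userid': '...21 chars...'}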

View file

@@ -38,6 +38,7 @@ except ImportError as e:
from .deprecated.ARTA import ARTA
from .deprecated.DuckDuckGo import DuckDuckGo
from .deprecated.PerplexityLabs import PerplexityLabs
from .ApiAirforce import ApiAirforce
from .Blackbox import Blackbox
@@ -50,10 +51,9 @@ from .GLM import GLM
from .Kimi import Kimi
from .LambdaChat import LambdaChat
from .Mintlify import Mintlify
from .OIVSCodeSer2 import OIVSCodeSer2
from .OIVSCodeSer0501 import OIVSCodeSer0501
from .OIVSCodeSer import OIVSCodeSer2, OIVSCodeSer0501
from .OperaAria import OperaAria
from .PerplexityLabs import PerplexityLabs
from .Perplexity import Perplexity
from .PollinationsAI import PollinationsAI
from .PollinationsImage import PollinationsImage
from .Startnest import Startnest
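
With both classes re-exported from the package __init__ in this hunk, downstream code only needs the import below. The top-level package name (g4f) is an assumption here, since the file paths are not shown in this view; the class names come from the diff:

from g4f.Provider import OIVSCodeSer2, OIVSCodeSer0501  # package name assumed

assert issubclass(OIVSCodeSer0501, OIVSCodeSer2)
print(OIVSCodeSer0501.label)  # "OI VSCode Server 0501"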

View file

@@ -3,11 +3,11 @@ from __future__ import annotations
import random
import json
from ..typing import AsyncResult, Messages
from ..requests import StreamSession, raise_for_status
from ..errors import ResponseError
from ..providers.response import FinishReason, Sources
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ...typing import AsyncResult, Messages
from ...requests import StreamSession, raise_for_status
from ...errors import ResponseError
from ...providers.response import FinishReason, Sources
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
API_URL = "https://www.perplexity.ai/socket.io/"
WS_URL = "wss://www.perplexity.ai/socket.io/"
@@ -15,7 +15,7 @@ WS_URL = "wss://www.perplexity.ai/socket.io/"

class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
    label = "Perplexity Labs"
    url = "https://labs.perplexity.ai"
    working = True
    working = False
    active_by_default = True
    default_model = "r1-1776"
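
The imports switching from two leading dots to three means this module now sits one package level deeper (consistent with the from .deprecated.PerplexityLabs import added to __init__ above), and working = False retires the provider. A quick sanity check of how the extra dot resolves, using placeholder package names since the real paths are not shown here:

from importlib.util import resolve_name

# resolve_name() only does string arithmetic, so placeholder names are fine.
print(resolve_name("..typing", package="pkg.Provider"))               # pkg.typing
print(resolve_name("...typing", package="pkg.Provider.deprecated"))   # pkg.typing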

View file

@@ -41,11 +41,10 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin
        api_key = cls.api_key
        if not api_key:
            api_key = AuthManager.load_api_key(cls)
        print(f"API Key: {api_key}")
        if cls.models_needs_auth and not api_key:
            raise MissingAuthError('Add a "api_key"')
        if api_key is not None:
            headers["authorization"] = f"Bearer {api_key}"
        response = requests.get(f"{api_base}/models", headers=headers, verify=cls.ssl)
        response = requests.get(f"{api_base}/models", headers=cls.get_headers(False, api_key), verify=cls.ssl)
        raise_for_status(response)
        data = response.json()
        data = data.get("data") if isinstance(data, dict) else data
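
Net effect of this last hunk: the stray debug print of the API key is removed, and the /models request reuses the provider's own get_headers() instead of a hand-built Authorization header, so per-provider extras (such as the userid above) and the Bearer token come from one place. A condensed sketch of the resulting lookup (fetch_models and the error stub are illustrative, not the library's actual API):

import requests

class MissingAuthError(Exception):
    # Stand-in for the error type referenced in the diff.
    pass

def fetch_models(cls, api_base: str, api_key: str = None) -> list:
    # cls is assumed to provide models_needs_auth, ssl and a get_headers()
    # with the signature shown in the diff above.
    if cls.models_needs_auth and not api_key:
        raise MissingAuthError('Add a "api_key"')
    response = requests.get(f"{api_base}/models",
                            headers=cls.get_headers(False, api_key),
                            verify=cls.ssl)
    response.raise_for_status()
    data = response.json()
    return data.get("data") if isinstance(data, dict) else data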