gpt4free/g4f/Provider/TeachAnything.py
kqlio67 9bac34fc88 refactor: reorganize providers and update model configurations
- Rename DeepInfraChat to DeepInfra across all files
- Move DeepInfra from needs_auth to main Provider directory
- Rename LMArenaBeta to LMArena throughout codebase
- Move search-related providers to new search subdirectory (GoogleSearch, SearXNG, YouTube)
- Move deprecated providers to not_working directory (Free2GPT, LegacyLMArena, PenguinAI, ImageLabs, har)
- Add new Mintlify provider with custom AI assistant implementation
- Update Anthropic provider with Claude 4 models and Opus 4.1 parameter handling
- Update Grok provider with Grok 4 models and improved streaming support
- Update GithubCopilot with expanded model list including o3-mini, o4-mini, gpt-5 previews
- Update LambdaChat default model from deepseek-r1 to deepseek-llama3.3-70b
- Update TeachAnything default model from gemini-1.5-pro to gemma
- Remove DeepInfra from needs_auth directory
- Update all model_map references from DeepInfraChat to DeepInfra
- Update all model_map references from LMArenaBeta to LMArena
- Add beta_headers support to Anthropic for special features
- Improve Mintlify provider with system prompt handling and streaming
- Update model configurations in models.py to reflect provider changes
2025-08-25 23:50:53 +03:00

from __future__ import annotations

from typing import Any, Dict

from aiohttp import ClientSession, ClientTimeout

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt


class TeachAnything(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://www.teach-anything.com"
    api_endpoint = "/api/generate"
    working = True

    default_model = 'gemma'
    models = [default_model]

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str | None = None,
        **kwargs: Any
    ) -> AsyncResult:
        headers = cls._get_headers()
        model = cls.get_model(model)

        async with ClientSession(headers=headers) as session:
            prompt = format_prompt(messages)
            data = {"prompt": prompt}
            timeout = ClientTimeout(total=60)

            async with session.post(
                f"{cls.url}{cls.api_endpoint}",
                json=data,
                proxy=proxy,
                timeout=timeout
            ) as response:
                response.raise_for_status()
                buffer = b""
                async for chunk in response.content.iter_any():
                    buffer += chunk
                    try:
                        decoded = buffer.decode('utf-8')
                        yield decoded
                        buffer = b""
                    except UnicodeDecodeError:
                        # If we can't decode, we'll wait for more data
                        continue

                # Handle any remaining data in the buffer
                if buffer:
                    try:
                        yield buffer.decode('utf-8', errors='replace')
                    except Exception as e:
                        print(f"Error decoding final buffer: {e}")
    @staticmethod
    def _get_headers() -> Dict[str, str]:
        return {
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "cache-control": "no-cache",
            "content-type": "application/json",
            "dnt": "1",
            "origin": "https://www.teach-anything.com",
            "pragma": "no-cache",
            "priority": "u=1, i",
            "referer": "https://www.teach-anything.com/",
            "sec-ch-ua": '"Not?A_Brand";v="99", "Chromium";v="130"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Linux"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36"
        }