feat: update providers, model selection, media handling, and routing

- Added GithubCopilotAPI provider under g4f/Provider/needs_auth and registered it in __init__.py
- Renamed the misspelled "GGOGLE_SID_COOKIE" constant to "GOOGLE_SID_COOKIE" in Gemini.py and updated all references
- Updated PollinationsAI.py:
  - Refined model aliases and removed or commented out unused/legacy aliases
  - Updated the loading of audio and vision models, using swap_models to map upstream model names back to their aliases
  - Adjusted get_model and the model-loading methods so lookups resolve correctly
  - Changed the default model lists for text, image, and vision models
  - Updated the conversation-title and followups field descriptions in FOLLOWUPS_TOOLS and simplified the developer message
- Modified save_content in g4f/cli/client.py to download from URL lists, send stored cookies/headers with the request, and drop the duplicate HTTP download logic
- Added asyncio sleep after stdout writes in stream_response for smoother streaming
- Changed the website.py render default to "home", adjusted the chat route to accept any filename, and updated the filenames used for rendering
- Updated model selection in g4f/models.py by removing PollinationsAI from several best_provider lists and reordering providers for specific models
- Enhanced media merging in g4f/tools/media.py: clarified the comment about reading media from the last user message and handled content appending for list contents in render_messages
- Updated OpenaiTemplate.py to add an image_url field when media with http(s) URLs is present
- Adjusted test_provider_has_model in etc/unittest/models.py to skip providers requiring auth
hlohaus 2025-07-09 19:36:58 +02:00
parent 78c0d67d54
commit 6c126e468c
11 changed files with 79 additions and 72 deletions


@@ -347,7 +347,7 @@ def generate_commit_message(diff_text: str, model: str = DEFAULT_MODEL, max_retr
spinner = None
content.append(chunk.choices[0].delta.content)
print(chunk.choices[0].delta.content, end="", flush=True)
return "".join(content).strip("`").strip()
return "".join(content).strip("`").split("\n---\n")[0].strip()
except Exception as e:
# Stop spinner if it's running
if 'spinner' in locals() and spinner:

etc/unittest/models.py

@@ -12,6 +12,8 @@ class TestProviderHasModel(unittest.TestCase):
def test_provider_has_model(self):
for model, providers in __models__.values():
for provider in providers:
if provider.needs_auth:
continue
if issubclass(provider, ProviderModelMixin):
provider.get_models() # Update models
if model.name in provider.model_aliases:

g4f/Provider/PollinationsAI.py

@@ -5,7 +5,7 @@ import json
import random
import requests
import asyncio
from urllib.parse import quote, quote_plus
from urllib.parse import quote_plus
from typing import Optional
from aiohttp import ClientSession, ClientTimeout
@@ -40,14 +40,14 @@ FOLLOWUPS_TOOLS = [{
"parameters": {
"properties": {
"title": {
"title": "Conversation Title",
"title": "Conversation title. Prefixed with one or more emojies",
"type": "string"
},
"followups": {
"items": {
"type": "string"
},
"title": "Suggested Followups",
"title": "Suggested 4 Followups (only user messages)",
"type": "array"
}
},
@@ -59,7 +59,7 @@ FOLLOWUPS_TOOLS = [{
FOLLOWUPS_DEVELOPER_MESSAGE = [{
"role": "developer",
"content": "Prefix conversation title with one or more emojies. Suggested 4 Followups (User messages only).",
"content": "Provide conversation options.",
}]
class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
@@ -83,61 +83,38 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
default_vision_model = default_model
default_audio_model = "openai-audio"
text_models = [default_model, "evil"]
image_models = [default_image_model, "flux-dev", "turbo", "gptimage"]
image_models = [default_image_model, "kontext", "gptimage"]
audio_models = {default_audio_model: []}
vision_models = [default_vision_model, "gpt-4o-mini", "openai", "openai-large", "openai-reasoning", "searchgpt"]
vision_models = [default_vision_model]
_models_loaded = False
# https://github.com/pollinations/pollinations/blob/master/text.pollinations.ai/generateTextPortkey.js#L15
model_aliases = {
### Text Models ###
"gpt-4": "openai",
"gpt-4o": "openai",
"gpt-4.1-mini": "openai",
"gpt-4o-mini": "openai",
"gpt-4.1-nano": "openai-fast",
"gpt-4": "openai-large",
"gpt-4o": "openai-large",
"gpt-4.1": "openai-large",
"gpt-4o-audio": "openai-audio",
"o4-mini": "openai-reasoning",
"gpt-4.1-mini": "openai",
"command-r-plus": "command-r",
"gemini-2.5-flash": "gemini",
"gemini-2.0-flash-thinking": "gemini-thinking",
"qwen-2.5-coder-32b": "qwen-coder",
"llama-3.3-70b": "llama",
"llama-4-scout": "llamascout",
"llama-4-scout-17b": "llamascout",
"mistral-small-3.1-24b": "mistral",
"deepseek-r1": "deepseek-reasoning-large",
"deepseek-r1-distill-llama-70b": "deepseek-reasoning-large",
#"deepseek-r1-distill-llama-70b": "deepseek-r1-llama",
#"mistral-small-3.1-24b": "unity", # Personas
#"mirexa": "mirexa", # Personas
#"midijourney": "midijourney", # Personas
#"rtist": "rtist", # Personas
#"searchgpt": "searchgpt",
#"evil": "evil", # Personas
"deepseek-r1-distill-qwen-32b": "deepseek-reasoning",
"phi-4": "phi",
#"pixtral-12b": "pixtral",
#"hormoz-8b": "hormoz",
"qwq-32b": "qwen-qwq",
#"hypnosis-tracy-7b": "hypnosis-tracy", # Personas
#"mistral-?": "sur", # Personas
"deepseek-v3": "deepseek",
"deepseek-r1": "deepseek-reasoning",
"deepseek-v3-0324": "deepseek",
#"bidara": "bidara", # Personas
"deepseek-v3": "deepseek",
"grok-3-mini": "grok",
### Audio Models ###
"gpt-4o-audio": "openai-audio",
"grok-3-mini-high": "grok",
"gpt-4o-mini-audio": "openai-audio",
### Image Models ###
"gpt-4o-audio": "openai-audio",
"sdxl-turbo": "turbo",
"gpt-image": "gptimage",
"dall-e-3": "gptimage",
"flux-dev": "flux",
"flux-schnell": "flux",
"flux-pro": "flux",
"flux-schnell": "flux"
"flux": "flux",
}
swap_models = {value: key for key, value in model_aliases.items()}
@classmethod
def get_model(cls, model: str) -> str:
@@ -185,11 +162,14 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
cls.audio_models = {
model.get("name"): model.get("voices")
for model in models
if "output_modalities" in model and "audio" in model["output_modalities"] and model.get("name") != "gemini"
if "output_modalities" in model and "audio" in model["output_modalities"]
}
for alias, model in cls.model_aliases.items():
if model in cls.audio_models and alias not in cls.audio_models:
cls.audio_models.update({alias: {}})
cls.vision_models.extend([
model.get("name")
cls.swap_models.get(model.get("name"), model.get("name"))
for model in models
if model.get("vision") and model not in cls.vision_models
])
@@ -207,7 +187,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
for model in models:
model_name = model.get("name")
if model_name and "input_modalities" in model and "text" in model["input_modalities"]:
text_models.append(model_name)
text_models.append(cls.swap_models.get(model_name, model_name))
# Convert to list and update text_models
cls.text_models = list(dict.fromkeys(text_models))
@@ -237,7 +217,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
{"group": "Text Generation", "models": cls.text_models},
{"group": "Image Generation", "models": cls.image_models},
{"group": "Audio Generation", "models": list(cls.audio_models.keys())},
{"group": "Audio Voices", "models": cls.audio_models[cls.default_audio_model]}
{"group": "Audio Voices", "models": cls.audio_models.get(cls.default_audio_model, [])},
]
@classmethod
@@ -270,7 +250,6 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
top_p: float = None,
frequency_penalty: float = None,
response_format: Optional[dict] = None,
download_media: bool = True,
extra_parameters: list[str] = ["tools", "parallel_tool_calls", "tool_choice", "reasoning_effort", "logit_bias", "voice", "modalities", "audio"],
**kwargs
) -> AsyncResult:
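
The alias handling above hinges on reversing the alias table. A minimal sketch of the pattern with invented entries (not the provider's real lists):

# Public alias -> upstream model name (illustrative entries only).
model_aliases = {
    "gpt-4": "openai-large",
    "deepseek-r1": "deepseek-reasoning",
}
# Upstream name -> public alias; when two aliases share an upstream
# name, the later entry wins in the reversed dict.
swap_models = {value: key for key, value in model_aliases.items()}

# Names reported by the upstream models API are normalized back to aliases.
upstream_names = ["openai-large", "deepseek-reasoning", "mistral"]
text_models = [swap_models.get(name, name) for name in upstream_names]
assert text_models == ["gpt-4", "deepseek-r1", "mistral"]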

g4f/Provider/needs_auth/Gemini.py

@@ -59,7 +59,7 @@ UPLOAD_IMAGE_HEADERS = {
}
GOOGLE_COOKIE_DOMAIN = ".google.com"
ROTATE_COOKIES_URL = "https://accounts.google.com/RotateCookies"
GGOGLE_SID_COOKIE = "__Secure-1PSID"
GOOGLE_SID_COOKIE = "__Secure-1PSID"
models = {
"gemini-2.5-pro-exp": {"x-goog-ext-525001261-jspb": '[1,null,null,null,"2525e3954d185b3c"]'},
@@ -152,11 +152,12 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
"""
while True:
new_1psidts = None
try:
new_1psidts = await rotate_1psidts(cls.url, cls._cookies, proxy)
except Exception as e:
debug.error(f"Failed to refresh cookies: {e}")
task = cls.rotate_tasks.get(cls._cookies[GGOGLE_SID_COOKIE])
task = cls.rotate_tasks.get(cls._cookies[GOOGLE_SID_COOKIE])
if task:
task.cancel()
debug.error(
@@ -220,10 +221,10 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
await cls.fetch_snlm0e(session, cls._cookies)
if not cls._snlm0e:
raise RuntimeError("Invalid cookies. SNlM0e not found")
if GGOGLE_SID_COOKIE in cls._cookies:
task = cls.rotate_tasks.get(cls._cookies[GGOGLE_SID_COOKIE])
if GOOGLE_SID_COOKIE in cls._cookies:
task = cls.rotate_tasks.get(cls._cookies[GOOGLE_SID_COOKIE])
if not task:
cls.rotate_tasks[cls._cookies[GGOGLE_SID_COOKIE]] = asyncio.create_task(
cls.rotate_tasks[cls._cookies[GOOGLE_SID_COOKIE]] = asyncio.create_task(
cls.start_auto_refresh()
)
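
For context, the rotate_tasks bookkeeping keyed by GOOGLE_SID_COOKIE amounts to one background refresh task per account cookie. A simplified sketch (function body and interval are placeholders, not Gemini's actual rotation logic):

import asyncio

GOOGLE_SID_COOKIE = "__Secure-1PSID"
rotate_tasks: dict[str, asyncio.Task] = {}

async def start_auto_refresh() -> None:
    while True:
        await asyncio.sleep(60)  # placeholder; the provider rotates 1PSIDTS here

def ensure_refresh_task(cookies: dict) -> None:
    # At most one refresh task per SID; must be called from a running event loop.
    sid = cookies.get(GOOGLE_SID_COOKIE)
    if sid and sid not in rotate_tasks:
        rotate_tasks[sid] = asyncio.create_task(start_auto_refresh())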

g4f/Provider/needs_auth/GithubCopilotAPI.py

@@ -0,0 +1,12 @@
from __future__ import annotations
from .OpenaiAPI import OpenaiAPI
class GithubCopilotAPI(OpenaiAPI):
label = "GitHub Copilot API"
url = "https://github.com/copilot"
login_url = "https://aider.chat/docs/llms/github.html"
working = True
api_base = "https://api.githubcopilot.com"
needs_auth = True

g4f/Provider/needs_auth/__init__.py

@@ -13,6 +13,7 @@ from .Gemini import Gemini
from .GeminiPro import GeminiPro
from .GigaChat import GigaChat
from .GithubCopilot import GithubCopilot
from .GithubCopilotAPI import GithubCopilotAPI
from .GlhfChat import GlhfChat
from .Grok import Grok
from .Groq import Groq

OpenaiTemplate.py

@@ -92,6 +92,9 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin
"model": model,
**use_aspect_ratio({"width": kwargs.get("width"), "height": kwargs.get("height")}, kwargs.get("aspect_ratio", None))
}
# Handle media if provided
if media is not None:
data["image_url"] = next([data for data, _ in media if data and isinstance(data, str) and data.startswith("http://") or data.startswith("https://")], None)
async with session.post(f"{api_base.rstrip('/')}/images/generations", json=data, ssl=cls.ssl) as response:
data = await response.json()
cls.raise_error(data, response.status)
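
The image_url lookup is a first-match scan over the media pairs. As a standalone example (data invented), next() over a generator with a None default avoids both iterating a plain list with next() and the and/or precedence trap:

media = [(b"\x89PNG...", "raw.png"), ("https://example.com/cat.png", "cat.png")]
image_url = next(
    (url for url, _ in media
     if isinstance(url, str) and url.startswith(("http://", "https://"))),
    None,
)
assert image_url == "https://example.com/cat.png"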

g4f/cli/client.py

@@ -134,6 +134,7 @@ async def stream_response(
for byte in str(token).encode('utf-8'):
sys.stdout.buffer.write(bytes([byte]))
sys.stdout.buffer.flush()
await asyncio.sleep(0.01)
except (IOError, BrokenPipeError) as e:
print(f"\nError writing to stdout: {e}", file=sys.stderr)
break
@@ -153,7 +154,21 @@ async def stream_response(
def save_content(content, filepath: str, allowed_types = None):
if hasattr(content, "urls"):
content = next(iter(content.urls), None) if isinstance(content.urls, list) else content.urls
import requests
for url in content.urls:
if url.startswith("http://") or url.startswith("https://"):
try:
response = requests.get(url, cookies=content.get("cookies"), headers=content.get("headers"))
if response.status_code == 200:
with open(filepath, "wb") as f:
f.write(response.content)
return True
except requests.RequestException as e:
print(f"Error downloading {url}: {e}", file=sys.stderr)
return False
else:
content = url
break
elif hasattr(content, "data"):
content = content.data
if not content:
@@ -166,13 +181,6 @@ def save_content(content, filepath: str, allowed_types = None):
with open(filepath, "wb") as f:
f.write(extract_data_uri(content))
return True
elif content.startswith("http://") or content.startswith("https://"):
import requests
response = requests.get(content)
if response.status_code == 200:
with open(filepath, "wb") as f:
f.write(response.content)
return True
content = filter_markdown(content, allowed_types)
if content:
with open(filepath, "w") as f:

g4f/gui/server/website.py

@@ -14,7 +14,7 @@ from ... import version
def redirect_home():
return redirect('/chat/')
def render(filename = "chat"):
def render(filename = "home"):
if os.path.exists(DIST_DIR) and not request.args.get("debug"):
path = os.path.abspath(os.path.join(os.path.dirname(DIST_DIR), (filename + ("" if "." in filename else ".html"))))
return send_from_directory(os.path.dirname(path), os.path.basename(path))
@@ -72,7 +72,7 @@ class Website:
'function': self._background,
'methods': ['GET', 'POST']
},
'/chat/<conversation_id>': {
'/chat/<filename>': {
'function': self._chat,
'methods': ['GET', 'POST']
},
@@ -95,8 +95,8 @@ class Website:
def _background(self, filename = "background"):
return render(filename)
def _chat(self, filename = "chat"):
filename = "chat/index" if filename == 'chat' else secure_filename(filename)
def _chat(self, filename = ""):
filename = f"chat/{filename}" if filename else "chat/index"
return render(filename)
def _dist(self, name: str):
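
The route change means any /chat/<name> path now maps to a file in the chat/ folder. Schematically, as a bare Flask app (illustrative, not the project's Website class):

from flask import Flask

app = Flask(__name__)

@app.route('/chat/', defaults={'filename': ''})
@app.route('/chat/<filename>')
def chat(filename: str):
    # Empty path -> chat/index, anything else -> chat/<filename>.
    target = f"chat/{filename}" if filename else "chat/index"
    return f"would render {target}"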

g4f/models.py

@@ -23,7 +23,6 @@ from .Provider import (
OIVSCodeSer0501,
OpenAIFM,
PerplexityLabs,
Pi,
PollinationsAI,
PollinationsImage,
TeachAnything,
@@ -37,7 +36,6 @@ from .Provider import (
CopilotAccount,
Gemini,
GeminiPro,
HailuoAI,
HuggingChat,
HuggingFace,
HuggingFaceMedia,
@@ -502,7 +500,7 @@ gemini_2_0_flash = Model(
gemini_2_0_flash_thinking = Model(
name = 'gemini-2.0-flash-thinking',
base_provider = 'Google',
best_provider = IterListProvider([PollinationsAI, Gemini])
best_provider = IterListProvider([Gemini, GeminiPro])
)
gemini_2_0_flash_thinking_with_apps = Model(
@@ -515,7 +513,7 @@ gemini_2_0_flash_thinking_with_apps = Model(
gemini_2_5_flash = Model(
name = 'gemini-2.5-flash',
base_provider = 'Google',
best_provider = IterListProvider([PollinationsAI, Gemini])
best_provider = IterListProvider([Gemini, GeminiPro])
)
gemini_2_5_pro = Model(
@@ -561,7 +559,7 @@ command_r = Model(
command_r_plus = Model(
name = 'command-r-plus',
base_provider = 'CohereForAI',
best_provider = IterListProvider([PollinationsAI, HuggingSpace, HuggingChat])
best_provider = IterListProvider([HuggingSpace, HuggingChat])
)
command_r7b = Model(
@@ -693,7 +691,7 @@ qwen_3_0_6b = Model(
qwq_32b = Model(
name = 'qwq-32b',
base_provider = 'Qwen',
best_provider = IterListProvider([DeepInfraChat, PollinationsAI, Together, HuggingChat])
best_provider = IterListProvider([DeepInfraChat, Together, HuggingChat])
)
### DeepSeek ###
@@ -720,7 +718,7 @@ deepseek_r1_turbo = Model(
deepseek_r1_distill_llama_70b = Model(
name = 'deepseek-r1-distill-llama-70b',
base_provider = 'DeepSeek',
best_provider = IterListProvider([DeepInfraChat, Together, PollinationsAI])
best_provider = IterListProvider([DeepInfraChat, Together])
)
deepseek_r1_distill_qwen_1_5b = Model(
@@ -738,7 +736,7 @@ deepseek_r1_distill_qwen_14b = Model(
deepseek_r1_distill_qwen_32b = Model(
name = 'deepseek-r1-distill-qwen-32b',
base_provider = 'DeepSeek',
best_provider = IterListProvider([DeepInfraChat, PollinationsAI])
best_provider = IterListProvider([DeepInfraChat])
)
# deepseek-v2
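
The best_provider edits matter because IterListProvider tries providers in order, so earlier entries get priority. Roughly (a schematic of ordered fallback, not g4f's actual implementation):

class OrderedFallback:
    def __init__(self, providers: list):
        self.providers = providers  # earlier entries are tried first

    def create(self, *args, **kwargs):
        last_error = None
        for provider in self.providers:
            try:
                return provider(*args, **kwargs)
            except Exception as e:
                last_error = e  # fall through to the next provider
        raise RuntimeError("all providers failed") from last_error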

g4f/tools/media.py

@@ -56,6 +56,7 @@ def render_part(part: dict) -> dict:
def merge_media(media: list, messages: list) -> Iterator:
buffer = []
# Read media from the last user message
for message in messages:
if message.get("role") == "user":
content = message.get("content")
@@ -90,13 +91,15 @@ def render_messages(messages: Messages, media: list = None) -> Iterator:
last_is_assistant = True
else:
last_is_assistant = False
if isinstance(message["content"], list):
# Render content parts
if isinstance(message.get("content"), list):
parts = [render_part(part) for part in message["content"] if part]
yield {
**message,
"content": [part for part in parts if part]
}
else:
# Append media to the last message
if media is not None and idx == len(messages) - 1:
yield {
**message,