Fix "n" parameter in API, update models.py

Author: hlohaus
Date: 2025-04-05 16:03:28 +02:00
parent 4ae536f1cc
commit 2234d926b8
4 changed files with 7 additions and 13 deletions

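For context, a minimal sketch of the call path this commit affects, written against g4f's async client; the model name, prompt, and the exact client signature are assumptions, inferred from the self.client.images.generate(...) call and the response.data loop in the Api hunk below.

import asyncio
from g4f.client import AsyncClient

async def main():
    client = AsyncClient()
    # After this commit, request fields such as "n" are forwarded to the
    # provider instead of being dropped, and PollinationsImage yields one
    # image by default instead of four.
    response = await client.images.generate(
        model="flux",                      # placeholder image model
        prompt="a lighthouse at dusk",
        response_format="url",
        n=2,                               # honoured after this change
    )
    for image in response.data:
        print(image.url)

asyncio.run(main())
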
@@ -49,7 +49,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
     text_models = [default_model]
     image_models = [default_image_model]
     extra_image_models = ["flux-pro", "flux-dev", "flux-schnell", "midjourney", "dall-e-3", "turbo"]
-    vision_models = [default_vision_model, "gpt-4o-mini", "o3-mini", "openai", "openai-large"]
+    vision_models = [default_vision_model, "gpt-4o-mini", "o3-mini", "openai", "openai-large", "searchgpt"]
     extra_text_models = vision_models
     _models_loaded = False
     model_aliases = {
@@ -68,7 +68,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
         "gemini-2.0": "gemini",
         "gemini-2.0-flash": "gemini",
         "gemini-2.0-flash-thinking": "gemini-thinking",
-        "deepseek-r1": "deepseek-r1-llama",
+        "deepseek-r1": "deepseek-reasoning-large",
         "gpt-4o-audio": "openai-audio",

         ### Image Models ###
@@ -101,7 +101,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
             original_text_models = [
                 model.get("name")
                 for model in models
-                if model.get("type") == "chat"
+                if "text" in model.get("output_modalities")
             ]
             cls.audio_models = {
                 model.get("name"): model.get("voices")

@@ -45,7 +45,7 @@ class PollinationsImage(PollinationsAI):
         private: bool = False,
         enhance: bool = False,
         safe: bool = False,
-        n: int = 4,
+        n: int = 1,
         **kwargs
     ) -> AsyncResult:
         # Calling model updates before creating a generator
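
The parameters above read like PollinationsImage.create_async_generator. If so, direct callers now get a single image unless they pass n themselves; a hedged sketch, with the model name and message as placeholders:

import asyncio
from g4f.Provider import PollinationsImage

async def main():
    async for chunk in PollinationsImage.create_async_generator(
        model="flux",
        messages=[{"role": "user", "content": "a lighthouse at dusk"}],
        n=4,  # the old implicit default; now opt-in
    ):
        print(chunk)

asyncio.run(main())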

@@ -421,14 +421,8 @@ class Api:
             config.api_key = credentials.credentials
         try:
             response = await self.client.images.generate(
-                prompt=config.prompt,
-                model=config.model,
-                provider=AppConfig.image_provider if config.provider is None else config.provider,
-                **filter_none(
-                    response_format=config.response_format,
-                    api_key=config.api_key,
-                    proxy=config.proxy
-                )
+                **config.dict(exclude_none=True),
+                provider=AppConfig.image_provider if config.provider is None else config.provider
             )
             for image in response.data:
                 if hasattr(image, "url") and image.url.startswith("/"):

@@ -541,7 +541,7 @@ class Images:
             images = await asyncio.gather(*[get_b64_from_url(image) for image in response.get_list()])
         else:
             # Save locally for None (default) case
-            images = await copy_media(response.get_list(), response.get("cookies"), proxy)
+            images = await copy_media(response.get_list(), response.get("cookies"), response.get("headers"), proxy, response.alt)
         images = [Image.model_construct(url=image, revised_prompt=response.alt) for image in images]
         return ImagesResponse.model_construct(
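
copy_media now also receives the response headers and the alt text. The helper below is a hypothetical stand-in that only mirrors the call shape used above (download each URL with the provider's cookies and headers, name the file from the alt text, return local paths); it is not the real g4f implementation.

import hashlib
import os
import aiohttp

async def copy_media_sketch(urls, cookies=None, headers=None, proxy=None, alt=None):
    # Hypothetical stand-in: fetch each image with the provider's cookies and
    # headers (some hosts reject anonymous requests) and derive a readable
    # local file name from the alt text.
    os.makedirs("generated_media", exist_ok=True)
    saved = []
    async with aiohttp.ClientSession(cookies=cookies, headers=headers) as session:
        for url in urls:
            async with session.get(url, proxy=proxy) as resp:
                data = await resp.read()
            stem = (alt or "image").replace(" ", "_")[:30]
            name = f"{stem}_{hashlib.md5(url.encode()).hexdigest()[:8]}.jpg"
            path = os.path.join("generated_media", name)
            with open(path, "wb") as f:
                f.write(data)
            saved.append(path)
    return saved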