Mirror of https://github.com/xtekky/gpt4free.git (synced 2025-12-06 02:30:41 -08:00)
Fix "n" parameter in API, update models.py
This commit is contained in:
parent
4ae536f1cc
commit
2234d926b8
4 changed files with 7 additions and 13 deletions
|
|
@@ -49,7 +49,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
     text_models = [default_model]
     image_models = [default_image_model]
     extra_image_models = ["flux-pro", "flux-dev", "flux-schnell", "midjourney", "dall-e-3", "turbo"]
-    vision_models = [default_vision_model, "gpt-4o-mini", "o3-mini", "openai", "openai-large"]
+    vision_models = [default_vision_model, "gpt-4o-mini", "o3-mini", "openai", "openai-large", "searchgpt"]
     extra_text_models = vision_models
     _models_loaded = False
     model_aliases = {
@@ -68,7 +68,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
         "gemini-2.0": "gemini",
         "gemini-2.0-flash": "gemini",
         "gemini-2.0-flash-thinking": "gemini-thinking",
-        "deepseek-r1": "deepseek-r1-llama",
+        "deepseek-r1": "deepseek-reasoning-large",
         "gpt-4o-audio": "openai-audio",

         ### Image Models ###
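For readers unfamiliar with the alias table: model_aliases maps the public model names accepted by the API onto the provider's internal names, so after this change a request for "deepseek-r1" is served by "deepseek-reasoning-large" rather than "deepseek-r1-llama". A minimal, illustrative lookup sketch follows; it is not the provider's actual resolution code.

# Illustrative only: a trimmed alias table and a hypothetical resolver.
model_aliases = {
    "deepseek-r1": "deepseek-reasoning-large",
    "gemini-2.0-flash": "gemini",
}

def resolve_model(requested: str) -> str:
    # Fall back to the requested name when no alias is registered.
    return model_aliases.get(requested, requested)

print(resolve_model("deepseek-r1"))  # -> deepseek-reasoning-large
print(resolve_model("flux"))         # -> flux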
@@ -101,7 +101,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
                 original_text_models = [
                     model.get("name")
                     for model in models
-                    if model.get("type") == "chat"
+                    if "text" in model.get("output_modalities")
                 ]
                 cls.audio_models = {
                     model.get("name"): model.get("voices")
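The filter now keys on a model's declared output modalities instead of its "type" field, so chat-typed entries that cannot emit text (for example audio-only voices) no longer end up in the text model list. A small sketch of the difference, using invented sample entries shaped like the fields referenced in the diff:

# Invented sample payload; only the field names come from the diff.
models = [
    {"name": "openai", "type": "chat", "output_modalities": ["text"]},
    {"name": "voice-only-demo", "type": "chat", "output_modalities": ["audio"]},
    {"name": "flux", "type": "image", "output_modalities": ["image"]},
]

old_text_models = [m.get("name") for m in models if m.get("type") == "chat"]
new_text_models = [m.get("name") for m in models if "text" in m.get("output_modalities")]

print(old_text_models)  # -> ['openai', 'voice-only-demo']
print(new_text_models)  # -> ['openai']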
@@ -45,7 +45,7 @@ class PollinationsImage(PollinationsAI):
         private: bool = False,
         enhance: bool = False,
         safe: bool = False,
-        n: int = 4,
+        n: int = 1,
         **kwargs
     ) -> AsyncResult:
         # Calling model updates before creating a generator
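With the default lowered from 4 to 1, callers who want several images now have to pass "n" explicitly. A usage sketch assuming the project's OpenAI-style async client (whose call shape mirrors the Api hunk below); the import path and model name are assumptions, not taken from this diff:

import asyncio
from g4f.client import AsyncClient  # assumed import path

async def main():
    client = AsyncClient()
    response = await client.images.generate(
        model="flux",               # assumed model name
        prompt="a watercolor fox",
        n=2,                        # request two images; the default is now 1
    )
    for image in response.data:
        print(image.url)

asyncio.run(main())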
@@ -421,14 +421,8 @@ class Api:
                 config.api_key = credentials.credentials
             try:
                 response = await self.client.images.generate(
-                    prompt=config.prompt,
-                    model=config.model,
-                    provider=AppConfig.image_provider if config.provider is None else config.provider,
-                    **filter_none(
-                        response_format=config.response_format,
-                        api_key=config.api_key,
-                        proxy=config.proxy
-                    )
+                    **config.dict(exclude_none=True),
+                    provider=AppConfig.image_provider if config.provider is None else config.provider
                 )
                 for image in response.data:
                     if hasattr(image, "url") and image.url.startswith("/"):
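Forwarding **config.dict(exclude_none=True) passes along every field the caller actually set, including "n", instead of the hand-picked subset the old code built, which is how the "n" parameter now reaches the provider. A minimal pydantic sketch of exclude_none, using an illustrative config model rather than the API's real request class:

from typing import Optional
from pydantic import BaseModel

class ImageConfig(BaseModel):  # hypothetical stand-in for the API's config object
    prompt: str
    model: Optional[str] = None
    n: Optional[int] = None
    response_format: Optional[str] = None

config = ImageConfig(prompt="a lighthouse at dusk", n=2)
print(config.dict(exclude_none=True))
# -> {'prompt': 'a lighthouse at dusk', 'n': 2}
# Fields left at None are dropped, so only parameters the caller provided
# (including "n") are expanded into images.generate(**...).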
@@ -541,7 +541,7 @@ class Images:
                 images = await asyncio.gather(*[get_b64_from_url(image) for image in response.get_list()])
             else:
                 # Save locally for None (default) case
-                images = await copy_media(response.get_list(), response.get("cookies"), proxy)
+                images = await copy_media(response.get_list(), response.get("cookies"), response.get("headers"), proxy, response.alt)
                 images = [Image.model_construct(url=image, revised_prompt=response.alt) for image in images]

         return ImagesResponse.model_construct(