Mirror of https://github.com/xtekky/gpt4free.git (synced 2025-12-06 02:30:41 -08:00)
feat: Refactor PollinationsAI and ARTA provider structure
- Updated `PollinationsAI.py` to strip trailing periods and newlines from the prompt before encoding.
- Modified the encoding of the prompt to remove trailing percent signs after URL encoding.
- Simplified the audio response handling in `PollinationsAI.py` by removing unnecessary checks and yielding chunks directly.
- Renamed `ARTA.py` to `deprecated/ARTA.py` and updated import paths accordingly in `__init__.py`.
- Changed the `working` status of the `ARTA` class to `False` to indicate it is deprecated.
- Enhanced the `Video` class in `Video.py` to include aspect ratio handling and improved URL response caching.
- Updated the `RequestConfig` class to use a dictionary for storing URLs associated with prompts (a rough sketch follows the change summary below).
- Removed references to the `ARTA` provider in various files, including `models.py` and `any_provider.py`.
- Adjusted the `best_provider` assignments in `models.py` to exclude `ARTA` and include `HuggingFaceMedia` where applicable.
- Updated the response handling in `Video.py` to yield cached responses when available.
parent faf94ccfbb · commit d824d77d65
11 changed files with 106 additions and 87 deletions
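The `Video.py` and `RequestConfig` caching changes described above are not part of the hunks shown below. The following is a minimal, hypothetical sketch of the described shape, assuming a `urls` dictionary keyed by prompt; names and structure are illustrative, not taken from the repository.

```python
# Illustrative sketch only -- the real Video.py / RequestConfig diff is not shown here.
# Assumes a config object whose `urls` dict maps a prompt string to the media URLs
# already generated for it, so repeated requests can yield cached responses.
from dataclasses import dataclass, field
from typing import Dict, List

@dataclass
class RequestConfig:
    # prompt -> list of previously returned media URLs
    urls: Dict[str, List[str]] = field(default_factory=dict)

request_config = RequestConfig()

def cache_url(prompt: str, url: str) -> None:
    # Remember the URL produced for this prompt.
    request_config.urls.setdefault(prompt, []).append(url)

def get_cached_urls(prompt: str) -> List[str]:
    # Return any URLs already generated for this prompt (empty list if none).
    return request_config.urls.get(prompt, [])
```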
@@ -392,10 +392,10 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
             **params
         }, "1:1" if aspect_ratio is None else aspect_ratio)
         query = "&".join(f"{k}={quote_plus(str(v))}" for k, v in params.items() if v is not None)
-        encoded_prompt = prompt
+        encoded_prompt = prompt.strip(". \n")
         if model == "gptimage" and aspect_ratio is not None:
             encoded_prompt = f"{encoded_prompt} aspect-ratio: {aspect_ratio}"
-        encoded_prompt = quote_plus(encoded_prompt)[:4096-len(cls.image_api_endpoint)-len(query)-8]
+        encoded_prompt = quote_plus(encoded_prompt)[:4096-len(cls.image_api_endpoint)-len(query)-8].rstrip("%")
         url = f"{cls.image_api_endpoint}prompt/{encoded_prompt}?{query}"
         def get_url_with_seed(i: int, seed: Optional[int] = None):
             if model == "gptimage":
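The `.rstrip("%")` added above guards against URL-escape truncation: slicing a percent-encoded prompt to a fixed length can cut through an escape sequence and leave a dangling `%`. A small standalone illustration of that edge case (assumed rationale, not code from the commit):

```python
# Standalone illustration of the edge case the added .rstrip("%") guards against
# (assumed rationale; the truncation point below is arbitrary, not the provider's real limit).
from urllib.parse import quote_plus

encoded = quote_plus("prompt ending with an umlaut: ü")  # ...umlaut%3A+%C3%BC
truncated = encoded[:-2]                                  # cut mid-escape -> ends with "%C3%"
print(truncated.endswith("%"))   # True: a bare trailing "%" makes the URL invalid
print(truncated.rstrip("%"))     # trailing "%" removed, URL stays parseable
```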
@@ -583,15 +583,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
             audio = message.get("audio", {})
             if "data" in audio:
                 async for chunk in save_response_media(audio["data"], prompt, [model, extra_body.get("audio", {}).get("voice")]):
-                    if isinstance(chunk, AudioResponse) and not download_media and voice and len(messages) == 1:
-                        prompt = messages[0].get("content")
-                        if isinstance(prompt, str):
-                            url = f"https://text.pollinations.ai/{quote(prompt)}?model={quote(model)}&voice={quote(voice)}&seed={quote(str(seed))}"
-                            yield AudioResponse(url)
-                        else:
-                            yield chunk
-                    else:
-                        yield chunk
+                    yield chunk
             if "transcript" in audio:
                 yield "\n\n"
                 yield audio["transcript"]