Set return_conversation to True

This commit is contained in:
hlohaus 2025-04-26 23:10:06 +02:00
parent e238ca3a58
commit e5a49cdb6d
18 changed files with 73 additions and 16 deletions

View file

@@ -77,7 +77,7 @@ class AllenAI(AsyncGeneratorProvider, ProviderModelMixin):
top_p: float = None,
temperature: float = None,
conversation: Conversation = None,
return_conversation: bool = False,
return_conversation: bool = True,
media: MediaListType = None,
**kwargs
) -> AsyncResult:

View file

@@ -548,7 +548,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
temperature: float = None,
max_tokens: int = None,
conversation: Conversation = None,
return_conversation: bool = False,
return_conversation: bool = True,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)

View file

@@ -68,7 +68,7 @@ class Copilot(AsyncGeneratorProvider, ProviderModelMixin):
prompt: str = None,
media: MediaListType = None,
conversation: BaseConversation = None,
return_conversation: bool = False,
return_conversation: bool = True,
api_key: str = None,
**kwargs
) -> AsyncResult:

View file

@@ -321,7 +321,7 @@ class DDG(AsyncGeneratorProvider, ProviderModelMixin):
timeout: int = 60,
cookies: Cookies = None,
conversation: Conversation = None,
return_conversation: bool = False,
return_conversation: bool = True,
**kwargs
) -> AsyncResult:
model = cls.validate_model(model)

View file

@@ -270,7 +270,7 @@ class LMArenaProvider(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin)
cls, model: str, messages: Messages,
media: MediaListType = None,
conversation: JsonConversation = None,
return_conversation: bool = False,
return_conversation: bool = True,
max_tokens: int = 2048,
temperature: float = 0.7,
top_p: float = 1,

View file

@@ -36,7 +36,7 @@ class Yqcloud(AsyncGeneratorProvider, ProviderModelMixin):
stream: bool = True,
proxy: str = None,
conversation: Conversation = None,
return_conversation: bool = False,
return_conversation: bool = True,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)

View file

@@ -102,7 +102,7 @@ class HuggingChat(AsyncAuthedProvider, ProviderModelMixin):
auth_result: AuthResult,
prompt: str = None,
media: MediaListType = None,
return_conversation: bool = False,
return_conversation: bool = True,
conversation: Conversation = None,
web_search: bool = False,
**kwargs

View file

@@ -38,7 +38,7 @@ class CohereForAI_C4AI_Command(AsyncGeneratorProvider, ProviderModelMixin):
api_key: str = None,
proxy: str = None,
conversation: JsonConversation = None,
return_conversation: bool = False,
return_conversation: bool = True,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)

View file

@@ -76,7 +76,7 @@ class DeepseekAI_JanusPro7b(AsyncGeneratorProvider, ProviderModelMixin):
cookies: Cookies = None,
api_key: str = None,
zerogpu_uuid: str = "[object Object]",
return_conversation: bool = False,
return_conversation: bool = True,
conversation: JsonConversation = None,
seed: int = None,
**kwargs

View file

@@ -99,7 +99,7 @@ class Microsoft_Phi_4(AsyncGeneratorProvider, ProviderModelMixin):
cookies: Cookies = None,
api_key: str = None,
zerogpu_uuid: str = "[object Object]",
return_conversation: bool = False,
return_conversation: bool = True,
conversation: JsonConversation = None,
**kwargs
) -> AsyncResult:

View file

@@ -31,7 +31,7 @@ class Qwen_Qwen_2_5M(AsyncGeneratorProvider, ProviderModelMixin):
model: str,
messages: Messages,
proxy: str = None,
return_conversation: bool = False,
return_conversation: bool = True,
conversation: JsonConversation = None,
**kwargs
) -> AsyncResult:

View file

@@ -48,7 +48,7 @@ class HailuoAI(AsyncAuthedProvider, ProviderModelMixin):
model: str,
messages: Messages,
auth_result: AuthResult,
return_conversation: bool = False,
return_conversation: bool = True,
conversation: Conversation = None,
**kwargs
) -> AsyncResult:

View file

@@ -152,7 +152,7 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
cookies: Cookies = None,
connector: BaseConnector = None,
media: MediaListType = None,
return_conversation: bool = False,
return_conversation: bool = True,
conversation: Conversation = None,
language: str = "en",
prompt: str = None,

View file

@@ -38,7 +38,7 @@ class GithubCopilot(AsyncGeneratorProvider, ProviderModelMixin):
cookies: Cookies = None,
conversation_id: str = None,
conversation: Conversation = None,
return_conversation: bool = False,
return_conversation: bool = True,
**kwargs
) -> AsyncResult:
if not model:

View file

@@ -0,0 +1,53 @@
from __future__ import annotations
import asyncio
import json
from ...typing import AsyncResult, Messages, Cookies
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin, get_running_loop
from ...requests import Browser, get_nodriver, has_nodriver
from ...errors import MissingRequirementsError, ModelNotFoundError
from ... import debug
from ..helper import get_last_user_message
class GoogleSearch(AsyncGeneratorProvider, AuthFileMixin):
    """Drive a real browser session to google.com via nodriver and submit
    the last user message as a search query.

    NOTE(review): the coroutine below contains no ``yield``, so despite the
    ``AsyncResult`` annotation it is not an async generator and emits no
    output — confirm whether search results were meant to be scraped and
    yielded back to the caller.
    """
    label = "Google Search"
    url = "https://google.com"
    # Only usable when the optional nodriver dependency is installed.
    working = has_nodriver
    use_nodriver = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        browser: Browser = None,
        proxy: str = None,
        timeout: int = 300,
        **kwargs
    ) -> AsyncResult:
        """Open google.com, wait out the consent dialog if one appears, type
        the last user message into the search box and click the search button.

        Raises:
            MissingRequirementsError: nodriver is not installed.
            ModelNotFoundError: the provider is marked as not working.
        """
        if not has_nodriver:
            raise MissingRequirementsError("Google requires a browser to be installed.")
        if not cls.working:
            raise ModelNotFoundError(f"Model {model} not found.")
        stop_browser = None
        try:
            if browser is None:
                # No caller-supplied browser: launch our own and remember the
                # shutdown callback so the finally block can clean it up.
                browser, stop_browser = await get_nodriver(proxy=proxy, timeout=timeout)
            page = await browser.get(cls.url)
            await asyncio.sleep(3)  # give the page a moment to settle
            # Best effort: wait for the consent modal to show up and then be
            # hidden again; any failure (e.g. no modal) is silently ignored.
            try:
                await page.wait_for('[aria-modal="true"]', timeout=10)
                await page.wait_for('[aria-modal="true"][style*="display: none"]', timeout=timeout)
            except Exception:
                pass
            search_box = await page.wait_for('textarea')
            await search_box.send_keys(get_last_user_message(messages))
            # NOTE(review): locale-specific button label — this only matches
            # the German Google UI; verify against other locales.
            submit = await page.find("Google Suche")
            await submit.click()
            # NOTE(review): a ~17-minute sleep looks like leftover debugging
            # code — confirm the intended wait condition.
            await asyncio.sleep(1000)
        finally:
            # Only tear down a browser we started ourselves.
            if stop_browser is not None:
                stop_browser()

View file

@@ -304,7 +304,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
action: str = "next",
conversation: Conversation = None,
media: MediaListType = None,
return_conversation: bool = False,
return_conversation: bool = True,
web_search: bool = False,
prompt: str = None,
**kwargs

View file

@@ -27,7 +27,7 @@ class ChatCompletionsConfig(BaseModel):
proxy: Optional[str] = None
conversation_id: Optional[str] = None
conversation: Optional[dict] = None
return_conversation: Optional[bool] = None
return_conversation: bool = True
history_disabled: Optional[bool] = None
timeout: Optional[int] = None
tool_calls: list = Field(default=[], examples=[[

View file

@@ -308,6 +308,8 @@ class Completions:
if isinstance(messages, str):
messages = [{"role": "user", "content": messages}]
resolve_media(kwargs, image, image_name)
if hasattr(model, "name"):
model = model.name
if provider is None:
provider = self.provider
if provider is None:
@@ -603,6 +605,8 @@ class AsyncCompletions:
if isinstance(messages, str):
messages = [{"role": "user", "content": messages}]
resolve_media(kwargs, image, image_name)
if hasattr(model, "name"):
model = model.name
if provider is None:
provider = self.provider
if provider is None: