Mirror of https://github.com/xtekky/gpt4free.git, synced 2025-12-06 02:30:41 -08:00

Commit c0e323b543 (parent 082c152248): Fix build docker image
6 changed files with 285 additions and 43 deletions
Dockerfile (file paths are not preserved in this mirror view):

@@ -33,7 +33,7 @@ COPY docker/supervisor-api.conf /etc/supervisor/conf.d/api.conf
 COPY docker/background.png /usr/share/images/fluxbox/ubuntu-light.png
 
 # Add user, fix permissions
-RUN chown "${SEL_UID}:${SEL_GID}" $HOME/.local
+RUN chown "${SEL_UID}:${SEL_GID}" $HOME/.local /opt/venv/share
 
 # Switch user
 USER $SEL_UID
Grok provider module:

@@ -3,14 +3,16 @@ from __future__ import annotations
 import os
 import json
 import time
+import nodriver
+import asyncio
 from typing import Dict, Any, AsyncIterator
 
-from ...typing import Messages, Cookies, AsyncResult
+from ...typing import Messages, AsyncResult
 from ...providers.response import JsonConversation, Reasoning, ImagePreview, ImageResponse, TitleGeneration, AuthResult, RequestLogin
-from ...requests import StreamSession, get_args_from_nodriver, DEFAULT_HEADERS
+from ...requests import StreamSession, get_nodriver, DEFAULT_HEADERS
 from ...requests.raise_for_status import raise_for_status
 from ..base_provider import AsyncAuthedProvider, ProviderModelMixin
-from ..helper import format_prompt, get_cookies, get_last_user_message
+from ..helper import format_prompt, get_last_user_message
 
 class Conversation(JsonConversation):
     def __init__(self,

@@ -33,30 +35,40 @@ class Grok(AsyncAuthedProvider, ProviderModelMixin):
     model_aliases = {"grok-3-r1": "grok-3-thinking"}
 
     @classmethod
-    async def on_auth_async(cls, cookies: Cookies = None, proxy: str = None, **kwargs) -> AsyncIterator:
-        if cookies is None:
-            cookies = get_cookies(cls.cookie_domain, False, True, False)
-        if cookies is not None and "sso" in cookies:
-            yield AuthResult(
-                cookies=cookies,
-                impersonate="chrome",
-                proxy=proxy,
-                headers=DEFAULT_HEADERS
-            )
-            return
+    async def on_auth_async(cls, proxy: str = None, **kwargs) -> AsyncIterator:
+        auth_result = AuthResult(headers=DEFAULT_HEADERS, impersonate="chrome")
+        auth_result.headers["referer"] = cls.url + "/"
+        browser, stop_browser = await get_nodriver(proxy=proxy)
         yield RequestLogin(cls.__name__, os.environ.get("G4F_LOGIN_URL") or "")
-        yield AuthResult(
-            **await get_args_from_nodriver(
-                cls.url,
-                proxy=proxy,
-                wait_for='[href="/chat#private"]'
-            )
-        )
+        try:
+            page = browser.main_tab
+            has_headers = False
+            def on_request(event: nodriver.cdp.network.RequestWillBeSent, page=None):
+                nonlocal has_headers
+                if event.request.url.startswith(cls.conversation_url + "/new"):
+                    for key, value in event.request.headers.items():
+                        auth_result.headers[key.lower()] = value
+                    has_headers = True
+            await page.send(nodriver.cdp.network.enable())
+            page.add_handler(nodriver.cdp.network.RequestWillBeSent, on_request)
+            page = await browser.get(cls.url)
+            auth_result.headers["user-agent"] = await page.evaluate("window.navigator.userAgent", return_by_value=True)
+            while True:
+                if has_headers:
+                    break
+                await asyncio.sleep(1)
+            auth_result.cookies = {}
+            for c in await page.send(nodriver.cdp.network.get_cookies([cls.url])):
+                auth_result.cookies[c.name] = c.value
+            await page.close()
+        finally:
+            stop_browser()
+        yield auth_result
 
     @classmethod
     async def _prepare_payload(cls, model: str, message: str) -> Dict[str, Any]:
         return {
-            "temporary": False,
+            "temporary": True,
             "modelName": "grok-latest" if model == "grok-2" else "grok-3",
             "message": message,
             "fileAttachments": [],

@@ -83,7 +95,6 @@ class Grok(AsyncAuthedProvider, ProviderModelMixin):
         model: str,
         messages: Messages,
         auth_result: AuthResult,
-        return_conversation: bool = True,
         conversation: Conversation = None,
         **kwargs
     ) -> AsyncResult:

@@ -99,7 +110,6 @@ class Grok(AsyncAuthedProvider, ProviderModelMixin):
            url = f"{cls.conversation_url}/{conversation_id}/responses"
            async with session.post(url, json=payload) as response:
                await raise_for_status(response)
-
                thinking_duration = None
                async for line in response.iter_lines():
                    if line:

@@ -133,9 +143,7 @@ class Grok(AsyncAuthedProvider, ProviderModelMixin):
                            title = result.get("title", {}).get("newTitle", "")
                            if title:
                                yield TitleGeneration(title)
-
-
                        except json.JSONDecodeError:
                            continue
-        if return_conversation and conversation_id is not None:
-            yield Conversation(conversation_id)
+        # if conversation_id is not None:
+        #     yield Conversation(conversation_id)
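In short, instead of reading an "sso" cookie from a local browser profile, the provider now drives a real browser via nodriver and sniffs the headers of the first request to the conversation endpoint. A minimal standalone sketch of that pattern, mirroring the hunk above (sniff_headers and its parameters are illustrative, not part of the commit; nodriver API usage follows its CDP bindings as used in the diff):

import asyncio
import nodriver

async def sniff_headers(url: str, match_prefix: str) -> dict:
    # Capture the headers of the first request whose URL starts with
    # match_prefix; the same CDP pattern the Grok provider now uses.
    headers: dict = {}
    done = asyncio.Event()

    browser = await nodriver.start()
    page = browser.main_tab

    def on_request(event: nodriver.cdp.network.RequestWillBeSent, page=None):
        if event.request.url.startswith(match_prefix) and not done.is_set():
            headers.update({k.lower(): v for k, v in event.request.headers.items()})
            done.set()

    await page.send(nodriver.cdp.network.enable())
    page.add_handler(nodriver.cdp.network.RequestWillBeSent, on_request)
    await browser.get(url)
    await done.wait()  # the committed code polls a flag once per second instead
    browser.stop()
    return headers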
Client response iteration module:

@@ -41,7 +41,7 @@ except NameError:
 def add_chunk(content, chunk):
     if content == "" and isinstance(chunk, (MediaResponse, AudioResponse)):
         content = chunk
-    else:
+    elif not isinstance(chunk, Reasoning):
         content = str(content) + str(chunk)
     return content
 

@@ -65,6 +65,7 @@ def iter_response(
     stop: Optional[list[str]] = None
 ) -> ChatCompletionResponseType:
     content = ""
+    reasoning_content = []
     finish_reason = None
     tool_calls = None
     usage = None

@@ -92,9 +93,8 @@ def iter_response(
         elif isinstance(chunk, ProviderInfo):
             provider = chunk
             continue
-        elif isinstance(chunk, BaseConversation):
-            yield chunk
-            continue
+        elif isinstance(chunk, Reasoning):
+            reasoning_content.append(chunk)
         elif isinstance(chunk, HiddenResponse):
             continue
         elif isinstance(chunk, Exception):

@@ -141,7 +141,8 @@ def iter_response(
     chat_completion = ChatCompletion.model_construct(
         content, finish_reason, completion_id, int(time.time()), usage=usage,
         **filter_none(tool_calls=[ToolCallModel.model_construct(**tool_call) for tool_call in tool_calls]) if tool_calls is not None else {},
-        conversation=None if conversation is None else conversation.get_dict()
+        conversation=None if conversation is None else conversation.get_dict(),
+        reasoning_content=reasoning_content if reasoning_content else None
     )
     if provider is not None:
         chat_completion.provider = provider.name

@@ -168,6 +169,7 @@ async def async_iter_response(
     stop: Optional[list[str]] = None
 ) -> AsyncChatCompletionResponseType:
     content = ""
+    reasoning_content = []
     finish_reason = None
     completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
     idx = 0

@@ -193,6 +195,8 @@ async def async_iter_response(
         elif isinstance(chunk, ProviderInfo):
             provider = chunk
             continue
+        elif isinstance(chunk, Reasoning) and not stream:
+            reasoning_content.append(chunk)
         elif isinstance(chunk, HiddenResponse):
             continue
         elif isinstance(chunk, Exception):

@@ -241,7 +245,8 @@ async def async_iter_response(
         **filter_none(
             tool_calls=[ToolCallModel.model_construct(**tool_call) for tool_call in tool_calls]
         ) if tool_calls is not None else {},
-        conversation=conversation
+        conversation=conversation,
+        reasoning_content=reasoning_content if reasoning_content else None
     )
     if provider is not None:
         chat_completion.provider = provider.name
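In plain terms, these hunks stop concatenating Reasoning chunks into the message text and collect them separately for the final ChatCompletion. A self-contained sketch of the routing (the Reasoning stand-in here is a simplification of g4f's class):

# Simplified stand-in for g4f.providers.response.Reasoning
class Reasoning(str):
    pass

def route(chunks):
    # Mirrors the new behavior: Reasoning chunks bypass the content
    # string and accumulate in their own list.
    content, reasoning_content = "", []
    for chunk in chunks:
        if isinstance(chunk, Reasoning):
            reasoning_content.append(chunk)
        else:
            content = str(content) + str(chunk)
    return content, reasoning_content

print(route(["The answer ", Reasoning("Let me think..."), "is 42."]))
# ('The answer is 42.', ['Let me think...'])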
Client response stubs module:

@@ -7,6 +7,7 @@ from time import time
 from ..image import extract_data_uri
 from ..image.copy_images import get_media_dir
 from ..client.helper import filter_markdown
+from ..providers.response import Reasoning
 from .helper import filter_none
 
 try:

@@ -132,16 +133,21 @@ class ResponseMessageContent(BaseModel):
 class ChatCompletionMessage(BaseModel):
     role: str
     content: str
+    reasoning_content: list[Reasoning] = None
     tool_calls: list[ToolCallModel] = None
 
     @classmethod
-    def model_construct(cls, content: str, tool_calls: list = None):
-        return super().model_construct(role="assistant", content=content, **filter_none(tool_calls=tool_calls))
+    def model_construct(cls, content: str, reasoning_content: list[Reasoning] = None, tool_calls: list = None):
+        return super().model_construct(role="assistant", content=content, **filter_none(tool_calls=tool_calls, reasoning_content=reasoning_content))
 
     @field_serializer('content')
     def serialize_content(self, content: str):
         return str(content)
 
+    @field_serializer('reasoning_content')
+    def serialize_reasoning_content(self, reasoning_content: list):
+        return "".join([str(content) for content in reasoning_content]) if reasoning_content else None
+
     def save(self, filepath: str, allowed_types = None):
         if hasattr(self.content, "data"):
             os.rename(self.content.data.replace("/media", get_media_dir()), filepath)

@@ -184,7 +190,8 @@ class ChatCompletion(BaseModel):
         created: int = None,
         tool_calls: list[ToolCallModel] = None,
         usage: UsageModel = None,
-        conversation: dict = None
+        conversation: dict = None,
+        reasoning_content: list[Reasoning] = None
     ):
         return super().model_construct(
             id=f"chatcmpl-{completion_id}" if completion_id else None,

@@ -193,7 +200,7 @@ class ChatCompletion(BaseModel):
             model=None,
             provider=None,
             choices=[ChatCompletionChoice.model_construct(
-                ChatCompletionMessage.model_construct(content, tool_calls),
+                ChatCompletionMessage.model_construct(content, reasoning_content, tool_calls),
                 finish_reason,
             )],
             **filter_none(usage=usage, conversation=conversation)

@@ -245,16 +252,20 @@ class ClientResponse(BaseModel):
 class ChatCompletionDelta(BaseModel):
     role: str
     content: Optional[str]
+    reasoning_content: Optional[str] = None
 
     @classmethod
     def model_construct(cls, content: Optional[str]):
+        if isinstance(content, Reasoning):
+            return super().model_construct(role="reasoning", content=content, reasoning_content=str(content))
         return super().model_construct(role="assistant", content=content)
 
     @field_serializer('content')
     def serialize_content(self, content: Optional[str]):
         if content is None:
             return ""
+        if isinstance(content, Reasoning):
+            return None
         return str(content)
 
 class ChatCompletionDeltaChoice(BaseModel):
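With the client and stub changes combined, a non-streamed completion can now carry the provider's reasoning trace next to the answer. A hedged usage sketch (model and prompt are placeholders; reasoning_content stays None unless the provider emits Reasoning chunks):

from g4f.client import Client

client = Client()
response = client.chat.completions.create(
    model="grok-3-thinking",  # placeholder: any model whose provider yields Reasoning
    messages=[{"role": "user", "content": "Why is the sky blue?"}],
)
message = response.choices[0].message
print(message.content)
# The raw attribute is a list of Reasoning chunks; model_dump() joins it
# into a single string (or None) via the serializer added above.
print(message.reasoning_content)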
File utilities module:

@@ -93,7 +93,10 @@ def secure_filename(filename: str) -> str:
         unquote(filename).strip(),
         flags=re.UNICODE
     )
-    filename = filename[:100].strip(".,_-+")
+    max_length = 100
+    if len(filename.encode()) > len(filename):
+        max_length = 50
+    filename = filename[:max_length].strip(".,_+-")
     return filename
 
 def supports_filename(filename: str):
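The new truncation bounds the encoded size of filenames: a name containing multibyte (non-ASCII) characters is cut at 50 characters instead of 100, since its UTF-8 encoding is longer than its character count. A quick illustration of the rule:

ascii_name = "report" * 40  # 240 chars, 1 byte each in UTF-8
utf8_name = "ä" * 200       # 200 chars, 2 bytes each in UTF-8

for name in (ascii_name, utf8_name):
    max_length = 100
    if len(name.encode()) > len(name):  # true only for multibyte names
        max_length = 50
    print(len(name[:max_length].strip(".,_+-")))
# 100
# 50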
new.py (new file, 215 lines; a development scratch script, most of it dead code behind exit() calls):

@@ -0,0 +1,215 @@
+import json
+import uuid
+import g4f.debug
+import requests
+from g4f.client import Client
+
+def upload_and_process(files_or_urls, bucket_id=None):
+    if bucket_id is None:
+        bucket_id = str(uuid.uuid4())
+
+    if isinstance(files_or_urls, list):  # URLs
+        files = {'files': ('downloads.json', json.dumps(
+            files_or_urls), 'application/json')}
+    elif isinstance(files_or_urls, dict):  # Files
+        files = files_or_urls
+    else:
+        raise ValueError(
+            "files_or_urls must be a list of URLs or a dictionary of files")
+
+    upload_response = requests.post(
+        f'http://localhost:8080/backend-api/v2/files/{bucket_id}', files=files)
+
+    if upload_response.status_code == 200:
+        upload_data = upload_response.json()
+        print(f"Upload successful. Bucket ID: {upload_data['bucket_id']}")
+    else:
+        print(
+            f"Upload failed: {upload_response.status_code} - {upload_response.text}")
+
+    response = requests.get(
+        f'http://localhost:8080/backend-api/v2/files/{bucket_id}', stream=True, headers={'Accept': 'text/event-stream'})
+    for line in response.iter_lines():
+        if line:
+            line = line.decode('utf-8')
+            if line.startswith('data:'):
+                try:
+                    data = json.loads(line[5:])  # remove data: prefix
+                    if "action" in data:
+                        print(f"SSE Event: {data}")
+                    elif "error" in data:
+                        print(f"Error: {data['error']['message']}")
+                    else:
+                        # Assuming it's file content
+                        print(f"File data received: {data}")
+                except json.JSONDecodeError as e:
+                    print(f"Error decoding JSON: {e}")
+            else:
+                print(f"Unhandled SSE event: {line}")
+    response.close()
+    return bucket_id
+
+
+# Example with URLs
+
+# Enable debug mode
+g4f.debug.logging = True
+
+client = Client()
+
+# Upload example file
+files = {'files': ('demo.docx', open('demo.docx', 'rb'))}
+bucket_id = upload_and_process(files)
+
+# Send request with file:
+response = client.chat.completions.create(
+    model='gpt-4o',
+    messages=[{"role": "user", "content": [
+        {"type": "text", "text": "Describe this file."},
+        {"bucket_id": bucket_id}
+    ]}],
+)
+print(response.choices[0].message.content)
+exit()
+# import asyncio
+# from g4f.client import AsyncClient
+# import g4f.Provider
+
+# async def main():
+#     client = AsyncClient(provider=g4f.Provider.MarkItDown)
+
+#     # Transcribe an audio file
+#     with open("audio.wav", "rb") as audio_file:
+#         response = await client.chat.completions.create("", media=[audio_file])
+#         print(response.choices[0].message.content)
+
+# if __name__ == "__main__":
+#     asyncio.run(main())
+
+#exit()
+import requests
+
+# Open the file in binary mode
+with open('demo.docx', 'rb') as audio_file:
+    # Make the POST request
+    response = requests.post('http://localhost:8080/api/markitdown', files={'file': audio_file})
+
+# Check the response and print the transcription
+if response.status_code == 200:
+    data = response.json()
+    print(data['text'])
+else:
+    print(f"Error: {response.status_code}, {response.text}")
+exit()
+
+# from openai import OpenAI
+# client = OpenAI(base_url="http://localhost:8080/v1", api_key="secret")
+
+# with open("audio.wav", "rb") as file:
+#     transcript = client.audio.transcriptions.create(
+#         model="",
+#         extra_body={"provider": "MarkItDown"},
+#         file=file
+#     )
+#     print(transcript.text)
+
+exit()
+import asyncio
+import time
+from g4f import AsyncClient
+from g4f.Provider import PollinationsAI
+async def test():
+    client = AsyncClient()
+    response = client.chat.completions.create("guten tag", stream=True, provider=HarProvider)
+    async for chunk in response:
+        if chunk.choices[0].finish_reason == "stop":
+            break
+        print(chunk.choices[0].delta.content, end="", flush=True)
+    print()
+
+asyncio.run(test())
+time.sleep(1)
+exit()
+
+client = Client(provider=PollinationsAI)
+response = client.media.generate("Hello", model="hypnosis-tracy")
+response.data[0].save("hypnosis.mp3")
+
+client = Client(provider=Gemini)
+response = client.media.generate("Hello", model="gemini-audio")
+response.data[0].save("gemini.ogx")
+
+client = Client(provider=EdgeTTS)
+response = client.media.generate("Hello", audio={"locale": "en-US"})
+response.data[0].save("edge-tts.mp3")
+
+exit()
+import requests
+import uuid
+import json
+
+def upload_and_process(files_or_urls, bucket_id=None):
+    if bucket_id is None:
+        bucket_id = str(uuid.uuid4())
+
+    if isinstance(files_or_urls, list):  # URLs
+        files = {'files': ('downloads.json', json.dumps(files_or_urls), 'application/json')}
+    elif isinstance(files_or_urls, dict):  # Files
+        files = files_or_urls
+    else:
+        raise ValueError("files_or_urls must be a list of URLs or a dictionary of files")
+
+    upload_response = requests.post(f'http://localhost:8080/v1/files/{bucket_id}', files=files)
+
+    if upload_response.status_code == 200:
+        upload_data = upload_response.json()
+        print(f"Upload successful. Bucket ID: {upload_data['bucket_id']}")
+    else:
+        print(f"Upload failed: {upload_response.status_code} - {upload_response.text}")
+
+    response = requests.get(f'http://localhost:8080/v1/files/{bucket_id}', stream=True, headers={'Accept': 'text/event-stream'})
+    for line in response.iter_lines():
+        if line:
+            line = line.decode('utf-8')
+            if line.startswith('data:'):
+                try:
+                    data = json.loads(line[5:])  # remove data: prefix
+                    if "action" in data:
+                        print(f"SSE Event: {data}")
+                    elif "error" in data:
+                        print(f"Error: {data['error']['message']}")
+                    else:
+                        print(f"File data received: {data}")  # Assuming it's file content
+                except json.JSONDecodeError as e:
+                    print(f"Error decoding JSON: {e}")
+            else:
+                print(f"Unhandled SSE event: {line}")
+    response.close()
+    return bucket_id
+
+# Example with URLs
+# Example with files
+# files = {'files': open('document.pdf', 'rb'), 'files': open('data.json', 'rb')}
+# bucket_id = upload_and_process(files)
+import asyncio
+from g4f.client import Client
+
+import g4f.debug
+g4f.debug.logging = True
+
+client = Client()
+
+
+files = {'files': ('demo.docx', open('demo.docx', 'rb'))}
+bucket_id = upload_and_process(files)
+
+
+response = client.chat.completions.create(
+    [{"role": "user", "content": [
+        {"type": "text", "text": "Describe this file."},
+        {"bucket_id": bucket_id}
+    ]}],
+    "o1",
+)
+print(response.choices[0].message.content)