refactor(docs): Update AsyncClient API documentation to reflect changes in API usage and add asyncio examples

commit f45c072d33
parent 9c3190d11c
Author: kqlio67
Date:   2024-10-15 11:26:42 +03:00


@@ -26,7 +26,7 @@ from g4f.Provider import BingCreateImages, OpenaiChat, Gemini
 client = AsyncClient(
     provider=OpenaiChat,
     image_provider=Gemini,
-    ...
+    # Add any other necessary parameters
 )
 ```
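
For reference, the new side of this hunk assembles into the following setup snippet. It is a sketch of the documented usage: the import lines come from the hunk's own context (`from g4f.client import AsyncClient`, `from g4f.Provider import BingCreateImages, OpenaiChat, Gemini`), and the trailing comment marks where further keyword arguments would go.

```python
from g4f.client import AsyncClient
from g4f.Provider import BingCreateImages, OpenaiChat, Gemini

# Use separate providers for text completions and image generation
client = AsyncClient(
    provider=OpenaiChat,      # text completion provider
    image_provider=Gemini,    # image generation provider
    # Add any other necessary parameters
)
```
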
@@ -44,7 +44,7 @@ from g4f.client import AsyncClient
 client = AsyncClient(
     api_key="your_api_key_here",
     proxies="http://user:pass@host",
-    ...
+    # Add any other necessary parameters
 )
 ```
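
Assembled, the updated configuration example reads as below. Both values are placeholders taken from the docs, not working credentials.

```python
from g4f.client import AsyncClient

# Pass provider credentials and route requests through an HTTP proxy
client = AsyncClient(
    api_key="your_api_key_here",        # placeholder API key
    proxies="http://user:pass@host",    # placeholder proxy URL
    # Add any other necessary parameters
)
```
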
@@ -59,18 +59,20 @@ You can use the `ChatCompletions` endpoint to generate text completions. Here
 ```python
 import asyncio
-from g4f.client import AsyncClient
+from g4f.client import Client
 
 async def main():
-    client = AsyncClient()
-    response = await client.chat.completions.create(
-        [{"role": "user", "content": "say this is a test"}],
-        model="gpt-3.5-turbo"
+    client = Client()
+    response = await client.chat.completions.async_create(
+        model="gpt-3.5-turbo",
+        messages=[{"role": "user", "content": "say this is a test"}],
+        # Add any other necessary parameters
     )
     print(response.choices[0].message.content)
 
 asyncio.run(main())
 ```
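
Stripped of diff markers, the updated example is the following runnable snippet. It assumes the g4f version this commit documents, where `Client` exposes `chat.completions.async_create`.

```python
import asyncio
from g4f.client import Client

async def main():
    client = Client()
    # async_create returns a coroutine, so it is awaited here
    response = await client.chat.completions.async_create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "say this is a test"}],
    )
    print(response.choices[0].message.content)

asyncio.run(main())
```
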
### Streaming Completions
@@ -79,19 +81,23 @@ The `AsyncClient` also supports streaming completions. This allows you to proces
 ```python
 import asyncio
-from g4f.client import AsyncClient
+from g4f.client import Client
 
 async def main():
-    client = AsyncClient()
-    async for chunk in await client.chat.completions.create(
-        [{"role": "user", "content": "say this is a test"}],
+    client = Client()
+    stream = await client.chat.completions.async_create(
         model="gpt-4",
+        messages=[{"role": "user", "content": "say this is a test"}],
         stream=True,
-    ):
-        print(chunk.choices[0].delta.content or "", end="")
+        # Add any other necessary parameters
+    )
+    async for chunk in stream:
+        if chunk.choices[0].delta.content:
+            print(chunk.choices[0].delta.content or "", end="")
+    print()
 
 asyncio.run(main())
 ```
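
The new streaming example, assembled into one block as a sketch of the documented usage: with `stream=True` the awaited call yields an async iterator of chunks rather than a single response.

```python
import asyncio
from g4f.client import Client

async def main():
    client = Client()
    stream = await client.chat.completions.async_create(
        model="gpt-4",
        messages=[{"role": "user", "content": "say this is a test"}],
        stream=True,
    )
    # Consume chunks as they arrive and print the partial content
    async for chunk in stream:
        if chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content or "", end="")
    print()

asyncio.run(main())
```
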
In this example:
@@ -102,23 +108,29 @@ In this example:
 The following code snippet demonstrates how to use a vision model to analyze an image and generate a description based on the content of the image. This example shows how to fetch an image, send it to the model, and then process the response.
 
 ```python
+import g4f
 import requests
+import asyncio
 from g4f.client import Client
-from g4f.Provider import Bing
 
-client = AsyncClient(
-    provider=Bing
-)
+image = requests.get("https://raw.githubusercontent.com/xtekky/gpt4free/refs/heads/main/docs/cat.jpeg", stream=True).raw
+# Or: image = open("docs/cat.jpeg", "rb")
 
-image = requests.get("https://my_website/image.jpg", stream=True).raw
-# Or: image = open("local_path/image.jpg", "rb")
-
-response = client.chat.completions.create(
-    "",
-    messages=[{"role": "user", "content": "what is in this picture?"}],
-    image=image
-)
-print(response.choices[0].message.content)
+async def main():
+    client = Client()
+    response = await client.chat.completions.async_create(
+        model=g4f.models.default,
+        provider=g4f.Provider.Bing,
+        messages=[{"role": "user", "content": "What are on this image?"}],
+        image=image
+        # Add any other necessary parameters
+    )
+    print(response.choices[0].message.content)
+
+asyncio.run(main())
 ```
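
Assembled, the updated vision example reads as below. The model, provider, image URL, and prompt all come from the hunk; the image can be fetched over HTTP or opened from a local file.

```python
import g4f
import requests
import asyncio
from g4f.client import Client

# Fetch the sample image as a raw byte stream (or open a local file instead)
image = requests.get(
    "https://raw.githubusercontent.com/xtekky/gpt4free/refs/heads/main/docs/cat.jpeg",
    stream=True,
).raw
# Or: image = open("docs/cat.jpeg", "rb")

async def main():
    client = Client()
    response = await client.chat.completions.async_create(
        model=g4f.models.default,
        provider=g4f.Provider.Bing,
        messages=[{"role": "user", "content": "What are on this image?"}],
        image=image,
    )
    print(response.choices[0].message.content)

asyncio.run(main())
```
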
### Image Generation:
@@ -127,32 +139,40 @@ You can generate images using a specified prompt:
 ```python
 import asyncio
-from g4f.client import AsyncClient
+from g4f.client import Client
 
 async def main():
-    client = AsyncClient(image_provider='')
-    response = await client.images.generate(
-        prompt="a white siamese cat"
-        model="flux",
-        #n=1,
-        #size="1024x1024"
-        # ...
+    client = Client()
+    response = await client.images.async_generate(
+        prompt="a white siamese cat",
+        model="dall-e-3",
+        # Add any other necessary parameters
     )
     image_url = response.data[0].url
-    print(image_url)
+    print(f"Generated image URL: {image_url}")
 
 asyncio.run(main())
 ```
 
 #### Base64 as the response format
 
 ```python
-response = await client.images.generate(
-    prompt="a cool cat",
-    response_format="b64_json"
-)
-
-base64_text = response.data[0].b64_json
+import asyncio
+from g4f.client import Client
+
+async def main():
+    client = Client()
+    response = await client.images.async_generate(
+        prompt="a white siamese cat",
+        model="dall-e-3",
+        response_format="b64_json"
+        # Add any other necessary parameters
+    )
+    base64_text = response.data[0].b64_json
+    print(base64_text)
+
+asyncio.run(main())
 ```
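
Both image examples in this hunk follow the same pattern, so a single assembled sketch covers them; the base64 variant is indicated by the commented-out `response_format` line, and everything else comes from the new side of the hunk.

```python
import asyncio
from g4f.client import Client

async def main():
    client = Client()
    response = await client.images.async_generate(
        prompt="a white siamese cat",
        model="dall-e-3",
        # response_format="b64_json",  # uncomment to receive base64 data instead of a URL
    )
    # With the default response format the result carries a URL;
    # with response_format="b64_json", read response.data[0].b64_json instead
    print(f"Generated image URL: {response.data[0].url}")

asyncio.run(main())
```
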
### Example usage with asyncio.gather
@@ -161,34 +181,34 @@ Start two tasks at the same time:
 ```python
 import asyncio
-import g4f
-from g4f.client import AsyncClient
+from g4f.client import Client
 
 async def main():
-    client = AsyncClient(
-        provider=OpenaiChat,
-        image_provider=BingCreateImages,
-    )
+    client = Client()
 
-    # Task for text completion
-    async def text_task():
-        response = await client.chat.completions.create(
-            [{"role": "user", "content": "Say this is a test"}],
-            model="gpt-3.5-turbo",
-        )
-        print(response.choices[0].message.content)
-        print()
+    task1 = client.chat.completions.async_create(
+        model="gpt-3.5-turbo",
+        messages=[{"role": "user", "content": "Say this is a test"}],
+    )
+    task2 = client.images.generate(
+        model="dall-e-3",
+        prompt="a white siamese cat",
+    )
 
-    # Task for image generation
-    async def image_task():
-        response = await client.images.generate(
-            "a white siamese cat",
-            model="flux",
-        )
-        print(f"Image generated: {response.data[0].url}")
+    responses = await asyncio.gather(task1, task2)
 
-    # Execute both tasks asynchronously
-    await asyncio.gather(text_task(), image_task())
+    chat_response, image_response = responses
+
+    print("Chat Response:")
+    print(chat_response.choices[0].message.content)
+
+    print("\nImage Response:")
+    image_url = image_response.data[0].url
+    print(image_url)
 
 asyncio.run(main())
 ```
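
A runnable sketch of the concurrent example is shown below. One deviation from the hunk is hedged explicitly: the committed docs build the image task with `client.images.generate`, while this sketch uses `client.images.async_generate` (documented earlier in this commit) so that both tasks passed to `asyncio.gather` are awaitables.

```python
import asyncio
from g4f.client import Client

async def main():
    client = Client()

    # Build both coroutines first, then run them concurrently
    task1 = client.chat.completions.async_create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Say this is a test"}],
    )
    # The hunk uses client.images.generate; async_generate is assumed here
    # so that asyncio.gather receives two awaitables
    task2 = client.images.async_generate(
        model="dall-e-3",
        prompt="a white siamese cat",
    )

    chat_response, image_response = await asyncio.gather(task1, task2)

    print("Chat Response:")
    print(chat_response.choices[0].message.content)
    print("\nImage Response:")
    print(image_response.data[0].url)

asyncio.run(main())
```
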
[Return to Home](/)