Rename apply_patch function in pydantic_ai

This commit is contained in:
hlohaus 2025-02-22 04:58:05 +01:00
parent b84f35f4e4
commit 357a3bd4fb
4 changed files with 65 additions and 13 deletions

View file

@@ -21,12 +21,12 @@ pip install g4f pydantic_ai
### 1. Patch PydanticAI to Use G4F Models
In order to use PydanticAI with G4F models, you need to apply the necessary patch to the client. This can be done by importing `apply_patch` from `g4f.tools.pydantic_ai`. The `api_key` parameter is optional, so if you have one, you can provide it. If not, the system will proceed without it.
In order to use PydanticAI with G4F models, you need to apply the necessary patch to the client. This can be done by importing `patch_infer_model` from `g4f.tools.pydantic_ai`. The `api_key` parameter is optional, so if you have one, you can provide it. If not, the system will proceed without it.
```python
from g4f.tools.pydantic_ai import apply_patch
from g4f.tools.pydantic_ai import patch_infer_model
apply_patch(api_key="your_api_key_here") # Optional
patch_infer_model(api_key="your_api_key_here") # Optional
```
If you don't have an API key, simply omit the `api_key` argument.
@@ -83,12 +83,58 @@ The phrase "hello world" is commonly used in programming tutorials to demonstrat
For example, you can process your query or interact with external systems before passing the data to the agent.
---
### Simple Example with Agent
```python
from pydantic_ai import Agent
from g4f.tools.pydantic_ai import AIModel
agent = Agent(
AIModel("gpt-4o"),
)
result = agent.run_sync('Are you gpt-4o?')
print(result.data)
```
This example shows how to initialize an agent with a specific model (`gpt-4o`) and run it synchronously.
---
### Full Example with Tool Calls:
```python
from pydantic import BaseModel
from pydantic_ai import Agent
from pydantic_ai.models import ModelSettings
from g4f.tools.pydantic_ai import apply_patch
apply_patch("your_api_key")
class MyModel(BaseModel):
city: str
country: str
agent = Agent('g4f:Groq:llama3-70b-8192', result_type=MyModel, model_settings=ModelSettings(temperature=0))
if __name__ == '__main__':
result = agent.run_sync('The windy city in the US of A.')
print(result.data)
print(result.usage())
```
This example demonstrates the use of a custom Pydantic model (`MyModel`) to capture structured data (city and country) from the response and running the agent with specific model settings.
---
## Conclusion
By following these steps, you have successfully integrated PydanticAI models into the G4F client, created an agent, and enabled debugging. This allows you to conduct conversations with the language model, pass system prompts, and retrieve responses synchronously.
### Notes:
- The `api_key` parameter when calling `apply_patch` is optional. If you don't provide it, the system will still work without an API key.
- The `api_key` parameter when calling `patch_infer_model` is optional. If you don't provide it, the system will still work without an API key.
- Modify the agent's `system_prompt` to suit the nature of the conversation you wish to have.
- **Tool calls within AI requests are not fully supported** at the moment. Use the agent's basic functionality for generating responses and handle external calls separately.

View file

@@ -26,12 +26,15 @@ class BaseModel(BaseModel):
return super().model_construct(**data)
return cls.construct(**data)
class TokenDetails(BaseModel):
    # Typed container for the token-detail objects attached to UsageModel
    # (prompt_tokens_details / completion_tokens_details). No attributes are
    # declared here because fields are populated dynamically via
    # TokenDetails.model_construct(**details) in UsageModel.model_construct.
    pass
class UsageModel(BaseModel):
prompt_tokens: int
completion_tokens: int
total_tokens: int
prompt_tokens_details: Optional[Dict[str, Any]]
completion_tokens_details: Optional[Dict[str, Any]]
prompt_tokens_details: TokenDetails
completion_tokens_details: TokenDetails
@classmethod
def model_construct(cls, prompt_tokens=0, completion_tokens=0, total_tokens=0, prompt_tokens_details=None, completion_tokens_details=None, **kwargs):
@@ -39,8 +42,8 @@ class UsageModel(BaseModel):
prompt_tokens=prompt_tokens,
completion_tokens=completion_tokens,
total_tokens=total_tokens,
prompt_tokens_details=prompt_tokens_details,
completion_tokens_details=completion_tokens_details,
prompt_tokens_details=TokenDetails.model_construct(**prompt_tokens_details) if prompt_tokens_details else None,
completion_tokens_details=TokenDetails.model_construct(**completion_tokens_details) if completion_tokens_details else None,
**kwargs
)

View file

@@ -374,7 +374,9 @@ class RaiseErrorMixin():
raise ResponseError(data["error_message"])
elif "error" in data:
if "code" in data["error"]:
raise ResponseError(f'Error {data["error"]["code"]}: {data["error"]["message"]}')
raise ResponseError("\n".join(
[e for e in [f'Error {data["error"]["code"]}: {data["error"]["message"]}', data["error"].get("failed_generation")] if e is not None]
))
elif "message" in data["error"]:
raise ResponseError(data["error"]["message"])
else:

View file

@@ -7,6 +7,9 @@ from dataclasses import dataclass, field
from pydantic_ai.models import Model, KnownModelName, infer_model
from pydantic_ai.models.openai import OpenAIModel, OpenAISystemPromptRole
import pydantic_ai.models.openai
pydantic_ai.models.openai.NOT_GIVEN = None
from ..client import AsyncClient
@dataclass(init=False)
@@ -62,10 +65,8 @@ def new_infer_model(model: Model | KnownModelName, api_key: str = None) -> Model
return AIModel(model)
return infer_model(model)
def patch_infer_model(api_key: str | None = None):
    """Monkey-patch pydantic_ai so model-name inference resolves G4F models.

    Replaces ``pydantic_ai.models.infer_model`` with ``new_infer_model`` bound
    to *api_key*, exposes ``AIModel`` on ``pydantic_ai.models``, and sets
    ``pydantic_ai.models.openai.NOT_GIVEN`` to ``None`` (presumably so omitted
    OpenAI options are treated as absent — mirrors the module-level patch at
    import time).

    Args:
        api_key: Optional key forwarded to G4F model construction; the patch
            also works without one.
    """
    # Local imports keep pydantic_ai optional until the patch is requested.
    import pydantic_ai.models
    import pydantic_ai.models.openai
    pydantic_ai.models.infer_model = partial(new_infer_model, api_key=api_key)
    pydantic_ai.models.AIModel = AIModel
    pydantic_ai.models.openai.NOT_GIVEN = None