From a2da46c23f54eaf8f2574f3715c11c729697d690 Mon Sep 17 00:00:00 2001 From: Samuel Colvin Date: Tue, 16 Sep 2025 11:12:21 -0700 Subject: [PATCH 01/46] adding vercel AI chat --- .gitignore | 2 +- examples/pydantic_ai_examples/chat_app.py | 231 ++++----------- .../pydantic_ai_examples/sqlite_database.py | 81 ++++++ .../pydantic_ai/agent/abstract.py | 159 +++++++++- pydantic_ai_slim/pydantic_ai/run.py | 13 + .../vercel_ai_elements/__init__.py | 0 .../pydantic_ai/vercel_ai_elements/_utils.py | 18 ++ .../vercel_ai_elements/request_types.py | 273 ++++++++++++++++++ .../vercel_ai_elements/response_stream.py | 141 +++++++++ .../vercel_ai_elements/response_types.py | 216 ++++++++++++++ .../vercel_ai_elements/starlette.py | 69 +++++ 11 files changed, 1023 insertions(+), 180 deletions(-) create mode 100644 examples/pydantic_ai_examples/sqlite_database.py create mode 100644 pydantic_ai_slim/pydantic_ai/vercel_ai_elements/__init__.py create mode 100644 pydantic_ai_slim/pydantic_ai/vercel_ai_elements/_utils.py create mode 100644 pydantic_ai_slim/pydantic_ai/vercel_ai_elements/request_types.py create mode 100644 pydantic_ai_slim/pydantic_ai/vercel_ai_elements/response_stream.py create mode 100644 pydantic_ai_slim/pydantic_ai/vercel_ai_elements/response_types.py create mode 100644 pydantic_ai_slim/pydantic_ai/vercel_ai_elements/starlette.py diff --git a/.gitignore b/.gitignore index c0f10dc973..22657eee27 100644 --- a/.gitignore +++ b/.gitignore @@ -10,7 +10,7 @@ env*/ /TODO.md /postgres-data/ .DS_Store -examples/pydantic_ai_examples/.chat_app_messages.sqlite +.chat_app_messages.sqlite .cache/ .vscode/ /question_graph_history.json diff --git a/examples/pydantic_ai_examples/chat_app.py b/examples/pydantic_ai_examples/chat_app.py index f81211111b..95bf228571 100644 --- a/examples/pydantic_ai_examples/chat_app.py +++ b/examples/pydantic_ai_examples/chat_app.py @@ -7,215 +7,90 @@ from __future__ import annotations as _annotations -import asyncio -import json -import sqlite3 -from collections.abc import AsyncIterator, Callable -from concurrent.futures.thread import ThreadPoolExecutor from contextlib import asynccontextmanager from dataclasses import dataclass -from datetime import datetime, timezone -from functools import partial from pathlib import Path -from typing import Annotated, Any, Literal, TypeVar import fastapi import logfire -from fastapi import Depends, Request -from fastapi.responses import FileResponse, Response, StreamingResponse -from typing_extensions import LiteralString, ParamSpec, TypedDict - -from pydantic_ai import Agent, UnexpectedModelBehavior -from pydantic_ai.messages import ( - ModelMessage, - ModelMessagesTypeAdapter, - ModelRequest, - ModelResponse, - TextPart, - UserPromptPart, -) +from fastapi import Depends, Request, Response + +from pydantic_ai import Agent, RunContext +from pydantic_ai.vercel_ai_elements.starlette import StarletteChat + +from .sqlite_database import Database # 'if-token-present' means nothing will be sent (and the example will work) if you don't have logfire configured logfire.configure(send_to_logfire='if-token-present') logfire.instrument_pydantic_ai() -agent = Agent('openai:gpt-4o') THIS_DIR = Path(__file__).parent +sql_schema = """ +create table if not exists memory( + id integer primary key, + user_id integer not null, + value text not null, + unique(user_id, value) +);""" @asynccontextmanager async def lifespan(_app: fastapi.FastAPI): - async with Database.connect() as db: + async with Database.connect(sql_schema) as db: yield {'db': db} -app = 
fastapi.FastAPI(lifespan=lifespan) -logfire.instrument_fastapi(app) +@dataclass +class Deps: + conn: Database + user_id: int -@app.get('/') -async def index() -> FileResponse: - return FileResponse((THIS_DIR / 'chat_app.html'), media_type='text/html') +chat_agent = Agent( + 'openai:gpt-4.1', instructions='You are a helpful assistant.', deps_type=Deps +) -@app.get('/chat_app.ts') -async def main_ts() -> FileResponse: - """Get the raw typescript code, it's compiled in the browser, forgive me.""" - return FileResponse((THIS_DIR / 'chat_app.ts'), media_type='text/plain') +@chat_agent.tool +async def record_memory(ctx: RunContext[Deps], value: str) -> str: + """Use this tool to store information in memory.""" + await ctx.deps.conn.execute( + 'insert into memory(user_id, value) values(?, ?) on conflict do nothing', + ctx.deps.user_id, + value, + commit=True, + ) + return 'Value added to memory.' -async def get_db(request: Request) -> Database: - return request.state.db +@chat_agent.tool +async def retrieve_memories(ctx: RunContext[Deps], memory_contains: str) -> str: + """Get all memories about the user.""" + rows = await ctx.deps.conn.fetchall( + 'select value from memory where user_id = ? and value like ?', + ctx.deps.user_id, + f'%{memory_contains}%', + ) + return '\n'.join([row['value'] for row in rows]) -@app.get('/chat/') -async def get_chat(database: Database = Depends(get_db)) -> Response: - msgs = await database.get_messages() - return Response( - b'\n'.join(json.dumps(to_chat_message(m)).encode('utf-8') for m in msgs), - media_type='text/plain', - ) +starlette_chat = StarletteChat(chat_agent) +app = fastapi.FastAPI(lifespan=lifespan) +logfire.instrument_fastapi(app) -class ChatMessage(TypedDict): - """Format of messages sent to the browser.""" - - role: Literal['user', 'model'] - timestamp: str - content: str - - -def to_chat_message(m: ModelMessage) -> ChatMessage: - first_part = m.parts[0] - if isinstance(m, ModelRequest): - if isinstance(first_part, UserPromptPart): - assert isinstance(first_part.content, str) - return { - 'role': 'user', - 'timestamp': first_part.timestamp.isoformat(), - 'content': first_part.content, - } - elif isinstance(m, ModelResponse): - if isinstance(first_part, TextPart): - return { - 'role': 'model', - 'timestamp': m.timestamp.isoformat(), - 'content': first_part.content, - } - raise UnexpectedModelBehavior(f'Unexpected message type for chat app: {m}') - - -@app.post('/chat/') -async def post_chat( - prompt: Annotated[str, fastapi.Form()], database: Database = Depends(get_db) -) -> StreamingResponse: - async def stream_messages(): - """Streams new line delimited JSON `Message`s to the client.""" - # stream the user prompt so that can be displayed straight away - yield ( - json.dumps( - { - 'role': 'user', - 'timestamp': datetime.now(tz=timezone.utc).isoformat(), - 'content': prompt, - } - ).encode('utf-8') - + b'\n' - ) - # get the chat history so far to pass as context to the agent - messages = await database.get_messages() - # run the agent with the user prompt and the chat history - async with agent.run_stream(prompt, message_history=messages) as result: - async for text in result.stream_output(debounce_by=0.01): - # text here is a `str` and the frontend wants - # JSON encoded ModelResponse, so we create one - m = ModelResponse(parts=[TextPart(text)], timestamp=result.timestamp()) - yield json.dumps(to_chat_message(m)).encode('utf-8') + b'\n' - - # add new messages (e.g. 
the user prompt and the agent response in this case) to the database - await database.add_messages(result.new_messages_json()) - - return StreamingResponse(stream_messages(), media_type='text/plain') - - -P = ParamSpec('P') -R = TypeVar('R') +async def get_db(request: Request) -> Database: + return request.state.db -@dataclass -class Database: - """Rudimentary database to store chat messages in SQLite. - - The SQLite standard library package is synchronous, so we - use a thread pool executor to run queries asynchronously. - """ - - con: sqlite3.Connection - _loop: asyncio.AbstractEventLoop - _executor: ThreadPoolExecutor - - @classmethod - @asynccontextmanager - async def connect( - cls, file: Path = THIS_DIR / '.chat_app_messages.sqlite' - ) -> AsyncIterator[Database]: - with logfire.span('connect to DB'): - loop = asyncio.get_event_loop() - executor = ThreadPoolExecutor(max_workers=1) - con = await loop.run_in_executor(executor, cls._connect, file) - slf = cls(con, loop, executor) - try: - yield slf - finally: - await slf._asyncify(con.close) - - @staticmethod - def _connect(file: Path) -> sqlite3.Connection: - con = sqlite3.connect(str(file)) - con = logfire.instrument_sqlite3(con) - cur = con.cursor() - cur.execute( - 'CREATE TABLE IF NOT EXISTS messages (id INT PRIMARY KEY, message_list TEXT);' - ) - con.commit() - return con - - async def add_messages(self, messages: bytes): - await self._asyncify( - self._execute, - 'INSERT INTO messages (message_list) VALUES (?);', - messages, - commit=True, - ) - await self._asyncify(self.con.commit) - - async def get_messages(self) -> list[ModelMessage]: - c = await self._asyncify( - self._execute, 'SELECT message_list FROM messages order by id' - ) - rows = await self._asyncify(c.fetchall) - messages: list[ModelMessage] = [] - for row in rows: - messages.extend(ModelMessagesTypeAdapter.validate_json(row[0])) - return messages - - def _execute( - self, sql: LiteralString, *args: Any, commit: bool = False - ) -> sqlite3.Cursor: - cur = self.con.cursor() - cur.execute(sql, args) - if commit: - self.con.commit() - return cur - - async def _asyncify( - self, func: Callable[P, R], *args: P.args, **kwargs: P.kwargs - ) -> R: - return await self._loop.run_in_executor( # type: ignore - self._executor, - partial(func, **kwargs), - *args, # type: ignore - ) +@app.options('/api/chat') +def options_chat(): + pass + + +@app.post('/api/chat') +async def get_chat(request: Request, database: Database = Depends(get_db)) -> Response: + return await starlette_chat.dispatch_request(request, deps=Deps(database, 123)) if __name__ == '__main__': diff --git a/examples/pydantic_ai_examples/sqlite_database.py b/examples/pydantic_ai_examples/sqlite_database.py new file mode 100644 index 0000000000..491e5020dd --- /dev/null +++ b/examples/pydantic_ai_examples/sqlite_database.py @@ -0,0 +1,81 @@ +from __future__ import annotations as _annotations + +import asyncio +import sqlite3 +from collections.abc import AsyncIterator, Callable +from concurrent.futures.thread import ThreadPoolExecutor +from contextlib import asynccontextmanager +from dataclasses import dataclass +from functools import partial +from pathlib import Path +from typing import Any, LiteralString, ParamSpec, TypeVar + +import logfire + +P = ParamSpec('P') +R = TypeVar('R') + + +@dataclass +class Database: + """Rudimentary database to store chat messages in SQLite. + + The SQLite standard library package is synchronous, so we + use a thread pool executor to run queries asynchronously. 
+    """

+    con: sqlite3.Connection
+    _loop: asyncio.AbstractEventLoop
+    _executor: ThreadPoolExecutor
+
+    @classmethod
+    @asynccontextmanager
+    async def connect(
+        cls, schema_sql: str, file: Path = Path('.chat_app_messages.sqlite')
+    ) -> AsyncIterator[Database]:
+        with logfire.span('connect to DB'):
+            loop = asyncio.get_event_loop()
+            executor = ThreadPoolExecutor(max_workers=1)
+            con = await loop.run_in_executor(executor, cls._connect, schema_sql, file)
+            slf = cls(con, loop, executor)
+            try:
+                yield slf
+            finally:
+                await slf._asyncify(con.close)
+
+    @staticmethod
+    def _connect(schema_sql: str, file: Path) -> sqlite3.Connection:
+        con = sqlite3.connect(str(file))
+        con = logfire.instrument_sqlite3(con)
+        cur = con.cursor()
+        cur.execute(schema_sql)
+        con.commit()
+        return con
+
+    async def execute(self, sql: LiteralString, *args: Any, commit: bool = False):
+        await self._asyncify(self._execute, sql, *args)
+        if commit:
+            await self._asyncify(self.con.commit)
+
+    async def fetchall(self, sql: LiteralString, *args: Any) -> list[dict[str, Any]]:
+        c = await self._asyncify(self._execute, sql, *args)
+        rows = await self._asyncify(c.fetchall)
+        return [dict(row) for row in rows]
+
+    def _execute(
+        self, sql: LiteralString, *args: Any, commit: bool = False
+    ) -> sqlite3.Cursor:
+        cur = self.con.cursor()
+        cur.execute(sql, args)
+        if commit:
+            self.con.commit()
+        return cur
+
+    async def _asyncify(
+        self, func: Callable[P, R], *args: P.args, **kwargs: P.kwargs
+    ) -> R:
+        return await self._loop.run_in_executor(  # type: ignore
+            self._executor,
+            partial(func, **kwargs),
+            *args,  # type: ignore
+        )
diff --git a/pydantic_ai_slim/pydantic_ai/agent/abstract.py b/pydantic_ai_slim/pydantic_ai/agent/abstract.py
index 8d6c9ff293..54c6856099 100644
--- a/pydantic_ai_slim/pydantic_ai/agent/abstract.py
+++ b/pydantic_ai_slim/pydantic_ai/agent/abstract.py
@@ -1,5 +1,6 @@
 from __future__ import annotations as _annotations
 
+import asyncio
 import inspect
 from abc import ABC, abstractmethod
 from collections.abc import AsyncIterable, AsyncIterator, Awaitable, Callable, Iterator, Mapping, Sequence
@@ -7,6 +8,7 @@
 from types import FrameType
 from typing import TYPE_CHECKING, Any, Generic, TypeAlias, cast, overload
 
+import anyio
 from typing_extensions import Self, TypeIs, TypeVar
 
 from pydantic_graph import End
@@ -24,7 +26,7 @@
 from .._tool_manager import ToolManager
 from ..output import OutputDataT, OutputSpec
 from ..result import AgentStream, FinalResult, StreamedRunResult
-from ..run import AgentRun, AgentRunResult
+from ..run import AgentRun, AgentRunResult, AgentRunResultEvent
 from ..settings import ModelSettings
 from ..tools import (
     AgentDepsT,
@@ -543,6 +545,161 @@ async def on_complete() -> None:
         if not yielded:
             raise exceptions.AgentRunError('Agent run finished without producing a final result')  # pragma: no cover
 
+    @overload
+    def run_stream_events(
+        self,
+        user_prompt: str | Sequence[_messages.UserContent] | None = None,
+        *,
+        output_type: None = None,
+        message_history: list[_messages.ModelMessage] | None = None,
+        deferred_tool_results: DeferredToolResults | None = None,
+        model: models.Model | models.KnownModelName | str | None = None,
+        deps: AgentDepsT = None,
+        model_settings: ModelSettings | None = None,
+        usage_limits: _usage.UsageLimits | None = None,
+        usage: _usage.RunUsage | None = None,
+        infer_name: bool = True,
+        toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
+    ) -> AsyncIterator[_messages.AgentStreamEvent | AgentRunResultEvent[OutputDataT]]: ...
+ + @overload + def run_stream_events( + self, + user_prompt: str | Sequence[_messages.UserContent] | None = None, + *, + output_type: OutputSpec[RunOutputDataT], + message_history: list[_messages.ModelMessage] | None = None, + deferred_tool_results: DeferredToolResults | None = None, + model: models.Model | models.KnownModelName | str | None = None, + deps: AgentDepsT = None, + model_settings: ModelSettings | None = None, + usage_limits: _usage.UsageLimits | None = None, + usage: _usage.RunUsage | None = None, + infer_name: bool = True, + toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, + ) -> AsyncIterator[_messages.AgentStreamEvent | AgentRunResultEvent[RunOutputDataT]]: ... + + def run_stream_events( + self, + user_prompt: str | Sequence[_messages.UserContent] | None = None, + *, + output_type: OutputSpec[RunOutputDataT] | None = None, + message_history: list[_messages.ModelMessage] | None = None, + deferred_tool_results: DeferredToolResults | None = None, + model: models.Model | models.KnownModelName | str | None = None, + deps: AgentDepsT = None, + model_settings: ModelSettings | None = None, + usage_limits: _usage.UsageLimits | None = None, + usage: _usage.RunUsage | None = None, + infer_name: bool = True, + toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, + ) -> AsyncIterator[_messages.AgentStreamEvent | AgentRunResultEvent[Any]]: + """Run the agent with a user prompt in async mode and stream events from the run. + + This is a convenience method that wraps [`self.run`][pydantic_ai.agent.AbstractAgent.run] and + uses the `event_stream_handler` kwarg to get a stream of events from the run. + + Example: + ```python + from pydantic_ai import Agent + + agent = Agent('openai:gpt-4o') + + async def main(): + async for event in agent.run_stream_events('What is the capital of France?'): + print(event) + ``` + + Arguments are the same as for [`self.run`][pydantic_ai.agent.AbstractAgent.run], + except that `event_stream_handler` is now allowed. + + Args: + user_prompt: User input to start/continue the conversation. + output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no + output validators since output validators would expect an argument that matches the agent's output type. + message_history: History of the conversation so far. + deferred_tool_results: Optional results for deferred tool calls in the message history. + model: Optional model to use for this run, required if `model` was not set when creating the agent. + deps: Optional dependencies to use for this run. + model_settings: Optional settings to use for this model's request. + usage_limits: Optional limits on model request count or token usage. + usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. + infer_name: Whether to try to infer the agent name from the call frame if it's not set. + toolsets: Optional additional toolsets for this run. + + Returns: + An async iterable of stream events `AgentStreamEvent` and finally a `AgentRunResultEvent` with the final + run result. 
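+            The `AgentRunResultEvent` is always the last event yielded; if the run failed, the
+            exception is raised once the event stream is exhausted.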
+ """ + # unfortunately this hack of returning a generator rather than defining it right here is + # required to allow overloads of this method to work in python's typing system, or at least with pyright + # or at least I couldn't make it work without + return self._run_stream_events( + user_prompt, + output_type=output_type, + message_history=message_history, + deferred_tool_results=deferred_tool_results, + model=model, + deps=deps, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + ) + + async def _run_stream_events( + self, + user_prompt: str | Sequence[_messages.UserContent] | None = None, + *, + output_type: OutputSpec[RunOutputDataT] | None = None, + message_history: list[_messages.ModelMessage] | None = None, + deferred_tool_results: DeferredToolResults | None = None, + model: models.Model | models.KnownModelName | str | None = None, + deps: AgentDepsT = None, + model_settings: ModelSettings | None = None, + usage_limits: _usage.UsageLimits | None = None, + usage: _usage.RunUsage | None = None, + infer_name: bool = True, + toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, + ) -> AsyncIterator[_messages.AgentStreamEvent | AgentRunResultEvent[Any]]: + send_stream, receive_stream = anyio.create_memory_object_stream[ + _messages.AgentStreamEvent | AgentRunResultEvent[Any] + ]() + + async def event_stream_handler( + _: RunContext[AgentDepsT], events: AsyncIterable[_messages.AgentStreamEvent] + ) -> None: + async for event in events: + await send_stream.send(event) + + async def run_agent() -> AgentRunResult[Any]: + try: + return await self.run( + user_prompt, + output_type=output_type, + message_history=message_history, + deferred_tool_results=deferred_tool_results, + model=model, + deps=deps, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + event_stream_handler=event_stream_handler, + ) + finally: + send_stream.close() + + task = asyncio.create_task(run_agent()) + + async for message in receive_stream: + yield message + + result = await task + yield AgentRunResultEvent(result) + @overload def iter( self, diff --git a/pydantic_ai_slim/pydantic_ai/run.py b/pydantic_ai_slim/pydantic_ai/run.py index 0cc9481043..39a3d9080a 100644 --- a/pydantic_ai_slim/pydantic_ai/run.py +++ b/pydantic_ai_slim/pydantic_ai/run.py @@ -353,3 +353,16 @@ def timestamp(self) -> datetime: model_response = self.all_messages()[-1] assert isinstance(model_response, _messages.ModelResponse) return model_response.timestamp + + +@dataclasses.dataclass +class AgentRunResultEvent(Generic[OutputDataT]): + """An event indicating the agent run ended and containing the final result of the agent run.""" + + result: AgentRunResult[OutputDataT] + """The result of the run.""" + + _: dataclasses.KW_ONLY + + event_kind: Literal['agent_run_result'] = 'agent_run_result' + """Event type identifier, used as a discriminator.""" diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/__init__.py b/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/_utils.py b/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/_utils.py new file mode 100644 index 0000000000..6ef877a235 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/_utils.py @@ -0,0 +1,18 @@ +from abc import ABC +from typing import Any + +from pydantic import BaseModel, ConfigDict +from 
pydantic.alias_generators import to_camel
+
+__all__ = 'ProviderMetadata', 'CamelBaseModel'
+
+# Technically this is a recursive union of JSON types,
+# but to simplify validation we call it Any.
+JSONValue = Any
+
+# Provider metadata types
+ProviderMetadata = dict[str, dict[str, JSONValue]]
+
+
+class CamelBaseModel(BaseModel, ABC):
+    model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True, extra='forbid')
diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/request_types.py b/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/request_types.py
new file mode 100644
index 0000000000..d7dd60acb6
--- /dev/null
+++ b/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/request_types.py
@@ -0,0 +1,273 @@
+"""Converted to Python from
+
+https://github.com/vercel/ai/blob/ai%405.0.34/packages/ai/src/ui/ui-messages.ts
+
+mostly with Claude.
+"""
+
+from typing import Annotated, Any, Literal
+
+from pydantic import Discriminator, TypeAdapter
+
+from ._utils import CamelBaseModel, ProviderMetadata
+
+
+class TextUIPart(CamelBaseModel):
+    """A text part of a message."""
+
+    type: Literal['text'] = 'text'
+
+    text: str
+    """The text content."""
+
+    state: Literal['streaming', 'done'] | None = None
+    """The state of the text part."""
+
+    provider_metadata: ProviderMetadata | None = None
+    """The provider metadata."""
+
+
+class ReasoningUIPart(CamelBaseModel):
+    """A reasoning part of a message."""
+
+    type: Literal['reasoning'] = 'reasoning'
+
+    text: str
+    """The reasoning text."""
+
+    state: Literal['streaming', 'done'] | None = None
+    """The state of the reasoning part."""
+
+    provider_metadata: ProviderMetadata | None = None
+    """The provider metadata."""
+
+
+class SourceUrlUIPart(CamelBaseModel):
+    """A source part of a message."""
+
+    type: Literal['source-url'] = 'source-url'
+    source_id: str
+    url: str
+    title: str | None = None
+    provider_metadata: ProviderMetadata | None = None
+
+
+class SourceDocumentUIPart(CamelBaseModel):
+    """A document source part of a message."""
+
+    type: Literal['source-document'] = 'source-document'
+    source_id: str
+    media_type: str
+    title: str
+    filename: str | None = None
+    provider_metadata: ProviderMetadata | None = None
+
+
+class FileUIPart(CamelBaseModel):
+    """A file part of a message."""
+
+    type: Literal['file'] = 'file'
+
+    media_type: str
+    """
+    IANA media type of the file.
+
+    @see https://www.iana.org/assignments/media-types/media-types.xhtml
+    """
+
+    filename: str | None = None
+    """Optional filename of the file."""
+
+    url: str
+    """
+    The URL of the file.
+    It can either be a URL to a hosted file or a [Data URL](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs).
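+    For example `https://example.com/document.pdf`, or `data:image/png;base64,...` for inline content.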
+ """ + + provider_metadata: ProviderMetadata | None = None + """The provider metadata.""" + + +class StepStartUIPart(CamelBaseModel): + """A step boundary part of a message.""" + + type: Literal['step-start'] = 'step-start' + + +class DataUIPart(CamelBaseModel): + """Data part with dynamic type based on data name.""" + + type: str # Will be f"data-{NAME}" + id: str | None = None + data: Any + + +# Tool part states as separate models +class ToolInputStreamingPart(CamelBaseModel): + """Tool part in input-streaming state.""" + + type: str # Will be f"tool-{NAME}" + tool_call_id: str + state: Literal['input-streaming'] = 'input-streaming' + input: Any | None = None + provider_executed: bool | None = None + + +class ToolInputAvailablePart(CamelBaseModel): + """Tool part in input-available state.""" + + type: str # Will be f"tool-{NAME}" + tool_call_id: str + state: Literal['input-available'] = 'input-available' + input: Any + provider_executed: bool | None = None + call_provider_metadata: ProviderMetadata | None = None + + +class ToolOutputAvailablePart(CamelBaseModel): + """Tool part in output-available state.""" + + type: str # Will be f"tool-{NAME}" + tool_call_id: str + state: Literal['output-available'] = 'output-available' + input: Any + output: Any + provider_executed: bool | None = None + call_provider_metadata: ProviderMetadata | None = None + preliminary: bool | None = None + + +class ToolOutputErrorPart(CamelBaseModel): + """Tool part in output-error state.""" + + type: str # Will be f"tool-{NAME}" + tool_call_id: str + state: Literal['output-error'] = 'output-error' + input: Any | None = None + raw_input: Any | None = None + error_text: str + provider_executed: bool | None = None + call_provider_metadata: ProviderMetadata | None = None + + +# Union of all tool part states +ToolUIPart = ToolInputStreamingPart | ToolInputAvailablePart | ToolOutputAvailablePart | ToolOutputErrorPart + + +# Dynamic tool part states as separate models +class DynamicToolInputStreamingPart(CamelBaseModel): + """Dynamic tool part in input-streaming state.""" + + type: Literal['dynamic-tool'] = 'dynamic-tool' + tool_name: str + tool_call_id: str + state: Literal['input-streaming'] = 'input-streaming' + input: Any | None = None + + +class DynamicToolInputAvailablePart(CamelBaseModel): + """Dynamic tool part in input-available state.""" + + type: Literal['dynamic-tool'] = 'dynamic-tool' + tool_name: str + tool_call_id: str + state: Literal['input-available'] = 'input-available' + input: Any + call_provider_metadata: ProviderMetadata | None = None + + +class DynamicToolOutputAvailablePart(CamelBaseModel): + """Dynamic tool part in output-available state.""" + + type: Literal['dynamic-tool'] = 'dynamic-tool' + tool_name: str + tool_call_id: str + state: Literal['output-available'] = 'output-available' + input: Any + output: Any + call_provider_metadata: ProviderMetadata | None = None + preliminary: bool | None = None + + +class DynamicToolOutputErrorPart(CamelBaseModel): + """Dynamic tool part in output-error state.""" + + type: Literal['dynamic-tool'] = 'dynamic-tool' + tool_name: str + tool_call_id: str + state: Literal['output-error'] = 'output-error' + input: Any + error_text: str + call_provider_metadata: ProviderMetadata | None = None + + +# Union of all dynamic tool part states +DynamicToolUIPart = ( + DynamicToolInputStreamingPart + | DynamicToolInputAvailablePart + | DynamicToolOutputAvailablePart + | DynamicToolOutputErrorPart +) + + +UIMessagePart = ( + TextUIPart + | ReasoningUIPart + | ToolUIPart + 
| DynamicToolUIPart + | SourceUrlUIPart + | SourceDocumentUIPart + | FileUIPart + | DataUIPart + | StepStartUIPart +) +"""Union of all message part types.""" + + +class UIMessage(CamelBaseModel): + """A message as displayed in the UI by Vercel AI Elements.""" + + id: str + """A unique identifier for the message.""" + + role: Literal['system', 'user', 'assistant'] + """The role of the message.""" + + metadata: Any | None = None + """The metadata of the message.""" + + parts: list[UIMessagePart] + """ + The parts of the message. Use this for rendering the message in the UI. + + System messages should be avoided (set the system prompt on the server instead). + They can have text parts. + + User messages can have text parts and file parts. + + Assistant messages can have text, reasoning, tool invocation, and file parts. + """ + + +class SubmitMessage(CamelBaseModel): + """Submit a message to the agent.""" + + trigger: Literal['submit-message'] + id: str + messages: list[UIMessage] + + model: str + web_search: bool + + +class RegenerateMessage(CamelBaseModel): + """Ask the agent to regenerate a message.""" + + trigger: Literal['regenerate-message'] + id: str + messages: list[UIMessage] + message_id: str + + +RequestData = SubmitMessage | RegenerateMessage +request_data_schema: TypeAdapter[RequestData] = TypeAdapter(Annotated[RequestData, Discriminator('trigger')]) diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/response_stream.py b/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/response_stream.py new file mode 100644 index 0000000000..82da300014 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/response_stream.py @@ -0,0 +1,141 @@ +from __future__ import annotations as _annotations + +from collections.abc import AsyncIterator +from dataclasses import dataclass, field +from typing import Any +from uuid import uuid4 + +from pydantic_core import to_json + +from .. import messages +from ..agent import Agent +from ..run import AgentRunResultEvent +from ..tools import AgentDepsT +from . import response_types as _t + +__all__ = 'sse_stream', 'VERCEL_AI_ELEMENTS_HEADERS', 'EventStreamer' +# no idea if this is important, but vercel sends it, therefore so am I +VERCEL_AI_ELEMENTS_HEADERS = {'x-vercel-ai-ui-message-stream': 'v1'} + + +async def sse_stream(agent: Agent[AgentDepsT], user_prompt: str, deps: Any) -> AsyncIterator[str]: + """Stream events from an agent run as Vercel AI Elements events. + + Args: + agent: The agent to run. + user_prompt: The user prompt to run the agent with. + deps: The dependencies to pass to the agent. + + Yields: + An async iterator text lines to stream over SSE. + """ + event_streamer = EventStreamer() + async for event in agent.run_stream_events(user_prompt, deps=deps): + if not isinstance(event, AgentRunResultEvent): + async for chunk in event_streamer.event_to_chunks(event): + yield chunk.sse() + async for chunk in event_streamer.finish(): + yield chunk.sse() + + +@dataclass +class EventStreamer: + """Logic for mapping pydantic-ai events to Vercel AI Elements events which can be streamed to a client over SSE.""" + + message_id: str = field(default_factory=lambda: uuid4().hex) + _final_result_tool_id: str | None = field(default=None, init=False) + + async def event_to_chunks(self, event: messages.AgentStreamEvent) -> AsyncIterator[_t.AbstractSSEChunk]: # noqa C901 + """Convert pydantic-ai events to Vercel AI Elements events which can be streamed to a client over SSE. + + Args: + event: The pydantic-ai event to convert. 
+ + Yields: + An async iterator of Vercel AI Elements events. + """ + match event: + case messages.PartStartEvent(part=part): + match part: + case messages.TextPart(content=content): + yield _t.TextStartChunk(id=self.message_id) + yield _t.TextDeltaChunk(id=self.message_id, delta=content) + case ( + messages.ToolCallPart(tool_name=tool_name, tool_call_id=tool_call_id, args=args) + | messages.BuiltinToolCallPart(tool_name=tool_name, tool_call_id=tool_call_id, args=args) + ): + yield _t.ToolInputStartChunk(tool_call_id=tool_call_id, tool_name=tool_name) + if isinstance(args, str): + yield _t.ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=args) + elif args is not None: + yield ( + _t.ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=_json_dumps(args)) + ) + + case messages.BuiltinToolReturnPart( + tool_name=tool_name, tool_call_id=tool_call_id, content=content + ): + yield _t.ToolOutputAvailableChunk(tool_call_id=tool_call_id, output=content) + + case messages.ThinkingPart(content=content): + yield _t.ReasoningStartChunk(id=self.message_id) + yield _t.ReasoningDeltaChunk(id=self.message_id, delta=content) + + case messages.PartDeltaEvent(delta=delta): + match delta: + case messages.TextPartDelta(content_delta=content_delta): + yield _t.TextDeltaChunk(id=self.message_id, delta=content_delta) + case messages.ThinkingPartDelta(content_delta=content_delta): + if content_delta: + yield _t.ReasoningDeltaChunk(id=self.message_id, delta=content_delta) + case messages.ToolCallPartDelta(args_delta=args, tool_call_id=tool_call_id): + tool_call_id = tool_call_id or '' + if isinstance(args, str): + yield _t.ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=args) + elif args is not None: + yield ( + _t.ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=_json_dumps(args)) + ) + case messages.FinalResultEvent(tool_name=tool_name, tool_call_id=tool_call_id): + if tool_call_id and tool_name: + self._final_result_tool_id = tool_call_id + yield _t.ToolInputStartChunk(tool_call_id=tool_call_id, tool_name=tool_name) + case messages.FunctionToolCallEvent(): + pass + # print(f'TODO FunctionToolCallEvent {part}') + case messages.FunctionToolResultEvent(result=result): + match result: + case messages.ToolReturnPart(tool_name=tool_name, tool_call_id=tool_call_id, content=content): + yield _t.ToolOutputAvailableChunk(tool_call_id=tool_call_id, output=content) + case messages.RetryPromptPart(tool_name=tool_name, tool_call_id=tool_call_id, content=content): + yield _t.ToolOutputAvailableChunk(tool_call_id=tool_call_id, output=content) + case messages.BuiltinToolCallEvent(part=part): + tool_call_id = part.tool_call_id + tool_name = part.tool_name + args = part.args + yield _t.ToolInputStartChunk(tool_call_id=tool_call_id, tool_name=tool_name) + if isinstance(args, str): + yield _t.ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=args) + elif args is not None: + yield _t.ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=_json_dumps(args)) + case messages.BuiltinToolResultEvent(result=result): + yield _t.ToolOutputAvailableChunk(tool_call_id=result.tool_call_id, output=result.content) + + async def finish(self) -> AsyncIterator[_t.AbstractSSEChunk | DoneChunk]: + """Send extra messages required to close off the stream.""" + if tool_call_id := self._final_result_tool_id: + yield _t.ToolOutputAvailableChunk(tool_call_id=tool_call_id, output=None) + yield _t.FinishChunk() + yield DoneChunk() + + +class DoneChunk: + def sse(self) -> str: + 
return '[DONE]' + + def __str__(self) -> str: + return 'DoneChunk' + + +def _json_dumps(obj: Any) -> str: + return to_json(obj).decode('utf-8') diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/response_types.py b/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/response_types.py new file mode 100644 index 0000000000..8538dd9680 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/response_types.py @@ -0,0 +1,216 @@ +"""Convert to Python from. + +https://github.com/vercel/ai/blob/ai%405.0.34/packages/ai/src/ui/ui-messages.ts + +Mostly with Claude. +""" + +from typing import Any, Literal + +from ._utils import CamelBaseModel, ProviderMetadata + + +class AbstractSSEChunk(CamelBaseModel): + """Abstract base class for response SSE even.""" + + def sse(self) -> str: + return self.model_dump_json(exclude_none=True, by_alias=True) + + +class TextStartChunk(AbstractSSEChunk): + """Text start chunk.""" + + type: Literal['text-start'] = 'text-start' + id: str + provider_metadata: ProviderMetadata | None = None + + +class TextDeltaChunk(AbstractSSEChunk): + """Text delta chunk.""" + + type: Literal['text-delta'] = 'text-delta' + delta: str + id: str + provider_metadata: ProviderMetadata | None = None + + +class TextEndChunk(AbstractSSEChunk): + """Text end chunk.""" + + type: Literal['text-end'] = 'text-end' + id: str + provider_metadata: ProviderMetadata | None = None + + +class ReasoningStartChunk(AbstractSSEChunk): + """Reasoning start chunk.""" + + type: Literal['reasoning-start'] = 'reasoning-start' + id: str + provider_metadata: ProviderMetadata | None = None + + +class ReasoningDeltaChunk(AbstractSSEChunk): + """Reasoning delta chunk.""" + + type: Literal['reasoning-delta'] = 'reasoning-delta' + id: str + delta: str + provider_metadata: ProviderMetadata | None = None + + +class ReasoningEndChunk(AbstractSSEChunk): + """Reasoning end chunk.""" + + type: Literal['reasoning-end'] = 'reasoning-end' + id: str + provider_metadata: ProviderMetadata | None = None + + +class ErrorChunk(AbstractSSEChunk): + """Error chunk.""" + + type: Literal['error'] = 'error' + error_text: str + + +class ToolInputAvailableChunk(AbstractSSEChunk): + """Tool input available chunk.""" + + type: Literal['tool-input-available'] = 'tool-input-available' + tool_call_id: str + tool_name: str + input: Any + provider_executed: bool | None = None + provider_metadata: ProviderMetadata | None = None + dynamic: bool | None = None + + +class ToolInputErrorChunk(AbstractSSEChunk): + """Tool input error chunk.""" + + type: Literal['tool-input-error'] = 'tool-input-error' + tool_call_id: str + tool_name: str + input: Any + provider_executed: bool | None = None + provider_metadata: ProviderMetadata | None = None + dynamic: bool | None = None + error_text: str + + +class ToolOutputAvailableChunk(AbstractSSEChunk): + """Tool output available chunk.""" + + type: Literal['tool-output-available'] = 'tool-output-available' + tool_call_id: str + output: Any + provider_executed: bool | None = None + dynamic: bool | None = None + preliminary: bool | None = None + + +class ToolOutputErrorChunk(AbstractSSEChunk): + """Tool output error chunk.""" + + type: Literal['tool-output-error'] = 'tool-output-error' + tool_call_id: str + error_text: str + provider_executed: bool | None = None + dynamic: bool | None = None + + +class ToolInputStartChunk(AbstractSSEChunk): + """Tool input start chunk.""" + + type: Literal['tool-input-start'] = 'tool-input-start' + tool_call_id: str + tool_name: str + provider_executed: bool | None = 
None + dynamic: bool | None = None + + +class ToolInputDeltaChunk(AbstractSSEChunk): + """Tool input delta chunk.""" + + type: Literal['tool-input-delta'] = 'tool-input-delta' + tool_call_id: str + input_text_delta: str + + +# Source chunk types +class SourceUrlChunk(AbstractSSEChunk): + """Source URL chunk.""" + + type: Literal['source-url'] = 'source-url' + source_id: str + url: str + title: str | None = None + provider_metadata: ProviderMetadata | None = None + + +class SourceDocumentChunk(AbstractSSEChunk): + """Source document chunk.""" + + type: Literal['source-document'] = 'source-document' + source_id: str + media_type: str + title: str + filename: str | None = None + provider_metadata: ProviderMetadata | None = None + + +class FileChunk(AbstractSSEChunk): + """File chunk.""" + + type: Literal['file'] = 'file' + url: str + media_type: str + + +class DataUIMessageChunk(AbstractSSEChunk): + """Data UI message chunk with dynamic type.""" + + type: str # Will be f"data-{NAME}" + data: Any + + +class StartStepChunk(AbstractSSEChunk): + """Start step chunk.""" + + type: Literal['start-step'] = 'start-step' + + +class FinishStepChunk(AbstractSSEChunk): + """Finish step chunk.""" + + type: Literal['finish-step'] = 'finish-step' + + +# Message lifecycle chunk types +class StartChunk(AbstractSSEChunk): + """Start chunk.""" + + type: Literal['start'] = 'start' + message_id: str | None = None + message_metadata: Any | None = None + + +class FinishChunk(AbstractSSEChunk): + """Finish chunk.""" + + type: Literal['finish'] = 'finish' + message_metadata: Any | None = None + + +class AbortChunk(AbstractSSEChunk): + """Abort chunk.""" + + type: Literal['abort'] = 'abort' + + +class MessageMetadataChunk(AbstractSSEChunk): + """Message metadata chunk.""" + + type: Literal['message-metadata'] = 'message-metadata' + message_metadata: Any diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/starlette.py b/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/starlette.py new file mode 100644 index 0000000000..59868afeef --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/starlette.py @@ -0,0 +1,69 @@ +from dataclasses import dataclass +from typing import Generic + +from pydantic import ValidationError + +from ..agent import Agent +from ..tools import AgentDepsT +from .request_types import RequestData, TextUIPart, request_data_schema +from .response_stream import VERCEL_AI_ELEMENTS_HEADERS, sse_stream + +try: + from sse_starlette.sse import EventSourceResponse + from starlette.requests import Request + from starlette.responses import JSONResponse, Response +except ImportError as e: + raise ImportError('To use Vercel AI Elements, please install starlette and sse_starlette') from e + + +@dataclass +class StarletteChat(Generic[AgentDepsT]): + """Starlette support for Pydantic AI's Vercel AI Elements integration. + + This can be used with either FastAPI or Starlette apps. + """ + + agent: Agent[AgentDepsT] + + async def dispatch_request(self, request: Request, deps: AgentDepsT) -> Response: + """Handle a request and return a streamed SSE response. + + Args: + request: The incoming Starlette/FastAPI request. + deps: The dependencies for the agent. + + Returns: + A streamed SSE response. 
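+
+        Example (a minimal sketch, assuming an existing `agent`; mirrors the
+        `chat_app.py` example in this series):
+
+        ```python
+        app = fastapi.FastAPI()
+        starlette_chat = StarletteChat(agent)
+
+        @app.post('/api/chat')
+        async def post_chat(request: Request) -> Response:
+            return await starlette_chat.dispatch_request(request, deps=None)
+        ```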
+ """ + body = await request.body() + try: + data = request_data_schema.validate_json(body) + except ValidationError as e: + return JSONResponse({'errors': e.errors()}, status_code=422) + else: + return await self.handle_request_data(data, deps) + + async def handle_request_data(self, data: RequestData, deps: AgentDepsT) -> Response: + """Handle request data that has already been validated and return a streamed SSE response. + + Args: + data: The validated request data. + deps: The dependencies for the agent. + + Returns: + A streamed SSE response. + """ + if not data.messages: + return JSONResponse({'errors': 'no messages provided'}) + + message = data.messages[-1] + prompt: list[str] = [] + for part in message.parts: + if isinstance(part, TextUIPart): + prompt.append(part.text) + else: + return JSONResponse({'errors': 'only text parts are supported yet'}) + + return EventSourceResponse( + sse_stream(self.agent, '\n'.join(prompt), deps=deps), headers=VERCEL_AI_ELEMENTS_HEADERS + ) From 0018e116e5dfb4d22f42ff2771f6dec5d2a71a2f Mon Sep 17 00:00:00 2001 From: Samuel Colvin Date: Tue, 16 Sep 2025 11:26:24 -0700 Subject: [PATCH 02/46] fix sqlite --- examples/pydantic_ai_examples/chat_app.py | 10 ++++++++-- examples/pydantic_ai_examples/sqlite_database.py | 4 ++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/examples/pydantic_ai_examples/chat_app.py b/examples/pydantic_ai_examples/chat_app.py index 95bf228571..39691827af 100644 --- a/examples/pydantic_ai_examples/chat_app.py +++ b/examples/pydantic_ai_examples/chat_app.py @@ -47,7 +47,13 @@ class Deps: chat_agent = Agent( - 'openai:gpt-4.1', instructions='You are a helpful assistant.', deps_type=Deps + 'openai:gpt-4.1', + deps_type=Deps, + instructions=""" +You are a helpful assistant. + +Always reply with markdown. ALWAYS use code fences for code examples and lines of code. 
+""", ) @@ -71,7 +77,7 @@ async def retrieve_memories(ctx: RunContext[Deps], memory_contains: str) -> str: ctx.deps.user_id, f'%{memory_contains}%', ) - return '\n'.join([row['value'] for row in rows]) + return '\n'.join([row[0] for row in rows]) starlette_chat = StarletteChat(chat_agent) diff --git a/examples/pydantic_ai_examples/sqlite_database.py b/examples/pydantic_ai_examples/sqlite_database.py index 491e5020dd..9d470a937a 100644 --- a/examples/pydantic_ai_examples/sqlite_database.py +++ b/examples/pydantic_ai_examples/sqlite_database.py @@ -57,10 +57,10 @@ async def execute(self, sql: LiteralString, *args: Any, commit: bool = False): if commit: await self._asyncify(self.con.commit) - async def fetchall(self, sql: LiteralString, *args: Any) -> list[dict[str, Any]]: + async def fetchall(self, sql: LiteralString, *args: Any) -> list[tuple[str, ...]]: c = await self._asyncify(self._execute, sql, *args) rows = await self._asyncify(c.fetchall) - return [dict(row) for row in rows] + return [tuple(row) for row in rows] def _execute( self, sql: LiteralString, *args: Any, commit: bool = False From bdd321d1c4403729d76c9be31c2cd74778724391 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Wed, 8 Oct 2025 12:01:13 +0000 Subject: [PATCH 03/46] refactoring --- examples/pydantic_ai_examples/chat_app.py | 2 +- .../__init__.py | 0 .../_utils.py | 0 .../request_types.py | 11 +- .../response_stream.py | 31 +- .../response_types.py | 0 .../pydantic_ai/vercel_ai/starlette.py | 85 ++ .../vercel_ai_elements/starlette.py | 69 -- tests/cassettes/test_vercel_ai/test_run.yaml | 849 ++++++++++++++++++ tests/test_ag_ui.py | 1 - tests/test_vercel_ai.py | 820 +++++++++++++++++ 11 files changed, 1767 insertions(+), 101 deletions(-) rename pydantic_ai_slim/pydantic_ai/{vercel_ai_elements => vercel_ai}/__init__.py (100%) rename pydantic_ai_slim/pydantic_ai/{vercel_ai_elements => vercel_ai}/_utils.py (100%) rename pydantic_ai_slim/pydantic_ai/{vercel_ai_elements => vercel_ai}/request_types.py (95%) rename pydantic_ai_slim/pydantic_ai/{vercel_ai_elements => vercel_ai}/response_stream.py (84%) rename pydantic_ai_slim/pydantic_ai/{vercel_ai_elements => vercel_ai}/response_types.py (100%) create mode 100644 pydantic_ai_slim/pydantic_ai/vercel_ai/starlette.py delete mode 100644 pydantic_ai_slim/pydantic_ai/vercel_ai_elements/starlette.py create mode 100644 tests/cassettes/test_vercel_ai/test_run.yaml create mode 100644 tests/test_vercel_ai.py diff --git a/examples/pydantic_ai_examples/chat_app.py b/examples/pydantic_ai_examples/chat_app.py index 39691827af..4e2f29ddf7 100644 --- a/examples/pydantic_ai_examples/chat_app.py +++ b/examples/pydantic_ai_examples/chat_app.py @@ -16,7 +16,7 @@ from fastapi import Depends, Request, Response from pydantic_ai import Agent, RunContext -from pydantic_ai.vercel_ai_elements.starlette import StarletteChat +from pydantic_ai.vercel_ai.starlette import StarletteChat from .sqlite_database import Database diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/__init__.py b/pydantic_ai_slim/pydantic_ai/vercel_ai/__init__.py similarity index 100% rename from pydantic_ai_slim/pydantic_ai/vercel_ai_elements/__init__.py rename to pydantic_ai_slim/pydantic_ai/vercel_ai/__init__.py diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/_utils.py b/pydantic_ai_slim/pydantic_ai/vercel_ai/_utils.py similarity index 100% rename from pydantic_ai_slim/pydantic_ai/vercel_ai_elements/_utils.py rename to pydantic_ai_slim/pydantic_ai/vercel_ai/_utils.py diff --git 
a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/request_types.py b/pydantic_ai_slim/pydantic_ai/vercel_ai/request_types.py similarity index 95% rename from pydantic_ai_slim/pydantic_ai/vercel_ai_elements/request_types.py rename to pydantic_ai_slim/pydantic_ai/vercel_ai/request_types.py index d7dd60acb6..4753ca90f5 100644 --- a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/request_types.py +++ b/pydantic_ai_slim/pydantic_ai/vercel_ai/request_types.py @@ -249,15 +249,16 @@ class UIMessage(CamelBaseModel): """ -class SubmitMessage(CamelBaseModel): +class SubmitMessage(CamelBaseModel, extra='allow'): """Submit a message to the agent.""" trigger: Literal['submit-message'] id: str messages: list[UIMessage] - model: str - web_search: bool + # TODO (DouweM): Update, make variable? I like `inference_params` from OpenAI ChatKit. + # model: str + # web_search: bool class RegenerateMessage(CamelBaseModel): @@ -269,5 +270,5 @@ class RegenerateMessage(CamelBaseModel): message_id: str -RequestData = SubmitMessage | RegenerateMessage -request_data_schema: TypeAdapter[RequestData] = TypeAdapter(Annotated[RequestData, Discriminator('trigger')]) +RequestData = Annotated[SubmitMessage | RegenerateMessage, Discriminator('trigger')] +request_data_ta: TypeAdapter[RequestData] = TypeAdapter(RequestData) diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/response_stream.py b/pydantic_ai_slim/pydantic_ai/vercel_ai/response_stream.py similarity index 84% rename from pydantic_ai_slim/pydantic_ai/vercel_ai_elements/response_stream.py rename to pydantic_ai_slim/pydantic_ai/vercel_ai/response_stream.py index 82da300014..b721192f03 100644 --- a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/response_stream.py +++ b/pydantic_ai_slim/pydantic_ai/vercel_ai/response_stream.py @@ -8,34 +8,12 @@ from pydantic_core import to_json from .. import messages -from ..agent import Agent -from ..run import AgentRunResultEvent -from ..tools import AgentDepsT from . import response_types as _t -__all__ = 'sse_stream', 'VERCEL_AI_ELEMENTS_HEADERS', 'EventStreamer' -# no idea if this is important, but vercel sends it, therefore so am I -VERCEL_AI_ELEMENTS_HEADERS = {'x-vercel-ai-ui-message-stream': 'v1'} +__all__ = 'VERCEL_AI_DSP_HEADERS', 'EventStreamer' - -async def sse_stream(agent: Agent[AgentDepsT], user_prompt: str, deps: Any) -> AsyncIterator[str]: - """Stream events from an agent run as Vercel AI Elements events. - - Args: - agent: The agent to run. - user_prompt: The user prompt to run the agent with. - deps: The dependencies to pass to the agent. - - Yields: - An async iterator text lines to stream over SSE. 
- """ - event_streamer = EventStreamer() - async for event in agent.run_stream_events(user_prompt, deps=deps): - if not isinstance(event, AgentRunResultEvent): - async for chunk in event_streamer.event_to_chunks(event): - yield chunk.sse() - async for chunk in event_streamer.finish(): - yield chunk.sse() +# See https://ai-sdk.dev/docs/ai-sdk-ui/stream-protocol#data-stream-protocol +VERCEL_AI_DSP_HEADERS = {'x-vercel-ai-ui-message-stream': 'v1'} @dataclass @@ -136,6 +114,9 @@ def sse(self) -> str: def __str__(self) -> str: return 'DoneChunk' + def __eq__(self, other: Any) -> bool: + return isinstance(other, DoneChunk) + def _json_dumps(obj: Any) -> str: return to_json(obj).decode('utf-8') diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/response_types.py b/pydantic_ai_slim/pydantic_ai/vercel_ai/response_types.py similarity index 100% rename from pydantic_ai_slim/pydantic_ai/vercel_ai_elements/response_types.py rename to pydantic_ai_slim/pydantic_ai/vercel_ai/response_types.py diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai/starlette.py b/pydantic_ai_slim/pydantic_ai/vercel_ai/starlette.py new file mode 100644 index 0000000000..62e0a68964 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/vercel_ai/starlette.py @@ -0,0 +1,85 @@ +from collections.abc import AsyncIterator +from dataclasses import dataclass +from typing import Generic + +from pydantic import ValidationError + +from ..agent import Agent +from ..run import AgentRunResultEvent +from ..tools import AgentDepsT +from .request_types import RequestData, TextUIPart, request_data_ta +from .response_stream import VERCEL_AI_DSP_HEADERS, DoneChunk, EventStreamer +from .response_types import AbstractSSEChunk + +try: + from sse_starlette.sse import EventSourceResponse + from starlette.requests import Request + from starlette.responses import JSONResponse, Response +except ImportError as e: + raise ImportError('To use Vercel AI Elements, please install starlette and sse_starlette') from e + + +@dataclass +class StarletteChat(Generic[AgentDepsT]): + """Starlette support for Pydantic AI's Vercel AI Elements integration. + + This can be used with either FastAPI or Starlette apps. + """ + + agent: Agent[AgentDepsT] + + async def dispatch_request(self, request: Request, deps: AgentDepsT) -> Response: + """Handle a request and return a streamed SSE response. + + Args: + request: The incoming Starlette/FastAPI request. + deps: The dependencies for the agent. + + Returns: + A streamed SSE response. + """ + try: + data = request_data_ta.validate_json(await request.json()) + + async def run_sse() -> AsyncIterator[str]: + async for chunk in self.run(data, deps=deps): + yield chunk.sse() + + return EventSourceResponse(run_sse(), headers=VERCEL_AI_DSP_HEADERS) + except ValidationError as e: + return JSONResponse({'errors': e.errors()}, status_code=422) + except Exception as e: + return JSONResponse({'errors': str(e)}, status_code=500) + + async def run(self, data: RequestData, deps: AgentDepsT = None) -> AsyncIterator[AbstractSSEChunk | DoneChunk]: + """Stream events from an agent run as Vercel AI Elements events. + + Args: + data: The data to run the agent with. + deps: The dependencies to pass to the agent. + + Yields: + An async iterator text lines to stream over SSE. 
+ """ + # TODO (DouweM): Use .model and .builtin_tools + + # TODO: Use entire message history + + if not data.messages: + raise ValueError('no messages provided') + + message = data.messages[-1] + prompt: list[str] = [] + for part in message.parts: + if isinstance(part, TextUIPart): + prompt.append(part.text) + else: + raise ValueError(f'Only text parts are supported yet, got {part}') + + event_streamer = EventStreamer() + async for event in self.agent.run_stream_events('\n'.join(prompt), deps=deps): + if not isinstance(event, AgentRunResultEvent): + async for chunk in event_streamer.event_to_chunks(event): + yield chunk + async for chunk in event_streamer.finish(): + yield chunk diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/starlette.py b/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/starlette.py deleted file mode 100644 index 59868afeef..0000000000 --- a/pydantic_ai_slim/pydantic_ai/vercel_ai_elements/starlette.py +++ /dev/null @@ -1,69 +0,0 @@ -from dataclasses import dataclass -from typing import Generic - -from pydantic import ValidationError - -from ..agent import Agent -from ..tools import AgentDepsT -from .request_types import RequestData, TextUIPart, request_data_schema -from .response_stream import VERCEL_AI_ELEMENTS_HEADERS, sse_stream - -try: - from sse_starlette.sse import EventSourceResponse - from starlette.requests import Request - from starlette.responses import JSONResponse, Response -except ImportError as e: - raise ImportError('To use Vercel AI Elements, please install starlette and sse_starlette') from e - - -@dataclass -class StarletteChat(Generic[AgentDepsT]): - """Starlette support for Pydantic AI's Vercel AI Elements integration. - - This can be used with either FastAPI or Starlette apps. - """ - - agent: Agent[AgentDepsT] - - async def dispatch_request(self, request: Request, deps: AgentDepsT) -> Response: - """Handle a request and return a streamed SSE response. - - Args: - request: The incoming Starlette/FastAPI request. - deps: The dependencies for the agent. - - Returns: - A streamed SSE response. - """ - body = await request.body() - try: - data = request_data_schema.validate_json(body) - except ValidationError as e: - return JSONResponse({'errors': e.errors()}, status_code=422) - else: - return await self.handle_request_data(data, deps) - - async def handle_request_data(self, data: RequestData, deps: AgentDepsT) -> Response: - """Handle request data that has already been validated and return a streamed SSE response. - - Args: - data: The validated request data. - deps: The dependencies for the agent. - - Returns: - A streamed SSE response. 
- """ - if not data.messages: - return JSONResponse({'errors': 'no messages provided'}) - - message = data.messages[-1] - prompt: list[str] = [] - for part in message.parts: - if isinstance(part, TextUIPart): - prompt.append(part.text) - else: - return JSONResponse({'errors': 'only text parts are supported yet'}) - - return EventSourceResponse( - sse_stream(self.agent, '\n'.join(prompt), deps=deps), headers=VERCEL_AI_ELEMENTS_HEADERS - ) diff --git a/tests/cassettes/test_vercel_ai/test_run.yaml b/tests/cassettes/test_vercel_ai/test_run.yaml new file mode 100644 index 0000000000..f4bd74e5f6 --- /dev/null +++ b/tests/cassettes/test_vercel_ai/test_run.yaml @@ -0,0 +1,849 @@ +interactions: +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '301' + content-type: + - application/json + host: + - api.openai.com + method: POST + parsed_body: + include: + - reasoning.encrypted_content + input: + - content: How do I get FastAPI instrumentation to include the HTTP request and response + role: user + model: gpt-5 + previous_response_id: null + stream: true + tool_choice: auto + tools: + - search_context_size: medium + type: web_search + uri: https://api.openai.com/v1/responses + response: + body: + string: |+ + event: response.created + data: {"type":"response.created","sequence_number":0,"response":{"id":"resp_00e767404995b9950068e647f092888191843d25c5285ef895","object":"response","created_at":1759922160,"status":"in_progress","background":false,"error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"max_tool_calls":null,"model":"gpt-5-2025-08-07","output":[],"parallel_tool_calls":true,"previous_response_id":null,"prompt_cache_key":null,"reasoning":{"effort":"medium","summary":null},"safety_identifier":null,"service_tier":"auto","store":true,"temperature":1.0,"text":{"format":{"type":"text"},"verbosity":"medium"},"tool_choice":"auto","tools":[{"type":"web_search","filters":null,"search_context_size":"medium","user_location":{"type":"approximate","city":null,"country":"US","region":null,"timezone":null}}],"top_logprobs":0,"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}} + + event: response.in_progress + data: {"type":"response.in_progress","sequence_number":1,"response":{"id":"resp_00e767404995b9950068e647f092888191843d25c5285ef895","object":"response","created_at":1759922160,"status":"in_progress","background":false,"error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"max_tool_calls":null,"model":"gpt-5-2025-08-07","output":[],"parallel_tool_calls":true,"previous_response_id":null,"prompt_cache_key":null,"reasoning":{"effort":"medium","summary":null},"safety_identifier":null,"service_tier":"auto","store":true,"temperature":1.0,"text":{"format":{"type":"text"},"verbosity":"medium"},"tool_choice":"auto","tools":[{"type":"web_search","filters":null,"search_context_size":"medium","user_location":{"type":"approximate","city":null,"country":"US","region":null,"timezone":null}}],"top_logprobs":0,"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}} + + event: response.output_item.added + data: 
{"type":"response.output_item.added","sequence_number":2,"output_index":0,"item":{"id":"rs_00e767404995b9950068e647f10d8c819187515d1b2517b059","type":"reasoning","encrypted_content":"gAAAAABo5kfxLc0mW1zaOXGjnG239Cj0OLbDS4R2vZP9Qkm4_OcqmYRLMgU_KvshspEnG73vwwg0ZJmuTP56pmAg1NYvUTVIGdfQ3vXswO7VsH0mbZYxDgcbGS8638Q4AZNyrd3cYJOa5mJDsVL5Mxu5Y6GmwW3SrDWQv3aNBSnTXRRko0A1B5zWWRsHngMogVlauGY4aUbPPSTtJZucgoLiirHZ5ROWD-NYBVRbCmhWCoZGOUKFCO456KqurwtrhHc_HSEWnr0ohnYkh3gmRy3HWhDopz4lKve7rxtPD8CTb9Bmzss3XogM5KgopyRU1XjA31QXyMNvi5R9gC90FMokaWnHbea3MUPsJE2dw_wJ6NCYkEf8YqVMinTo9MV9hH82Oq3FamabeE8U9911E_sPl1BNH8wAOQLxm0Zi8Je8ulp1dHoyqZD_vtfzx4Yue0VwuQdhyHj-YMpPEGRWflr_gHePrYsMcM0LUAKyex6FpWRYoJlvJp3kjDi_IwDxMVYCyEjO_oCzjCTrf5XVTylvHXJjec-XDq1p6RsEeuJplVt460cydXFCjAsJ0Xh4ELcIrePipT5cSSQ5ZppDepss1eXHywuYsYoxaOGgXv4b9-_BoJKsdhp5lF0pab6RrA5brx6NF_hTB33CvBMrmbaNUiC88fMQQ6b33UAXzC1gqVf4vQ4r7wgysnfP5xzDZMJMvcvx_TRHWpRhjt0gUJuo-ql6tkgmQJEUiNPHfEmc-AN0_KDqdmT5TIyZ537Oe-TT6ZmWrPB-EzcMYntWorj-HdgYCEUhn1DkomMhEHQ3ILbMVhAeM8upzCqRW1MnqDpzIkj1_lIC","summary":[]}} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":3,"output_index":0,"item":{"id":"rs_00e767404995b9950068e647f10d8c819187515d1b2517b059","type":"reasoning","encrypted_content":"gAAAAABo5kf5xn6cAv4jZi6LDABpXHtrd1qzU0V68_w3dCOTzg7EVKvB8i56Yah1EC7B4i7Zh3fO1H3Q6sdGS-jULiPLQdtu6TOMlNeZir_mGMVRih89fRk2UBKdwh0YywoHpv7xQLHT0qOtbVxGkzrixkOKe7B-oFGVq7v5Zpz-uD-FsyZCMikyKgVI0MvbbVHHBx8FJULvlVOAKyUQesNtZGnwobMJvUgnOUM_7rPzlC-jpzD1hwURJ93QvSsP1Klpv9Ebuqbxih70HsMmjNqO8PKwRLUzo7IqKGUTarrq0eOZmDird01aNC8ao4paWUE92fEbT8Uzv0QPmWEBjQVUgm_9gL4E0M3XzlWj5hGXrqDBJYNB36ciCJyBNSS4tx2LFkqM0UDg8u8k_7yDdVyL7j-CLz095-jT4IwCRMuKuBjaqIetw42S7xEeTrrIZPGg6oqAgrz715lr1kz8NoO2aDds-RZ780FXcptue64fFy19kRT4tmRlkeUT4oHAlLcJqay-Z9EFtbKBtazKCZyPKDCHiVWQa4tUTlq6SsAvJAkgLMV18qO643EvVhmWsm37Jrb2XaiDydgNQ65olaacS8nH3YuM9xDhGatKuyo8BcQAJdHq6tRHJpSUDG8awLuGckKQ8h2FHPeztkj5ooIwb4EvmHcOTBn1KxpXC4SD6lS24Ob7l9G4VB7udxDrYyFM0d22fh-gdRcd_axy56CTn_wZIyunQwIhQXHAkF-A_tdsrhHLj2V_hQ2ICdPmtZ4ZWr7kGoiGW4RWr75yd3pqzehr64pDXn3HTZ_a7vyMSSvkQFYUZfTRU56cE_09q2MZKfGAd1_X3Tqvh2EJwwXGXi7NwjRt0deK6TUjVmLEFkiRrROGZ1XcFDWoCQs9kmaHUAEWqDKaJJ3Bt6PstG-ZfZlzSouIPpZ_aUzwIuVkYclzT_d6PPY40m304HVHoonODHlNYLYlNaQCeH0th0KAYQmGvt6JE8p5j6UYrYPCT8zXW5YVppuIqMAmT7qOwz1GC1DAFkZNkBVc9QWPeK-38leDqbNzNKUSsBLom1UwyBdJE-6GXX-zPrUr229ak8PT2VrrkiMvKoR9oOqdn0oMlRkCvreOvq7gwUHRy-yhkLtSAEmD2PoK-hPCefS_aDsLs-kscZOY73NrA6bhCAkh5KrUylk3-LKWJLXooZoCvu_ZaG5w8Gi1tBk1F3oa7WdXGeTnl8oxUuDFoiuLF4GASVmGIEMxbrSJhrpkXkZdpWzQAWV6-XK7owSgy8QBkaEy5qu70NwCS90JpvBTSmoe4EDvVwTlgpkCmQrEItynZ-Atma_L1TLHobjJcYQ7muZmFXhforiS7wrga-8oGy9Jch4y93xwLzWwSl6UWBmgqyhbqUiB9wAtituqlulU68WN8iAOaLR-zuPkHJ5TXrGyguReCAwWjEZXFjqtauX7ueoEdb-fKluBVMlivCZS0fXyts47doGzozeKaZg1ahDKxudeoZbGQR3fb6yHeLTM2W2iuk_Tpwp6Rc4Qj6EEFf5k7mTy5W0_cCzVOlv48Qn55OrnhFb8myAqtQ_8-hDt68FVuA-HCfY6KnByNHTBvKLe8Bt9BA9uCVJSXhTytauMucZXvGluISQZFKxEYpwrUkAtI4dWuGUEPaLOGv5oFrtW5-mBLKyinUKhjW0o5FhF8juCWkQ4Tm0IFhQcXasnEUDp3Z--xJl6l-lYurxa9nRrbYco4qbMj6fxHcDcjW4-s_3ZNAHFvgmLJKHvd6dQo8lzwuXw2ZO6I71htJnhPdfo8456tiiH0J7qc5i0jB5CUnttTupJMvGOr5DyrNdGzT9KfoJ4mt970AaIg3NDAsLP0mnZhsVJSUKeLxkoUd64mkhnr8Hjnjbk06QEUmVtyO3ha-jFkCVP2GjwLE2L9gWWQ8pctXuVAtHn2umpoQbcNELKgv9P9rK6eQNTTeVgz0ly8N8-P67SueDSh-Bf5nrUYmc8Yz9hxlYsemremEjftbCpODVpgnpnDINEnH6jELjameMmCJcQDzDIaVLNMM5z71wU_n1RDdWdnBdUKTy0B0Qqds9DHLyP6RGC7NQHO8EirWcpiMtckYDB6M_8QQI_ZQ3ORgmzoWgAOSmiOzlIiT3XNc9G0ts_zZigU4iIBm_BBXYa2PjpO5ijMiZDGybqrpzuNChKAY9f2XFl2YDt11cR-SUFV56dL0Ebc3BESJQ-1CpeODHqqLPd3iRWzUyyT6UVtUo-x9A70HbnJ142QAo0MvZ5Sd-q-TQVrKDM
UvXZRQzQ3x3NISPPO_EIOXguwa9EjcLRpV3ldSVpNNTo0O2kQh8HTyH16kGw4RjMT7Qn_DJWpe9lXPwQyI_eCzacR_FwiOOUt5H2cgKgAI-TSKnUm7oFm9lTSPf2FB4LmympN96zX9a1o8YsoYKMnuGypCCn65qblBs0ftTswO2gvZrhjWX9MB7ZlAaA5MgHmrKcLb5ICe5XY3BgGouOWBWkjES-tjAogrUXnBAN7l99g5rkAfAfZvL4RX2H4UGvxHqRUrBME2IIUGKCK5joD-PFZL6x-75S7Kj-vLm37UXglF-RiN3AJbrFce7vzbu2GcLgJMwd4GilTycVUNCIKyJypMs9RbkeUDMeO3HIIyjn2zwNIQGWq1ZSL5yrkEzpBXd8RMCwVdpegZYzZwPhGH673xoj4CV5P42QRZ2qJcjjtGf9E35FCqCWb9ogKV-MWYriwKOFmNN4GKGV39aGQj4NHCw7Mwth5sD6WpoE5BaMbMBEfy2uVTfaqT0KZ7fRcgy1hXOm0A4qDSS3zkfsh4h7Oni2fjPPkwuh_DLx2Y9T01sJ1vYd79Gsmxlmzor-TjED3I24phJUgAanOxxcRD8GIojeiRTI4Tgj95q0SfkCzy_DY-wD6IIHjEyL4D7JO0Y7U_UADJN3ZAoKHWlZl3OSO7nw_uskVM5FB7jqqDfr5P0YBMQMwWjVuAibgMnt-9rO4NPBG1xifL1GwcBvFW30cgxE2SGFrOcPqyc8HS6vc55DgUS4kpPOkN8sTOP8wBT2_jEKzRde3ByBDa7Dyx1hAuet_huuNmYU6mGRcmKCqWc6qW6FJH4sjLSvUW9UoRDMn3ZpVBsHVtdiQAB8RlegtJNyr61gexss0Qla4InT65EbkB1carYnCdeQGyyMUKk-YD79CapD_BtRnMJ5twCFampVIiQn_tANwdMEX9NFGgQgnczJ3MzbrEHHA3iBhapWxWD65FEm2zYrDUaeMNm8yH60RuNKyZ7aZRo-sGKrYWMU6bZ4YwuRbqiTNF1NPizcpGrj_2k_hT-TcbtPnPNAMdXzTK9EB2QHfsQqmsqOImtx8q0vekrqCSWjN4H1xRHrYPZMvlvTdtDdhiWI8WSatsPludVXeYxqXxOy2XulPjzkiVtpgvMUwTGH_Yh-kUP5C94ZuZ3WDM93Gy9AsDdaNScSC_m0Ho4inuGpe5b_ctx7MNCdhdzl3PXwl67Fj_0E3qkdmDS6McKk0S2fOmzbvoJmaL7g2g_QVCCpxA-4vmmecak3J5Me_2EW6sHd5Zn1tvcFezrcmQfcZQbax7yoyZk20egkaQqpkli-AKjSUKBmP0aSR7qrkvAe2z4mgl4yP062GlC2YI3r_8xedjsg3jWRHyyJ4D_7Y0rtmAjVwhi1tfl0XK0JCPKVkwTTCBSbwVQo2yR_zc7zIazj9TDm8qFK2G_Q3QuEqz-Cz9VGQu_BPhrMaH1WaB-2ays6IUf3s7_gUavAAynaEM9kGGHj_t8zkyqDTjalRl0JE5T5PxxYdv1G9s3MU4B0yB3H_LhcDyjVAb1WzPZpcAxxrcBdpLVFa36ewDzKRmVmSONJd3fLtB9_HJhXgtFIIRgAKuhmRf1hMAG6qaJSsJNA3HYRZigy15OMe4RgkFSl1aj3ScrJc1-N3Si44OwqRIJGrVV2ihh_euAE0l_ImdW_vEgpRbgfMqBiDmFbxgdcv2cT_MaOMoFX3n_V1GrAuBzjrVQSBbhgau7nWsynSK4h9781waWYMAZK4LrOK0UQ4iyugcpj0JNO9k4WvB-yRR9E2rtIoEC6Nz0=","summary":[]}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":4,"output_index":1,"item":{"id":"ws_00e767404995b9950068e647f909248191bfe8d05eeed67645","type":"web_search_call","status":"in_progress"}} + + event: response.web_search_call.in_progress + data: {"type":"response.web_search_call.in_progress","sequence_number":5,"output_index":1,"item_id":"ws_00e767404995b9950068e647f909248191bfe8d05eeed67645"} + + event: response.web_search_call.searching + data: {"type":"response.web_search_call.searching","sequence_number":6,"output_index":1,"item_id":"ws_00e767404995b9950068e647f909248191bfe8d05eeed67645"} + + event: response.web_search_call.completed + data: {"type":"response.web_search_call.completed","sequence_number":7,"output_index":1,"item_id":"ws_00e767404995b9950068e647f909248191bfe8d05eeed67645"} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":8,"output_index":1,"item":{"id":"ws_00e767404995b9950068e647f909248191bfe8d05eeed67645","type":"web_search_call","status":"completed","action":{"type":"search","query":"OpenTelemetry FastAPI instrumentation capture request and response body"}}} + + event: response.output_item.added + data: 
{"type":"response.output_item.added","sequence_number":9,"output_index":2,"item":{"id":"rs_00e767404995b9950068e647fa69e48191b6f5385a856b2948","type":"reasoning","encrypted_content":"gAAAAABo5kf6RMw3cYGYW7G695hqy-Lc0VhF9ct5CCX103QsnamWXw3ZJIXPr3hTUifg4sJ8liWn3c1CwDSRK_2RGcxOQYtowBwX1a68VoxQxQYrZnmnln1osLOAYnxvlEczg7IvnDJAtllxXHKmpvMO573_A-r1o6Wm_BP7TBMvQzUt9pIH_RCkpFM5xI93ERGaKCWE9RA1dWeXDFUccGx_lOJa7eif13UlaPTjICzlVynuHg4QVejjon89TBQIPprkAliKvQEANSEZMBrEexL1Ta1jU983iS3nyIHnAliDiyZgDWJWpVZDgcbb5keDwIgD2Rq6TFN0y0zVEERUjaAribiUoXSg9rP6-527IycrY9AFQgmTTCU0Pud-A-DJN_IGIK4MATMOOjUnluLnZMTE8pMv1aqlZpJV2zgZORgNucV3mOIeWIjekuO42TmIeLvBmrvkMEt2aZAayDVLysbvtxHoehn59q-rh17eGfDvoTR-2N_MjidI9WjWrBcH42OI9SVGO2Cyw1I7Z78PxPFn012iGJhyfLfT5e1xjUecFDPgRZjVz-Y1j3QVSB8tTFxoXhlrSNxrdIusMVR4_8f8L92aClGIHtYwm60lQx10Mo_Zqlr5WlhpCWf8EHMAUSOcqZ40TmHGUrAhHbl2XsbDHynG_GC3twjCOIjQL7JC7pAdojRCzsIVZY16L4eFZXIxxGNM1oYzHeVTFsIXE0bpBNlugY_FgwsliLFdv_inoWgcCg7Ujvu4Mg83vXCfwBtLCyBmu3_xKCoLcOryErEDMq3jp_Siwtepgs2gHXkJgu-ihE5TfoUodkM4jB44OBivPY0dwS_r","summary":[]}} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":10,"output_index":2,"item":{"id":"rs_00e767404995b9950068e647fa69e48191b6f5385a856b2948","type":"reasoning","encrypted_content":"gAAAAABo5kf71KqiOXQLdnpn0X2dce5ZXQV3HkgzKX8X-CIy9ax4SrMhk5x7PGHiHFyqGHhXQ9VBYdbmpeQmcXGtngPW7v0BPz1pl4HVfrNGG2MVFgqx3xVgDR8IDDBtoX10qhGFzQOmp_V14WZWPvdwP7irv9LtSXMdDKvJThXM-s4Kp4vdJCPWk6PeLmQ-3bh6eXbxGB9jK5kifSBoJfgicrgpfpUFmdispLXzxdTvyUUMJDjg7p_AgdKwupWPRz0I95d9Fu04GdBZfJn2bcXbUUgKeKKqpOeQjmnxD7Igmce3W0iD5icZq9d7ny_dIfygoel_9JPBNIKJ05TRKVY4C9yiy8VJAvywl55gUdziTWDS5WbEggs99brazkEdWTvqhFcASgqDErzCeppQ0ACWy6a8F2Wgi-g0Iw_MiaB4zvA0VSDC4xOduXhWB4BUJemEp7rE8ztCN7FTKtON98gBixqyV4ueEUDp7SXwkRJ_d-IJMh8w12e3eTTqKs3uljEPDBZXnvPkeCz6GUIuKkEQi5mk4qLi-vZmH3gvhOe-dKJNhhN9CI01PK0bmwNdXr6loXsTmPhsLlp3Mwc9mk1RWvXm0TbDSGDeH1a5UBiqjFI7qjWANOzye5qeFEs-vqIQnC9SBDMMDlFlzLv7LJjsTnz7Q9TTuUx1sdPqSu-lLDM5OBaCdykS4gQzOZqomLJNSb_lMKrzzylpo6bYmV40N_jZkM71gF57n8lBYVmS4t-JidvqsqW3kVlGGDmZ38sA2I7jJmI8v8v3Roio_uNCzzocWCtbcPqgjNsvDzXxTLl_WTjyRfHE9Qdrj-KY565D-ynxlR_iCPFcOx3cwdnprXFf08Jx9WIlvVysra15pjj7WH32t4j_Bp7g3pI8ZVYGwIb4US3Img9D7Plfc3rAWI0d-RGaMFjjbsPzCBZeF9JPDcOwdGLa88ap_vZWkdRScEJffjZocv7FZywA_VgNlGv1S9bP43EuaqEXmF9aNLkMxsoaLLFhRRsgryfxk4jslG38F8BiuHhzxASD8C7f6WNXZVjJL5jW1GkhuYB1qrn9TpXrLR8mJY1Kzkt3dUtNhnIxGKzP","summary":[]}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":11,"output_index":3,"item":{"id":"ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c","type":"web_search_call","status":"in_progress"}} + + event: response.web_search_call.in_progress + data: {"type":"response.web_search_call.in_progress","sequence_number":12,"output_index":3,"item_id":"ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c"} + + event: response.web_search_call.searching + data: {"type":"response.web_search_call.searching","sequence_number":13,"output_index":3,"item_id":"ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c"} + + event: response.web_search_call.completed + data: {"type":"response.web_search_call.completed","sequence_number":14,"output_index":3,"item_id":"ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c"} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":15,"output_index":3,"item":{"id":"ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c","type":"web_search_call","status":"completed","action":{"type":"search","query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY 
Python"}}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":16,"output_index":4,"item":{"id":"rs_00e767404995b9950068e647fd656081919385a27bd1162fcd","type":"reasoning","encrypted_content":"gAAAAABo5kf9aqabCKTlJxF74hcPXQKl5xUBRpxs4IzOqoQ1YUhaTBc7MowYLJQHHXnWYngx3hqtnk1ROPfLNDgQYfngIadOTZxwUoOrcEzIIEnMdC-JsfDfvyqgBrE8qDYNYkAFa-ml-G7Kuy2DRXhqrNljDK-1ILGgvuQALcWh7Vyd0erIWhbJORFeS5-OOgxCVHliruj6JHiGRYeJ-ixdQLrp6yZeRkq1Ow-1FMxblbv0kRuGYDeM5w0K8d8aC0xbFGiJMYCZonGcYG6MqeeF4v2w_9SBOGd-ty6r34TftOOLnWMmYGBKaBM0i1EbTBYrUH0gDwjc5SgiDaMfX_yJhZvtM3yAIk6wrE_Vbmnw86BrV6ZVNNIZjBlX8tQe_-k9qhr-y6Ufpmk7eo1VR2_05ryz3-7zMrzbgkGzj_qWXnVXBRkt4T2cwyHca3FHr1euqpJDOjSDUWIEs7zNZ2WNlCWPIZUrEbmSyq7ChhTU7PECR00wqvzpFSP50jyr0KA0L-hQI5FdAfrEanAOrPkp65aZn6ni9JrQxe8FLRjEszQ4oNvqHdWfHqsFOOhzJ14mLIm9m_2QUDB_crkJkK4o6IXYs7kdGfSzvm0z4HMNSU91PSNdrrKDmu_JTNcV-IFNuP_CBhvyEse4s5Z1IAwYQUxfXJb3mT-Oz3U8j57nNCLb0EP17xMujNuO1NFH5d-NmLWqPPX8YpithcX4jV1O7Yhk6JjsVO4eaFuYz_7BXNzQL5kbAi1spsrYy57uNIO5FG8UZTndcRRPCgyyA1AHCuqo2KTRr875fYgMLJcdAnkszV0YQLWCmcGmR0YbPCNdW8_CGfq-","summary":[]}} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":17,"output_index":4,"item":{"id":"rs_00e767404995b9950068e647fd656081919385a27bd1162fcd","type":"reasoning","encrypted_content":"gAAAAABo5kf-GWE79PWmX5BUJGtdfBs_8-rH3_mU0ojB4l0DH9RZkOBh8uXVIGaJfTwN9yMTzxrSwC8kdSKuYrKAvpfn82Dq0i4KU0q9ZlA4mQuVsekJXqABXyBxSdR0Zt4k_hDaNRQDiSdl5z9qkDhupx3oD0QzTN2vJF0EntJpnQtiOkBlqdiFJG63olxaKLcaYk7w1UpPXHFKHxv_clxSb6Bhs8XJyquYVcuBP_qxal77tkyepJ4HQI46B_bnwS8LaIGjZ516MYzxNdIYPPc2T1TSACumXTsTLseOL2LseiClD_fSAEuCG_g_lnbtkMa95pz6-fTN2dmJaXcg05MSjX8YasVt1lkk8EzF2L_lhqled2ht2Np70R3Ykmlv8TE0kzCx1otr3WEhdi9xarx7pbISF0HuguVGp3V89-Vikge5LYIar1sMOOOFKxjVBhPvXqBB9sw6JCknfXKorDAj2shu9vGrPHX5YVVBvkJwPuSO1c2oV7SlW96Oy0lt3V9subpr6XqFtxO4QR45Kv_jLCs88_0W5ImjXA4vfKzWwP7vYuzWVY3xkaXXydjH2SzRZ3PpJBFzoNF7QjbnlOBlwKaPnPyGnA3gb-m3dNOYCloaa1Z6XIuNS1zlBHlJIHrJrEKMAtMJxBsmmjbxwu_nMbvdri1MzhtqTwySEB5rLTRYvYuU_3tnDbk1iuoB2kcDp_J6AY5eZScwsiyf-zWhTjgAyjdxwYytFKNzL3j1RTHwefI3U8JCmQfDk7kzfrBK3a_1Jm7MW_LnAhD-Nf9dbeNEBaVI-0LnH3A_xFMYLFOcINGe-5SoUYV88CWMIERuaSuPYoXw54S-fqUmDVVT5eNOFEKXWvuiUZDJBWYW0RAZzKXB2uLowcj1qBOeW2dxUR6s5fFudt45OwA7aDi8_ReiyC_RJ1NuOOYQ5YqxsAYZt6-NXALhZKgOapsK3ui6wCeaXGprG7IohQ1jj7rKUcRhs6JWxPSG8SaG9P8D3p_ahdKnSq6rXB9MpWmbtOSVNOLQk51RnMwwj0__6pHcJz-Tk0ZWr3dHBXbNKwxt8j89XF5bbNChGEZ458LcUE6EQQyjwHbqqxQRe325CHqhwwjgh3eXig_en9hHbDRyyYYFFeG_7ysZW5o6gO8QaI3Lo8JW0_PaY6e1um328lICJauuWTDKIWMFOUEahiz_eveoSHWJR-_mi2KBm4OQZ4y9xeffPmFs","summary":[]}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":18,"output_index":5,"item":{"id":"ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba","type":"web_search_call","status":"in_progress"}} + + event: response.web_search_call.in_progress + data: {"type":"response.web_search_call.in_progress","sequence_number":19,"output_index":5,"item_id":"ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba"} + + event: response.web_search_call.searching + data: {"type":"response.web_search_call.searching","sequence_number":20,"output_index":5,"item_id":"ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba"} + + event: response.web_search_call.completed + data: {"type":"response.web_search_call.completed","sequence_number":21,"output_index":5,"item_id":"ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba"} + + event: response.output_item.done + data: 
{"type":"response.output_item.done","sequence_number":22,"output_index":5,"item":{"id":"ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba","type":"web_search_call","status":"completed","action":{"type":"search","query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY opentelemetry python"}}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":23,"output_index":6,"item":{"id":"rs_00e767404995b9950068e648022d288191a6acb6cff99dafba","type":"reasoning","encrypted_content":"gAAAAABo5kgC6pJVTA7kgbUftG9v1gX2Q9F-F39EeJ3t-43LUIAqjCjd7SqHqUWCBKWKJVZbucJs0MhBUziC8dhRNcp7KIsn8PNoO3dTjMcaqgXLmHSl_JWLYUPz9M75OhMpneCI9uMGycJDkgzMEWcwzyXGnPmGIupt_mfHAVNs9khlamVjjFO7e_QUT9s6V_bQD5dzdtwwIjoqZEjvUjqrbO-yR425vIjPTCzdhmHWJRCoVzD-Nih8_ubbkQfh_4i2zZjZhaLEuCqG7k2f_y-XlmSfLakRmEBAgpHXmkiR8gzjY2kYsBwL9jGrC-fFLez0bnz6cV4DPfI9c6obkWlI-t8YXI7DUoqgBbbIzqrviqhWkUjdqoCWFuFwd-3semR5RhLBlmP5tLJTv8Gomtw_aicnY2UpbJvnK6vj4UoOlLotfYueFCyMIgjKN1QbQh4qHwL2iO--eg388K5qQ1lBRB6VozpU12SyQQnqKzU7WDAsxkgdSgvoOY5P7Nfyynozy-Dts55htTNgaHiozBzzAqzGoW8J8t5hewSbbIDObPIst5QcmcnCW_t5Ny-rN7GEQyVn4WYXdMSP86oksp0rzep3DHK2YsZCqkEh3GMzxhHOwaT9CI0jctPBkhf-BcF043tWwO3C8lPy5aoXc3JS9cMLhbzL94MomR9D0dsj42t1OZr4bdjkrqwz3d8aK96lahPeufzKqj3LWQFECR4NVde-qgpNF8hnRoG_GBI61M-7y1nQNkPRpOLkxTnJ8vrb_YLNdXJ7qtMOUbfq_yhJeFvy8CfABuGHz3K_ykKsgKvRNgwNqspoMxAJfbVVdn9oPo8RNxpB","summary":[]}} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":24,"output_index":6,"item":{"id":"rs_00e767404995b9950068e648022d288191a6acb6cff99dafba","type":"reasoning","encrypted_content":"gAAAAABo5kgDMdyWJm_4EBrnLS26HcJ_EvaKlVyP7dihwR4bkE1kXTcQkdj8PYFl5tw9am6ZKst4kTo_eR_ho9j0wrT8lpwlPyOvuF978cGUykoGJXGMlvBIEt7oBgrQGD5cchOr7uNqTfVo5YLY0ywfmTanxgyR3CD6xZiSrSRuG6e4xGfa_5J2FK6EOqQOvXoXI-KRMwXTlrqh3nMUXnUHCAZAQnplTHHg61Muu-tBIbG8dOXvvIxRJEomIJJdNg_g5pRhCvzqfzA7MR5_oBvQtkn_l3U_mPzvjfIITIJT4iCsnxJbYNE-XP8mokgKq6zZNfL4wi_az44_BshnwJA6fGrAmmMSEMgkFli-XYvTEleOqiHBttqB0ESRoztYo0Pxc_rn5AtsvvPtQUYYapaV4qlL3O6_Q95TsXBlnsmSwjTMSIEmSb-X0BMzMtoYV1srbiQiBq_LXwpI1owWH8rJsw4x9C60dWk7xY19_6bdcYE4QuRPnPmyO91pl33iw5hBOGckfKsMjERAmbsjcHSczOO4xa_30EqrDyx6py50KJQMFzFEdZQXdfM7lRIgbUW4ixWw8YP54vfVgSBNz9HsfRfn8V3jvQ0saIMW-M54aISG6hWs45EbtRv041W_J3SlYcDtOcjsusR5c0vd0Nl4VIECHLQc1ULnqkJZSk334B7oUppZNX7hL_Y6Q3m5mbqgbGsNRo40-W4I0lGTDBnSti4-BBEgXCcjZDQBpguyZR7F-sVUbli8kublzIkxzIYxXLpTFKGMNhBtsfyHEKXZB-I_bCy_rxlszOW3NtodoIdavaVbmbpCwXZAzOE4TJPPlQ82F7rEAX-sCtDzzqlvbs6ZW9AS_3r92Y_kx_apxgcl-uQ16pEXPlwy6IQHFhKQ2m0qj-yDc2C25DyJ8oVBK2vGje0Jn2ppsR1m1knfBP4UTogTMCp5MvWhvgsZgJsRRoL_WxQP9HoYajepLrRXft6yVfQKTh1ksaYIWxZeVfatBoFrUEQ7xVcbPVIduEON8tzPqzRTUaYY6z9HHpYennJmk4Iz3psJRHqa68O4WJN3P91FaUgwx600-LwcOimodB_BUsMBKAMkKMpDroLPoLwzAu3ubU_dUqmn2Dj7HEAjjO2WdN0yAn32AgZP1nqUjxed0Yght2LJWdiiPHAKr5DAd6uq0QTUqSm9dOJI3n-HgXMVNDasRYj-DbwsBN14qUm_zzJg8X28U5vXJZ398OhxRcS5x2uZ51xfPL3wwHy3uGpqUR9ZrMr8nEeC-FDYX--QEXVvCCzJsBdGDbW6ufLmMfbcsv9eAXQJEdCk1xvorwQXONydra0Q66ZC38d1rEdfYEa3FvuqK3o8ug9Jkt04RyMqYFjziIC2D2Jf55jSJhM8fGo=","summary":[]}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":25,"output_index":7,"item":{"id":"ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2","type":"web_search_call","status":"in_progress"}} + + event: response.web_search_call.in_progress + data: {"type":"response.web_search_call.in_progress","sequence_number":26,"output_index":7,"item_id":"ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2"} + + event: response.web_search_call.searching + data: 
{"type":"response.web_search_call.searching","sequence_number":27,"output_index":7,"item_id":"ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2"} + + event: response.web_search_call.completed + data: {"type":"response.web_search_call.completed","sequence_number":28,"output_index":7,"item_id":"ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2"} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":29,"output_index":7,"item":{"id":"ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2","type":"web_search_call","status":"completed","action":{"type":"search","query":"site:github.com open-telemetry/opentelemetry-python-contrib OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY"}}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":30,"output_index":8,"item":{"id":"rs_00e767404995b9950068e648060b088191974c790f06b8ea8e","type":"reasoning","encrypted_content":"gAAAAABo5kgGPV-gvfGXSZGSBsZ1tPi2vexid6rlc1tiCGxsX_wkazKNg6x1Amgex25VFLw9V3MALILyIQY4Eiqlzr0IYWacB4uVN-u-56RdJqRLxRXh4cUkB5aKIYpFoVf1CHt7ONNkClsQy6cp9pDKrqKvNYy6zG86wWoMTxyk09HRTfrvzGZq4ibeQ9bqknDm4zGH50QyqG1SW8rhIiNFATbSSakM-_4RGOGMlUvLye0dxKFRcIVUJnR_cuxmwhjAzWgU4x2AJuHX13MDVtNe_Bv73xhyLxXeYHvVb_atvto5PBkde3gXMMJT0PHv_p1U3wkCgqpZnfFxKsInEzgmgpsPpThyLKffsfYH_gjs1jx7f2Q_Lni0MFBkA7dk2l9l2HFJSsWT4rzlypw8PqUmc6msnZTF9YC2QQhYT8CUElKnOZ-D9ILd3z9mfxzgVP-4f3zGF-nEBfwVXssXtQCx3ynAGKMh0LH5XxACuD2E-h7xgP6fUmBCEtdhHjSaJ9rriiJ0fgkBgbxJkP4tg31ue1TOIWjFfEajZeHmrpqpYetLENGrqEz68TTnEjl09NyQT_jtljWUisra-h8w6E3WXSVUhet9CT-KQq-cKvyIq4w0IvR25DxACLZ-HGVnfQelfA4p45dQl5AJepsT_yRnEIuw_S86ddjnxpOsDXTvnLNcKUq4tdy9pFdLKgfxjUAUymt1Dow2PUl0C6zC9h0rVd5C-3gQr7DbKOQ92fxx6yUvYe5k7Is_hIo0AdSKqzdjLDzc7Hr7pLuy_bPk_BEGbstjPNZgpy5Jo-l5EoG9ASMx5Ykk_mag3rvc6ULU0VjPGPp6rw_V","summary":[]}} + + event: response.output_item.done + data: 
{"type":"response.output_item.done","sequence_number":31,"output_index":8,"item":{"id":"rs_00e767404995b9950068e648060b088191974c790f06b8ea8e","type":"reasoning","encrypted_content":"gAAAAABo5kgKZNRzXIfjM9mJNtrOBB9QgXmxr9lqDw4vJJnfnrxtWXUO-0XAPYIAWwBgPImyrwdNXlHHny-QNhswuEa8gbgpPdhkhREfd-OY9s9gOe05wD2oXgLO_2S9kPLA0wnB0HriVNEYXhsxk93jMwj2XmtS6da0O8lO0KvALs3HT7EtVTNTShKqlumzH2HbA3tQFiln7yxVk8C_FIfDhWpzkLeqDGvIXy2GUyUHhQ3-Qmid9KMkj2jwI35TawAMvxckbMWcroYTRXsuVgvULRoGoU7npo_YFTvjQYIHKKKnMKzM0wBef8hPLgsYOKBVwvFvKyFph9fUV6TGDqzX9daq5Jzl3PgMVKm2o43HjN9NQu8nKf2Dg_VSIbfCmxq1Pde95MV4IuF7UEFCAREF4Nc__K5P4HRvroScjXiWur4FJulj6ngOMDnZDVcP1MchELP3KWe6L4Rz2k6itK_xDugXHnq8Ev3WuZfmibDePxi9KpLnjsvQ9sR2yRstKABto_fhbTuTBuH-CjiFV2nP-HuHlIymzmK_OhwX7v6BHvEfTdiCaDQQUD4a7uo9fwCdHK7mK7CFQynRU42fJzW35phx3QJv6XR4BzxzRUavJy7ZqoptixLGI0A9_8QLvh1BluN0ysh17Q5x49Dr5Nsme9kp2gablAXNbz5PWhrHypVba-GAS9K1AFiOIOLYDrSL5o1-QeaJPT0HK8Bvog0fMBuVP3-4UNaxDHKrTLD1eyTvhZpHCctA93zPlaOh1VOsY9k8C6wAFF2BfvUsdRraQZqN3Ec6QxU3WZAGuxqoWlqgzOzxT7OzzGTRfaa25797koiBwy8J7ov2A0Uu2-GsvJKWwv3Ncc9trFH1QaigqB2lHRi-zWAUAz2Zc5fKB0BXP5NoolEHTk9__VEqOBtXMzEbHVzOtVujoX6Xz2JR3AFs_y27UTYbFxpWBEw9zUHw2K9L5pNpLDJxw5Pe6k6scSEajcRTEPXrg7ztGJlhC4SkTuJT989O6MgZDp0U74FnWs0Q89PCgAIKJWTlzXUpqr7WKva11p_MHlLUJBa_zruktgENfp4h0w7WRNdENl-sUJCqZTfdzdcBH7Nvwsg-qq85gLw73JA5-5p7pYkuD-bfk1rOECiq0pufDxDr55hoZC_QeGuGpjgoJpYeYbkPNQR3lKa8GJBL6liu8pmDNbInBkLPU0OIfhvo56shLSdifP2Qc0UnaNARgXGtVIwmgRHWy_IqJKLFB2YUcawSAv6R19IzlLh6tkJVWOAe4gRckuU8W7eBNeY3fPs6ffRE-A7Bx-RYWtjM7L5OFYuB3y2vPVSGUvAcRxmTfLqOwtZao7ZPcldM7ftkZHMiw4wdFGgqGxUTSRzYS7ESMEGLM3sAp61BLB_X_8qMDceC3Kpo2rHiem_TILbOrW_vDnuvfAxB1oWBrr3IsbHb_BDe2fOEnXT5KlJ6XWc7VhCHKi16RJH_-tpN_YHlncSpjjtkPvu15oVB9gBXJ7kvLATMygy4L3_B-PkjNgVkf1-QmOgJ7GPtxk69jAklSKHvVwPN0s55u5RuvKroyZyoNdIhiSY8EvMjZT9MvmcBgUJNGS7ZbKegVeMR-kcsRxSmJS1_png1lv4bX8x7OVONm7kSC_YBFufov8ul45veYCi6ATuGsbPrC_QKp5mWJfYjnopTwKxs33GvZ_q_AXnmRC3zl2dqcu0cwbJVYniVS1qNIi7G9lxUi9Zzlss5bt3jAvVXbAO2GH1Ij0IrNbGB7SlmJLbnochFU7HweHn9aNWTD5khgwfqwZZNli13SS7r1EmCyi8Xt0KB5MAGOz86o_xgeBkIyaxv4ZSb6wY6g2oVu2XPo1229iGh5-_qHhFvpkDXfzIyrAAbs8ZD5ScXlUUgBzK_JOJImSEP4VSlQdgMBl8KLQgQpZtsPhtdCCT8nmhFRx0IYZxs2yLJMukWyY4iUQ00UsLXtVUyufQ08xjcdrVqevdXvJpVA3GIdcktxb9U5wYdMGOPMUFmPejqLZvC8dQwbWOYdGATfGuPdgv9LS3OTpUXkBnfYtTA3P8zk3RxWFCgDCjekxVgHPJ11BCMZFxw2nHjdqFjyAcISvwf9ix2UXCb9DSdmvbjqUTaJBCQD83JZn0lowtdNwTEWG1nUln8Qe9DIn7Ly8Vj6l0Fq-jkllBa7aWUTBS1H929Yx8gpYRApjBhPCvwVX8o6Lp-7qeb9PN6vmgUDfAG-8T825gkMVwyQVhnQvUlAL3D3Mh4YMdPHS5W83YyhawYvYWRgxcVis2oEMeSvr93CpbvmnbtYPRiousGdxu5Lc0KTNop4y0T5hwHGMpgrP4u9H_JaSBUaf9CO0dO3LyZ_x-rdOM8K70m2vmYDxbEHS7I5EBCN83PJnB8_HTV2KgmUsJcE-5OdQk_AEXikVd6ujMum2gA74MbOJCI89NmC6C5Hv9OS8lzDaaJi3KnhCvqePBweQLQrWTT0mc=","summary":[]}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":32,"output_index":9,"item":{"id":"ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911","type":"web_search_call","status":"in_progress"}} + + event: response.web_search_call.in_progress + data: {"type":"response.web_search_call.in_progress","sequence_number":33,"output_index":9,"item_id":"ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911"} + + event: response.web_search_call.searching + data: {"type":"response.web_search_call.searching","sequence_number":34,"output_index":9,"item_id":"ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911"} + + event: response.web_search_call.completed + data: {"type":"response.web_search_call.completed","sequence_number":35,"output_index":9,"item_id":"ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911"} + + event: 
response.output_item.done + data: {"type":"response.output_item.done","sequence_number":36,"output_index":9,"item":{"id":"ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911","type":"web_search_call","status":"completed","action":{"type":"search"}}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":37,"output_index":10,"item":{"id":"rs_00e767404995b9950068e6480bbd348191b11aa4762de66297","type":"reasoning","encrypted_content":"gAAAAABo5kgLGc8_lD1si00enbgvnvB46KkRQyUMkOtLBq8HAyHqzCFp0CGUaBOXEidqCKAuoy4CZFGpoUNKF_5M-ovQ6z7ccWfzsOw45z_DYEybftvgIHszxR80FiQr-g58dNDTEp5DwTF5nzS4D0aun7gCTHU9_sp9hMGOq6POVJP7dJIEmoVqZ0Ymnqso-mDtyk0Bl77a5AauYVZ5z1cNmtEha-gqyGCx0LV0Dsb4SuoYK2JhM_AjhYsifQWq73PzbTcAAiERWLqljEnsJGaGBBvr3j8bV_OxnuNa6ql8dd2LKCh-hMfxqBWkLHDHIDICF2sQEvpW9vHkfI3mUnLcSmcVy0ro_KJbI2pbqwPjisSwTsauv17urDFFtx0hGJEwbCVYst2_tUYu-anwkl_f-yBnTSGqL_t5aZnGtK_cWyLI32VDZhTyuHSnjbgDX0-QcaLIFwC7u3pP0XcYOHqILPLtRAPR0HvkbwmMSMxh_o3Digx04okk5PIXfrv50x7Sn8f53vILtMffUh01NLEjtl6DySuxk2d7WptAzZbZ7rws6SggDAQjIBatDe4lDaY68rItn7Rc-gAE-_yqAU6BnNKiGLGrzLnULXIv0wRnOhy-afbAjn9_HAFUQg5Di9urbISnZYbwK0_om52NXOUCzezRGP1VR42_S3aTxHUvt4Q3Ub4tOfniGLeqxzckr-7V8Q0ZggZWe6ca61OTwbPAVShVreMaQZHVEdEdVlD1D6FYrvC1xRXde-U6c-h8AyS-FUTny25IUjB06ChaLBrkOtRW0IFkIqALDxAmwCfEKX2vDofpb9jm2PoDG99Ej2jC0hXlKXjY","summary":[]}} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":38,"output_index":10,"item":{"id":"rs_00e767404995b9950068e6480bbd348191b11aa4762de66297","type":"reasoning","encrypted_content":"gAAAAABo5kgO1HDzgclsJ0nxbd2rPdcO-gp--TPErZWuWzl9b1tNkttOes7PJp-a5m3GggvshsVGOf4Bm5sgp5_xK6z5zpwoyaQVsac6roXrG4QpXIQrXo-2zZUBwnUjP_yV51JDI2Jgjb1kgcgYz65WB0soyx58CMi67hVyOP6dRvR8wfjoBPEHq_rTf3J85Oux3zIH1rQFviHzwKx01RqPeA22bjpNNoYXkZXMDFvBK6xEMox4MzQjPTjwvymDreDTQImB_qVspiqN5EWcRLWEkTSzM4hu8BgI4tW6zaH530pBL95QqkaWF4VdjQ3c6yEBXXJMXIgmvP0avmgQh6fvMAQ9XVeAWe0tqXMmZ_bU1TCqF4wPYau7hJlLaZqxYzN1JH4RHaE9f6oDAc1zy1n1aUL5qtSiCFD7TR5ADPsdX5D3sa6j43uGGmwrUfHCKnxdZcLY5r1FSHpYDah5pKbxGLWdtG3IxT8-c0RadmtIW8j3nrDy0RaZlLjDXaD-IYxsyRyZLIgZU9IvcT5VKJxGmNpWzt5HDVvv_IUAx2FtbTkPRgmz2rRVR-jx1PoAB5Gf0A_DhTt7zlAjjue5GK2GsLqYwv_BoxmfT2132a6dIo5w9JA15W8j4Zlizm5uHL8iLx8lKLwTcHL5YksNUOVp1ELgan9KSXMj87YJabHDq4iio6w9q0EChqL9p4iP7BQvpheHgdgSW-C6H7N3jQZdSq9TTKqIeYjPnP_ZHXubE3jzhd6KWe9VCENUKPXXEuwUHZKcra7NHnmEDJrZt848sw_T3Lar0mnTuuh6-mLjI7rIuxy1VLmLdNyYuWa-hqbQ1aXiqB3E3VKkwhjMOQG8CWGt2jjnJC9OQz9LYrkn-8n-R3jpOKAwhIla4wDvdh9mUFa_1kiHeNSHZjcXnWXXCKDeHs_dZwHkkQ-FL8VO0JW4jueLPZD4EfBPkcEv68kLtI7EcG2yKZ1d2shdCiFrylbAz3bg3MpeN_B7QtusgCaCKDbI5xmwd5hCR6KiUKjaq4nsOQh2y6gfjI_jdWKM6VD1AihhFtNvHDh6wrsv3q5PD5zaoLvMm9maxSLWwJpMr9dfEQ1X43Vs_Lc_PviJIdIHOuo2om-fwRy1FSknvsDSuktqC5g1oNTmcj1F9Vd2uTG48R2-c3USzK5UvJE3qsuIrzzTiM-mTArvtA0R65wni3SzkuppSvR_U7doHWnfHRl5RTzJqZGzZ478r7gWobbVGMDtNM-D1hU09AIq2dqt-IEteNXzSqluyAvdEQwgTgJrznyN-bGA5lbdUlpG70-t0aBiWplVzLVN6GmFuCO4nlsHvz_PVdx5GiahupscwERmH8rJVGP_UAfjnrcEpMt-oL-D50tk-c8hKR92F022b2QSJswabCLzX3kkduQfhWYuWAj_5FTW_8F-Y5SsawarczeT2ajbctFQyCEffxD2jEJEUOZY2wfp1XCcSgkycp6xo29Vnf-1Sk0rpzC6pQE_i_4jX0yzViQcu-YjTzwcp_rMyhEDzYPOVAlDDwdfQscRr9XoZSZ-CNhHYA7C9dn62JHP5tWxVtLxFuLqK0WNaL7l58qHJlAsb_OoWMzgeH40H9CkJuJqhE2SC3HEJBAZyvElvNCneC88YVTNqC-i0GAGyAN9KhAUkt9WDxtel3Q_2W50FEe1FIqMjrmfWwpG9oA6dMk6nrAmN1HhkOSpmEtTVcp3FcaBGQOkm7L51yG1A8FH","summary":[]}} + + event: response.output_item.added + data: 
{"type":"response.output_item.added","sequence_number":39,"output_index":11,"item":{"id":"ws_00e767404995b9950068e6480e11208191834104e1aaab1148","type":"web_search_call","status":"in_progress"}} + + event: response.web_search_call.in_progress + data: {"type":"response.web_search_call.in_progress","sequence_number":40,"output_index":11,"item_id":"ws_00e767404995b9950068e6480e11208191834104e1aaab1148"} + + event: response.web_search_call.searching + data: {"type":"response.web_search_call.searching","sequence_number":41,"output_index":11,"item_id":"ws_00e767404995b9950068e6480e11208191834104e1aaab1148"} + + event: response.web_search_call.completed + data: {"type":"response.web_search_call.completed","sequence_number":42,"output_index":11,"item_id":"ws_00e767404995b9950068e6480e11208191834104e1aaab1148"} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":43,"output_index":11,"item":{"id":"ws_00e767404995b9950068e6480e11208191834104e1aaab1148","type":"web_search_call","status":"completed","action":{"type":"search"}}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":44,"output_index":12,"item":{"id":"rs_00e767404995b9950068e6480f16f08191beaad2936e3d3195","type":"reasoning","encrypted_content":"gAAAAABo5kgPlunNex6BuTxOUKCV2iqUXdTuaWPf158hICWGET7fM_K5gb29yY45CFmzFgPCtLcyxJeBn7GlAVj9Y_gBSY8iBgfLYkMotCGgNkuVesA7szthHh1rEgQbVEW3kOG-FNNtSNYSlwfhLVbnQtAeZL6JlE6okA2WuNUkTVC-sEDCu7tRvCvUCbd71bi4JonwQ7s2LXBi9gzJrzmuHcIbxq-owQ6ahDW8SA3hK-HSbC2474-ACC4xL_Y5rtwE5qzlSqsVGQ5vCo_A-fA3plz9H5qO_lTu9jll2df6k0iKOp2LY03kFv7vEAGNycoLU0alyHILclGfElMgW0M4a55PE5LsXglRtjyWXu2Vb-Azu2vYASuVXW90MpO-LL3n-UcZT_EPnOQoPiVLbvYaxQpu_7E6RMG9_Rz8WaOa0FRvh4R4X5_2RRGHfH8qndOJx_NKzrEeuYichJ-ipneBw-1PqIi1wp7tK2wzcCW6LQ51g_Olvz7arxcyqug4K0zuQOPJ7Q5msBWTv1lfz0No-D7LWa7ErkSZib8bshxQQrWuqV9ogHZyUXEghY7JKJ5moY-hsGYx-XpOg2KGIgwvl6xK337c9o9fLkymApc7LXArZNIWdVifGYHAZ2akMew3A20Dbtm-0I-67F1vmrfd2mSVzG49ROvf7xRDQA8bUIS6FQe9ekpp6GOJxnZrV9UOaP37H6YlMoO2rg8deFiDRK317kSSlJYiwVuSu9NGWT0qD4hsLGTdIRPQwUDShfjbxL9PVa5bklEXuFzZao5Iv0LJkSgj3jVkKCsDQ8LLoyJA7PiJ8uGCrNC1oIXuNs0RKyYKhky9","summary":[]}} + + event: response.output_item.done + data: 
{"type":"response.output_item.done","sequence_number":45,"output_index":12,"item":{"id":"rs_00e767404995b9950068e6480f16f08191beaad2936e3d3195","type":"reasoning","encrypted_content":"gAAAAABo5kgRqGx3zwhXFb4elaopbSnsU7HznDhJ6BQvZc6F45lekqR0kJF7nDtrPpt9JH9ZwBBl5xHUN2S4dlWBgWiVvQZYUL_3Ytc3AIB7kyDjQLaT9s6EFFsgIE93aeVHE1rZsX3mZm-VUSWLCNz8PxujflN_143h9JLO7AzxB5D0tkd_1bAP9lVEKuAVxdwkGTmGn7oidTHfoAqFgdFHkpJf77ka4FQSrTHVr_CGptlT7N_tJGdUDaTiNgeBak3aadpFxT5dNS21g_DoHzi46ZcguyCt6pgea_fFNwv1QRkE8Jx7qthNfCzMP6g79RAR008SwHrVq6MjQQvTNH8XI6giX75pnyt47fR4oAn3nQJsp60KlCld5vy9V5lQpvrVn4P1BBrXQX0eXqmVIOp2VQYN6dtVjHApFzJBZRb2tqzCiShK939eDoypVtr6oI84oTLlGBtOyeXSsk-kYY_wUwpqPbmCcvj0OfvzXXFzL0knmP5kzKN8KGE1Ko2sOBkRyRJ9dLUEsVQdyhN1QDrtgSNHo7QAKOD2FkcjMwnIXaNya1Pn_yhCDVl-f_jgdPlWvHf4N6nADs-6YM8kewF4VJNIEyVNrOEUkLm9_uIuJngoY9pxvgxEC1zAi2210NPhLbB8rivvOpItlW2KWk436zezajGyS6AdwDNpW88L3QBNNXr2cx_g9KFAmCmCQ97jXDRu9UYyiYR66YblYrxZ3dfc6gy4FCH4yZP-89Kt-o-g1wM3DVKNAF1RhIwHPFP5yv09Zvu3zJYYFvI6k1mUeDBJM_ipR59ja5zLuJZpVRRCPy3GQ5z4ZbjkDjftmN3-A99Bh_6Uhx8MOMspGhdYvK6x__YGp8_UjpqmIMEmqfxsbrNaBaoisMflxzejTo4tlFGQlw9JFC2QHaYN6OG0-ibNF4VR8JSmDlQ8bqUJzXKQvyPKhrcQUqCRbq9N9TWnB7YZcyC18FBdwVrYNTaWMy8AVQUzFEpvErHRFmANwoQcunIYZFVEkBOnO-nf3Qkb6VD2SpnKRf-NGWTai4H3pdbw4-ZlECKi39BWT3w2Dtrp6erWeNyuYcLPasZ8eQoc-2sbn6ahRglb9ElefIrcdw3IIqEF1sE8qsMvoVlRl6trn_kIFZ3e63dSpgmvjWpxYLJwIhtyoOCR6ddGlr7Vz3sEoiVbmp_I7T046EdyIUjBUXfut29WZ5DWpTlaI-q2YsxwLJI5Z6jEAMOue-oJ0LlG_mfGvysspU8LUL6Ls4GOvR5kuk6eyxJ4axuaXICjCgSUJvEvJCz17gMLHKYYeErlYMUgz-GD1yO1pJvsK6k1NX3ggZGWR5Ra2RkoK1h70KidhBAsXiXEFxFNAMi8E0aB5WCydEVlNl79m86CDB7YTE9LyIxrMc9ZyYiSHzLJaLJHDQ10X0KLRxvjM2Upz68u7aJRtRU7JX135cGL5K1MqRFZMA821b-p1mTuez483R3-Q7fAPH2p59s-BUEEIeHJUfc0ZDg7mAEYhNmAtfWbfG5KB6IiCFMRWCm5jeCAUm1KdokUfyEv2I1Qw-JDmmHLdeUCk43If7wgcN33sLiNuS1TMJ3BBBC4qhHrkFYa3IU6ketzYqrqb9SJcWws5xDYxV2oCV5krYoNGg40BdtMfwUAmCI=","summary":[]}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":46,"output_index":13,"item":{"id":"ws_00e767404995b9950068e648118bf88191aa7f804637c45b32","type":"web_search_call","status":"in_progress"}} + + event: response.web_search_call.in_progress + data: {"type":"response.web_search_call.in_progress","sequence_number":47,"output_index":13,"item_id":"ws_00e767404995b9950068e648118bf88191aa7f804637c45b32"} + + event: response.web_search_call.searching + data: {"type":"response.web_search_call.searching","sequence_number":48,"output_index":13,"item_id":"ws_00e767404995b9950068e648118bf88191aa7f804637c45b32"} + + event: response.web_search_call.completed + data: {"type":"response.web_search_call.completed","sequence_number":49,"output_index":13,"item_id":"ws_00e767404995b9950068e648118bf88191aa7f804637c45b32"} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":50,"output_index":13,"item":{"id":"ws_00e767404995b9950068e648118bf88191aa7f804637c45b32","type":"web_search_call","status":"completed","action":{"type":"search","query":"OTEL_PYTHON_LOG_CORRELATION environment variable"}}} + + event: response.output_item.added + data: 
{"type":"response.output_item.added","sequence_number":51,"output_index":14,"item":{"id":"rs_00e767404995b9950068e648130f0481918dc71103fbd6a486","type":"reasoning","encrypted_content":"gAAAAABo5kgTHcu5OJg4QP9h3bQ89VdC1knJ3GGoDCHDtyhVM20pT9s_x2EFIuYBAU3GH2stuBx8KUQO04_n_IjUDz0cWHWM_K-7A8o2neYCppxILORhbj3Zhuo3mudW9QWHSZcPA3ATtcuh4oyo8-Mp8hcSSx-mVM-FEwvCse9WYndrHYccDriMZ7CPchx-p1Xc0McS0eB4cmdWlmZQqLzp1bhbaRw7F2VYyc9Wx6Ymsh3UpgbuK0YUlcIb1teWYL2MPw759NvTiwOJHhOroQ7w_Hvip_O7bHbiPca0FZlacVFqglkIr4ttJAXs5swVUxaPWljLaD3r8eKdm7ixbdfH8mEe5HRZOe8t-H8hlpx8PagZDXVxA_E_TR0mdpfLgjAxAdIwSkduquGeFQaWMtVhufYUfFPfHGWmCu9oUOYlUxk00PJbgRUufa9xhUwNBBJvHD2pGXB3p7OMZ4xDHvmYGpI39vczQRM57LiXOrEVo5NHHNh4vV2AFAeBvq5XjlhjH9rstuZVeK_QNme2qsk8XLftvvhyA_SyW0hKyRfAMIwMVG_B2qpS7zibtNEDVXXIKfsla1SQ0bsyZoqPtMw2ZH6qciO5BABdk9BMckrVLtrLVNTSX8IwkAT72OCfCZ1AvfjjSnqA_TKeKm6WSnhuuyo6ZhacNxgqw1ADE7jEOlUKEH-M8twJJpkIzRqpyRhIfJb5H16kmgjFSdPH_8kKU3tZaYKfrTGkhgYLNHw9g8LWYs-xOVv6qgqAP66xtJJcdiRyi9nwMgOsWorcYxU6g8PCYvc2721q6oGNhVu9C-hHMejlUgC30SYdPoozAcjgoRHbLcNN","summary":[]}} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":52,"output_index":14,"item":{"id":"rs_00e767404995b9950068e648130f0481918dc71103fbd6a486","type":"reasoning","encrypted_content":"gAAAAABo5kgvRqkGOKegaH9grUqGCPO9vfDWr1XwkE-Wtzt6L0t1rany58X7-oMFUxwaIN89pOPaz2k5N6xvW7MZ7NcqhyTtYC03uG2IUnxEN981cD4nfmZq8YJejJK20r0rEEGNWlEzw8g-CAWaeLaEw8Cx3VeleP1vWY3gBurvGnzuhgYn4iZqHB9ShYSI9-nkJw0JNcnI7JFpprPVZlluDcKkTLcfXYfJBgwCTVDVmkqTqNeIxQD7VE6rtzXpgGKiLDoOTEpX-4NzKlFYLw2XN47LbOqlFfW--nNHtfbWh_W0b3cM-sL_PXVSr1jccFaNMR4V7fioRjcdG155tknwzrcWIWkUKHWy_u7kaCQPbG_RKcRVQ0eTziQOdphQESXlPsVrsSnvUYOhsXOEJsbJueHZEEtZZ9wXRk68fEGw0llPcvSEPBj4ThgOrtXKlmOenvvF-WQJbodsIwbXSt3pj7H6yu83IW0sHzmWeESM0f-6SXwZmLevctlxDHAIQnd3KOqRq6uGaYv4bcFIpXH_Rmflacl3R4hlfxUcMdETlywZaSyQhomLvsqxdS4D6BS6uofysgP0YUJO9fnf-18hbyu8OGq5DeeOmdQ8HgULGeyuRCeFeere7jY6blGWtKOzDAhRaaDQbzGZwF7FMbo6EouIYdyI6gyFctN7YlwE71vthJFdlSzHXVQpibNj5u17XXeHbIeY0SI2xciz3javvMxnsgmpTo0PSkcZMYr5OvcA3IqRZl3Q9XfTdySTeFr--kt5c0BLzrdrVamls2fp31jNKO4yVZczUsm3lEQPB-dy8r1KbaGKiEhc45ZoTF6dMoh4eSkpJpwpOb-Y-VOh4MjVAjwFWL4gHgTaTQYD8trLuBBRvf8uCdbF_CS-fA-sDRe843HKHVZM9l-J4rTOGGZ1dTvqOTGv7KV5XJNoGmdJ4rX6m6TAvtPJzQNFQDQcaCuzfYcakxgIPFaqdEoH_NFIKXGpnZLycxtpNm97Ol1qkzoW_K3VNTeEMiWEgy7jC7ATUtyzpZw3o46KsOog-veyfCV0RjZXiMnZLbWo7Syjfxxed7wn3u2jMCRt_8LsOzG8CuwuaMjCTxooxcd7KNBhYxX2LJhKl-F_cFZhPg5U6ciXORcYN9Eusw3P4KDD9A-s534N-xDHSb-cJmg9dmo50IAh2yPCxTMJMK8qIw73o57tHrSab5KQUk-jE3C1ZKRLNig86S49lXwOtZQmHjVVFCKj9V9o7Hx5nPYEb-eT9YwaZBtOB8t5JOvRThlCrX5ygFix48JTF34JJAcwwxDcgB7k4Xg5aeODslV4sggehZreJ7Etaj2m7ymGARjPvnd0-xpy4TE-heP_CX4hnJwL2JinX3FHJwEpDjahNkO7D3x54UmttS9RBCbPTKL7txwpshCm0yhZ3tBZWTN9OFm1HEJbjt5DSHCBKNjcWKWQOHQbfSis8pHjz2ipw_cp_c6qm-n2GsGvMkad56-2BYVZnkN49t5j_yu7nDV0pOifl6LrclQIxB1mRnpvv2bWyeshzgd_VSxIL9x0oLnZ61XQk9ao3FnTp9cUDZqBb67GiJ65agzMd_7vTDAcLFfhBtZX-_3kJ4BLldTxEuilmCtDCHa1PrLxqYKUH3jqSSNY0ya3cdRDNScv8ew4NzEBbYmzWh13IJdwqlgIceleHxKa70zZsudLmXrcoNxBJnQwSk35jxPuYGV-jD9J4pLR3kg9nvwtiTSEYQ8rQBLGiw6c5u9V4-3vhI91n8K1DFA-jQeusftJWUsdQamfONlnnGbDoMA_Da-9nHpNloTFJ6Effeb6RuIYAP-jP9EgXx-6oT8MNoahO7Y20VXf_laHYdNBDeZuiK2EFFyrFsgLEyLyWKSZy_dpsL0qgO1H2LIQ7jSYlfs0s4JC-mhkcfxRrLVsmmVWD1JWdfPXA4rrx1uq3rtNUeZSQQDn9xhgRsDXIbk2Dn6o7tB9jIk_dz3jSwcw76CRSIOelpyc2gH0TffGZ8ieXHhDNcJcjcYpd44J07eQylwC1LxpKtMHV_vlBEgB0hB_gfoVpYXFtx1vikqq_zS7LOegLrl01nypu37YNMPy7UaGxtb0nl64wZpb1B98DKP3VTJT-O3C8UoWqkAfY5a8Bjs8jdO1V-443_GRjOAQQE-qEdrTAM1bon767biPje56J01n0HgYAOPra1RFvj6FWhR81aAaikjfCGdEl7HIfmEdBiAf80kDlgbluiG4daPGs9M6pHU8JVIWqEUQc09H2QFjYMB6
Rc4zbFsQezt1x5KvGInI8f5xYsKEHMnoPbp0EfR_6NOZZfRYoEByY_Us7qUN1rZLvf9OaUxbFM19KkRog3g8G_DtC859D4Q60WGmpdzptRmNClzk2r5yOgpUiESfuLyJdya4l-qEhLJK2kAOpSxCc_TI61oeO3I7NBvJZLg2FxOpoACHjMYK7XVKKZkQzo5Z_tY6RjIuMg41ihDvoCukiGxEmpEFL0IGsDXInuviVgwvXeYIYh1rrVSdQYhq8jxTleItj-fia0AXGKNsT_8eW50TS-teK66c_yiSf4ghJU86WrU627LmvgjeNM-JzZd-HVkcMSf2Sew65zhDgzUVte8ObpPHo1W_cXO_uQmcKWeQ8lJ7rD9Jt8G5H2VIA1-HCiZfkJN_3HH71cHcH3zuOh1zjdS9Sp1TUvBEoiIoyw9iZ6J6J8bBYham8TWuilgAmOsqZ7M14eiqr0-Wff3sCAEnpJ7BatjZ0CaQr3bUaRR7zxkRZi2052ivjbFNy_dM2RqFpOl5EdopBjVs6V4eETNd1-4jeWoAT8eMdo28O95MXyXnEwHPTYTKafjtvw6HyBGRLUb6tVNU04CWtswWjNYgrz30Ein3ym8GbgNWa6t9U-T6Utiv3KUwhA4lQSzm91XG7WEEhjzPeXL5MfOOkmbrVLecWrNQzaUgPaBZ04HwIXYBOF0wLuJEC2WG6RA2jxa2G5RS_EwX3Pph5_nAWDpHCv-sk81D-k9xYXdobZlD5dCK-4TbXu0IksOmlYkWTqgAj_GMat1PMLtgNsxACR3e1SgUQnOthvz1UZ7z4q46yMM6yJr-NN4Kxud-3jV1wF2GjIaQBveHIuC7i_we4ObNlt9FL68wC6zPgz6N_Ma4f-Rhu-zQpmIoJxoq8h6i7K0AjEIV5KB4LfSeWsIPh-wJtZihoU8H8MDLq2IRFDYskCj2tmQu56ciHphYWxyS3J2PaNJPJwdPYN8g-UAH_DGQjsXjOL57WW66fabkaMRTWaUh8_S8dj_PY_c51hxFYX5pOKLoZPOshEBFwIGfIAINUK41o39ZKY_tzcqfytYtZ8lUjCVXtNHKDWbluKbEiRWoWdp0IFkViRhvE5J2ZUfc3qNUSi5-opQmFf7FUSqSl33dIcc5nOenxX2cv6bKBYl5fZ8olRGr_YFbb9hiq4bN7QQ7BRlhGEfU-Jn5jkY6ousus5n8ZMDiTo121SLtM7FYHAwlDWFqaTPu0WLg60cAFqXru8eRB2n1HA-2WwoTkdz0SPnCjMoAvqZGW2Y1Fxr9tohOZJpOm-barcwKtFvWC1tOkgf2GnxReTIjYZq6cmVdhz55Wvgy5vduwWoFLGMb2EdeHl2C-y6uJI9t57junYhsxVx84mbHCJttZRB7nq1_ZLrnNOUh8Ot97Z5yMVK2Vfn-88HcT86Uo9O2hFgO0DilmqEIf89EXh-a44NGZvXLToRwZDc-JkmeRB5E9fCjF2uuOv9mPyrFtM8LhMteUunUoETNWE4Fq82svMKq9Mg-k-cNuAbzk34hClf4-c_yyHZw0LKuySRM8jYK9k5FPfDaLPTuDfJ96JeJ5GTb35cF83Ee9r0OVe1TvODMk6t1D6kAHmRlLK3MJkZktHq0ETGlKtNmfHIS7vyn8Ok7PafSWyd6c4Dz50vfNH5INXYm6sruqXlRLTyjL9eg6tKxguFBdk4wsKgVBIgzJ5p1Xs3iDNlwdKekxzPzpHp7blZLUfDtsLSFY0Krf0L1pmzWGSGpEzWFupadiYHG2O7MBe02wibtx01Wi-KyHbeiwO64VuHJplTZ_z0KYSzkrqmWlzy9hXxSu3lBcwbFPLgO0-dlhmJbXHORqBj_dBEbL2N-k9KJU6qqHSM7t1Jj4KaH-Y-8wizK5b5t0ZzU3XEtMY_EAwsdnpqJs4pXBEliZ56KuoemB-lAnd853rwIqcQ4A1Flw88uSMvOHTSTZ0ZirD0pCLyzwW13lBUDpgp7zwZHwDR7DNPUtPpHKDWeK6YkLO5EjsPSLfwhcpysCDdHfrMF1FrcwT_WE6Z7ui5o0LEi-335c95Q8qwlGof9P7UxOna7DjTunXijF-aJNPqmhKE4fY92Auvy4y2wvmAVNuLKdPdS9fVnhMZh19Air0ri8WiXyWvqZOPL1MYL3PElKC5uk_INyK6L3S7aMCmws1zFA6PSb2FvogcDSONeNM03Gd68nJlf4daNQXErKkyt5w5ULSHamEqcT3PZDa5en_X_R4lgL_pUFmy6K70Xxw4LiUvH0gO1-v1jP5IH6aNc6N1cXQs6tt1ajViXcD7aCcUovQ2Ejmvq9EUmgVLWkMaXJB06c7ErXME7-9wqas_bj48FBV9mGwDLxif1lsxoe7pqT8xZ9QwGiKoRVb4t7nJUqLWcx3oGDgEetUMyI0VctLixoDdvuO4CIZliRm6ilMomyfNwlHdBMoU7-AhZThBjp8eA9vMz7LWIl-TFuueueodO9FZEdGOs6uBZffBYU39oImI0di6oeT5upaPkVCBEBLmwHmDEsfiaKR8IPuVjNSo-gX8JKu-GHmECGYb_3ffN4C3IG5w3RACsKzQfL2L_tXBF5a1T0NtDtrjeGI-kYFhwd4mM1HQFNKVYx_475tInEGmwbdtzYKi4OrdOnNUG8QyxnRl5tBXWwOqHvorIJvwzQHgm9jnaNMekwUhb_8fiUhm16G1qhBLQZlz7eofhB9WcfuC4dxrdvMhfGEsXH9b_gCfj8vazaJyfmm6PGnyJIK6b82dXtE6JIJfwmX-m1J7AGCaW0So98-XuOUAvDfxDtiOap7JxiIRbrmrY9rLDAMAkE3BmXY9tX-_tMQm8c2pr3ioc94mWntYbIT0QIiGpentwiHJHComvQkb9Ss3DtOuz5vtP6ImJXXOMzb7FVEg6Qc38qfpSw4XtYfrjI5EaCmPl9P0exeeHN_RrI9-VfuM8KdMrjlHd1LbgwkOOKB-O8YpcuRgUTySEDngxQYYA62jmu03Dfc6eyA4WwptQQZZt5RPiXRD-gxOdhjcNGf6WLMYTmI_pBeo56qSbhbt_tllPYbvP1NzZcKb-ssdYZX9m0eEZTqxWeHT4iay6kq3QIo7WUD6PYXin5jRpGfB6FstSoJcUhcJ5B0ld6A3ij6DsdzOy799EVtxrANZ0VEEg86O1x2ft8wl481wDAcDBObPrTS_i7peacVDWiWxYIRWe3NGXCmq6ZU9lBB1iQeOZ16WRVU1g50_AHxiuwPU_x6DmWXTofqr5VNcpMeIMe7JUxE4OF97r9J7fVomM3JoSUirPXc9Fqk0vqj2aZnqOLyq7AAT-4yqb72QpokQhSixAfWUxg167vijcSHoijL_OOxg8LTBrBnmLKTKwsbKtlSJzY9Pj4iws4tQRpH6LkNNkB0EVZBQNuqEzOy1GAeVfC3fttdV6oQLpq7ZGwt777Fw6HORX8dhqpOsaHyQz-VeWxtnZEfWLfWyIy1gouiDmTLdoNKmON8xy_dAWuEerNf0_Hqm8zbqlBGq2xiictUZZTrPtiX-u5y_nRldhSSIz5lDlDaN
cSCNnlElMxjQ-pbFKQg_Zr2jGJeuAY6AuIvW9M7sDROlh8loCGzO9klwYfBjdldE0HsQhyJ8h2sg7qRaxcgGSFdnIVaSnj5tLyVtOiskFQOGj1U7tUD5jE_6nro0Np90_N2X4OquME6lqwofyMOSU3kUo-4CKiq9EF9RksWM9NCpZ59XQcWGTKqujf1cNXlXo4Oz27eRLGzUAhxY8XdTdxEtQ9qiNlAgnWMwFwDuJneZKoKxE3BFf2vXtwUdTMG3mAaLnRfqxRNb8o1QthMVoeLGVDIf6bTGGBVjOHJ5Rsdbq1ZsiQzJcNynvSfAK57BzMIOF53JZ-R8qvrgxapUM7gIqgtdajzTdl9Dj8MWcm1n6DQSWd0au6ZskpDnyfRmF0Vx5qYOwdHcWel0XtDUEh4pBzCxpXnIR2UFF3EbUYfSOZlk2c4RrdGaKApiJhrSveWzbyA5BwbmSj5QRYc8WY1Rjwf8FygQRXQpnFPsgoMuyxcverMLIOGJ2vnAPy3sQ9d8nOYQlOVD9kXv27eRl6ygYXGNm6GRr9lpRU9TFGaoLF4JDFQbQYvQvM_UaGUAM9h_X2Lnnljw4s9AFPV1QR4MgKg8_X6dQt3DKD0Lf76I8jXqdIzIJvBNbY3tAAgvC7uypf5cqUk27ImdMZ2OZoJmz9NcqTiutxBRmJbTIzzWqdwHLKgjeAz3OdTf-eNQO0qNSsMGs8-xC6xm7puBCjKIFkc2xbd3F2HZ4N91sGczTakCJB-5h3G2TcKHFZptvEGfvM06KqRlq6JG_XVgZuG1_O4im0FgDoEgpsR3_wddshPpOavxFpAzOOIY7A344aHAHXyGZtU0SG0Otbq2U2iUTSsmmnvbwTKhnh8OYyytl_zoUPmNSNdkOrQLw0T2K4DNTXyhhdttvff4wSlZ5JNgPWc1vS8Olz_QE1EuZFDrxRFkQo0Tf3mhI1c4LGp4WY-MuMXhS90rQFIsuCUcx_l7KqKHcJMxMFri7M0uY1NwBgvTjlgwsNwZAF1CTUBpnbXMk4dBkxDNM-ha5w3PF3u1JLKNxlGtlqX7QeQQc7zmMym3grPBUM--XV70NvwAKRaGlPSGLnM3ESjO5tXEH1T0ksEP9CSAPfMWk2N4B2Mr6tyyBQTY1GYLDJlam_PbnIVySDjQB9IoPYzfdI4vUWGY-rX8rPF2pMde24r6SqUI9K3hbSgkb9gq1SUNoAsuZRhelLURV_UM3Y777XdSxdCQzoh7sTFu547cwX97Yr3cBZFyzf0BHlt5mNX0WcQK_CHJr_QqYIGrkMilcLulApWXdYTb0P84ntX_LR7rrmFQBfjlVIacsAJ4YvitUS8uRKfVdG1BxlCxaUmVBJjjSYwyPCEUy3g7EyUSn7TSUKj3zBH6o1mZQTXHd36j8AEqeCWNofmrr__j0dMcg7mGlAEE-5h547MDpfNn_bUl6N7S5FzIF_5Y7Cu6c6RtCb63F_XVZm9nMrYOaww-0OAKjPiLiQ59fE3Yb9tACEgwmbcdh_txOmqDu4wIeJmspE6qgxfyadieE9j1gdI0rNWhe7saTKzzo48wGT9ljOrBRUAKmjYR14RwRqfeJqG2hHKAWoZrWAQ2QlxXk3SzVhHXzBTEmCCO_hZFd4YuhSRi65EeAakE_NyDzcsz05Ez4aZSNXonyv7CsNLc-EXrxXgVBReRKxawt8e-NMFy4monSuxBalAr1LArj3f9HHgnb3dERoqG-qfQr6c_0Al4kdm1FFsuX8vtbIPaByUPVI_fQfu3BScJZkWZEMZAoTliy78JhxKPVEQq1JjTVc0rGCcFl5iL-_s6oK9vYbgevfPTl8PFBNUbEr6cfNvikMOeJ564506ZVuzKsvTgXniSQvlk8dgDW29AGSE7dzeLQqq5IdLGqqz_fSc2mQxcAcJgwzRLT1LQr0WkHC17yIbh6MtCfRcLm-vyTuzLiDeTBqoOrH-3WOB3AnHae3nRrlwAR_UFsA1l_fme2vruv2MUY9y8DHX4vwmAhNpj1w29xEuzQpjNEMjKLJiyvf4CwzN7tQU3zUtRnQ8kTyoj2Re5EzETQ8jeF7ZWCZwmMbSQWH4xDyYj5_P96dMaUHGaLBOW-hT5fG56kCyUnV_AcnYAa55FeY-J_j0qHdUV3ZK-bizwcNIlI_kpe0GUGajdNvJZv6r4p9-BxRIDbIHidNjaP8nnyScCYU9uWfJxykcKGgl7jZeWfSAfJASkwsbbfH6Ug7Msv9i2R2HnZNK3WdlAWA4IcpHw19wp79Jq9yE--FTuWaORGD_jcjE1bNPDbQ1AFXc_naxhzV5-XRsFSa_wp4qQznepjGjc8PpczcoCcdfYobR1ZOcrBhhtufEoA_1-ZRjl3f3HPVX3rLFMrIIopS7saLTC_d3e1J3f9CQktA2zGfMKErQmyBPk-UpW1QnTVi3E_qB_IDbQNkV_cOU3doKW9hQXPb9di1An2qhOQSoBaoEnF4xxUrrlfrF7g4WZKM4hcJ-LXNOIk0lhlImnArFnepgH2outlUM1zdn99PXWFbVnnj7VTIvHQ7ZiLV3ej_Tv7oimAme2zB1f83PkrAXPZZ-TzfrFkyFSwOJs32U7VBpKjVhsd3_RxMtrm5QkZiMHKfobGlV5Wd5dmpYgdjSyVv_FkJYVd4itJYgfIh-R9ckUy25Qmy6mlKPCHPB6t1-Ip5e9ihYCJ3jiCCTuw6lGdp2quZc2VWWOm13nPA1Xol0bGIPVMFtuKXYXCRomgSomjqhm-z0NVvQrMsm_YFXl8Yiajq5mXxpDI3rkV5NbHca8mQItDwZ2sfbjtD_UIpk5lGXufs2M5xOzrdk8hSOfy3tLR1q-OkOZaE9U_KWPkcP-FESCoG7sTnVHpT18Ht7s0AneBoo_3q1qEEcX3jmGHwlerOLjpSyZRTaspteUfYXH_0NeiL2HBn3aMW14GP9VsBHAUPlynmFlxiqM8YDpG6bIAt4Yila0cCM9DLn8ja3_-tJt7rIdf7iCJd_753PG24KeKpr13wea0QZ3aBoXwnpqa-C4gbH7hKnExALeEDoUzc8vdENrlaJTYCAI6yM7lg_EZYSRhrTFqDZYFKW53I_i8YU8IfO_7R8s3A2jbTSuhf9WhxD30HvbAlDGk27LSbF0keP88_Vnru_mFuz7z_u59iwhnfDxyEmgNtwPWj7NIOThc5CdEjvTiCwMhHF6u5CxZDN-YNtwQK3ai0dnp2w==","summary":[]}} + + event: response.output_item.added + data: {"type":"response.output_item.added","sequence_number":53,"output_index":15,"item":{"id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","type":"message","status":"in_progress","content":[],"role":"assistant"}} + + event: response.content_part.added + data: 
{"type":"response.content_part.added","sequence_number":54,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"part":{"type":"output_text","annotations":[],"logprobs":[],"text":""}} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":55,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"Short answer:\n- Default","logprobs":[],"obfuscation":"QW656YdVl"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":56,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" FastAPI/OpenTelemetry","logprobs":[],"obfuscation":"3U4PZ1gTRV"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":57,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" instrumentation already records method","logprobs":[],"obfuscation":"q2ksPhSf2"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":58,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"/route/status","logprobs":[],"obfuscation":"sw1"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":59,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":".\n- To also","logprobs":[],"obfuscation":"pc5Ee"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":60,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" include HTTP headers","logprobs":[],"obfuscation":"Ql6sBgDH0a3"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":61,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":", set","logprobs":[],"obfuscation":"e4Pg4aL3Sh5"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":62,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" the capture-","logprobs":[],"obfuscation":"Fno"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":63,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"headers env","logprobs":[],"obfuscation":"EpeQm"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":64,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" vars.\n-","logprobs":[],"obfuscation":"3C1N25eH"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":65,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" To include request","logprobs":[],"obfuscation":"NrPzS6H91D8FW"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":66,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"/response bodies","logprobs":[],"obfuscation":""} + + event: 
response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":67,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":", use the","logprobs":[],"obfuscation":"ZnJ5pMj"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":68,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" FastAPI","logprobs":[],"obfuscation":"7sm32rpp"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":69,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"/ASGI","logprobs":[],"obfuscation":"2gFmImEqUg8"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":70,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" request/response","logprobs":[],"obfuscation":"KwiDLsxL9vtk6Aa"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":71,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" hooks and add","logprobs":[],"obfuscation":"m5"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":72,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" the","logprobs":[],"obfuscation":"02ehOPl5ojtF"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":73,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" payload to","logprobs":[],"obfuscation":"oo1Q1"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":74,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" the span yourself","logprobs":[],"obfuscation":"lr2fU5ljGqaz7s"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":75,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" (with red","logprobs":[],"obfuscation":"jfWSWe"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":76,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"action/size","logprobs":[],"obfuscation":"PYKcu"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":77,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" limits).\n\nHow","logprobs":[],"obfuscation":"Fz"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":78,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" to do it","logprobs":[],"obfuscation":"zOdlIvE"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":79,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\n\n1)","logprobs":[],"obfuscation":"w0105lPiPO2c"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":80,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" Enable header capture","logprobs":[],"obfuscation":"3zy8uPPFOT"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":81,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" (server side","logprobs":[],"obfuscation":"zQQ"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":82,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":")\n- Choose","logprobs":[],"obfuscation":"7IXrNN"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":83,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" just the","logprobs":[],"obfuscation":"1pPuoih"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":84,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" headers you need; avoid","logprobs":[],"obfuscation":"MQJJ4QNI"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":85,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" sensitive ones or sanitize","logprobs":[],"obfuscation":"uPTnW"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":86,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" them.\n\nexport OTEL","logprobs":[],"obfuscation":"MDKjWaiX27Zhd"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":87,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_INSTRUMENTATION_HTTP_CAPTURE","logprobs":[],"obfuscation":"Tys"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":88,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_HEADERS_SERVER_REQUEST=\"content","logprobs":[],"obfuscation":""} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":89,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"-type,user","logprobs":[],"obfuscation":"1GqmGk"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":90,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"-agent\"\n","logprobs":[],"obfuscation":"vAP56nOS"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":91,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"export OTEL_INSTRUMENTATION","logprobs":[],"obfuscation":"xZc1w"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":92,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_HTTP_CAPTURE_HEADERS","logprobs":[],"obfuscation":"4gkvCmoHp5U"} + + 
event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":93,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_SERVER_RESPONSE=\"content-type\"\n","logprobs":[],"obfuscation":""} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":94,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"export OTEL_INSTRUMENTATION_HTTP","logprobs":[],"obfuscation":""} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":95,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_CAPTURE_HEADERS_SANITIZE_FIELDS=\"authorization,set-cookie\"\n\nThis makes headers appear on spans as http.request.header.* and http.response.header.*. ([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n\n2)","logprobs":[],"obfuscation":"TvIZ"} + + event: response.output_text.annotation.added + data: {"type":"response.output_text.annotation.added","sequence_number":96,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"annotation_index":0,"annotation":{"type":"url_citation","end_index":946,"start_index":799,"title":"OpenTelemetry FastAPI Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html"}} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":97,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" Add hooks to capture request","logprobs":[],"obfuscation":"EEa"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":98,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"/response bodies","logprobs":[],"obfuscation":""} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":99,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\nNote:","logprobs":[],"obfuscation":"XaIX2CSRdY"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":100,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" There isn’t","logprobs":[],"obfuscation":"rsOM"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":101,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" a built-in Python","logprobs":[],"obfuscation":"J7hU40MnWaDWKm"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":102,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" env","logprobs":[],"obfuscation":"NGoH9cjie1b0"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":103,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" var to","logprobs":[],"obfuscation":"6lVnhLDY0"} + + event: 
response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":104,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" auto-capture","logprobs":[],"obfuscation":"iSF"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":105,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" HTTP bodies for Fast","logprobs":[],"obfuscation":"s3ZURVDE7Pe"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":106,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"API/AS","logprobs":[],"obfuscation":"LxfNueuvmk"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":107,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"GI. Use","logprobs":[],"obfuscation":"yOzBvJyAa"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":108,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" hooks to look at","logprobs":[],"obfuscation":"GQdMrMH4MLkmK4j"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":109,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" ASGI receive","logprobs":[],"obfuscation":"g5q"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":110,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"/send events and","logprobs":[],"obfuscation":""} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":111,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" attach (tr","logprobs":[],"obfuscation":"cZoHW"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":112,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"uncated) bodies","logprobs":[],"obfuscation":"9"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":113,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" as span attributes","logprobs":[],"obfuscation":"Y238731jczz4l"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":114,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":".\n\nfrom","logprobs":[],"obfuscation":"E8EBQ9xD0"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":115,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" fastapi import","logprobs":[],"obfuscation":"b"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":116,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" FastAPI","logprobs":[],"obfuscation":"VH1Hv2e4"} + + event: response.output_text.delta + 
data: {"type":"response.output_text.delta","sequence_number":117,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\nfrom opente","logprobs":[],"obfuscation":"TbsE"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":118,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"lemetry.trace","logprobs":[],"obfuscation":"dXL"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":119,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" import Span","logprobs":[],"obfuscation":"unv2"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":120,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\nfrom opente","logprobs":[],"obfuscation":"7TVO"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":121,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"lemetry.instrument","logprobs":[],"obfuscation":"jMHMj7SWlgIy4A"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":122,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"ation.fastapi import","logprobs":[],"obfuscation":"9q4BDBkbIraH"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":123,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" FastAPIInstrument","logprobs":[],"obfuscation":"ZfamyqTfcvhQ8g"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":124,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"or\n\nMAX","logprobs":[],"obfuscation":"NTlVsxprA"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":125,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_BYTES = ","logprobs":[],"obfuscation":"sd1zZTo"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":126,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"2048 ","logprobs":[],"obfuscation":"Zsda1j71SNR"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":127,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" # keep this","logprobs":[],"obfuscation":"j5DU"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":128,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" small in prod","logprobs":[],"obfuscation":"rK"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":129,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\n\ndef client","logprobs":[],"obfuscation":"nvHY"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":130,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_request_hook(span","logprobs":[],"obfuscation":"USmXfXueAuq3wC"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":131,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":": Span,","logprobs":[],"obfuscation":"Ws1OsCUbr"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":132,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" scope: dict","logprobs":[],"obfuscation":"4iFz"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":133,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":", message:","logprobs":[],"obfuscation":"MOHXRd"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":134,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" dict):\n ","logprobs":[],"obfuscation":"FYlN9"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":135,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" if span and","logprobs":[],"obfuscation":"zzyk"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":136,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" span.is_record","logprobs":[],"obfuscation":"z"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":137,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"ing() and","logprobs":[],"obfuscation":"fw87omu"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":138,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" message.get(\"","logprobs":[],"obfuscation":"Yr"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":139,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"type\") ==","logprobs":[],"obfuscation":"fqUD5uU"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":140,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" \"http.request","logprobs":[],"obfuscation":"Ka"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":141,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\":\n body","logprobs":[],"obfuscation":"R"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":142,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" = message.get","logprobs":[],"obfuscation":"6U"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":143,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"(\"body\")","logprobs":[],"obfuscation":"L3lZVjRl"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":144,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" or b\"","logprobs":[],"obfuscation":"Fb6XQEIoBL"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":145,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\"\n if","logprobs":[],"obfuscation":"UQY0"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":146,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" body:\n ","logprobs":[],"obfuscation":"jXQYhCBIvivXgj"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":147,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" span.set_attribute","logprobs":[],"obfuscation":"2oyOpzLJPthfZ"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":148,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"(\n \"","logprobs":[],"obfuscation":"lucm0soBA3V0L"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":149,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"http.request.body","logprobs":[],"obfuscation":"S9olTEyX8wJDM5m"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":150,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\",\n body","logprobs":[],"obfuscation":"OD1u3emOO"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":151,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"[:MAX_BYTES","logprobs":[],"obfuscation":"WKrMe"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":152,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"].decode(\"","logprobs":[],"obfuscation":"uqZx7I"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":153,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"utf-8","logprobs":[],"obfuscation":"XSqoyd89uue"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":154,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\", \"replace","logprobs":[],"obfuscation":"l7Pww"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":155,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\"),\n )\n","logprobs":[],"obfuscation":"Ak1JmuUaWvBkZ5"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":156,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\ndef client_response","logprobs":[],"obfuscation":"agItEfDavuTZ"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":157,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_hook(span:","logprobs":[],"obfuscation":"0VAuw"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":158,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" Span, scope","logprobs":[],"obfuscation":"YghN"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":159,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":": dict,","logprobs":[],"obfuscation":"YibR257Bu"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":160,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" message: dict","logprobs":[],"obfuscation":"38"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":161,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"):\n if","logprobs":[],"obfuscation":"JUf91vW"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":162,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" span and span","logprobs":[],"obfuscation":"Sb"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":163,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":".is_recording","logprobs":[],"obfuscation":"Hkb"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":164,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"() and message","logprobs":[],"obfuscation":"z1"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":165,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":".get(\"type","logprobs":[],"obfuscation":"yKuYux"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":166,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\") == \"","logprobs":[],"obfuscation":"1c6fxZTMb"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":167,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"http.response.body","logprobs":[],"obfuscation":"9aZ2PGrpKvVn13"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":168,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\":\n body","logprobs":[],"obfuscation":"b"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":169,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" = message.get","logprobs":[],"obfuscation":"x2"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":170,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"(\"body\")","logprobs":[],"obfuscation":"Dp8N8Iah"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":171,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" or b\"","logprobs":[],"obfuscation":"Qx2tMBpbMG"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":172,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\"\n if","logprobs":[],"obfuscation":"pbgt"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":173,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" body:\n ","logprobs":[],"obfuscation":"ODgYUulNj6ENmJ"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":174,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" span.set_attribute","logprobs":[],"obfuscation":"AApyjf2xz4kJu"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":175,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"(\n \"","logprobs":[],"obfuscation":"gOJF0rdcPoyhO"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":176,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"http.response.body","logprobs":[],"obfuscation":"jumSpqpiJy5SRB"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":177,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\",\n body","logprobs":[],"obfuscation":"sqLISm0AU"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":178,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"[:MAX_BYTES","logprobs":[],"obfuscation":"hrYZz"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":179,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"].decode(\"","logprobs":[],"obfuscation":"wysmH9"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":180,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"utf-8","logprobs":[],"obfuscation":"IhrVs5vsvpY"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":181,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\", \"replace","logprobs":[],"obfuscation":"PEUqY"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":182,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\"),\n )\n","logprobs":[],"obfuscation":"ZueyBhDDVziCUz"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":183,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\napp = Fast","logprobs":[],"obfuscation":"R9HPs"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":184,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"API()\nFast","logprobs":[],"obfuscation":"kcdIII"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":185,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"APIInstrumentor","logprobs":[],"obfuscation":"U"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":186,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":".instrument_app(","logprobs":[],"obfuscation":""} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":187,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\n app,","logprobs":[],"obfuscation":"tzAvfXN"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":188,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\n client_request","logprobs":[],"obfuscation":"0jlVOgOrTXf95"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":189,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_hook=client","logprobs":[],"obfuscation":"42nw"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":190,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_request_hook,\n ","logprobs":[],"obfuscation":"XdokTWr5sI53Rq"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":191,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" client_response_hook","logprobs":[],"obfuscation":"Ryt6PC32eRZ"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":192,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"=client_response","logprobs":[],"obfuscation":""} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":193,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_hook,\n)\n","logprobs":[],"obfuscation":"gUGvAuV"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":194,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\n- The hooks","logprobs":[],"obfuscation":"mHgR"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":195,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" receive the AS","logprobs":[],"obfuscation":"2"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":196,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"GI event dict","logprobs":[],"obfuscation":"Zhr"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":197,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"s: http","logprobs":[],"obfuscation":"jfNWuMPm7"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":198,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":".request (with","logprobs":[],"obfuscation":"Yh"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":199,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" body/more","logprobs":[],"obfuscation":"YLpSO3"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":200,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_body) and","logprobs":[],"obfuscation":"Y4el37"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":201,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" http.response.body","logprobs":[],"obfuscation":"tGVrDMzxQGABo"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":202,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":". 
If your","logprobs":[],"obfuscation":"H4TMy4C"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":203,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" bodies can be","logprobs":[],"obfuscation":"uN"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":204,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" chunked,","logprobs":[],"obfuscation":"5k253hM"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":205,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" you may need","logprobs":[],"obfuscation":"fq0"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":206,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" to accumulate across","logprobs":[],"obfuscation":"i5xiKyaUonq"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":207,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" calls when message","logprobs":[],"obfuscation":"fQC4x9Vw6BuEC"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":208,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":".get(\"more","logprobs":[],"obfuscation":"PbbrSS"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":209,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_body\") is","logprobs":[],"obfuscation":"GMX10c"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":210,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" True. 
","logprobs":[],"obfuscation":"o74PybOdt"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":211,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html)","logprobs":[],"obfuscation":"tEbKNEDIrbvhWx"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":212,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":")","logprobs":[],"obfuscation":"Lz0hgKWzSnbRROi"} + + event: response.output_text.annotation.added + data: {"type":"response.output_text.annotation.added","sequence_number":213,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"annotation_index":1,"annotation":{"type":"url_citation","end_index":2582,"start_index":2435,"title":"OpenTelemetry FastAPI Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html"}} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":214,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\n\n3)","logprobs":[],"obfuscation":"aBwAhxAEaX9w"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":215,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" Be careful with","logprobs":[],"obfuscation":""} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":216,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" PII and","logprobs":[],"obfuscation":"Rwe2UmHi"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":217,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" size\n-","logprobs":[],"obfuscation":"7DffUf27X"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":218,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" Always limit size","logprobs":[],"obfuscation":"pR7FBMSjv390qu"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":219,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" and consider redaction","logprobs":[],"obfuscation":"Vf9TN26VP"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":220,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" before putting payloads","logprobs":[],"obfuscation":"h51TunAK"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":221,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" on spans.\n-","logprobs":[],"obfuscation":"JDdO"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":222,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" Use the sanitize","logprobs":[],"obfuscation":"91PRbxVnGZLvgGc"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":223,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" env var above","logprobs":[],"obfuscation":"s4"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":224,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" for sensitive headers","logprobs":[],"obfuscation":"Sy0mQJNYtV"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":225,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":". ","logprobs":[],"obfuscation":"Tv2saDsNvGP0Ss"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":226,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n","logprobs":[],"obfuscation":"60BO8HvmqJxN"} + + event: response.output_text.annotation.added + data: {"type":"response.output_text.annotation.added","sequence_number":227,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"annotation_index":2,"annotation":{"type":"url_citation","end_index":2896,"start_index":2749,"title":"OpenTelemetry FastAPI Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html"}} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":228,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\nOptional: correlate logs","logprobs":[],"obfuscation":"7YeMGHx"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":229,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" with traces\n-","logprobs":[],"obfuscation":"EZ"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":230,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" If you also want","logprobs":[],"obfuscation":"k2hW00zK4y3eqbT"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":231,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" request/response","logprobs":[],"obfuscation":"7NpO5LJj9Gy2m67"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":232,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" details in logs with","logprobs":[],"obfuscation":"gs8UaJ5wQd1"} + + event: response.output_text.delta + data: 
{"type":"response.output_text.delta","sequence_number":233,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" trace IDs, enable","logprobs":[],"obfuscation":"Z6a5MVGPKdbA1u"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":234,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" Python log correlation:\n","logprobs":[],"obfuscation":"JIOMeuA"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":235,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\nexport OTEL_P","logprobs":[],"obfuscation":"e9"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":236,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"YTHON_LOG_COR","logprobs":[],"obfuscation":"OOv"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":237,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"RELATION=true","logprobs":[],"obfuscation":"DbI"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":238,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\n\nor programmatically","logprobs":[],"obfuscation":"qnwdSrvm5by"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":239,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":":\nfrom opente","logprobs":[],"obfuscation":"Esl"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":240,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"lemetry.instrumentation","logprobs":[],"obfuscation":"23c4XsjdJ"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":241,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":".logging import LoggingInstrument","logprobs":[],"obfuscation":"o7DV6JLpDWxpucv"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":242,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"or\nLoggingInstrument","logprobs":[],"obfuscation":"TTb6bDzWYMof"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":243,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"or().instrument(set","logprobs":[],"obfuscation":"gUwt2OhTADTIR"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":244,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_logging_format=True)\n","logprobs":[],"obfuscation":"ELUu76gEV6"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":245,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\nThis injects 
trace","logprobs":[],"obfuscation":"VVQ6lbmXDfXy5"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":246,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"_id/span_id into","logprobs":[],"obfuscation":""} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":247,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" log records so you","logprobs":[],"obfuscation":"73QqvEOleOa3u"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":248,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" can line up logs","logprobs":[],"obfuscation":"47BlS9DSo91ld15"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":249,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" with the span that","logprobs":[],"obfuscation":"IiSRw4HuErGhm"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":250,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" carries the HTTP payload","logprobs":[],"obfuscation":"NIq2zfj"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":251,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" attributes. ","logprobs":[],"obfuscation":"idD"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":252,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/logging/logging.html?utm_source=openai))\n","logprobs":[],"obfuscation":"VJ71qk85d5"} + + event: response.output_text.annotation.added + data: {"type":"response.output_text.annotation.added","sequence_number":253,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"annotation_index":3,"annotation":{"type":"url_citation","end_index":3518,"start_index":3353,"title":"OpenTelemetry Logging Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/logging/logging.html?utm_source=openai"}} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":254,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":"\nWant me to tailor","logprobs":[],"obfuscation":"GIvIt9hZF2F2nG"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":255,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" the hook to only","logprobs":[],"obfuscation":"N1ua4nlopnoaPkO"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":256,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" capture JSON bodies,","logprobs":[],"obfuscation":"Vw7qJi4GhU3"} + + event: 
response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":257,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" skip binary content,","logprobs":[],"obfuscation":"2D08dmEQMoV"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":258,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" or accumulate chunked","logprobs":[],"obfuscation":"4qW8RFDKgK"} + + event: response.output_text.delta + data: {"type":"response.output_text.delta","sequence_number":259,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"delta":" bodies safely?","logprobs":[],"obfuscation":"3"} + + event: response.output_text.done + data: {"type":"response.output_text.done","sequence_number":260,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"text":"Short answer:\n- Default FastAPI/OpenTelemetry instrumentation already records method/route/status.\n- To also include HTTP headers, set the capture-headers env vars.\n- To include request/response bodies, use the FastAPI/ASGI request/response hooks and add the payload to the span yourself (with redaction/size limits).\n\nHow to do it\n\n1) Enable header capture (server side)\n- Choose just the headers you need; avoid sensitive ones or sanitize them.\n\nexport OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST=\"content-type,user-agent\"\nexport OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE=\"content-type\"\nexport OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS=\"authorization,set-cookie\"\n\nThis makes headers appear on spans as http.request.header.* and http.response.header.*. ([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n\n2) Add hooks to capture request/response bodies\nNote: There isn’t a built-in Python env var to auto-capture HTTP bodies for FastAPI/ASGI. Use hooks to look at ASGI receive/send events and attach (truncated) bodies as span attributes.\n\nfrom fastapi import FastAPI\nfrom opentelemetry.trace import Span\nfrom opentelemetry.instrumentation.fastapi import FastAPIInstrumentor\n\nMAX_BYTES = 2048 # keep this small in prod\n\ndef client_request_hook(span: Span, scope: dict, message: dict):\n if span and span.is_recording() and message.get(\"type\") == \"http.request\":\n body = message.get(\"body\") or b\"\"\n if body:\n span.set_attribute(\n \"http.request.body\",\n body[:MAX_BYTES].decode(\"utf-8\", \"replace\"),\n )\n\ndef client_response_hook(span: Span, scope: dict, message: dict):\n if span and span.is_recording() and message.get(\"type\") == \"http.response.body\":\n body = message.get(\"body\") or b\"\"\n if body:\n span.set_attribute(\n \"http.response.body\",\n body[:MAX_BYTES].decode(\"utf-8\", \"replace\"),\n )\n\napp = FastAPI()\nFastAPIInstrumentor.instrument_app(\n app,\n client_request_hook=client_request_hook,\n client_response_hook=client_response_hook,\n)\n\n- The hooks receive the ASGI event dicts: http.request (with body/more_body) and http.response.body. If your bodies can be chunked, you may need to accumulate across calls when message.get(\"more_body\") is True. 
([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n\n3) Be careful with PII and size\n- Always limit size and consider redaction before putting payloads on spans.\n- Use the sanitize env var above for sensitive headers. ([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n\nOptional: correlate logs with traces\n- If you also want request/response details in logs with trace IDs, enable Python log correlation:\n\nexport OTEL_PYTHON_LOG_CORRELATION=true\n\nor programmatically:\nfrom opentelemetry.instrumentation.logging import LoggingInstrumentor\nLoggingInstrumentor().instrument(set_logging_format=True)\n\nThis injects trace_id/span_id into log records so you can line up logs with the span that carries the HTTP payload attributes. ([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/logging/logging.html?utm_source=openai))\n\nWant me to tailor the hook to only capture JSON bodies, skip binary content, or accumulate chunked bodies safely?","logprobs":[]} + + event: response.content_part.done + data: {"type":"response.content_part.done","sequence_number":261,"item_id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","output_index":15,"content_index":0,"part":{"type":"output_text","annotations":[{"type":"url_citation","end_index":946,"start_index":799,"title":"OpenTelemetry FastAPI Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html"},{"type":"url_citation","end_index":2582,"start_index":2435,"title":"OpenTelemetry FastAPI Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html"},{"type":"url_citation","end_index":2896,"start_index":2749,"title":"OpenTelemetry FastAPI Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html"},{"type":"url_citation","end_index":3518,"start_index":3353,"title":"OpenTelemetry Logging Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/logging/logging.html?utm_source=openai"}],"logprobs":[],"text":"Short answer:\n- Default FastAPI/OpenTelemetry instrumentation already records method/route/status.\n- To also include HTTP headers, set the capture-headers env vars.\n- To include request/response bodies, use the FastAPI/ASGI request/response hooks and add the payload to the span yourself (with redaction/size limits).\n\nHow to do it\n\n1) Enable header capture (server side)\n- Choose just the headers you need; avoid sensitive ones or sanitize them.\n\nexport OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST=\"content-type,user-agent\"\nexport OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE=\"content-type\"\nexport OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS=\"authorization,set-cookie\"\n\nThis makes headers appear on spans as http.request.header.* and http.response.header.*. 
([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n\n2) Add hooks to capture request/response bodies\nNote: There isn’t a built-in Python env var to auto-capture HTTP bodies for FastAPI/ASGI. Use hooks to look at ASGI receive/send events and attach (truncated) bodies as span attributes.\n\nfrom fastapi import FastAPI\nfrom opentelemetry.trace import Span\nfrom opentelemetry.instrumentation.fastapi import FastAPIInstrumentor\n\nMAX_BYTES = 2048 # keep this small in prod\n\ndef client_request_hook(span: Span, scope: dict, message: dict):\n if span and span.is_recording() and message.get(\"type\") == \"http.request\":\n body = message.get(\"body\") or b\"\"\n if body:\n span.set_attribute(\n \"http.request.body\",\n body[:MAX_BYTES].decode(\"utf-8\", \"replace\"),\n )\n\ndef client_response_hook(span: Span, scope: dict, message: dict):\n if span and span.is_recording() and message.get(\"type\") == \"http.response.body\":\n body = message.get(\"body\") or b\"\"\n if body:\n span.set_attribute(\n \"http.response.body\",\n body[:MAX_BYTES].decode(\"utf-8\", \"replace\"),\n )\n\napp = FastAPI()\nFastAPIInstrumentor.instrument_app(\n app,\n client_request_hook=client_request_hook,\n client_response_hook=client_response_hook,\n)\n\n- The hooks receive the ASGI event dicts: http.request (with body/more_body) and http.response.body. If your bodies can be chunked, you may need to accumulate across calls when message.get(\"more_body\") is True. ([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n\n3) Be careful with PII and size\n- Always limit size and consider redaction before putting payloads on spans.\n- Use the sanitize env var above for sensitive headers. ([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n\nOptional: correlate logs with traces\n- If you also want request/response details in logs with trace IDs, enable Python log correlation:\n\nexport OTEL_PYTHON_LOG_CORRELATION=true\n\nor programmatically:\nfrom opentelemetry.instrumentation.logging import LoggingInstrumentor\nLoggingInstrumentor().instrument(set_logging_format=True)\n\nThis injects trace_id/span_id into log records so you can line up logs with the span that carries the HTTP payload attributes. 
([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/logging/logging.html?utm_source=openai))\n\nWant me to tailor the hook to only capture JSON bodies, skip binary content, or accumulate chunked bodies safely?"}} + + event: response.output_item.done + data: {"type":"response.output_item.done","sequence_number":262,"output_index":15,"item":{"id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","type":"message","status":"completed","content":[{"type":"output_text","annotations":[{"type":"url_citation","end_index":946,"start_index":799,"title":"OpenTelemetry FastAPI Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html"},{"type":"url_citation","end_index":2582,"start_index":2435,"title":"OpenTelemetry FastAPI Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html"},{"type":"url_citation","end_index":2896,"start_index":2749,"title":"OpenTelemetry FastAPI Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html"},{"type":"url_citation","end_index":3518,"start_index":3353,"title":"OpenTelemetry Logging Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/logging/logging.html?utm_source=openai"}],"logprobs":[],"text":"Short answer:\n- Default FastAPI/OpenTelemetry instrumentation already records method/route/status.\n- To also include HTTP headers, set the capture-headers env vars.\n- To include request/response bodies, use the FastAPI/ASGI request/response hooks and add the payload to the span yourself (with redaction/size limits).\n\nHow to do it\n\n1) Enable header capture (server side)\n- Choose just the headers you need; avoid sensitive ones or sanitize them.\n\nexport OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST=\"content-type,user-agent\"\nexport OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE=\"content-type\"\nexport OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS=\"authorization,set-cookie\"\n\nThis makes headers appear on spans as http.request.header.* and http.response.header.*. ([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n\n2) Add hooks to capture request/response bodies\nNote: There isn’t a built-in Python env var to auto-capture HTTP bodies for FastAPI/ASGI. 
Use hooks to look at ASGI receive/send events and attach (truncated) bodies as span attributes.\n\nfrom fastapi import FastAPI\nfrom opentelemetry.trace import Span\nfrom opentelemetry.instrumentation.fastapi import FastAPIInstrumentor\n\nMAX_BYTES = 2048 # keep this small in prod\n\ndef client_request_hook(span: Span, scope: dict, message: dict):\n if span and span.is_recording() and message.get(\"type\") == \"http.request\":\n body = message.get(\"body\") or b\"\"\n if body:\n span.set_attribute(\n \"http.request.body\",\n body[:MAX_BYTES].decode(\"utf-8\", \"replace\"),\n )\n\ndef client_response_hook(span: Span, scope: dict, message: dict):\n if span and span.is_recording() and message.get(\"type\") == \"http.response.body\":\n body = message.get(\"body\") or b\"\"\n if body:\n span.set_attribute(\n \"http.response.body\",\n body[:MAX_BYTES].decode(\"utf-8\", \"replace\"),\n )\n\napp = FastAPI()\nFastAPIInstrumentor.instrument_app(\n app,\n client_request_hook=client_request_hook,\n client_response_hook=client_response_hook,\n)\n\n- The hooks receive the ASGI event dicts: http.request (with body/more_body) and http.response.body. If your bodies can be chunked, you may need to accumulate across calls when message.get(\"more_body\") is True. ([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n\n3) Be careful with PII and size\n- Always limit size and consider redaction before putting payloads on spans.\n- Use the sanitize env var above for sensitive headers. ([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n\nOptional: correlate logs with traces\n- If you also want request/response details in logs with trace IDs, enable Python log correlation:\n\nexport OTEL_PYTHON_LOG_CORRELATION=true\n\nor programmatically:\nfrom opentelemetry.instrumentation.logging import LoggingInstrumentor\nLoggingInstrumentor().instrument(set_logging_format=True)\n\nThis injects trace_id/span_id into log records so you can line up logs with the span that carries the HTTP payload attributes. 
([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/logging/logging.html?utm_source=openai))\n\nWant me to tailor the hook to only capture JSON bodies, skip binary content, or accumulate chunked bodies safely?"}],"role":"assistant"}} + + event: response.completed + data: {"type":"response.completed","sequence_number":263,"response":{"id":"resp_00e767404995b9950068e647f092888191843d25c5285ef895","object":"response","created_at":1759922160,"status":"completed","background":false,"error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"max_tool_calls":null,"model":"gpt-5-2025-08-07","output":[{"id":"rs_00e767404995b9950068e647f10d8c819187515d1b2517b059","type":"reasoning","encrypted_content":"gAAAAABo5kg_CSDv3y6-9rcajr9ztNF7AK9MAhzPmxGVnKj4L0iSbQJ-Q2asW9GGjj-PqQroo8OJbOexpQb-Y7t1rlh56uJjv6EzdemlPhVEyyUo4cc3UKGYxJTrLsfbgEGAFwfWgokayL-nt-6UULQmzGyc3YJR_CCDJPfEqz-88vaIfc2An_oGEwf7DGiUqM318Xw6K4rY0yu34lHMFAjwk6ux_uQeUcJ0Qat_h10P66peAx1tZq0lxkZX7v5HJKCxOjejslsYl9uownCGFWyPvpk7ZwEMQt5Ww2jc7FF8WZBhPfBkSvPxQGindrz5g_Gye0WfM2Bboh-oEH5RBO4jqsPomBUXKFgLdMnA9ACNJiIbLsFDgKpc00Ro4J10VoC7AlW6z51QUli4FFoR72hG4dzOEJm0zq0Sl0xrldm4Iw4fAoql4A0jB2E_No8cVHxHdjsC3OkQEYuPdl9PMi8ALNfqKlaBhkLgenGOSXnCuD-8GhzRUCeFHFXHG6xyEeWlodZmATtbhHqm0F4tkAgGgeKbXGdbRGy-njBt_oIslXxd-_jYJ4j6ekxbRdox9xpfcebq9xzLquOVG_MomzHYx08Ld-B9sBjl_3CzmzPmV8KnVOQ1CWXLIsChniNBCM56HNfgHcHoh4IRPqVUh4_PtmRpH9FtW5-Dw2dJYuXFOwkJJ-MUzNxdVhBzjQta9yWGOxT-U_VBQdT48wLD1RFx9DOamJ65H1COwIdthnFk7l-l-tCv0_KgfQnzppQJdXVGF2VfujBfXCCNzyVQF_HahwWAS77KQnofUqtev2ahZzvMk8bDKu7q9UOod14s1AoIQx-a_9QO3NYNvAeTpz1yNm6c2-EIznBsAkXOpITmR5zWXQ10NbBjbIjoXVhU0-EIZOLa0JNuJnbEu_AwMKy4el1BMQ2kkHElCCo15DjqDTZ3MouglZQ3sb4zHfUM5pc0nGGk5YrmZbVt3NYgtA9CuCDUibrlQgLluFd9jSzfbgoSNU0F5cQ_WKa5CaR0IujnwUPKpkE5-4KVdlmXeZ_3FKhSMZGclMVAXyl3SC-9I4KyhhtRFb4JoocpNkjhS21O-yahy89q1pe8jdbUSeCvPt3DoHSTLjtuXNLjSmuvtwBDPlOOC-sEVR8_GbzdwVq_Tkugg4wT8ErAuSrov7VenzMPn9JnG-2ttXevHxfO3RsdYlr0VxMhZ4Iid6aFkLOT-qNaVj2ld71AFcUCkqaww50k4skbFRHX1F7vuHRdK1aCTtFFc7JSTlIElHNnpI4ODykTYRjG3bHPeop41rpWTxC1clJdDfdcSosBsFfEukx-7Iu4mr1idxc5fizh4NelAql7n0N3LRIR5t44Ls2D7c0oL1kL42VKChuUsxnXkVbOIuDW3N1GSNyoLk2qHi9AVkkl29ruCgcBxPG7_UCsc0GmSDf_VcRY7qdASOxIvoALZAzhwBBXKsGvjkvoYwznajbjPhaW2qCaZdJzLE7NZneBsw4XzoUJG3BPY_TBmj6sU-7DlwtKO_yuArmXChCsAL-ItF_Hj5ROsdekSDvlxwy1wXFPJF1oDizO9inPYpRk_RhKaMcaSqQQcOOoV81uXKLvrEOHRmBf-MsfQ2-GiX6U5G0IRzgRgtCtl6srWJXMoBkmtETiSo04pc4x5lhD__djzoJi3XlVid6ahAbU_ez11afqeREP09ZFj5UV7UXWVpwwlJdEoQGe-mkmZuHMfYjG3D0_6SYhnqOc-hgu-L9y5qhjh66TyG8JN7OjYFj_VQ9oo4io0JfwiEThHYVycxvJ812XhUgKCyxz4YIzgU63Pf-TN0VOraahE4WWu8mjmbkydFDKtN4qpdBrZekyAqVimK28n0Dmag1T2PTz2IG0QLJisZGd0mqPHI6Re-qbV8fwsrq9XrbvkPc6eMd5L6xt9P7w_IMmGyXN9t9Vr5B77grhGQjqSB20PAcC-FYA1lG7XajweXC9vaIGd69KfsVWwukJko8S8RNyS9XNmQO0JAPdl35iOIn1yBpMMBBRfU59Zvqb7i3TmeyxiitOYJETi5FiggwZ9em9UaO8ojJblY4VuzrshdlPddN3av0os4izexyI5RzPa5DcHk07qKm1Ky8Wwa_g8kmmuGUns1QzXHWTQYestcG_5N0zmawYzwmpKI7LXGMBevcw-VrY3koPGAdyIoZhCMrta1wC3U6Umx-gAbmB7qCv8TNZApxYQbO8vJ4aNiG2WOWQxtVA3jTvC7DDh_gmi2EnG1PHiiTEi1jDeKmnpxW_y61wIY6M0LozLjO_tkVCkFLyBkTzECqXLubuH7__h3tFxKkMYkEq5shJa7A87MM-Tn_RsCkx8EgorBCb--g6EMJ-HRx0YVOdd_WjMrTKZwwykeJZINbVBDXIao0-tLpXP1RBSFSaKOuZMOsmjTqHCLQJGYKxdcYOiXZOZfKY3zVN4nanMbFsohGlLVUDsz12zZCWU_x78X9ypZcuad8RSA8II8cXCXrsKfeHPhKOxKY_o6WtJxCAMxNQrr4flyJA_18zuX9cT8S-kHepBSisDO5Tzzb9rNa99eRs4mC44c-07w90gsaWU3EcjAGfFRQFTaGMqV7yhbgpTGF0Yr0nT-BKdKqpgmdPCgCicAjOd9eFeY6vE6_0osdZ9B0y6I3aEiTSfcBce3VfsImm5ZeZCi_n3Z2adjCv9hnMXP1DoyFxuDD6_rRXBxWVtOpofcPdObOhrwuBBq6jHodev1cL5DTRWIU
PCU2PqKdQvtTjxUP8HBjysN29Fsle7KZGgBcpGboIv8Gz3B2jTRRWdLZKeP5gSGxO9oHkT3YlbDTNNNwQQA0Vy4DkUpHJXdfTugvg8DWUOhnjThe1upVcPHoa9z4e0h9h-2ERp_JCwP9o3oRsJq4_9fLAr9bWXgBbOxDsCdYI2MHGvJ2yxzucaemiNAjC1hNkADJ84KN2RvjnxGCg6KiHVc3xKrxRK6GpSI1l0WiGvUmP0HwGEUjHYU_LNKvvd0vzNzcSdI50BEv5NvElu-HsrHtyG7CYGzJ54tRegrx5_OfYu1_SYXP_YNQDZBik64U1qAscFEtQEDmxoxfu14hIBgwmjY5eA4zDXa44whNjnwSmIHzpMe9NjJdl-LXtbYCGVHx9i2ZAdxPg_JNOKyCDEcPREnvefjnivjkuonFL3W4cgYXWVyRpMmYvZ6G5iLhrAPSuF6SyNhPhBHsQ_aHKGQednxoZmwhPI1ffJPUZowV6cQeEua3g6ageq3hBdKvBiPR5JyfmkV23UEXhz94wjpoiPlDcPdTcqlqBGFMzFLsy62R1iKCCEB2TSzJAOzysWRcK6nYM7XFV5U_WKRJfWh6zGNk3twZv657t01TaNZp6EMNoJPRh-qWKbYNs3DTgjf8uUb5eGWyTUqr1ztUr3547fQhGymPuqYdxX3SKd4kQYUk9cxUVanZc64Qc4czDapb_qzzzIpRv2sQbAFiXYAMhHNOq7XLk12DPuEGhETLCpPUxcRojQ_gUxy6q7mKpO95pkjUvEXLEv_wHnOM3GLTi-4dmulHrPmnfwn0JK_MG5s6BEyENX-Zbnw3641Y-oudnths77KyuBqSbLpge88fO47AEfrUzkDE1WmbxfoeJ_s9AeVutrYpu2Lk6uvdVBQSXNGKkAiiAz-6IHaYXEMCrrfIqw4E6AR1-jVRjFLT4Dh_HHDWv0RgdSPlDvA_3HtCGGKVg0qwaLkwBZ8wO0Jtt7Ca-PdX16UL9DrO6WsvRN4LwAwHCFnwI_BQoY-6S0zKZXqneplRzaeEcHsi-7c_X-8BxZ4oHFkGBBrzEobvo6JWO53gtESnHAcZFvqxsKX5sVc8f160tegs9C7X8WbXxDTCCBWgmDltW-mZeEuRfktvLVYvY2TU5S-eQ_etgObXvL5fbVx8RxWKd56vdmlRhzZVAZsNpkiBkcJssDCy_HjOcTYN9W1EJpMH9VxL2Uemai6O53mW9-8ePx0Lldti-ygHBH0d2zdNO7lUiXcqeFhwRgAtzLfLy9okUptwW8t0KJTHyuyuk1IPnqXQEeYD7GdL8IdiFbjexoLPDx7GfNyMhcJY=","summary":[]},{"id":"ws_00e767404995b9950068e647f909248191bfe8d05eeed67645","type":"web_search_call","status":"completed","action":{"type":"search","query":"OpenTelemetry FastAPI instrumentation capture request and response body"}},{"id":"rs_00e767404995b9950068e647fa69e48191b6f5385a856b2948","type":"reasoning","encrypted_content":"gAAAAABo5kg_Sp1yCm4uugfmhl_J3A7rbLewjS_TZUss6txJPcX4gObsnuMJ4yy85eTHxOopWB4VbGd7XTRIKmgnESjpTO-rmeur1CBN5YRjHmmifJcFAmVF7DrEfnNLzKd7BY492iflfRV9VPlV9J3zYI5ocBMvyM_MFhp8gCHEVfes_sqa28XGrGmYB57irWufiaIJsqwlZ1VUlIxBKRlO8-peh_FJlU7Jg6BeFbOI7dj7POCyxR-D2gHwRLlZegPSLoeFfWHObBOoxW-PFJTqA9awnvv3GGptXNCtaN8jBJlpzYakgUrWqS3Y-5-R3GUGTzGCRqAdew9WlUnCNf7kLKaVLw3VDdY9VMVP580MXMdlxqfNsL9xlieaZaOWLwR7VftD2aLFCuzjPFkoUOt0x6reM7dxrWfx3C8oIT6E046ispJAZk8pqxgwSaYDAfvsVBG98UgZYJICK-oue0B0IjPpwMOv92pkmXhlwdoE7y4G714HtA3V5JzPMD9UP2Rn3HGl541944qZQQjYquTk6F0wEZWKwYZJOAH66-vxG0x_34W7Tr74M7-2_iH4TUIImXEPKB0p9X_M50WvwSFkETEoTf44bOi7EWEBwxmt4RlvGPj-nBN4G3i4Csr9t54y3qcX6NTJu9dDYfBPZnmCraPQvYBvrYdHjEcESZ4jWR2h2A-iFzzr5cBKB6f1yldsxCqx6Kuny6MtfyNZvAKCWPEB9cHhggoEMzQm7lrsnKCp2sLvS484F3S37qy5XMbcGQeFzCdQVlz952510N5IF8gtIpzMFHd9qUHtFo_DqKj-zxK7ioOs2J0PxWFQC7bz4Kqe85fCakhwLUBrJ2UiMtFqB-ENF8MwvIQOk72LRkjnLwQJM8IS_th0Z0HHXqimo1ckS9LcH_CrMlolcRhQPHVFJzNyvhaKqG54fq5YR3CHoExq4AF8zYbepmhN7xotDHhQ_aHzgkSw-0cy10pL02VaE7_olxKq84HBbySvMnG3LXSJnhM0aMxHOM6Ezq_HiAYWZ9ovXyL0eyTXZUAgx-Mj0FrUaZKlHsyGEb0hkFTk_4iV9psCOvjg_y5T0Cw6iUcoQ61d","summary":[]},{"id":"ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c","type":"web_search_call","status":"completed","action":{"type":"search","query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY 
Python"}},{"id":"rs_00e767404995b9950068e647fd656081919385a27bd1162fcd","type":"reasoning","encrypted_content":"gAAAAABo5kg_kN_dYVYqoCoqiKAqcYBoZoRVg-POK9d8NoGhEBo5xGroENXeq0yu8Je7BaUvZKN_jCgsMmCno-wzFB-kn199MjhmKnaSid3JuqurWRD1SNCene9GQAL3mcGJjr10Vu16WdbVEte_23siMlMZiOp5EQ08u30pkujK1UVoKCNkCvG6xYNVPN_XYgXGr4jABNFCiHtaxpl0FG_6XcVqeWFIRlFC8wVBmyAMQmfvshmgnGlJa6q77ktaYZQeOWtIfPrNrJX1NAYIWzceSOhC2P3kuxEEThdT7nJqxV-xMSiwvOr_GxTrVT2gZVcEJPT14iMcHHLdVGT7QMRp3sMCBZFeIo_3qHCGTV_IRikEH5JIrJ6QKA--FgEaNMHBqyHNLG-O9QW9h2HFO1hvioa2em_ew42INDVQ5GY-CLG6e7qepgoA0sstJuRrD8t9k1bhH7w8tXqDCK77VHSOzxHKapSreYSKDnqXq6qNaT14bxYh639uVeT01QixfDmbZyim31DeBkW6DoI-49mwiGasuBwhWJMDThIwY9R6azSZgGH3-7hnGeGMTcbbIFPXu6XNe7QeKm03xJM-bJO9PANfBVGpr6OLyGjhPPDhFVJPhRwDtLMEEtCtcH0oYXmX1-3duIzq8Fe0TxW6NzcNfa9QC6TnWrp9EriBuweoLfV2O3PrYfjBsqR8-W38tlnvlr1doHJpvnQau6DbU8Cdqi8YcQ9iZc-YB1WCzgiUz14P7uDHDf3pKeW2khRNx2QNInMlwS4Co6ANeDybA4QYEbIzOoWOao3f_YflkQNy9tkQxX-4pscz69MTyOlX47Qu1oqinf5jIBtNM2O4A2h0eFH_bamY3yMup-v8wYDhAcavca-3O2ADRxbNhDYfe1D1jbomBig5OExb0zlsZ-qNWUGZrvgB3Dg7r09I88QXODVpspr5sD1b0PqfFFp1ESiGLLW0P39bcpDKT2XO8VYbv67_VAdlJ-RiInCk1BqbISO9WJOEKej6jUtCkIObMJmWTZ9d-KbsbOlVv5bFyIjVbp09tmxMBrpu71JvTaVfnQn_6Dc-BGdjgewkO589WXYaB55YiuNCkmsCfbfp-sam3DMYG2oN0drVQPUoShjEA66DSaCNoQqRNBIUKqCVUIIpuTQ1jUuw0eVuL0GOFEVBYjtVV9Q_pGTe484ZwSuvOXc0s7fM4f4IcoYUKjqlubcQU4LcBsbN","summary":[]},{"id":"ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba","type":"web_search_call","status":"completed","action":{"type":"search","query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY opentelemetry python"}},{"id":"rs_00e767404995b9950068e648022d288191a6acb6cff99dafba","type":"reasoning","encrypted_content":"gAAAAABo5kg_k54K0s32OZAU2_Yl-wVjWQMqgM72qY4zVDgmF55Vpagp213AcsdyUqd4C-N9QLt26OyQaOQxDd7fEv1cIpTslnMQUPIwtCUeAHKl5o0dqFphqQqDI7UtLErXrq1mfs-V1jMeXAF8CJZNZpz4KxcnrjAE_j2fHZ4TOq8CuvaByOCvmK_k5fkrpBhCB1BxDJUZIoHFuHuDXG_GejaiMPmYVepcc-X5MqUTeWdK9X7BzMA_Cqg6bqXoZNnXr_s9UCcYR5WW48B1xCfQHF4ZQzIGby23Xj2txIIPa9hT35rzP1LE89xGh00vlOshEwG7GlIa4BOSy7iymhCRGdX4tGCLOuFJaw4wRdBXyTthFxB6DhkHUEzYlp9a1MVqrJvruFUa5pqG_X8FQzF7B_EGk9eiR8uct7e1wcrXpSpEqB_0rwVTg7mtKLj9KR7ytU-D2-fUyQZ1I_yUhjPxozaN4KtcnvJgrwvR7kndNzxYChY0mQ7wznp19NhzHX08_iVjTGyCSdU6IfKkfWahE6_mjqrBNDKQd4Z0B4eyXz6ZkBF4UrcjSfWsVJS3I9kvJ1TOAz-26SAHuWefvFEXMAQpen3UZqwS44z66CR8dFrmmkYS5EP36KhNRWGmGxFcWe2iM2LS6u6QTnir66SHTgugczKfPHYW3-GtYskI1GF2Gmaf4Xzrfa_A3hmxQTYOVBAR4fKfDylPXAuQZ8fdxlBsg3rSKKId8xLp7W6lVyN0qWZF1lROLQKnoSjZGL2om90MpmQsPPYnerhX5HMDQOucEIsz0PlpQEj7EsHjA6b4BJRDlqx1T5dN-GqmyqQLjA7CCo7oIyh6MD_SJvtbRSKMwVEkLQM-UOO8L6o1R2y4q0t_TRze96BKSP_Lz6B9XECg-B7AkAWUaKizRXV7m4yzodu83YdEx7Sc8KMQ6PjNd8xVY-DEzMWGIw1uGF0XDSNyT5YT0qIq2MfaC618LBqRtJXMAdVabKoqvFlrhyUrfCmCQgCXGyQSPPyc8zQ0gpaw7ZTuAGEJmPNLwa-WDPBT14UIs5uaSu4VZ9-fgBKbANgGcSpPdUGwk4lHwGkYzY4NelS4MfsojubmqgKSSszpEuoKTD8bzbdrSEesszfenXL9EfUJIKM93M6YWvKxivwZCFHVJmuP2C1iQae6ypXtXCY3oaUjdN6PI5_6r-Hqmx_9SDArQN_MrkiB-jjohXkKJF34yJqyxlSbP0bisEN0YPap_zMITgiaf6AVlTk1r-PqI7H8l997OvnR89F4C-7WbfZ53JorfmpS3mV-Jmx3wUW7qyZKohRVmIGFtCxIXVSLmj25plxch-Qdw6pQmgPQYyuch2gzziGj5j5Al4N3jV9BCd2-IiRj49v5D05taD82Zf8=","summary":[]},{"id":"ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2","type":"web_search_call","status":"completed","action":{"type":"search","query":"site:github.com open-telemetry/opentelemetry-python-contrib 
OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY"}},{"id":"rs_00e767404995b9950068e648060b088191974c790f06b8ea8e","type":"reasoning","encrypted_content":"gAAAAABo5kg_3PnkkTC-1Vl-yPxw05UstnGmJBji8GLZs-w_dNuKjCXDNbYPGdLLKdiDxCVtW02Uqczoes6bIFeXiYtO1FV9k0nUcDvggFYPMOaVb4XfKzzJhdEdLyKEUnqzGIXVflvI7sUJ7JNxsgR9H3ywKBbs5AjoPt3HpLEZi1cLRZvbKvceiS44Y2lUhKldl6PdszZmp_LF00K0Bkcbu3UHnCq1lT5MgGboSE1dRJZdpWnUzSG-wcSset2XUkmFAT_I3JhGfYcLXrUN7YxCB2JrtyiExIRBRyePqZDLTmE9IXOv-FMvzIUt0sBTb1KbUPLik64oVIjf2w4wa3H9MohtYilVeavn7ZA-rhE8nobVwunG86yJM-MrkyDuXW5BWiDgxxbz9srIsWrIV_oJMjAxdqijVKhDTQXfbt1xb_gbdfHFBRZZ9OtynF-JoMkwgH87V-xB5a-z1xoa-qWLjgWC8sNgX1QZ54wygIIbJbeKksR06BPMejyLI31ZuiyaVb54mIarpiAuUsBMNALQ2PTRYkJHGtt5O3ZZVD6qZpvFF5oTUKNpu4fYZbiWsXo6vbyBnwv09B50mOajYkQKwBl0HRygSJGA29MePrjSX-gd_TW0vGSFcL_7W2WEDL_o1HJI_1Up5Vj1fhSIIctk3OdEsQ1_G6Z7-xR0NEMY5RHvHc1S6i0jrnhRhz_A7luMP_gpIxAN373q6RkPo0bFbNzmoNL3k3HQ9UDlziz_r-0FV2tDdX5MyWQAaQTMpl9PoSH5yvTfTgVPsEbkSKN9RBTOXpRBGflQAWf9XbJYGV7iZA6TdJBJHY2vus5opufgYg0msZS8MvzKD7YRrPT2d14_bTXWsDT4gjESkaBiwc6razfqXlkdZgpfThntxkyCv5sj8RX08C7noaplaYEgweJhTzHSNmCqBVTALPKF0QJsdvyfar2s1z2ZYh8n0HvXFWIYBFgn_DjuHhcQFxk_Xs13zu0OnuaTXNdN7q0qcpmHjo_RR3n1DpPnNDn--9My8-ZWrEePeWO9SvOZajsqwm-aEtMuyEMPY3O4muTeNXWCP1ge_z9E27dupvGn3m-tNuBqzd09KxTmsWcp9Un1agDWANcfn0ASHXIWbiqOH9qWFNFjP3QfPPO3ly6zxkY4Vhv8vOlAveAgW_nHS3NgQIU_W9fHMynwh5fSKamiJfGuwK3z8pIR0g_ZMyUtUBopgSnN_0DtVwvm0wYcF8-Qrm1W3jpuMGFPMjyBQVKBpO9cuV85RRJtBr7iIAj0OfxCB-UDagn_UjRYEcTOkmYiu1REHSD3aTTNfn-7QAz2oe2kPq_JSAYGfdANjndM6RSqdJu21MVt1nlb5J7OTIvHWYht56Mk_taCzdgS1jQRDA3wC3EEqfrUJMc-UfCY3cnIoDOl5F-LcD5D_b1rhq3UkniCWVMW5lxlc-6rnJyVSlMLIv-HjeRi8IbBb8DZLNLlVvQnVfx-9egN8rZ_PjlLYfUzDefTAWUdQFCIL_1U2d2XhmkUOs7WR2Feq1Tfh9kpw3_vb2tbjF4l2GNwdOZUjtSASL9fsvgR2AL_qRPcPeTo2_ZrxAcQhQsrjbeo27wQbYLWmbKTWpdLEkXYrSBnr1cCnU60tZ_VRs8Fek_pPLX-kfVMfUclEFGcT050ThUm84ziB-XeFLka5e1zWrRg6tbMFrGFy1yqxBZa7XZEIWEIYWHooxi7UVFiyipqFAiQMevPcJwsgIMM50YLL1yl-2VPNO3e0qbVFTvptvQY69sLx-jniWu86sBN6oCEQBXAj58nWDvhhkTN4BUJbeoXP5Kbjq3ETs-GgB0E6Sbwf-som13glZ_qchn2Z4Zv_unuKnnAfiDmbSXKkIoSPFQC1aayI4qVMjaUeRyvC54niGxn1KtrKoPTDLAJ3rW8slBo1_-txy8dRsjdDzpGKd0fuubjWmdikXVd2gtE7KAb5awascJs1y0VaP7kJL1MAeTyl9_tC6OZBltSCAG1OrrqROLUvrtFV16cqsrzldHrI78S7MK68TJ0-h_HLI54B1j3rjsC_IUk5bACpemwwQtANbN1ARXLnzk_mA3C9tuowN6mGV7rRyWxJKD0sVlSrM9v7HqgWgBdiAWzmRVYRDoQ3KHRSNPQNDTSTrpw9MM90rS-DZUumTQDQ-SOq5kdxTVem-G4w42hhIb6KkSsW65kkcZkkqpgon7OJkf3MAqA-MhkDkYsWRtY-W202jIKXmOxcboCqPTnPI9q-YCUBQb-nYgqBNjYH6LBGs7_p7PAgHLFLV57j4Qh7_zMBVX2Q12TN4wgLFxp1nk0Zg-iwBR-f-0An32ujBDvSofYwh7TgZwOUaTRIGRvJljZ9yD4rUg1Hd33ULH2--zvjmySiQLGYFDU-cbVzi1SVZCHJONxB2VgcINcYx59UZROua4GUQGC4PKkVZHFYNmfFEBNX2TU4PcpDKH-c6f3ZQpbHIuTv9CkskxdLDU=","summary":[]},{"id":"ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911","type":"web_search_call","status":"completed","action":{"type":"search"}},{"id":"rs_00e767404995b9950068e6480bbd348191b11aa4762de66297","type":"reasoning","encrypted_content":"gAAAAABo5kg_y4PU_steG8-2WnHp2BjMRNrLizrgTKIRAvGwaOQTvzCFC9x2r_8uPBu7VhWXYk7WSgzy4toG8PUYNjii_eppkdKJbKTxugRIKqHQNlcUqqoV_BUETVH4_gMfydnqWM5xOt94CQTWHHkTYqF09RtfMXORdqeTHMyAP9WjWPGjTmj-NHgOUfuwl9PlkusjPhdvN0_-nJpb-hUVm6UCJ03MnE3aber5Gw3nM6CqjQ--pgaR2WJSohyWikLB60rOQeh55XURNRI6Pt3MQHK9Cmo4Dm7botBWXCdlTS5Zi0-AChluqoBitnS2ScrksTL5RoSU6n3Oje6ySOg3aYVtIjfPtERUk466bqiZolGFoiTyPLn8hvvivB7a_J-DtZGFD_f44XzNLlFbmmM1YrW0fgJQAmXoIrWgJ49e6yTIlJNPVfnU3J3qMc-sJSpN9HOwYU9hvSGoNDjiZ2oy5j6eas8ED97kxbAmLn4j-ewdCG_jflXP8YcAsjB9RHi9ovwLKqJ76sT8rw-E5UlDBa2N0sPTAhY9tMFAlHHygc8lSCRxcOMQUmVtnX9b9LGe6gCwNWoPdbFSHdzDPSHUkyIUWgaPLsYUSmNMP2b8dM5iBPHs5bLyFG2IwT9QjdUu4u9OPCSyEy9Lpgy-pktWGyL-4
m6VJIFvHe2HvMH1rgHcTkiNU8tkPzdT_BySHMqOgFzYeGf-x9ulW2TBLNCpkZmLloeQU6voMRlEYFnJ3g4SWO07Bc3JP2o1WV5sMmJozlO3KHk8h1Y4CpXRYyo4gYzTWJhfxa65Tk7Xth5JbFtW2F4Ufk-FkSi0zsLLhvPqQyky56x1RQd-kCmIPzNFzb3_2kXEySRNRGCyo3G9MzDJUU4ThYrXfXoikuW1Rj-GpOzAU6NTMslEwTXbHc8MOU5fNW7gQ48D20-3MXjobW3_cPZLjOAofWGEyZN5J0_jsEWM8XpRhEXHPcfMbTh-Az-pL8fCvNv3dH-fsj_iC1tTd9f2QtenznAlkMJK1qZoz4z2674zdbltWEZd9iu7Dcbc9br7Z9t9XPuy1XzBKGm4zDeVAnFgOh-2vHBT32BIHXN3qWfP3uWpA5dPBvw_Pr4hLXE6A03_4urXN_bUFEr6P94zN90Rru8J_sOP0x8_HOjkXMCYX4jmEo8MtVqjQRb7uqsbb1Qko8bZdDHUCQtgjnIokngyTjCzxskxUCxA5Y0if1hTctbDbC8KReXtLTlyaEVUAAlbrhwp1Ir20cab1H_DGNxjAz_o4flyHPm0DSBINUjNg1tj0sVPsqUKpwPGl8OdV9XLSjusKxrnUhPf48t25DcfXXo_CX2sZ1n3T8rlLuU5sDQUW9lNpgtMRS3oRPx8lZNXTrIL7SYy0y2CqVFCQfv6iskVYhnL98cli1sGCRt7ZdCU28sW1RCdvyq9AL4N65qPFNP5RzZaf5sO7U9CJSYwNjUbzvPY7YXIvpSvBSKGOqE1twPioyF-c7ZkkU4lPB5k0R8M7wTb7IwxGdX1NiS5FPhrqe7h58gEUIKg4oA09mHr6mZuU-rH6_3CbVIQdedfQPwgArHiN8vTBR73EfrywzPkvlXKxko_43HiQwXXj9ZaTSZKnfkU3WzV-5xjbjIOl3NrjjqFuTitkEvAZ3M4fyW9VQlr966JMKO16GA3te4jQ6yEDlgq0Z7yduXEZ0aflcZkYZYL9M1a5hGqzkb0lPvDPGW8aYjQ4uthcEre","summary":[]},{"id":"ws_00e767404995b9950068e6480e11208191834104e1aaab1148","type":"web_search_call","status":"completed","action":{"type":"search"}},{"id":"rs_00e767404995b9950068e6480f16f08191beaad2936e3d3195","type":"reasoning","encrypted_content":"gAAAAABo5kg_BfyVIs2KCOfcaV48FiJ8R8MAvpEuclA1XLwVGsC8GxGSbNacB9jncfhfvQPgMcEI8_sFCrZheuzRPpAtorOTqu76pvawISbRKLoBs8qzp-IcpCHT3WGamBqBzr1Wq-Ykp6StSreWlzcFkG-WofGPzH_bruRxEFzgfbpfuumlCU4YX6s6mITY0m8JCHX08FE4kSSH3xiUXXyiLC3czwFEAUqOkKhFJ0555hEpSs8sgUMbl7J60R4VtN69ToTsMiPoLhLV3kSyI2KLvUN5Q42iyHobn7QVgi0p76MRJEU4bXyqvVCpDTy_b_HFDRJCrIG1lyvasm-ZbwmL5bUJ-ZchSytzRtMAJF5lh9rt58jihEvi0eaz7DJ6D0bkGpMln5HF1WjIFc-oYmJaAXJov9tPayrTcE3TnlL8VV8Z3qiKXtvbiRBhiaLjJavpxpduN497ijtOnz9QYWLR9RWonLS9TKOkBqm-gwsgWPKpMwfOO4nlUG0OCmlPFXoyVOle5pSkbd0-fDMCkYVLlXzC0-Eth09l5tJemsJNYrmDqfwh44nk3j5HX3pEK1k_aFetF8j3Fgwy7DhQp76MMKZH57k9YBwIe7-vBP9x4KJQpxRvakony1hLS4N0bJakZjaij3A90pOiOgHl4VnguUYyyhPcY4TLPQvpPmCDaGWBbt9RQKriIe2WLD8gJ4xU0pOSN433lPYXYTHH_gAfavfsgrB-gK1rOeXuEvc3nWPOAAduI_MHx-ciPpCuyDeUEqaoDQmyIEcAaZg47SjWmVcb34c-tWfee0uNkLQQSNSLgiFbTerfHmLFf_UjI3LMWCMZrJoPwCZxceaoc0CWx-y1w0T93n9a_wc3tQDtdMGXrFR_5Nl3c4NiMVYj_Lf0wSawgrtiPET7_DJR6SLsKU7DgIpSyJqgi_JaiXtnN54KgtalKtFmdp_zIaBaNBKySj8l8P1Op_PP5Eo-adzdVEXcEH7ewPLFVr57PQHJRySftEox-ab65I4SFE-8eMQhbVp_WGcpMUXUxzBdSF4Id4bCGRfqxUH7a-Wx4ISD2XMfo0zTG-XrC1lAGfId7A-2Sn6th4ycMKNHb3p15dOmaLL1Zkd46VQJ6t0xPZOwB6pKS7rhd3axblSK0A2ijuA5OWstrRpXeu1km3IqRgsqOsxZ4nMLL3DLsD6-5W4wOB8Qt4FSuc30s_tccIeuEr9z70dOolipeKC2axYGKjMpc_TNDJnwBhGuh7GpipnqS6OWZMKmpXOoY7yvkC7y6U_XFovpWa5SHW2MDKf-qOuHxI7djELwGVS419hbHkRku68dqCg1o83CR__k3IpU15Gf4PcXwTU8vhbLAFZ6_eDCF2KZw7PxCNWhH5HnmZ9TwNO9DaBbl3f0VtGaqu6l2DRMT3RVD9BECOPNnctCHzozfyrIpTmMPFZyT8daW5HM2rChrFsz-cpknuj4nfCc65_61SH9fUMsMP0kk6NF9xcRmKDAeo-twGN2rAsWj_06K2QXJzLcXm1JqvNlo1diB1Z5bFnB1FNY8EtbHS7yDKDWVhSGXBz7bfVzQPynTJU0807yUE55bF0CbeFZyendVDLIBS9Zf5_q1JmJNTfMhQHQkdaJqbjrqSi-DFs0g0Z_xpcW8UCfukw=","summary":[]},{"id":"ws_00e767404995b9950068e648118bf88191aa7f804637c45b32","type":"web_search_call","status":"completed","action":{"type":"search","query":"OTEL_PYTHON_LOG_CORRELATION environment 
variable"}},{"id":"rs_00e767404995b9950068e648130f0481918dc71103fbd6a486","type":"reasoning","encrypted_content":"gAAAAABo5kg_rOCWwVycYlebbwzyQ0iLP4kA_eRibpv5pLFDHOfFeX-37mPc_nw3EG1bdco2ABtem4faffR8smLbG6uLSdRdWRo6V3VaVYVMZT7kN-knJLrSgTMPuqg1syrk5MW9GnQs3StCbDINGIjpFpCcrkT_wE3JhTUrFLPojMHEvSK24JvcYMKQr7t-zfS9eLitx5edQOFlULxpofOwAaTbIoGTu2dOA0BZL8LjXo2-7janXWcpIZQij3YQh4IIzvBdRHv0fh9LPlxWgr2QKdlcqA8imrBSc5EeBYWcj9uqhMtcItqGmrqyqYElBEFkouFDFTgwBvWh15Lo4oKALDQWATVYIBbx9VAHfMy2W8HVYz3B5hcVWSltXGSi63QGEdrsxqgVwntUKUhCH0r8Mv9MHY17sWquro1Ygv-hZiN-4F3SJ1E-ftaCtJ0OGE4uDV5kzYoJOW30jsym-A-eG_kj6SVV1qrmeiFRlVbcwSK8pWxux7bymMUjgsnMvEc8Ty1Ar3kVsosUG8iGqHBzwl_m5oOl1FCUjOLwV7PDMhFDKX8TFpD2OA913b9q1--RY_UfXiBvV29KZAzqQJpnP_fTqsteRujS3ocdwXa2yxB3pUnXXJnzf-rXAm3oAN3dYKXnXRj_EklNdXCPsdhGgnHevnBl3xK-YTLuMuRyTFUmCkc0cDYRic7ke9N_nsBcNHjY5JDMi_R2GVOhMlmu9BUwPqcOlzlkU31jadZIlfNTr_P-B8mxEdPhW0z4hS_vsHaay-fgNobzX0gn5KL2tF6rOEiajfPpkRiynJFd4JxWEaKLqbsoWTvqH3dI4_wLCQMB_cShgkdMUylj6q6HUcu5xjsrpTLld4oOxRVxYIky6lhT4B2AOcwB3LqJeMO9LkyfagfZZyc96obybLe4uVNtMwCoxBkI0IpGoJJKsckg0FXCDBSWcLMmyoGkpUXI4GD_gHih6wj51nD9MsBn-RE1MbaL0wQ3vyVBl_5P8qVO4l3OzK8CrtyIohzDrNKWtLBSFuXlaYgtLq9joQzULOhD6qWym-dyjXpB5bWy6ei9Bp8ao8Mxwn4EPmYwsTIRQYhBN3Ys5bImolsz0NcRYOBequG2YUNFgO_6S3tuzoWeKH45ALb1UWJY_a2_8z3GT_kygDm-4v71QS03TBQFIaTYaQqQXZnNwZ9FCWNfSyIKfFbtWY_dSsKePozwy6baq7pQRc6DAnzlNpMRGyDqCq-KufK8Jvg3Ox_CrWAdKH110hAke7BckIR3nd_QaKRvBllGDHNnyFUvAmwW5mesEb13eaxAhvDIt9DuUmQp1bWWOhKj2ApdhXfVpp5wWUUvCzVSVHrN6auqA7tf_kHXmWcmDiwkNcRtEwug5HngUtdv9qqzDbamIxPwu8djSTFmz7v03DyWC4a1P9NBhD75nQ4lVnVw4wBT6s_zQec1RlkXPLxf-75w-oQ-LDpog-bdid2IRlsXN9xQNm2j7G37AZaCx3pTnpVT5XJ_jh3V5QALDZDuPWI7U_8rI-MM6hkC3nhF2f57cCoVVWSh97IJsiV5pFZaq9iYCQpR67N2Q0TT8bDoHS2_13ihrRyjMsgzhcl3W7wgY-zrEb8r9bA1zU4aclJZ4tEQn3_2QjSEKKSVRySg6MsStIt6EC7MS2SKzFbQhM6oqWPhcMhyLUHcVQT_ZUzOb-UOPIApBdGTipEXu9zTQBdruwvZ_nSjWX8kLsG7iTLol3GDXnsCngYLX-rcZc0-WpIT9lsxEx76Ha9GHk4vVaxPoSz9erAP2J1Ll_C0r3vR1Wg3mK-0hPWPGCfYp490f73N7ZujReQTRtnGFWTPT4R2B9CZioI5o9MmpJwsg5TNZ0Nt66KPh3jSl6pKM090Orft20xo8j-TKygOOTSoO_UD4Hk1TBm9EDeD457-zIVCxGXwAtLvmNcBUBs8UpTOyxAeWc35GxP3ceUmGPPjqL1KLNFzrn8e8ujhfRlwwTXOMJMMgdt1tJO3OC8irJLeD2bjuw75TXaI8RiWLKgei902UH1D6SDYE8SgKEQHeD4eCbyptrI7jUVFLnTGn5pzev1Dg-Yaoil4HcqhSy4k5QNQwLg1oHc5XJLh_Y_z6OvHswzLNKBsZhivWlcG45yosnLajunXgoPIy-A9gylwkZkZ8dcc0czWdl3nNo-P7IaPtt1B2ExxCYzTIspFTcTD9TCzytwE4e_8YepJreiKsU2NcQFlPqYFKlr22GN-Z0BhY8oH5-LWgOCBiBhuKFZoXZaFPOqK8657oV69odXLa-5xTmy8mWdHvYA2Td2D0X_awYUnOhqOdrhalS9djCyd7UREVzwDcgwgpjAW9LfmY8182atvkhwaJ0deTWmX-iR1r0FB-iGTa-BgLkjnzcPiW_v9qCD6k7ch2Ng7-aBl-4mueNp6x63_wBpab75V0tKwdjcQA-WBsLULCl3Oj2i9q5yq5kaqD6Q78n4vvKPctwKHmaH-aPhEQn0GS200dkkxzRKHybnB_ZdJumQGg-5RS6cf1p7VTW6LAVUupoMiZ6mYhs12siSZARYXzadWlxq1THqw7p7BsqUWmXNOrJD4h2fUz5NIUeXHOnYW7Fr1AtlZyj0BAru8PwJ1zfAw2vECVRQZEAUoRG11fnaS2s2NcLLuuAJdfbQwytgBMI-d9FvegcAwZsDHa-b2kCB_2bCSrCPUzDwL9ObWUkYTd2A_F1KDq8rxVd7lvx_ZW0KGt57IZJVHSzOFsXm8khGrBxeyB8IPkNIFtNmBqCTu1mHiBps4lihvvbsIecEZdKynrlslSN3sNW8u3sCpcpekgDt171H6LSXpsHlnuNAqGnuPMdQSSLByLHo-PnzQiHXU1nlqSpNglHd9ysh3UCUZDJsV6qDPZglrbAnOtQeRzfsl-0OLjAKqclB8UxUysLKR8eZNepNBUpidprDrv-SVTGzCBqOrTad2cmP1qn-YAEdbBhMUJpfKquMx3XhXW8BZKAtP_iXxD689HhXM3zEv2Hmzw9TEZLkn059eo7ADsd8DzClpqDDlXCyfzu5L8KoCVNgAi1b4MmSToj1EegOvPQLDVttHZ0siV-nOBmuAFFti_2uKgf-hHWABI6lhqCqEAM_VHw7mITsSPhg2ewVxKDEBKNPIRlkA1j8aTQ11TqsrxuK642DxwV5lOvC0729OZghzAewTb64ZiZTa2lQqoPOgF7eok2pK8x1ra1A3w-tD5s58sSoWN7GrMFHK9Q74OTrCNWpeQP3CFNtJ-BWIdgfNc8BGSbgB_DugTFriucIuUyzpFX8CPDT8P9SZmENpy_bQeOES6AtJ8RdGX6zCiaUQYLUok8OTfzIc_2etib4LXY5ywyZLNvm03nfRtL1WolSW-HaILqg-AhPfJIivCo9pWyN-aMQKbeviOOwDLdm905eueShCFheT6Se2_
jvzZiNIbSd_BUDvarvRF5HM-XEL39HUthreUUSAa4vnIYgxF7Vk3wXiwyYv8u81Vli2BrFqxzJbuqNT-PVuf5ZHsoGRMKSve7bpr1Rpoy6TJ_a8c26FoxtcmR6cMHQszUR7WxA2nfEGWs0--929wVIqbp61saOgIDs0O39FZRtPZ5aBWDazf1zzlAczDLY6Dtehp65rMRTaFELgvDLcwFltYb7fZGYMHUq8vFWUnDHJevAGCviquy5JUhqThQsci_wEAOIP-OohY9Z_j0hqRCN7hsKz99l96qEiNtpJ85J9kJ2AGoc6UaWJDUPIS6uOjdNhtS4NyjChCkYPERzzrIbozDTMhWRA_PziOWAaNTNgVF22YQqMA6SIw_Otpq5ezcbS_SgbexeyrJ-warYIwRA4kX1T40TiSdC31cu8dHIVDnkx_fHTLuTNThtQUjWq6U2W9xOMEcZiJn63u23KqUimrKIcYjNxKdlujM-BvDJmrYkpYFV_zowZZ1RPVR2jRFZTBMB-d-taSCIgToBzupM9utIVUdEEc5xP0yqflftaHhnKMnVIRCGHwaeLGeKmJtIjtYFk7ZOzsas-4NHSeBUfw4Fz7wQRlhogfADjWeTNqm8512Z3nFPDS-5uzbARKfhzCZHNoxFKRvuFKApBBTQ-V-OI4sUKeW9wagVoRs5Smsc-x6EU4LBrTLStFPEiMmxvTud0LGZrQypSUrZT_sWkccUNrF2_8D7bgEhUzOYHqmTKyJQhOWXlmKvhioX5AI3tr_cbvRJSPSAx6KfoIFKKaGzvcSnw6iOvNd9i8YmIVWdFC-8GjMNE7k6D7XZel9E7ks50Daf1j2vuMAB12UEInT3LZPv1-IbkimxWwmhKB9vdJSTbNeuy6EN0KoPW1AGnF09G2DXQip7EOqEAuS_Blk4KKvQhOJyfD7QtQ7_cPaZOeMykF9OY-luiTGq4a_MGA220v-sOjn3HJPV7iRuyH4EBn67ZO7HUPTHWociFOjHojUU2RZL2UtKn4uEiWcPBKPDeLK5uED9s-OzKDiYXFj5TylR2yGc5o1ye3VoxrewdN5t7QXcxVnuUq14JeMADb2Dd5Le1nKrbPEo10pJ5KC-Z8lBQ2-EnN68so4YebxODiRwy4FPkKIAeLJd9Wuhihbu8sKujVY5L71BGIgmJ7gaXZIy5mk_55X6-IUa_qPnj9njTgd902aSuJcLwEH8OBuzsnkFIlySWvhA7SOHeXsEyqkahueiSMm7E8OGeGJ7h3NynOMRKWGFrkle2Fm37nrUkIVDUfGeXsDcNTphGrEQD9tvJkJf1CIy1LLXJPtUJc8ULAsPjgRAuXByjNeLcQrRVXPJHz8cQLi65tAtTo_l1i1Q5J4gvIuDINBBRtr2erulw1m8c4E7EtwSslAXzXxgSX0E866RP7JhGTjTqnlZaJxAujH04UvS9ImwJwJkcH7TVnoG1qTxT00I5SM6zjiGJdSzuCpHyK_92sgC_O-qSBJsMQ24eX00yKuQ-5kjbjxbpNzaOR015RUORTpY3xisYtB7FlTna-jG3CvI8TGAq4397gmdvwmw2LP8x5qVuwNbYgJL4LrBRE8F6bYNMNhZnHUJ7kbET176vBUvAntm4OamoH9J5ZsQm6J9UdYl3DgrJTRfBbDvZXMcvpvwK_LOCe7TB6PUOUt7pGiGSIXd3Cb83yHlds9uQx1YD_hT4vN4aKZceIWzLwnK5IVSzIWo2iHuhlqatpuTEKeBejxpb4xidhpLtn-ypXb1GCwCTqjbFlTweVCMcXeMMMSyRR1W2Knl_CDNWN5lvJYe5SCTWwPZ0J-aSmNj1pMtAFL2qDJRatmh9s7lMpMvcairMB5DW7CREWt2QrqPHaTUNOVY-a2TC43muI60MIiO-r8TVz2NtW5Lrj4lhkI4cRjbidyR71NgD452REDrp8Yg83b5e8IhHGvF_0Qwr_ao9d9Vj_NDHd2hST-Og-aSogpYbvVZkC835krUmYi5eTcxceldRQsSRi2NsHcesbvHZeu4STzhR0QdnYEoE7eDOBpjRLHvsJo-x1L2bIUAZUU-PeWQui9Hj_uVkJDvKhIde15VNlWLu62JcZfBCG2i0c6K-40b25Q7l2r52X4Mdb5itN6gOhg9TqJSKr8qNMmruIPUmshOzyJKpdm2rimw5pXjbee3PmYkIftFGct7fYc9WHIFLXhOBhX4RAPMU2PohNpWyiF3scOMQH7TZ-YOfejnjpF9CgppXCSy56oZwnfe8hZM13uLqbwQYBrVA1J0RThqBZ_0-m8P5V0lpnztH02TNi1QFNWGimQWVeuCYluPR0SPscyd2cH1lsINITVBXRZCN4gacGoq5APPObOVL20wrhrj0yE9POHPmNgmod6S6fxLfpqx4hRVNj9lOor-pJtR4NpSCiJJwC4tW6nVICvGq6Zu4Zl_uvxqQ3VisdxdjvWyfj1n7qfqxc-K8WiCJxUusFSscBDcA2AOgl_GbsXm6JAMqWHdV3EQVAQxhjn3K3wk2Ob1TOA8_IvzB-D_xLNMUUVfMjNTq3amyY4JX7AFrey8FtJ2YzGURJr2KDzk3QbLSNzgqE7w6BxgLf6m4W2G3d63Fg-q5GaxYZjNcFQ70WXu6M8B7CliMYUi-CHiyNe0U_wloGoxNHC__XrL-btq-4scbRiDtdZSngRoM89N0s-hGObXuKb6oVauH0QA5HODqXHYXHgYMKW_Aa26yHQqIvBdd4XQ-aKJWm27OdtdmuA8FAhXirmlTTCo4jJKMo6dn5mgpHOLya4nG98eoH0Wj9MutktX2hA9wCv0Nm-QvItws6E6oYj_Djf1ZjzFleurXwvSAbHqlARaJfnFHE9RswT3ukfXHpjh-37O6muNMnlUeIRYKy0GTScHHZf5MgRmEFo3iRiJ7ZPfsAhJoRJLHsmP2_DTchGmTE7S6VT6KAm2Onrb1LaKIGGIlI30RMs8M2r_YDOWwjXqfUd3KYlNELtpx5rv9qzahOhd7SOEWWwpf19sVU2NNK4YW_VhGyOMeufdWk5W4iwvvGbjjoFycoFGNIjMPf9e2OeTsgkaZbWVufbwjZjfuU-xQ7SVfroo8bqybAEl5evHnBDKCFiuhufPQT5Jy2dUdVMDNIFb9nLgYNYkRhTtt_gtqLyLps2d_prBDy-AnickpdnSOrCRBxNjvbDV_FRWhIjiGtd97pXqtBUejDsQwZ2ALv052F-BUeYODreLFzwKlKPD3sPDVcaoOuXZpsnDbznwqI30lpBvYo3kq3q2eKky-GWOUlUUJ0fjo5sZm_XVu6oFOksz-kwArnG7YkJzeW5ItoffFhdPf_RkNJeuNx4tzQOtYuiZ0ARxiS6Ms8AJVyXLvGr6CKMqFYdZlQd8vr6RoFYsp4jK7CC8NBcA9gOAC9aoV8ta-4Wm5PEW9wCRBizjlwMQwowsDYjRb4wgJWDECeoe45znYTAKWnM0QxbbpCETnMqpCJP6gVfDmi4xFBK8Gk7CpKfYzukEMCPXTyX-xfJVim-DbxnttxVHvpRtzjNBL_7ipIR9Ikhel
jc7NmThosHnj1SksFD2mmIee8cYTiJY3_0cd2BzF8S8y9uB1q8ZrJkfD3WfZ0JNhz4J019vU_iBKqopV5CLazwBQrahYMae1jiHAeCy3_ToDVl59gWyCEK4FraAWlldkBwYZ5-Oa6TtTYNsrR8kxAW-XkTnMU-72j-iYf1ageSE3GcOEak6RTi1o8kB00t4NCRy8OJF-tDM3D-coHGrRSXUDdTqqWNWfDwvGp-I7_2X3whAWWA5Q7lHOVJv6DAvlbJ7RMFnHHphCQlxhnLhWOzP7Ok9r4Nl9KAGO7BTi_EXjQ-DebpDeCju8KD-GiD5w6WOSLcJCDed-uS1QHozHnt0cm2ugWwQeBAJAnrNBA6yBsH6wsu-Vdk6Ix70T00h00zKNwJyqRgG7Zca27q9nI4PIYRciXI8X8F6HpzoRVg7iKJpMMPejtF3ZL5tWG9pYbUysPWSyfWpwZKxZKtV32htE_JAtHi6PnvneirgNaNSjHcmJeMjJPk_Z9z0rUtiFpWoHarXJQpyyi5W8qIeqJXWzjwua6FDRc7OMOtGRQ-slqXvuHXOnQFBX4DdJngcmQ9cZw_KzOEcZynZNVw_czjxMXKhCwQiZJou-liq2ImLQIkiDx2GCKy6EUTlp8GGoX-VF2-hQIleLmC1Fy3ZrXR_4IEmLx64vD0pAUfGO_FRyj5nlDbxIwtxghQI3Mo2bA6ePwWkUEVTVLLepeC2s5L2Rvf4KQ8ieQ5e81FWE-lZUhWGNar0GCJuRvevLpqSGfSFi9LMxvSZfA7EBwCKQJ0jrL2Z-wsfotqa0DhfOm0iJ2qxOijclgJ4H8mfJJrODkoHcJ8CMroB88KoS7PA0q58Nh2vktlRe7gdwO1CMZ-XO7ZQ7HMSiUhTspqekt2C727tyBt9_Nw-Mq1yyV8EQ1y5K4gCVTCPeCZEamditUq8a1VFCjRDAwreRWw0DMIHEYp2Kz8Qcfph9KGwDhTSvR1hOlYOmujb-kztUC20OCchX_iuE5U_dX9zo4xDq49w-c9tKORi8pfBXXb0WCf6yBDrLadazmNPFPlJd1eWnLFgateb_p0DVTIM9_JyTenqxZU1DPwjxIvO9MEXK3jjgC7tJoYOM3olvLgfy3OKv7S3O3vuGscBA3BKR0ndZ46q-x9GiEachS12NOBajlZ5kDE_7J3oCA7jXiT2O61Nz8Oxx7fZnKh30F8FIegvkwJcH_BkZFuXR3rscSkhq0jpUNPDfsnTYd54ALUXdgzOs9i16_X9RXJvcYcSjD5tODZqcDbcb1R-ce53khutjhuT8aGmm40ZN5kCjz9KwKJQSFnr85aErVEIeGYmuyvS7hnNT025zKL2pJ0Ow4EnMzbOQ4NhK5SnuXNLwwjd8GpcLRmlGEMZj_SW8GTP-hjrG1KyrchFFvEkgtfWqlfOnwRNowy7AbDOUJnIa8_9Dali2hm7huliL4Bl6m9PXB2y8tYnHDjW0PGMr8C51BL_84En0yAnvHGoohAop0T1UWaWQXG4kO9t5m0bUpmKbKdWL0ynfS7sZk4mqD1_4sR_r1G46QXYhw-GkNCPt9dmlys-v61lMYERe8b19cYKnn759INiY-rEMqKbelA6Wu00DtG7vIRSPEc_ktATgXTvibEL_OKHy_eUhmsvBUvVYihvh7j6iOSikKMsSpueDTWHdGQXiE-t_Et3fGTYsLpoB1cI_CMOBl8TCvULsCz6H90uYxHNDMryCE4UlYiP-tdXTPst302x7bgYY39qj8y9bRh5Lrj5F5r2t2YP0AHBSYtIEpQcYrvt3yoHCqVUMa-G_ugc2wP0QoPUrYGUyNMfH3qVUNCdvuHN5QDamUj9KK-_BQ5VsXMDDhYH-wx9FxIewv-FlhlGNu42uezZbZsjFIbOzH2PFzfjIcsShuyacmGGmWT9hz7QL3Kk2RokBNCumiX6jlrpkESpuGFwstBPGXOofpJGd4Vf_9o62YnoOcP0mNjs1FHeW8NMA6mwo_NAArMlmG-fLMyB0Ziwuc6XBKQtVxEjT2Z689V8lxTzjVdkTIswWEyR4kOVyC2AtFg==","summary":[]},{"id":"msg_00e767404995b9950068e6482f25e0819181582a15cdd9207f","type":"message","status":"completed","content":[{"type":"output_text","annotations":[{"type":"url_citation","end_index":946,"start_index":799,"title":"OpenTelemetry FastAPI Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html"},{"type":"url_citation","end_index":2582,"start_index":2435,"title":"OpenTelemetry FastAPI Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html"},{"type":"url_citation","end_index":2896,"start_index":2749,"title":"OpenTelemetry FastAPI Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html"},{"type":"url_citation","end_index":3518,"start_index":3353,"title":"OpenTelemetry Logging Instrumentation — OpenTelemetry Python Contrib documentation","url":"https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/logging/logging.html?utm_source=openai"}],"logprobs":[],"text":"Short answer:\n- Default FastAPI/OpenTelemetry instrumentation already records method/route/status.\n- To also include HTTP headers, set the capture-headers env vars.\n- To include request/response bodies, use the FastAPI/ASGI request/response 
hooks and add the payload to the span yourself (with redaction/size limits).\n\nHow to do it\n\n1) Enable header capture (server side)\n- Choose just the headers you need; avoid sensitive ones or sanitize them.\n\nexport OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST=\"content-type,user-agent\"\nexport OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE=\"content-type\"\nexport OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS=\"authorization,set-cookie\"\n\nThis makes headers appear on spans as http.request.header.* and http.response.header.*. ([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n\n2) Add hooks to capture request/response bodies\nNote: There isn’t a built-in Python env var to auto-capture HTTP bodies for FastAPI/ASGI. Use hooks to look at ASGI receive/send events and attach (truncated) bodies as span attributes.\n\nfrom fastapi import FastAPI\nfrom opentelemetry.trace import Span\nfrom opentelemetry.instrumentation.fastapi import FastAPIInstrumentor\n\nMAX_BYTES = 2048 # keep this small in prod\n\ndef client_request_hook(span: Span, scope: dict, message: dict):\n if span and span.is_recording() and message.get(\"type\") == \"http.request\":\n body = message.get(\"body\") or b\"\"\n if body:\n span.set_attribute(\n \"http.request.body\",\n body[:MAX_BYTES].decode(\"utf-8\", \"replace\"),\n )\n\ndef client_response_hook(span: Span, scope: dict, message: dict):\n if span and span.is_recording() and message.get(\"type\") == \"http.response.body\":\n body = message.get(\"body\") or b\"\"\n if body:\n span.set_attribute(\n \"http.response.body\",\n body[:MAX_BYTES].decode(\"utf-8\", \"replace\"),\n )\n\napp = FastAPI()\nFastAPIInstrumentor.instrument_app(\n app,\n client_request_hook=client_request_hook,\n client_response_hook=client_response_hook,\n)\n\n- The hooks receive the ASGI event dicts: http.request (with body/more_body) and http.response.body. If your bodies can be chunked, you may need to accumulate across calls when message.get(\"more_body\") is True. ([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n\n3) Be careful with PII and size\n- Always limit size and consider redaction before putting payloads on spans.\n- Use the sanitize env var above for sensitive headers. ([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n\nOptional: correlate logs with traces\n- If you also want request/response details in logs with trace IDs, enable Python log correlation:\n\nexport OTEL_PYTHON_LOG_CORRELATION=true\n\nor programmatically:\nfrom opentelemetry.instrumentation.logging import LoggingInstrumentor\nLoggingInstrumentor().instrument(set_logging_format=True)\n\nThis injects trace_id/span_id into log records so you can line up logs with the span that carries the HTTP payload attributes. 
([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/logging/logging.html?utm_source=openai))\n\nWant me to tailor the hook to only capture JSON bodies, skip binary content, or accumulate chunked bodies safely?"}],"role":"assistant"}],"parallel_tool_calls":true,"previous_response_id":null,"prompt_cache_key":null,"reasoning":{"effort":"medium","summary":null},"safety_identifier":null,"service_tier":"default","store":true,"temperature":1.0,"text":{"format":{"type":"text"},"verbosity":"medium"},"tool_choice":"auto","tools":[{"type":"web_search","filters":null,"search_context_size":"medium","user_location":{"type":"approximate","city":null,"country":"US","region":null,"timezone":null}}],"top_logprobs":0,"top_p":1.0,"truncation":"disabled","usage":{"input_tokens":33151,"input_tokens_details":{"cached_tokens":4352},"output_tokens":3367,"output_tokens_details":{"reasoning_tokens":2624},"total_tokens":36518},"user":null,"metadata":{}}} + + headers: + alt-svc: + - h3=":443"; ma=86400 + connection: + - keep-alive + content-type: + - text/event-stream; charset=utf-8 + openai-organization: + - pydantic-28gund + openai-processing-ms: + - '86' + openai-project: + - proj_dKobscVY9YJxeEaDJen54e3d + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + status: + code: 200 + message: OK +version: 1 +... diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index fcd0fea9c5..cf321274b7 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -1,6 +1,5 @@ """Tests for AG-UI implementation.""" -# pyright: reportPossiblyUnboundVariable=none from __future__ import annotations import contextlib diff --git a/tests/test_vercel_ai.py b/tests/test_vercel_ai.py new file mode 100644 index 0000000000..9ac0fdc908 --- /dev/null +++ b/tests/test_vercel_ai.py @@ -0,0 +1,820 @@ +from __future__ import annotations + +import pytest +from inline_snapshot import snapshot + +from pydantic_ai import Agent +from pydantic_ai.builtin_tools import WebSearchTool +from pydantic_ai.models.openai import OpenAIResponsesModel +from pydantic_ai.providers.openai import OpenAIProvider +from pydantic_ai.vercel_ai.request_types import ( + SubmitMessage, + TextUIPart, + ToolOutputAvailablePart, + UIMessage, +) +from pydantic_ai.vercel_ai.response_types import ( + FinishChunk, + ReasoningDeltaChunk, + ReasoningStartChunk, + TextDeltaChunk, + TextStartChunk, + ToolInputDeltaChunk, + ToolInputStartChunk, + ToolOutputAvailableChunk, +) +from pydantic_ai.vercel_ai.starlette import DoneChunk, StarletteChat + +from .conftest import IsStr + +pytestmark = [ + pytest.mark.anyio, + pytest.mark.vcr, + pytest.mark.filterwarnings( + 'ignore:`BuiltinToolCallEvent` is deprecated, look for `PartStartEvent` and `PartDeltaEvent` with `BuiltinToolCallPart` instead.:DeprecationWarning' + ), + pytest.mark.filterwarnings( + 'ignore:`BuiltinToolResultEvent` is deprecated, look for `PartStartEvent` and `PartDeltaEvent` with `BuiltinToolReturnPart` instead.:DeprecationWarning' + ), +] + + +async def test_run(allow_model_requests: None, openai_api_key: str): + model = OpenAIResponsesModel('gpt-5', provider=OpenAIProvider(api_key=openai_api_key)) + agent = Agent(model=model, builtin_tools=[WebSearchTool()]) + chat = StarletteChat(agent) + + data = SubmitMessage( + trigger='submit-message', + id='bvQXcnrJ4OA2iRKU', + messages=[ + UIMessage( + id='BeuwNtYIjJuniHbR', + role='user', + metadata=None, + parts=[ + 
TextUIPart( + type='text', + text="""Use a tool + + """, + state=None, + provider_metadata=None, + ) + ], + ), + UIMessage( + id='bylfKVeyoR901rax', + role='assistant', + metadata=None, + parts=[ + TextUIPart( + type='text', + text='''I\'d be happy to help you use a tool! However, I need more information about what you\'d like to do. I have access to tools for searching and retrieving documentation for two products: + + 1. **Pydantic AI** (pydantic-ai) - an open source agent framework library + 2. **Pydantic Logfire** (logfire) - an observability platform + + I can help you with: + - Searching the documentation for specific topics or questions + - Getting the table of contents to see what documentation is available + - Retrieving specific documentation files + + What would you like to learn about or search for? Please let me know: + - Which product you\'re interested in (Pydantic AI or Logfire) + - What specific topic, feature, or question you have + + For example, you could ask something like "How do I get started with Pydantic AI?" or "Show me the table of contents for Logfire documentation."''', + state='streaming', + provider_metadata=None, + ) + ], + ), + UIMessage( + id='MTdh4Ie641kDuIRh', + role='user', + metadata=None, + parts=[TextUIPart(type='text', text='Give me the ToCs', state=None, provider_metadata=None)], + ), + UIMessage( + id='3XlOBgFwaf7GsS4l', + role='assistant', + metadata=None, + parts=[ + TextUIPart( + type='text', + text="I'll get the table of contents for both repositories.", + state='streaming', + provider_metadata=None, + ), + ToolOutputAvailablePart( + type='tool-get_table_of_contents', + tool_call_id='toolu_01XX3rjFfG77h3KCbVHoYJMQ', + state='output-available', + input={'repo': 'pydantic-ai'}, + output="[Scrubbed due to 'API Key']", + provider_executed=None, + call_provider_metadata=None, + preliminary=None, + ), + ToolOutputAvailablePart( + type='tool-get_table_of_contents', + tool_call_id='toolu_01W2yGpGQcMx7pXV2zZ4sz9g', + state='output-available', + input={'repo': 'logfire'}, + output="[Scrubbed due to 'Auth']", + provider_executed=None, + call_provider_metadata=None, + preliminary=None, + ), + TextUIPart( + type='text', + text="""Here are the Table of Contents for both repositories:... 
Both products are designed to work together - Pydantic AI for building AI agents and Logfire for observing and monitoring them in production.""", + state='streaming', + provider_metadata=None, + ), + ], + ), + UIMessage( + id='QVypsUU4swQ1Loxq', + role='user', + metadata=None, + parts=[ + TextUIPart( + type='text', + text='How do I get FastAPI instrumentation to include the HTTP request and response', + state=None, + provider_metadata=None, + ) + ], + ), + ], + ) + + events = [event async for event in chat.run(data, None)] + assert events == snapshot( + [ + ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), + ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), + ToolInputStartChunk( + tool_call_id='ws_00e767404995b9950068e647f909248191bfe8d05eeed67645', tool_name='web_search' + ), + ToolInputDeltaChunk( + tool_call_id='ws_00e767404995b9950068e647f909248191bfe8d05eeed67645', + input_text_delta='{"query":"OpenTelemetry FastAPI instrumentation capture request and response body","type":"search"}', + ), + ToolOutputAvailableChunk( + tool_call_id='ws_00e767404995b9950068e647f909248191bfe8d05eeed67645', output={'status': 'completed'} + ), + ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), + ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), + ToolInputStartChunk( + tool_call_id='ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c', tool_name='web_search' + ), + ToolInputDeltaChunk( + tool_call_id='ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c', + input_text_delta='{"query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY Python","type":"search"}', + ), + ToolOutputAvailableChunk( + tool_call_id='ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c', output={'status': 'completed'} + ), + ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), + ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), + ToolInputStartChunk( + tool_call_id='ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba', tool_name='web_search' + ), + ToolInputDeltaChunk( + tool_call_id='ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba', + input_text_delta='{"query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY opentelemetry python","type":"search"}', + ), + ToolOutputAvailableChunk( + tool_call_id='ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba', output={'status': 'completed'} + ), + ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), + ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), + ToolInputStartChunk( + tool_call_id='ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2', tool_name='web_search' + ), + ToolInputDeltaChunk( + tool_call_id='ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2', + input_text_delta='{"query":"site:github.com open-telemetry/opentelemetry-python-contrib OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY","type":"search"}', + ), + ToolOutputAvailableChunk( + tool_call_id='ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2', output={'status': 'completed'} + ), + ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), + ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), + ToolInputStartChunk( + tool_call_id='ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911', tool_name='web_search' + ), + ToolInputDeltaChunk( + tool_call_id='ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911', + input_text_delta='{"query":null,"type":"search"}', + ), + ToolOutputAvailableChunk( + tool_call_id='ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911', output={'status': 'completed'} + 
), + ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), + ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), + ToolInputStartChunk( + tool_call_id='ws_00e767404995b9950068e6480e11208191834104e1aaab1148', tool_name='web_search' + ), + ToolInputDeltaChunk( + tool_call_id='ws_00e767404995b9950068e6480e11208191834104e1aaab1148', + input_text_delta='{"query":null,"type":"search"}', + ), + ToolOutputAvailableChunk( + tool_call_id='ws_00e767404995b9950068e6480e11208191834104e1aaab1148', output={'status': 'completed'} + ), + ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), + ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), + ToolInputStartChunk( + tool_call_id='ws_00e767404995b9950068e648118bf88191aa7f804637c45b32', tool_name='web_search' + ), + ToolInputDeltaChunk( + tool_call_id='ws_00e767404995b9950068e648118bf88191aa7f804637c45b32', + input_text_delta='{"query":"OTEL_PYTHON_LOG_CORRELATION environment variable","type":"search"}', + ), + ToolOutputAvailableChunk( + tool_call_id='ws_00e767404995b9950068e648118bf88191aa7f804637c45b32', output={'status': 'completed'} + ), + ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), + ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), + TextStartChunk(id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +Short answer: +- Default\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' FastAPI/OpenTelemetry', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' instrumentation already records method', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='/route/status', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +. +- To also\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' include HTTP headers', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=', set', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' the capture-', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='headers env', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + vars. +-\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' To include request', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='/response bodies', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=', use the', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' FastAPI', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='/ASGI', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' request/response', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' hooks and add', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' the', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' payload to', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' the span yourself', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' (with red', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='action/size', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + limits). 
+ +How\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' to do it', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + + +1)\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' Enable header capture', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' (server side', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +) +- Choose\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' just the', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' headers you need; avoid', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' sensitive ones or sanitize', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + them. + +export OTEL\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='_INSTRUMENTATION_HTTP_CAPTURE', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='_HEADERS_SERVER_REQUEST="content', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='-type,user', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='-agent"\n', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='export OTEL_INSTRUMENTATION', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='_HTTP_CAPTURE_HEADERS', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='_SERVER_RESPONSE="content-type"\n', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='export OTEL_INSTRUMENTATION_HTTP', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +_CAPTURE_HEADERS_SANITIZE_FIELDS="authorization,set-cookie" + +This makes headers appear on spans as http.request.header.* and http.response.header.*. ([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html)) + +2)\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' Add hooks to capture request', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='/response bodies', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + +Note:\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=IsStr(), id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' a built-in Python', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' env', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' var to', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' auto-capture', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' HTTP bodies for Fast', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='API/AS', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='GI. Use', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' hooks to look at', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' ASGI receive', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='/send events and', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' attach (tr', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='uncated) bodies', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' as span attributes', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +. 
+ +from\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' fastapi import', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' FastAPI', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + +from opente\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='lemetry.trace', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' import Span', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + +from opente\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='lemetry.instrument', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='ation.fastapi import', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' FastAPIInstrument', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +or + +MAX\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='_BYTES = ', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='2048 ', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' # keep this', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' small in prod', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + + +def client\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='_request_hook(span', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=': Span,', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' scope: dict', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=', message:', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + dict): + \ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' if span and', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' span.is_record', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='ing() and', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' message.get("', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='type") ==', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' "http.request', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +": + body\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' = message.get', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='("body")', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' or b"', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +" + if\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk( + delta="""\ + body: + \ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' span.set_attribute', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +( + "\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='http.request.body', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +", + body\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='[:MAX_BYTES', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='].decode("', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='utf-8', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='", "replace', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +"), + ) +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk( + delta="""\ + +def client_response\ +""", + 
id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='_hook(span:', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' Span, scope', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=': dict,', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' message: dict', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +): + if\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' span and span', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='.is_recording', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='() and message', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='.get("type', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='") == "', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='http.response.body', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +": + body\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' = message.get', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='("body")', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' or b"', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +" + if\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk( + delta="""\ + body: + \ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' span.set_attribute', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +( + "\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='http.response.body', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +", + body\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='[:MAX_BYTES', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='].decode("', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='utf-8', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='", "replace', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +"), + ) +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk( + delta="""\ + +app = Fast\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk( + delta="""\ +API() +Fast\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='APIInstrumentor', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='.instrument_app(', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + + app,\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk( + delta="""\ + + client_request\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='_hook=client', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +_request_hook, + \ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' client_response_hook', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='=client_response', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +_hook, +) +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk( + delta="""\ + +- The hooks\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' receive the AS', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='GI event dict', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='s: http', id='d775971d84c848228275a25a097b6409'), + 
TextDeltaChunk(delta='.request (with', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' body/more', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='_body) and', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' http.response.body', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='. If your', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' bodies can be', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' chunked,', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' you may need', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' to accumulate across', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' calls when message', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='.get("more', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='_body") is', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' True. ', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta='([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html)', + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=')', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + + +3)\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' Be careful with', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' PII and', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + size +-\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' Always limit size', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' and consider redaction', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' before putting payloads', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + on spans. +-\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' Use the sanitize', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' env var above', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' for sensitive headers', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='. 
', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta='([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n', + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk( + delta="""\ + +Optional: correlate logs\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk( + delta="""\ + with traces +-\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' If you also want', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' request/response', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' details in logs with', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' trace IDs, enable', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' Python log correlation:\n', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + +export OTEL_P\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='YTHON_LOG_COR', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='RELATION=true', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + + +or programmatically\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk( + delta="""\ +: +from opente\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='lemetry.instrumentation', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='.logging import LoggingInstrument', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ +or +LoggingInstrument\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='or().instrument(set', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta='_logging_format=True)\n', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta="""\ + +This injects trace\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta='_id/span_id into', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' log records so you', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' can line up logs', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' with the span that', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' carries the HTTP payload', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' attributes. 
', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk( + delta='([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/logging/logging.html?utm_source=openai))\n', + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk( + delta="""\ + +Want me to tailor\ +""", + id='d775971d84c848228275a25a097b6409', + ), + TextDeltaChunk(delta=' the hook to only', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' capture JSON bodies,', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' skip binary content,', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' or accumulate chunked', id='d775971d84c848228275a25a097b6409'), + TextDeltaChunk(delta=' bodies safely?', id='d775971d84c848228275a25a097b6409'), + ToolInputStartChunk( + tool_call_id='ws_00e767404995b9950068e647f909248191bfe8d05eeed67645', tool_name='web_search' + ), + ToolInputDeltaChunk( + tool_call_id='ws_00e767404995b9950068e647f909248191bfe8d05eeed67645', + input_text_delta='{"query":"OpenTelemetry FastAPI instrumentation capture request and response body","type":"search"}', + ), + ToolOutputAvailableChunk( + tool_call_id='ws_00e767404995b9950068e647f909248191bfe8d05eeed67645', output={'status': 'completed'} + ), + ToolInputStartChunk( + tool_call_id='ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c', tool_name='web_search' + ), + ToolInputDeltaChunk( + tool_call_id='ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c', + input_text_delta='{"query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY Python","type":"search"}', + ), + ToolOutputAvailableChunk( + tool_call_id='ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c', output={'status': 'completed'} + ), + ToolInputStartChunk( + tool_call_id='ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba', tool_name='web_search' + ), + ToolInputDeltaChunk( + tool_call_id='ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba', + input_text_delta='{"query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY opentelemetry python","type":"search"}', + ), + ToolOutputAvailableChunk( + tool_call_id='ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba', output={'status': 'completed'} + ), + ToolInputStartChunk( + tool_call_id='ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2', tool_name='web_search' + ), + ToolInputDeltaChunk( + tool_call_id='ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2', + input_text_delta='{"query":"site:github.com open-telemetry/opentelemetry-python-contrib OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY","type":"search"}', + ), + ToolOutputAvailableChunk( + tool_call_id='ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2', output={'status': 'completed'} + ), + ToolInputStartChunk( + tool_call_id='ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911', tool_name='web_search' + ), + ToolInputDeltaChunk( + tool_call_id='ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911', + input_text_delta='{"query":null,"type":"search"}', + ), + ToolOutputAvailableChunk( + tool_call_id='ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911', output={'status': 'completed'} + ), + ToolInputStartChunk( + tool_call_id='ws_00e767404995b9950068e6480e11208191834104e1aaab1148', tool_name='web_search' + ), + ToolInputDeltaChunk( + tool_call_id='ws_00e767404995b9950068e6480e11208191834104e1aaab1148', + input_text_delta='{"query":null,"type":"search"}', + ), + ToolOutputAvailableChunk( + tool_call_id='ws_00e767404995b9950068e6480e11208191834104e1aaab1148', output={'status': 'completed'} 
+ ), + ToolInputStartChunk( + tool_call_id='ws_00e767404995b9950068e648118bf88191aa7f804637c45b32', tool_name='web_search' + ), + ToolInputDeltaChunk( + tool_call_id='ws_00e767404995b9950068e648118bf88191aa7f804637c45b32', + input_text_delta='{"query":"OTEL_PYTHON_LOG_CORRELATION environment variable","type":"search"}', + ), + ToolOutputAvailableChunk( + tool_call_id='ws_00e767404995b9950068e648118bf88191aa7f804637c45b32', output={'status': 'completed'} + ), + FinishChunk(), + DoneChunk(), + ] + ) From f0a03d9895e93a8d57bf48de600b3267ea781fa9 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Wed, 8 Oct 2025 16:04:20 +0000 Subject: [PATCH 04/46] Claude-assisted refactoring to unify AG-UI and Vercel AI adapters and event streams --- pydantic_ai_slim/pydantic_ai/ag_ui.py | 571 +----------------- pydantic_ai_slim/pydantic_ai/ui/__init__.py | 457 ++++++++++++++ .../pydantic_ai/ui/ag_ui/__init__.py | 12 + .../pydantic_ai/ui/ag_ui/adapter.py | 334 ++++++++++ .../pydantic_ai/ui/ag_ui/event_stream.py | 548 +++++++++++++++++ .../pydantic_ai/ui/vercel_ai/__init__.py | 94 +++ .../pydantic_ai/ui/vercel_ai/_utils.py | 26 + .../pydantic_ai/ui/vercel_ai/adapter.py | 149 +++++ .../pydantic_ai/ui/vercel_ai/event_stream.py | 235 +++++++ .../pydantic_ai/ui/vercel_ai/request_types.py | 70 +++ .../{ => ui}/vercel_ai/response_types.py | 116 ++-- .../pydantic_ai/vercel_ai/__init__.py | 0 .../pydantic_ai/vercel_ai/_utils.py | 18 - .../pydantic_ai/vercel_ai/request_types.py | 274 --------- .../pydantic_ai/vercel_ai/response_stream.py | 122 ---- .../pydantic_ai/vercel_ai/starlette.py | 85 --- tests/test_ag_ui.py | 16 +- tests/test_vercel_ai.py | 11 +- 18 files changed, 2045 insertions(+), 1093 deletions(-) create mode 100644 pydantic_ai_slim/pydantic_ai/ui/__init__.py create mode 100644 pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py create mode 100644 pydantic_ai_slim/pydantic_ai/ui/ag_ui/adapter.py create mode 100644 pydantic_ai_slim/pydantic_ai/ui/ag_ui/event_stream.py create mode 100644 pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py create mode 100644 pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_utils.py create mode 100644 pydantic_ai_slim/pydantic_ai/ui/vercel_ai/adapter.py create mode 100644 pydantic_ai_slim/pydantic_ai/ui/vercel_ai/event_stream.py create mode 100644 pydantic_ai_slim/pydantic_ai/ui/vercel_ai/request_types.py rename pydantic_ai_slim/pydantic_ai/{ => ui}/vercel_ai/response_types.py (81%) delete mode 100644 pydantic_ai_slim/pydantic_ai/vercel_ai/__init__.py delete mode 100644 pydantic_ai_slim/pydantic_ai/vercel_ai/_utils.py delete mode 100644 pydantic_ai_slim/pydantic_ai/vercel_ai/request_types.py delete mode 100644 pydantic_ai_slim/pydantic_ai/vercel_ai/response_stream.py delete mode 100644 pydantic_ai_slim/pydantic_ai/vercel_ai/starlette.py diff --git a/pydantic_ai_slim/pydantic_ai/ag_ui.py b/pydantic_ai_slim/pydantic_ai/ag_ui.py index fe0ed77951..0ae9211bbc 100644 --- a/pydantic_ai_slim/pydantic_ai/ag_ui.py +++ b/pydantic_ai_slim/pydantic_ai/ag_ui.py @@ -7,88 +7,31 @@ from __future__ import annotations import json -import uuid -from collections.abc import AsyncIterator, Awaitable, Callable, Iterable, Mapping, Sequence -from dataclasses import Field, dataclass, field, replace +from collections.abc import AsyncIterator, Awaitable, Callable, Mapping, Sequence from http import HTTPStatus from typing import ( Any, - ClassVar, Final, Generic, - Protocol, TypeAlias, - TypeVar, - runtime_checkable, ) -from pydantic import BaseModel, ValidationError - -from . 
import _utils -from ._agent_graph import CallToolsNode, ModelRequestNode -from .agent import AbstractAgent, AgentRun, AgentRunResult -from .exceptions import UserError -from .messages import ( - BaseToolCallPart, - BuiltinToolCallPart, - BuiltinToolReturnPart, - FunctionToolResultEvent, - ModelMessage, - ModelRequest, - ModelRequestPart, - ModelResponse, - ModelResponsePart, - ModelResponseStreamEvent, - PartDeltaEvent, - PartStartEvent, - SystemPromptPart, - TextPart, - TextPartDelta, - ThinkingPart, - ThinkingPartDelta, - ToolCallPart, - ToolCallPartDelta, - ToolReturnPart, - UserPromptPart, -) +from pydantic import ValidationError + +from .agent import AbstractAgent, AgentRunResult from .models import KnownModelName, Model from .output import OutputDataT, OutputSpec from .settings import ModelSettings -from .tools import AgentDepsT, DeferredToolRequests, ToolDefinition +from .tools import AgentDepsT from .toolsets import AbstractToolset -from .toolsets.external import ExternalToolset from .usage import RunUsage, UsageLimits try: - from ag_ui.core import ( - AssistantMessage, - BaseEvent, - DeveloperMessage, - EventType, - Message, - RunAgentInput, - RunErrorEvent, - RunFinishedEvent, - RunStartedEvent, - State, - SystemMessage, - TextMessageContentEvent, - TextMessageEndEvent, - TextMessageStartEvent, - ThinkingEndEvent, - ThinkingStartEvent, - ThinkingTextMessageContentEvent, - ThinkingTextMessageEndEvent, - ThinkingTextMessageStartEvent, - Tool as AGUITool, - ToolCallArgsEvent, - ToolCallEndEvent, - ToolCallResultEvent, - ToolCallStartEvent, - ToolMessage, - UserMessage, + from .ui.ag_ui import ( + StateDeps, + StateHandler, ) - from ag_ui.encoder import EventEncoder + from .ui.ag_ui.event_stream import RunAgentInput, _RunError # type: ignore[reportPrivateUsage] except ImportError as e: # pragma: no cover raise ImportError( 'Please install the `ag-ui-protocol` package to use `Agent.to_ag_ui()` method, ' @@ -322,464 +265,26 @@ async def run_ag_ui( Yields: Streaming event chunks encoded as strings according to the accept header value. """ - encoder = EventEncoder(accept=accept) - if run_input.tools: - # AG-UI tools can't be prefixed as that would result in a mismatch between the tool names in the - # Pydantic AI events and actual AG-UI tool names, preventing the tool from being called. If any - # conflicts arise, the AG-UI tool should be renamed or a `PrefixedToolset` used for local toolsets. - toolset = _AGUIFrontendToolset[AgentDepsT](run_input.tools) - toolsets = [*toolsets, toolset] if toolsets else [toolset] - - try: - yield encoder.encode( - RunStartedEvent( - thread_id=run_input.thread_id, - run_id=run_input.run_id, - ), - ) - - if not run_input.messages: - raise _NoMessagesError - - raw_state: dict[str, Any] = run_input.state or {} - if isinstance(deps, StateHandler): - if isinstance(deps.state, BaseModel): - try: - state = type(deps.state).model_validate(raw_state) - except ValidationError as e: # pragma: no cover - raise _InvalidStateError from e - else: - state = raw_state - - deps = replace(deps, state=state) - elif raw_state: - raise UserError( - f'AG-UI state is provided but `deps` of type `{type(deps).__name__}` does not implement the `StateHandler` protocol: it needs to be a dataclass with a non-optional `state` field.' - ) - else: - # `deps` not being a `StateHandler` is OK if there is no state. 
- pass - - messages = _messages_from_ag_ui(run_input.messages) - - async with agent.iter( - user_prompt=None, - output_type=[output_type or agent.output_type, DeferredToolRequests], - message_history=messages, - model=model, - deps=deps, - model_settings=model_settings, - usage_limits=usage_limits, - usage=usage, - infer_name=infer_name, - toolsets=toolsets, - ) as run: - async for event in _agent_stream(run): - yield encoder.encode(event) - - if on_complete is not None and run.result is not None: - if _utils.is_async_callable(on_complete): - await on_complete(run.result) - else: - await _utils.run_in_executor(on_complete, run.result) - except _RunError as e: - yield encoder.encode( - RunErrorEvent(message=e.message, code=e.code), - ) - except Exception as e: - yield encoder.encode( - RunErrorEvent(message=str(e)), - ) - raise e - else: - yield encoder.encode( - RunFinishedEvent( - thread_id=run_input.thread_id, - run_id=run_input.run_id, - ), - ) - - -async def _agent_stream(run: AgentRun[AgentDepsT, Any]) -> AsyncIterator[BaseEvent]: - """Run the agent streaming responses using AG-UI protocol events. - - Args: - run: The agent run to process. - - Yields: - AG-UI Server-Sent Events (SSE). - """ - async for node in run: - stream_ctx = _RequestStreamContext() - if isinstance(node, ModelRequestNode): - async with node.stream(run.ctx) as request_stream: - async for agent_event in request_stream: - async for msg in _handle_model_request_event(stream_ctx, agent_event): - yield msg - - if stream_ctx.part_end: # pragma: no branch - yield stream_ctx.part_end - stream_ctx.part_end = None - if stream_ctx.thinking: - yield ThinkingEndEvent( - type=EventType.THINKING_END, - ) - stream_ctx.thinking = False - elif isinstance(node, CallToolsNode): - async with node.stream(run.ctx) as handle_stream: - async for event in handle_stream: - if isinstance(event, FunctionToolResultEvent): - async for msg in _handle_tool_result_event(stream_ctx, event): - yield msg - - -async def _handle_model_request_event( # noqa: C901 - stream_ctx: _RequestStreamContext, - agent_event: ModelResponseStreamEvent, -) -> AsyncIterator[BaseEvent]: - """Handle an agent event and yield AG-UI protocol events. - - Args: - stream_ctx: The request stream context to manage state. - agent_event: The agent event to process. - - Yields: - AG-UI Server-Sent Events (SSE) based on the agent event. - """ - if isinstance(agent_event, PartStartEvent): - if stream_ctx.part_end: - # End the previous part. 
- yield stream_ctx.part_end - stream_ctx.part_end = None - - part = agent_event.part - if isinstance(part, ThinkingPart): # pragma: no branch - if not stream_ctx.thinking: - yield ThinkingStartEvent( - type=EventType.THINKING_START, - ) - stream_ctx.thinking = True - - if part.content: - yield ThinkingTextMessageStartEvent( - type=EventType.THINKING_TEXT_MESSAGE_START, - ) - yield ThinkingTextMessageContentEvent( - type=EventType.THINKING_TEXT_MESSAGE_CONTENT, - delta=part.content, - ) - stream_ctx.part_end = ThinkingTextMessageEndEvent( - type=EventType.THINKING_TEXT_MESSAGE_END, - ) - else: - if stream_ctx.thinking: - yield ThinkingEndEvent( - type=EventType.THINKING_END, - ) - stream_ctx.thinking = False - - if isinstance(part, TextPart): - message_id = stream_ctx.new_message_id() - yield TextMessageStartEvent( - message_id=message_id, - ) - if part.content: # pragma: no branch - yield TextMessageContentEvent( - message_id=message_id, - delta=part.content, - ) - stream_ctx.part_end = TextMessageEndEvent( - message_id=message_id, - ) - elif isinstance(part, BaseToolCallPart): - tool_call_id = part.tool_call_id - if isinstance(part, BuiltinToolCallPart): - builtin_tool_call_id = '|'.join( - [_BUILTIN_TOOL_CALL_ID_PREFIX, part.provider_name or '', tool_call_id] - ) - stream_ctx.builtin_tool_call_ids[tool_call_id] = builtin_tool_call_id - tool_call_id = builtin_tool_call_id - - message_id = stream_ctx.message_id or stream_ctx.new_message_id() - yield ToolCallStartEvent( - tool_call_id=tool_call_id, - tool_call_name=part.tool_name, - parent_message_id=message_id, - ) - if part.args: - yield ToolCallArgsEvent( - tool_call_id=tool_call_id, - delta=part.args_as_json_str(), - ) - stream_ctx.part_end = ToolCallEndEvent( - tool_call_id=tool_call_id, - ) - elif isinstance(part, BuiltinToolReturnPart): # pragma: no branch - tool_call_id = stream_ctx.builtin_tool_call_ids[part.tool_call_id] - yield ToolCallResultEvent( - message_id=stream_ctx.new_message_id(), - type=EventType.TOOL_CALL_RESULT, - role='tool', - tool_call_id=tool_call_id, - content=part.model_response_str(), - ) - - elif isinstance(agent_event, PartDeltaEvent): - delta = agent_event.delta - if isinstance(delta, TextPartDelta): - if delta.content_delta: # pragma: no branch - yield TextMessageContentEvent( - message_id=stream_ctx.message_id, - delta=delta.content_delta, - ) - elif isinstance(delta, ToolCallPartDelta): # pragma: no branch - tool_call_id = delta.tool_call_id - assert tool_call_id, '`ToolCallPartDelta.tool_call_id` must be set' - if tool_call_id in stream_ctx.builtin_tool_call_ids: - tool_call_id = stream_ctx.builtin_tool_call_ids[tool_call_id] - yield ToolCallArgsEvent( - tool_call_id=tool_call_id, - delta=delta.args_delta if isinstance(delta.args_delta, str) else json.dumps(delta.args_delta), - ) - elif isinstance(delta, ThinkingPartDelta): # pragma: no branch - if delta.content_delta: # pragma: no branch - if not isinstance(stream_ctx.part_end, ThinkingTextMessageEndEvent): - yield ThinkingTextMessageStartEvent( - type=EventType.THINKING_TEXT_MESSAGE_START, - ) - stream_ctx.part_end = ThinkingTextMessageEndEvent( - type=EventType.THINKING_TEXT_MESSAGE_END, - ) - - yield ThinkingTextMessageContentEvent( - type=EventType.THINKING_TEXT_MESSAGE_CONTENT, - delta=delta.content_delta, - ) - - -async def _handle_tool_result_event( - stream_ctx: _RequestStreamContext, - event: FunctionToolResultEvent, -) -> AsyncIterator[BaseEvent]: - """Convert a tool call result to AG-UI events. 
- - Args: - stream_ctx: The request stream context to manage state. - event: The tool call result event to process. - - Yields: - AG-UI Server-Sent Events (SSE). - """ - result = event.result - if not isinstance(result, ToolReturnPart): - return - - yield ToolCallResultEvent( - message_id=stream_ctx.new_message_id(), - type=EventType.TOOL_CALL_RESULT, - role='tool', - tool_call_id=result.tool_call_id, - content=result.model_response_str(), - ) - - # Now check for AG-UI events returned by the tool calls. - possible_event = result.metadata or result.content - if isinstance(possible_event, BaseEvent): - yield possible_event - elif isinstance(possible_event, str | bytes): # pragma: no branch - # Avoid iterable check for strings and bytes. - pass - elif isinstance(possible_event, Iterable): # pragma: no branch - for item in possible_event: # type: ignore[reportUnknownMemberType] - if isinstance(item, BaseEvent): # pragma: no branch - yield item - - -def _messages_from_ag_ui(messages: list[Message]) -> list[ModelMessage]: - """Convert a AG-UI history to a Pydantic AI one.""" - result: list[ModelMessage] = [] - tool_calls: dict[str, str] = {} # Tool call ID to tool name mapping. - request_parts: list[ModelRequestPart] | None = None - response_parts: list[ModelResponsePart] | None = None - for msg in messages: - if isinstance(msg, UserMessage | SystemMessage | DeveloperMessage) or ( - isinstance(msg, ToolMessage) and not msg.tool_call_id.startswith(_BUILTIN_TOOL_CALL_ID_PREFIX) - ): - if request_parts is None: - request_parts = [] - result.append(ModelRequest(parts=request_parts)) - response_parts = None - - if isinstance(msg, UserMessage): - request_parts.append(UserPromptPart(content=msg.content)) - elif isinstance(msg, SystemMessage | DeveloperMessage): - request_parts.append(SystemPromptPart(content=msg.content)) - else: - tool_call_id = msg.tool_call_id - tool_name = tool_calls.get(tool_call_id) - if tool_name is None: # pragma: no cover - raise _ToolCallNotFoundError(tool_call_id=tool_call_id) - - request_parts.append( - ToolReturnPart( - tool_name=tool_name, - content=msg.content, - tool_call_id=tool_call_id, - ) - ) - - elif isinstance(msg, AssistantMessage) or ( # pragma: no branch - isinstance(msg, ToolMessage) and msg.tool_call_id.startswith(_BUILTIN_TOOL_CALL_ID_PREFIX) - ): - if response_parts is None: - response_parts = [] - result.append(ModelResponse(parts=response_parts)) - request_parts = None - - if isinstance(msg, AssistantMessage): - if msg.content: - response_parts.append(TextPart(content=msg.content)) - - if msg.tool_calls: - for tool_call in msg.tool_calls: - tool_call_id = tool_call.id - tool_name = tool_call.function.name - tool_calls[tool_call_id] = tool_name - - if tool_call_id.startswith(_BUILTIN_TOOL_CALL_ID_PREFIX): - _, provider_name, tool_call_id = tool_call_id.split('|', 2) - response_parts.append( - BuiltinToolCallPart( - tool_name=tool_name, - args=tool_call.function.arguments, - tool_call_id=tool_call_id, - provider_name=provider_name, - ) - ) - else: - response_parts.append( - ToolCallPart( - tool_name=tool_name, - tool_call_id=tool_call_id, - args=tool_call.function.arguments, - ) - ) - else: - tool_call_id = msg.tool_call_id - tool_name = tool_calls.get(tool_call_id) - if tool_name is None: # pragma: no cover - raise _ToolCallNotFoundError(tool_call_id=tool_call_id) - _, provider_name, tool_call_id = tool_call_id.split('|', 2) - - response_parts.append( - BuiltinToolReturnPart( - tool_name=tool_name, - content=msg.content, - tool_call_id=tool_call_id, - 
provider_name=provider_name, - ) - ) - - return result - - -@runtime_checkable -class StateHandler(Protocol): - """Protocol for state handlers in agent runs. Requires the class to be a dataclass with a `state` field.""" - - # Has to be a dataclass so we can use `replace` to update the state. - # From https://github.com/python/typeshed/blob/9ab7fde0a0cd24ed7a72837fcb21093b811b80d8/stdlib/_typeshed/__init__.pyi#L352 - __dataclass_fields__: ClassVar[dict[str, Field[Any]]] - - @property - def state(self) -> State: - """Get the current state of the agent run.""" - ... - - @state.setter - def state(self, state: State) -> None: - """Set the state of the agent run. - - This method is called to update the state of the agent run with the - provided state. - - Args: - state: The run state. - - Raises: - InvalidStateError: If `state` does not match the expected model. - """ - ... - - -StateT = TypeVar('StateT', bound=BaseModel) -"""Type variable for the state type, which must be a subclass of `BaseModel`.""" - - -@dataclass -class StateDeps(Generic[StateT]): - """Provides AG-UI state management. - - This class is used to manage the state of an agent run. It allows setting - the state of the agent run with a specific type of state model, which must - be a subclass of `BaseModel`. - - The state is set using the `state` setter by the `Adapter` when the run starts. - - Implements the `StateHandler` protocol. - """ - - state: StateT - - -@dataclass(repr=False) -class _RequestStreamContext: - """Data class to hold request stream context.""" - - message_id: str = '' - part_end: BaseEvent | None = None - thinking: bool = False - builtin_tool_call_ids: dict[str, str] = field(default_factory=dict) - - def new_message_id(self) -> str: - """Generate a new message ID for the request stream. - - Assigns a new UUID to the `message_id` and returns it. - - Returns: - A new message ID. 
- """ - self.message_id = str(uuid.uuid4()) - return self.message_id - - -@dataclass -class _RunError(Exception): - """Exception raised for errors during agent runs.""" - - message: str - code: str - - def __str__(self) -> str: # pragma: no cover - return self.message - - -@dataclass -class _NoMessagesError(_RunError): - """Exception raised when no messages are found in the input.""" - - message: str = 'no messages found in the input' - code: str = 'no_messages' - - -@dataclass -class _InvalidStateError(_RunError, ValidationError): - """Exception raised when an invalid state is provided.""" - - message: str = 'invalid state provided' - code: str = 'invalid_state' - - + from .ui.ag_ui import AGUIAdapter + + adapter = AGUIAdapter(agent=agent) + async for event_str in adapter.run_stream_sse( + request=run_input, + accept=accept, + output_type=output_type, + model=model, + deps=deps, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + on_complete=on_complete, + ): + yield event_str + + +# _ToolCallNotFoundError is defined here (not in ui/ag_ui) since it's specific to this module class _ToolCallNotFoundError(_RunError, ValueError): """Exception raised when an tool result is present without a matching call.""" @@ -789,21 +294,3 @@ def __init__(self, tool_call_id: str) -> None: message=f'Tool call with ID {tool_call_id} not found in the history.', code='tool_call_not_found', ) - - -class _AGUIFrontendToolset(ExternalToolset[AgentDepsT]): - def __init__(self, tools: list[AGUITool]): - super().__init__( - [ - ToolDefinition( - name=tool.name, - description=tool.description, - parameters_json_schema=tool.parameters, - ) - for tool in tools - ] - ) - - @property - def label(self) -> str: - return 'the AG-UI frontend tools' # pragma: no cover diff --git a/pydantic_ai_slim/pydantic_ai/ui/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/__init__.py new file mode 100644 index 0000000000..dec4830efe --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/__init__.py @@ -0,0 +1,457 @@ +"""Base classes for UI event stream protocols. + +This module provides abstract base classes for implementing UI event stream adapters +that transform Pydantic AI agent events into protocol-specific events (e.g., AG-UI, Vercel AI). +""" + +# pyright: reportIncompatibleMethodOverride=false, reportUnknownVariableType=false, reportGeneralTypeIssues=false + +from __future__ import annotations + +from abc import ABC, abstractmethod +from collections.abc import AsyncIterator +from typing import Any, Generic, Protocol, TypeVar, runtime_checkable +from uuid import uuid4 + +from ..messages import ( + AgentStreamEvent, + BuiltinToolCallEvent, # type: ignore[reportDeprecated] + BuiltinToolCallPart, + BuiltinToolResultEvent, # type: ignore[reportDeprecated] + BuiltinToolReturnPart, + FilePart, + FinalResultEvent, + FunctionToolCallEvent, + FunctionToolResultEvent, + ModelMessage, + PartDeltaEvent, + PartStartEvent, + TextPart, + TextPartDelta, + ThinkingPart, + ThinkingPartDelta, + ToolCallPart, + ToolCallPartDelta, +) +from ..run import AgentRunResultEvent +from ..tools import AgentDepsT + +__all__ = [ + 'SSEEvent', + 'BaseEventStream', + 'BaseAdapter', +] + +EventT = TypeVar('EventT', bound='SSEEvent') +"""Type variable for protocol-specific event types.""" + + +@runtime_checkable +class SSEEvent(Protocol): + """Protocol for events that can be encoded as Server-Sent Events (SSE).""" + + def sse(self) -> str: + """Encode event as a Server-Sent Event string. 
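+
+        A Server-Sent Event is framed as `data: <payload>` followed by a blank line;
+        for example, a text delta might be sent as `data: {"type": "text-delta", "delta": "hi"}`
+        (illustrative payload; each protocol implementation defines its own wire format).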
+ + Returns: + The SSE-formatted string representation of the event. + """ + ... + + +class BaseEventStream(ABC, Generic[EventT, AgentDepsT]): + """Base class for transforming pAI agent events into protocol-specific events. + + This class provides a granular method-per-part-type pattern that makes it easy to customize + specific event transformations by overriding individual handler methods. + + Subclasses should: + 1. Initialize state attributes in `__init__` if needed + 2. Implement all abstract `handle_*` methods for event transformation + 3. Implement error handling methods + 4. Optionally override lifecycle hooks (`before_stream`, `after_stream`) + + Example: + ```python + class MyEventStream(BaseEventStream[MyEvent, MyDeps]): + def __init__(self): + super().__init__() + self.custom_state = {} + + async def handle_text_start(self, part: TextPart): + yield MyTextStartEvent(id=self.new_message_id(), text=part.content) + ``` + """ + + def __init__(self) -> None: + """Initialize event stream state. + + Subclasses can add additional state attributes for tracking streaming context. + """ + self.message_id: str = '' + + def new_message_id(self) -> str: + """Generate and store a new message ID. + + Returns: + A new UUID-based message ID. + """ + self.message_id = str(uuid4()) + return self.message_id + + async def agent_event_to_events(self, event: AgentStreamEvent | AgentRunResultEvent) -> AsyncIterator[EventT]: # noqa: C901 + """Transform a pAI agent event into protocol-specific events. + + This method dispatches to specific `handle_*` methods based on event and part type. + Subclasses should implement the individual handler methods rather than overriding this. + + Args: + event: The pAI agent event to transform. + + Yields: + Protocol-specific events. + """ + match event: + case PartStartEvent(part=part): + # Dispatch based on part type + match part: + case TextPart(): + async for e in self.handle_text_start(part): + yield e + case ThinkingPart(): + async for e in self.handle_thinking_start(part): + yield e + case ToolCallPart() | BuiltinToolCallPart(): + async for e in self.handle_tool_call_start(part): + yield e + case BuiltinToolReturnPart(): + async for e in self.handle_builtin_tool_return(part): + yield e + case FilePart(): + # FilePart is not currently handled by UI protocols + pass + case PartDeltaEvent(delta=delta): + # Dispatch based on delta type + match delta: + case TextPartDelta(): + async for e in self.handle_text_delta(delta): + yield e + case ThinkingPartDelta(): + async for e in self.handle_thinking_delta(delta): + yield e + case ToolCallPartDelta(): + async for e in self.handle_tool_call_delta(delta): + yield e + case FunctionToolCallEvent(): + async for e in self.handle_function_tool_call(event): + yield e + case FunctionToolResultEvent(): + async for e in self.handle_function_tool_result(event): + yield e + case BuiltinToolCallEvent(): # type: ignore[reportDeprecated] + async for e in self.handle_builtin_tool_call(event): + yield e + case BuiltinToolResultEvent(): # type: ignore[reportDeprecated] + async for e in self.handle_builtin_tool_result(event): + yield e + case FinalResultEvent(): + async for e in self.handle_final_result(event): + yield e + case AgentRunResultEvent(): + async for e in self.handle_run_result(event): + yield e + + # Granular part handlers (abstract - must implement) + + @abstractmethod + async def handle_text_start(self, part: TextPart) -> AsyncIterator[EventT]: + """Handle a TextPart at start. + + Args: + part: The TextPart. 
+ + Yields: + Protocol-specific events. + """ + + @abstractmethod + async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[EventT]: + """Handle a TextPartDelta. + + Args: + delta: The TextPartDelta. + + Yields: + Protocol-specific events. + """ + + @abstractmethod + async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[EventT]: + """Handle a ThinkingPart at start. + + Args: + part: The ThinkingPart. + + Yields: + Protocol-specific events. + """ + + @abstractmethod + async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[EventT]: + """Handle a ThinkingPartDelta. + + Args: + delta: The ThinkingPartDelta. + + Yields: + Protocol-specific events. + """ + + @abstractmethod + async def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[EventT]: + """Handle a ToolCallPart or BuiltinToolCallPart at start. + + Args: + part: The tool call part. + + Yields: + Protocol-specific events. + """ + + @abstractmethod + async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[EventT]: + """Handle a ToolCallPartDelta. + + Args: + delta: The ToolCallPartDelta. + + Yields: + Protocol-specific events. + """ + + @abstractmethod + async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[EventT]: + """Handle a BuiltinToolReturnPart. + + Args: + part: The BuiltinToolReturnPart. + + Yields: + Protocol-specific events. + """ + + # Tool event handlers (abstract - must implement) + + @abstractmethod + async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[EventT]: + """Handle a FunctionToolCallEvent. + + Args: + event: The function tool call event. + + Yields: + Protocol-specific events. + """ + + @abstractmethod + async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[EventT]: + """Handle a FunctionToolResultEvent. + + Args: + event: The function tool result event. + + Yields: + Protocol-specific events. + """ + + @abstractmethod + async def handle_builtin_tool_call(self, event: BuiltinToolCallEvent) -> AsyncIterator[EventT]: # type: ignore[reportDeprecated] + """Handle a BuiltinToolCallEvent. + + Args: + event: The builtin tool call event. + + Yields: + Protocol-specific events. + """ + + @abstractmethod + async def handle_builtin_tool_result(self, event: BuiltinToolResultEvent) -> AsyncIterator[EventT]: # type: ignore[reportDeprecated] + """Handle a BuiltinToolResultEvent. + + Args: + event: The builtin tool result event. + + Yields: + Protocol-specific events. + """ + + @abstractmethod + async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[EventT]: + """Handle a FinalResultEvent. + + Args: + event: The final result event. + + Yields: + Protocol-specific events. + """ + + @abstractmethod + async def handle_run_result(self, event: AgentRunResultEvent) -> AsyncIterator[EventT]: + """Handle an AgentRunResultEvent (final event with result/usage). + + Args: + event: The agent run result event. + + Yields: + Protocol-specific events. + """ + + # Lifecycle hooks (optional overrides) + + async def before_stream(self) -> AsyncIterator[EventT]: + """Yield events before agent streaming starts. + + This hook is called before any agent events are processed. + Override this to inject custom events at the start of the stream. + + Yields: + Protocol-specific events to emit before streaming. 
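+
+        Example (sketch; `StartChunk` stands in for a concrete protocol's start event):
+            ```python
+            async def before_stream(self):
+                yield StartChunk()
+            ```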
+ """ + return + yield # Make this an async generator + + async def after_stream(self) -> AsyncIterator[EventT]: + """Yield events after agent streaming completes. + + This hook is called after all agent events have been processed. + Override this to inject custom events at the end of the stream. + + Yields: + Protocol-specific events to emit after streaming. + """ + return + yield # Make this an async generator + + # Error handling (must implement) + + @abstractmethod + async def on_validation_error(self, error: Exception) -> AsyncIterator[EventT]: + """Handle validation errors that occur before streaming starts. + + Args: + error: The validation error that occurred. + + Yields: + Protocol-specific error events. + """ + + @abstractmethod + async def on_stream_error(self, error: Exception) -> AsyncIterator[EventT]: + """Handle errors that occur during streaming (after stream has started). + + Args: + error: The error that occurred during streaming. + + Yields: + Protocol-specific error events. + """ + + +RequestT = TypeVar('RequestT') +"""Type variable for protocol-specific request types.""" + +MessageT = TypeVar('MessageT') +"""Type variable for protocol-specific message types.""" + + +class BaseAdapter(ABC, Generic[RequestT, MessageT, EventT, AgentDepsT]): + """Base adapter for handling UI protocol requests and streaming responses. + + This class provides a unified interface for request/response handling across different + UI protocols (AG-UI, Vercel AI, etc.). It handles: + - Request parsing and validation + - Message format conversion (protocol messages → pAI messages) + - Agent execution and event streaming + - Error handling (validation errors vs streaming errors) + - SSE encoding + + Type Parameters: + RequestT: Protocol-specific request type (e.g., RunAgentInput, RequestData) + MessageT: Protocol-specific message type (e.g., ag_ui.Message, UIMessage) + EventT: Protocol-specific event type (e.g., ag_ui.BaseEvent, AbstractSSEChunk) + AgentDepsT: Agent dependencies type + + Example: + ```python + class MyAdapter(BaseAdapter[MyRequest, MyMessage, MyEvent, MyDeps]): + def create_event_stream(self) -> BaseEventStream[MyEvent, MyDeps]: + return MyEventStream() + + def parse_request_messages(self, request: MyRequest) -> list[MyMessage]: + return request.messages + + def protocol_messages_to_pai_messages(self, messages: list[MyMessage]) -> list[ModelMessage]: + # Convert protocol messages to pAI messages + ... + ``` + """ + + @abstractmethod + def create_event_stream(self) -> BaseEventStream[EventT, AgentDepsT]: + """Create a new event stream for this protocol. + + Returns: + A protocol-specific event stream instance. + """ + + @abstractmethod + def parse_request_messages(self, request: RequestT) -> list[MessageT]: + """Extract messages from the protocol request. + + Args: + request: The protocol-specific request. + + Returns: + List of protocol-specific messages. + """ + + @abstractmethod + def protocol_messages_to_pai_messages(self, messages: list[MessageT]) -> list[ModelMessage]: + """Convert protocol messages to Pydantic AI messages. + + Args: + messages: List of protocol-specific messages. + + Returns: + List of Pydantic AI ModelMessage objects. + """ + + @abstractmethod + def encode_event(self, event: EventT) -> str: + """Encode a protocol event as an SSE string. + + Args: + event: The protocol-specific event. + + Returns: + SSE-formatted string. 
+ """ + + @abstractmethod + async def dispatch_request(self, request: Any, deps: AgentDepsT | None = None) -> Any: + """Handle a request and return a response. + + This method should handle the full request/response cycle: + - Parse and validate the request + - Run the agent with the request data + - Return an appropriate response (e.g., StreamingResponse, EventSourceResponse) + + Args: + request: The protocol-specific request object (e.g., Starlette Request). + deps: Optional dependencies to pass to the agent. + + Returns: + A protocol-specific response object. + """ diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py new file mode 100644 index 0000000000..620b74cd28 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py @@ -0,0 +1,12 @@ +"""AG-UI protocol integration for Pydantic AI agents.""" + +from .adapter import AGUIAdapter +from .event_stream import AGUIEventStream, StateDeps, StateHandler, protocol_messages_to_pai_messages + +__all__ = [ + 'AGUIAdapter', + 'AGUIEventStream', + 'StateHandler', + 'StateDeps', + 'protocol_messages_to_pai_messages', +] diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/adapter.py new file mode 100644 index 0000000000..e6ffa784f3 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/adapter.py @@ -0,0 +1,334 @@ +"""AG-UI adapter for handling requests.""" + +# pyright: reportGeneralTypeIssues=false, reportInvalidTypeArguments=false + +from __future__ import annotations + +import json +from dataclasses import dataclass +from http import HTTPStatus +from typing import TYPE_CHECKING, Any + +from pydantic import BaseModel, ValidationError + +from ...tools import AgentDepsT +from .event_stream import ( + AGUIEventStream, + RunAgentInput, + StateHandler, + _AGUIFrontendToolset, # type: ignore[reportPrivateUsage] + _InvalidStateError, # type: ignore[reportPrivateUsage] + _NoMessagesError, # type: ignore[reportPrivateUsage] + _RunError, # type: ignore[reportPrivateUsage] + protocol_messages_to_pai_messages, +) + +if TYPE_CHECKING: + from ...agent import Agent + +__all__ = ['AGUIAdapter'] + + +@dataclass +class AGUIAdapter: + """Adapter for handling AG-UI protocol requests with Pydantic AI agents. + + This adapter provides an interface for integrating Pydantic AI agents + with the AG-UI protocol, handling request parsing, message conversion, + and event streaming. + + Example: + ```python + from pydantic_ai import Agent + from pydantic_ai.ui.ag_ui import AGUIAdapter + + agent = Agent('openai:gpt-4') + adapter = AGUIAdapter(agent) + + async def handle_request(request: RunAgentInput, deps=None): + async for event_str in adapter.run_stream_sse(request, deps): + yield event_str + ``` + """ + + agent: Agent[AgentDepsT] + """The Pydantic AI agent to run.""" + + async def run_stream( # noqa: C901 + self, + request: RunAgentInput, + deps: AgentDepsT | None = None, + *, + output_type: Any = None, + model: Any = None, + model_settings: Any = None, + usage_limits: Any = None, + usage: Any = None, + infer_name: bool = True, + toolsets: Any = None, + on_complete: Any = None, + ): + """Stream events from an agent run as AG-UI protocol events. + + This method provides a complete implementation with all AG-UI features including: + - Frontend tools handling + - State injection + - Error handling (validation vs stream errors) + - on_complete callback + - RunStarted and RunFinished events + + Args: + request: The AG-UI request data. 
+ deps: Optional dependencies to pass to the agent. + output_type: Custom output type for this run. + model: Optional model to use for this run. + model_settings: Optional settings for the model's request. + usage_limits: Optional limits on model request count or token usage. + usage: Optional usage to start with. + infer_name: Whether to infer the agent name from the call frame. + toolsets: Optional additional toolsets for this run. + on_complete: Optional callback called when the agent run completes. + + Yields: + AG-UI protocol events (BaseEvent subclasses). + + Raises: + _RunError: If request validation fails or other errors occur. + """ + from ... import _utils + from ...exceptions import UserError + from ...tools import DeferredToolRequests + from .event_stream import RunFinishedEvent, RunStartedEvent + + # Create event stream + event_stream = self.create_event_stream() + stream_started = False + + # Handle frontend tools + if request.tools: + toolset = _AGUIFrontendToolset[AgentDepsT](request.tools) + toolsets = [*toolsets, toolset] if toolsets else [toolset] + + try: + # Emit start event + yield RunStartedEvent( + thread_id=request.thread_id, + run_id=request.run_id, + ) + stream_started = True + + if not request.messages: + raise _NoMessagesError + + # Handle state injection + raw_state: dict[str, Any] = request.state or {} + if isinstance(deps, StateHandler): + if isinstance(deps.state, BaseModel): + try: + state = type(deps.state).model_validate(raw_state) + except ValidationError as e: # pragma: no cover + raise _InvalidStateError from e + else: + state = raw_state + + from dataclasses import replace + + deps = replace(deps, state=state) + elif raw_state: + raise UserError( + f'AG-UI state is provided but `deps` of type `{type(deps).__name__}` does not implement the `StateHandler` protocol: it needs to be a dataclass with a non-optional `state` field.' 
+ ) + + # Convert AG-UI messages to pAI messages + messages = protocol_messages_to_pai_messages(request.messages) + + # Run agent and stream events + result = None + async for event in self.agent.run_stream_events( + user_prompt=None, + output_type=[output_type or self.agent.output_type, DeferredToolRequests], + message_history=messages, + model=model, + deps=deps, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + ): + from ...run import AgentRunResultEvent + + # Capture result for on_complete callback + if isinstance(event, AgentRunResultEvent): + result = event.result + + # Transform pAI events to AG-UI events + async for ag_ui_event in event_stream.agent_event_to_events(event): # type: ignore[arg-type] + yield ag_ui_event + + # Call on_complete callback + if on_complete is not None and result is not None: + if _utils.is_async_callable(on_complete): + await on_complete(result) + else: + await _utils.run_in_executor(on_complete, result) + + except _RunError as e: + if stream_started: + async for error_event in event_stream.on_stream_error(e): + yield error_event + else: + async for error_event in event_stream.on_validation_error(e): + yield error_event + raise + except Exception as e: + if stream_started: + async for error_event in event_stream.on_stream_error(e): + yield error_event + else: + async for error_event in event_stream.on_validation_error(e): + yield error_event + raise + else: + # Emit finish event + yield RunFinishedEvent( + thread_id=request.thread_id, + run_id=request.run_id, + ) + + async def run_stream_sse( + self, + request: RunAgentInput, + accept: str, + *, + output_type: Any = None, + model: Any = None, + deps: AgentDepsT | None = None, + model_settings: Any = None, + usage_limits: Any = None, + usage: Any = None, + infer_name: bool = True, + toolsets: Any = None, + on_complete: Any = None, + ): + """Stream SSE-encoded events from an agent run. + + This method wraps `run_stream` and encodes the events as SSE strings. + + Args: + request: The AG-UI request data. + accept: The accept header value for encoding. + output_type: Custom output type for this run. + model: Optional model to use for this run. + deps: Optional dependencies to pass to the agent. + model_settings: Optional settings for the model's request. + usage_limits: Optional limits on model request count or token usage. + usage: Optional usage to start with. + infer_name: Whether to infer the agent name from the call frame. + toolsets: Optional additional toolsets for this run. + on_complete: Optional callback called when the agent run completes. + + Yields: + SSE-formatted strings. 
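+
+        Example (sketch; assumes a validated `run_input` parsed from the request body):
+            ```python
+            async for sse_str in adapter.run_stream_sse(run_input, accept='text/event-stream'):
+                ...
+            ```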
+ """ + from ag_ui.encoder import EventEncoder + + encoder = EventEncoder(accept=accept) + + try: + async for event in self.run_stream( + request=request, + deps=deps, + output_type=output_type, + model=model, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + on_complete=on_complete, + ): + yield encoder.encode(event) + except _RunError: + # Error events are already yielded by run_stream + # This shouldn't actually be reached since run_stream yields error events before raising + pass + except Exception: + # Let other exceptions propagate + raise + + async def dispatch_request( + self, + request: Any, + deps: AgentDepsT | None = None, + *, + output_type: Any = None, + model: Any = None, + model_settings: Any = None, + usage_limits: Any = None, + usage: Any = None, + infer_name: bool = True, + toolsets: Any = None, + on_complete: Any = None, + ) -> Any: + """Handle an AG-UI request and return a streaming response. + + Args: + request: The incoming Starlette/FastAPI request. + deps: Optional dependencies to pass to the agent. + output_type: Custom output type for this run. + model: Optional model to use for this run. + model_settings: Optional settings for the model's request. + usage_limits: Optional limits on model request count or token usage. + usage: Optional usage to start with. + infer_name: Whether to infer the agent name from the call frame. + toolsets: Optional additional toolsets for this run. + on_complete: Optional callback called when the agent run completes. + + Returns: + A streaming Starlette response with AG-UI protocol events. + """ + try: + from starlette.requests import Request + from starlette.responses import Response, StreamingResponse + except ImportError as e: # pragma: no cover + raise ImportError('Please install starlette to use dispatch_request') from e + + if not isinstance(request, Request): # pragma: no cover + raise TypeError(f'Expected Starlette Request, got {type(request).__name__}') + + accept = request.headers.get('accept', 'text/event-stream') + + try: + input_data = RunAgentInput.model_validate(await request.json()) + except ValidationError as e: # pragma: no cover + return Response( + content=json.dumps(e.json()), + media_type='application/json', + status_code=HTTPStatus.UNPROCESSABLE_ENTITY, + ) + + return StreamingResponse( + self.run_stream_sse( + request=input_data, + accept=accept, + deps=deps, + output_type=output_type, + model=model, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + on_complete=on_complete, + ), + media_type=accept, + ) + + def create_event_stream(self) -> AGUIEventStream[AgentDepsT]: + """Create a new AG-UI event stream. + + Returns: + An AGUIEventStream instance. + """ + return AGUIEventStream[AgentDepsT]() diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/event_stream.py new file mode 100644 index 0000000000..5ce69abfda --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/event_stream.py @@ -0,0 +1,548 @@ +"""AG-UI protocol adapter for Pydantic AI agents. + +This module provides classes for integrating Pydantic AI agents with the AG-UI protocol, +enabling streaming event-based communication for interactive AI applications. 
+""" + +# pyright: reportIncompatibleMethodOverride=false, reportUnusedClass=false, reportGeneralTypeIssues=false, reportInvalidTypeArguments=false + +from __future__ import annotations + +import json +from collections.abc import AsyncIterator, Iterable +from dataclasses import Field, dataclass +from typing import TYPE_CHECKING, Any, ClassVar, Final, Generic, Protocol, TypeVar, runtime_checkable + +if TYPE_CHECKING: + pass # Agent type is not actually used in this module + +from pydantic import BaseModel, ValidationError + +from ...messages import ( + BuiltinToolCallEvent, # type: ignore[reportDeprecated] + BuiltinToolCallPart, + BuiltinToolResultEvent, # type: ignore[reportDeprecated] + BuiltinToolReturnPart, + FinalResultEvent, + FunctionToolCallEvent, + FunctionToolResultEvent, + ModelMessage, + TextPart, + TextPartDelta, + ThinkingPart, + ThinkingPartDelta, + ToolCallPart, + ToolCallPartDelta, + ToolReturnPart, +) +from ...run import AgentRunResultEvent +from ...tools import AgentDepsT, ToolDefinition +from ...toolsets.external import ExternalToolset +from .. import BaseEventStream + +try: + from ag_ui.core import ( + AssistantMessage, + BaseEvent, + DeveloperMessage, + EventType, + Message, + RunAgentInput, + RunErrorEvent, + RunFinishedEvent, + RunStartedEvent, + SystemMessage, + TextMessageContentEvent, + TextMessageEndEvent, + TextMessageStartEvent, + ThinkingEndEvent, + ThinkingStartEvent, + ThinkingTextMessageContentEvent, + ThinkingTextMessageEndEvent, + ThinkingTextMessageStartEvent, + Tool as AGUITool, + ToolCallArgsEvent, + ToolCallEndEvent, + ToolCallResultEvent, + ToolCallStartEvent, + ToolMessage, + UserMessage, + ) +except ImportError as e: # pragma: no cover + raise ImportError( + 'Please install the `ag-ui-protocol` package to use AG-UI integration, ' + 'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`' + ) from e + +__all__ = [ + 'AGUIEventStream', + 'StateHandler', + 'StateDeps', + 'protocol_messages_to_pai_messages', + '_AGUIFrontendToolset', + '_NoMessagesError', + '_InvalidStateError', + '_RunError', + 'RunAgentInput', + 'RunStartedEvent', + 'RunFinishedEvent', +] + +_BUILTIN_TOOL_CALL_ID_PREFIX: Final[str] = 'pyd_ai_builtin' + + +# State management types + +StateT = TypeVar('StateT', bound=BaseModel) +"""Type variable for the state type, which must be a subclass of `BaseModel`.""" + + +@runtime_checkable +class StateHandler(Protocol): + """Protocol for state handlers in agent runs. Requires the class to be a dataclass with a `state` field.""" + + # Has to be a dataclass so we can use `replace` to update the state. + # From https://github.com/python/typeshed/blob/9ab7fde0a0cd24ed7a72837fcb21093b811b80d8/stdlib/_typeshed/__init__.pyi#L352 + __dataclass_fields__: ClassVar[dict[str, Field[Any]]] + + @property + def state(self) -> Any: + """Get the current state of the agent run.""" + ... + + @state.setter + def state(self, state: Any) -> None: + """Set the state of the agent run. + + This method is called to update the state of the agent run with the + provided state. + + Args: + state: The run state. + + Raises: + InvalidStateError: If `state` does not match the expected model. + """ + ... + + +@dataclass +class StateDeps(Generic[StateT]): + """Provides AG-UI state management. + + This class is used to manage the state of an agent run. It allows setting + the state of the agent run with a specific type of state model, which must + be a subclass of `BaseModel`. 
+
+    The state is set using the `state` setter by the `Adapter` when the run starts.
+
+    Implements the `StateHandler` protocol.
+    """
+
+    state: StateT
+
+
+# Error types
+
+
+@dataclass
+class _RunError(Exception):
+    """Exception raised for errors during agent runs."""
+
+    message: str
+    code: str
+
+    def __str__(self) -> str:  # pragma: no cover
+        return self.message
+
+
+@dataclass
+class _NoMessagesError(_RunError):
+    """Exception raised when no messages are found in the input."""
+
+    message: str = 'no messages found in the input'
+    code: str = 'no_messages'
+
+
+@dataclass
+class _InvalidStateError(_RunError, ValidationError):
+    """Exception raised when an invalid state is provided."""
+
+    message: str = 'invalid state provided'
+    code: str = 'invalid_state'
+
+
+class _ToolCallNotFoundError(_RunError, ValueError):
+    """Exception raised when a tool result is present without a matching call."""
+
+    def __init__(self, tool_call_id: str) -> None:
+        """Initialize the exception with the tool call ID."""
+        super().__init__(  # pragma: no cover
+            message=f'Tool call with ID {tool_call_id} not found in the history.',
+            code='tool_call_not_found',
+        )
+
+
+# Frontend toolset
+
+
+class _AGUIFrontendToolset(ExternalToolset[AgentDepsT]):
+    """Toolset for AG-UI frontend tools."""
+
+    def __init__(self, tools: list[AGUITool]):
+        """Initialize the toolset with AG-UI tools.
+
+        Args:
+            tools: List of AG-UI tool definitions.
+        """
+        super().__init__(
+            [
+                ToolDefinition(
+                    name=tool.name,
+                    description=tool.description,
+                    parameters_json_schema=tool.parameters,
+                )
+                for tool in tools
+            ]
+        )
+
+    @property
+    def label(self) -> str:
+        """Return the label for this toolset."""
+        return 'the AG-UI frontend tools'  # pragma: no cover
+
+
+class AGUIEventStream(BaseEventStream[BaseEvent, AgentDepsT]):
+    """Transforms Pydantic AI agent events into AG-UI protocol events.
+
+    This class handles the stateful transformation of streaming agent events
+    into the AG-UI protocol format, managing message IDs, thinking mode state,
+    and tool call ID mappings for builtin tools.
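+
+    Builtin tool calls are surfaced with composite IDs of the form
+    `pyd_ai_builtin|<provider_name>|<tool_call_id>` so that results can be
+    matched back to their originating calls when the history is replayed.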
+ + Example: + ```python + event_stream = AGUIEventStream() + async for ag_ui_event in event_stream.agent_event_to_events(pai_event): + print(ag_ui_event) + ``` + """ + + def __init__(self) -> None: + """Initialize AG-UI event stream state.""" + super().__init__() + self.part_end: BaseEvent | None = None + self.thinking: bool = False + self.builtin_tool_call_ids: dict[str, str] = {} + + # Granular handlers implementation + + async def handle_text_start(self, part: TextPart) -> AsyncIterator[BaseEvent]: + """Handle a TextPart at start.""" + if self.part_end: + yield self.part_end + self.part_end = None + + if self.thinking: + yield ThinkingEndEvent(type=EventType.THINKING_END) + self.thinking = False + + message_id = self.new_message_id() + yield TextMessageStartEvent(message_id=message_id) + if part.content: # pragma: no branch + yield TextMessageContentEvent(message_id=message_id, delta=part.content) + self.part_end = TextMessageEndEvent(message_id=message_id) + + async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[BaseEvent]: + """Handle a TextPartDelta.""" + if delta.content_delta: # pragma: no branch + yield TextMessageContentEvent(message_id=self.message_id, delta=delta.content_delta) + + async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[BaseEvent]: + """Handle a ThinkingPart at start.""" + if self.part_end: + yield self.part_end + self.part_end = None + + if not self.thinking: + yield ThinkingStartEvent(type=EventType.THINKING_START) + self.thinking = True + + if part.content: + yield ThinkingTextMessageStartEvent(type=EventType.THINKING_TEXT_MESSAGE_START) + yield ThinkingTextMessageContentEvent(type=EventType.THINKING_TEXT_MESSAGE_CONTENT, delta=part.content) + self.part_end = ThinkingTextMessageEndEvent(type=EventType.THINKING_TEXT_MESSAGE_END) + + async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[BaseEvent]: + """Handle a ThinkingPartDelta.""" + if delta.content_delta: # pragma: no branch + if not isinstance(self.part_end, ThinkingTextMessageEndEvent): + yield ThinkingTextMessageStartEvent(type=EventType.THINKING_TEXT_MESSAGE_START) + self.part_end = ThinkingTextMessageEndEvent(type=EventType.THINKING_TEXT_MESSAGE_END) + + yield ThinkingTextMessageContentEvent( + type=EventType.THINKING_TEXT_MESSAGE_CONTENT, delta=delta.content_delta + ) + + async def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: + """Handle a ToolCallPart or BuiltinToolCallPart at start.""" + if self.part_end: + yield self.part_end + self.part_end = None + + if self.thinking: + yield ThinkingEndEvent(type=EventType.THINKING_END) + self.thinking = False + + tool_call_id = part.tool_call_id + if isinstance(part, BuiltinToolCallPart): + builtin_tool_call_id = '|'.join([_BUILTIN_TOOL_CALL_ID_PREFIX, part.provider_name or '', tool_call_id]) + self.builtin_tool_call_ids[tool_call_id] = builtin_tool_call_id + tool_call_id = builtin_tool_call_id + + message_id = self.message_id or self.new_message_id() + yield ToolCallStartEvent(tool_call_id=tool_call_id, tool_call_name=part.tool_name, parent_message_id=message_id) + if part.args: + yield ToolCallArgsEvent(tool_call_id=tool_call_id, delta=part.args_as_json_str()) + self.part_end = ToolCallEndEvent(tool_call_id=tool_call_id) + + async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[BaseEvent]: + """Handle a ToolCallPartDelta.""" + tool_call_id = delta.tool_call_id + assert tool_call_id, 
'`ToolCallPartDelta.tool_call_id` must be set' + if tool_call_id in self.builtin_tool_call_ids: + tool_call_id = self.builtin_tool_call_ids[tool_call_id] + yield ToolCallArgsEvent( + tool_call_id=tool_call_id, + delta=delta.args_delta if isinstance(delta.args_delta, str) else json.dumps(delta.args_delta), + ) + + async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[BaseEvent]: + """Handle a BuiltinToolReturnPart.""" + # Emit any pending part_end event (e.g., TOOL_CALL_END) before the result + if self.part_end: + yield self.part_end + self.part_end = None + + tool_call_id = self.builtin_tool_call_ids[part.tool_call_id] + yield ToolCallResultEvent( + message_id=self.new_message_id(), + type=EventType.TOOL_CALL_RESULT, + role='tool', + tool_call_id=tool_call_id, + content=part.model_response_str(), + ) + + async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[BaseEvent]: + """Handle a FunctionToolCallEvent. + + This event is emitted when a function tool is called, but no AG-UI events + are needed at this stage since tool calls are handled in PartStartEvent. + """ + return + yield # Make this an async generator + + async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[BaseEvent]: + """Handle a FunctionToolResultEvent, emitting tool result events.""" + result = event.result + if not isinstance(result, ToolReturnPart): + return + + # Emit any pending part_end event (e.g., TOOL_CALL_END) before the result + if self.part_end: + yield self.part_end + self.part_end = None + + yield ToolCallResultEvent( + message_id=self.new_message_id(), + type=EventType.TOOL_CALL_RESULT, + role='tool', + tool_call_id=result.tool_call_id, + content=result.model_response_str(), + ) + + # Check for AG-UI events returned by tool calls. + possible_event = result.metadata or result.content + if isinstance(possible_event, BaseEvent): + yield possible_event + elif isinstance(possible_event, str | bytes): # pragma: no branch + # Avoid iterable check for strings and bytes. + pass + elif isinstance(possible_event, Iterable): # pragma: no branch + for item in possible_event: # type: ignore[reportUnknownMemberType] + if isinstance(item, BaseEvent): # pragma: no branch + yield item + + async def handle_builtin_tool_call(self, event: BuiltinToolCallEvent) -> AsyncIterator[BaseEvent]: # type: ignore[reportDeprecated] + """Handle a BuiltinToolCallEvent. + + This event is emitted when a builtin tool is called, but no AG-UI events + are needed at this stage since builtin tool calls are handled in PartStartEvent. + """ + return + yield # Make this an async generator + + async def handle_builtin_tool_result(self, event: BuiltinToolResultEvent) -> AsyncIterator[BaseEvent]: # type: ignore[reportDeprecated] + """Handle a BuiltinToolResultEvent. + + This event is emitted when a builtin tool returns. We need to emit any pending + part_end event (TOOL_CALL_END) before the result is shown in handle_builtin_tool_return. + """ + # Emit any pending part_end event (e.g., TOOL_CALL_END) before the result + if self.part_end: + yield self.part_end + self.part_end = None + + async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[BaseEvent]: + """Handle a FinalResultEvent. + + This event is emitted when the agent produces a final result, but no AG-UI events + are needed at this stage. 
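+
+        Final cleanup of any pending part-end or thinking state happens in
+        `handle_run_result` instead.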
+ """ + return + yield # Make this an async generator + + async def handle_run_result(self, event: AgentRunResultEvent) -> AsyncIterator[BaseEvent]: + """Handle an AgentRunResultEvent, cleaning up any pending state.""" + # Emit any pending part end event + if self.part_end: # pragma: no branch + yield self.part_end + self.part_end = None + + # End thinking mode if still active + if self.thinking: + yield ThinkingEndEvent( + type=EventType.THINKING_END, + ) + self.thinking = False + + async def on_validation_error(self, error: Exception) -> AsyncIterator[BaseEvent]: + """Handle validation errors before stream starts.""" + # Try to get code from exception if it has one, otherwise use class name + code = getattr(error, 'code', error.__class__.__name__) + yield RunErrorEvent(message=str(error), code=code) + + async def on_stream_error(self, error: Exception) -> AsyncIterator[BaseEvent]: + """Handle errors during streaming.""" + # Try to get code from exception if it has one, otherwise use class name + code = getattr(error, 'code', error.__class__.__name__) + yield RunErrorEvent(message=str(error), code=code) + + def encode_event(self, event: BaseEvent, accept: str) -> str: + """Encode an AG-UI event as SSE. + + Args: + event: The AG-UI event to encode. + accept: The accept header value for encoding format. + + Returns: + The SSE-formatted string. + """ + from ag_ui.encoder import EventEncoder + + encoder = EventEncoder(accept=accept) + return encoder.encode(event) + + +def protocol_messages_to_pai_messages(messages: list[Message]) -> list[ModelMessage]: + """Convert AG-UI messages to Pydantic AI messages. + + Args: + messages: List of AG-UI messages. + + Returns: + List of Pydantic AI ModelMessage objects. + """ + from ...messages import ( + ModelRequest, + ModelRequestPart, + ModelResponse, + ModelResponsePart, + SystemPromptPart, + UserPromptPart, + ) + + result: list[ModelMessage] = [] + tool_calls: dict[str, str] = {} # Tool call ID to tool name mapping. 
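+    # Consecutive user/system/tool messages are grouped into a single ModelRequest,
+    # and consecutive assistant/builtin-tool messages into a single ModelResponse;
+    # request_parts/response_parts track whichever group is currently open.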
+ request_parts: list[ModelRequestPart] | None = None + response_parts: list[ModelResponsePart] | None = None + + for msg in messages: + if isinstance(msg, UserMessage | SystemMessage | DeveloperMessage) or ( + isinstance(msg, ToolMessage) and not msg.tool_call_id.startswith(_BUILTIN_TOOL_CALL_ID_PREFIX) + ): + if request_parts is None: + request_parts = [] + result.append(ModelRequest(parts=request_parts)) + response_parts = None + + if isinstance(msg, UserMessage): + request_parts.append(UserPromptPart(content=msg.content)) + elif isinstance(msg, SystemMessage | DeveloperMessage): + request_parts.append(SystemPromptPart(content=msg.content)) + else: + tool_call_id = msg.tool_call_id + tool_name = tool_calls.get(tool_call_id) + if tool_name is None: # pragma: no cover + raise ValueError(f'Tool call with ID {tool_call_id} not found in the history.') + + request_parts.append( + ToolReturnPart( + tool_name=tool_name, + content=msg.content, + tool_call_id=tool_call_id, + ) + ) + + elif isinstance(msg, AssistantMessage) or ( # pragma: no branch + isinstance(msg, ToolMessage) and msg.tool_call_id.startswith(_BUILTIN_TOOL_CALL_ID_PREFIX) + ): + if response_parts is None: + response_parts = [] + result.append(ModelResponse(parts=response_parts)) + request_parts = None + + if isinstance(msg, AssistantMessage): + if msg.content: + response_parts.append(TextPart(content=msg.content)) + + if msg.tool_calls: + for tool_call in msg.tool_calls: + tool_call_id = tool_call.id + tool_name = tool_call.function.name + tool_calls[tool_call_id] = tool_name + + if tool_call_id.startswith(_BUILTIN_TOOL_CALL_ID_PREFIX): + _, provider_name, tool_call_id = tool_call_id.split('|', 2) + response_parts.append( + BuiltinToolCallPart( + tool_name=tool_name, + args=tool_call.function.arguments, + tool_call_id=tool_call_id, + provider_name=provider_name, + ) + ) + else: + response_parts.append( + ToolCallPart( + tool_name=tool_name, + tool_call_id=tool_call_id, + args=tool_call.function.arguments, + ) + ) + else: + tool_call_id = msg.tool_call_id + tool_name = tool_calls.get(tool_call_id) + if tool_name is None: # pragma: no cover + raise ValueError(f'Tool call with ID {tool_call_id} not found in the history.') + _, provider_name, tool_call_id = tool_call_id.split('|', 2) + + response_parts.append( + BuiltinToolReturnPart( + tool_name=tool_name, + content=msg.content, + tool_call_id=tool_call_id, + provider_name=provider_name, + ) + ) + + return result diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py new file mode 100644 index 0000000000..f004e15579 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py @@ -0,0 +1,94 @@ +"""Vercel AI protocol adapter for Pydantic AI agents. + +This module provides classes for integrating Pydantic AI agents with the Vercel AI protocol, +enabling streaming event-based communication for interactive AI applications. 
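+
+Example (a minimal usage sketch; the model name here is only an assumption):
+
+```python
+from pydantic_ai import Agent
+from pydantic_ai.ui.vercel_ai import VercelAIAdapter
+
+agent = Agent('openai:gpt-4o')
+# stream with adapter.run_stream(...) or serve via adapter.dispatch_request(...)
+adapter = VercelAIAdapter(agent)
+```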
+ +Converted to Python from: +https://github.com/vercel/ai/blob/ai%405.0.34/packages/ai/src/ui/ui-messages.ts +""" + +from ._utils import VERCEL_AI_DSP_HEADERS, CamelBaseModel, JSONValue, ProviderMetadata +from .adapter import VercelAIAdapter +from .event_stream import VercelAIEventStream, protocol_messages_to_pai_messages +from .request_types import ( + RequestData, + SubmitMessage, + TextUIPart, + ToolOutputAvailablePart, + UIMessage, + UIPart, + request_data_ta, +) +from .response_types import ( + AbortChunk, + AbstractSSEChunk, + DataUIMessageChunk, + DoneChunk, + ErrorChunk, + FileChunk, + FinishChunk, + FinishStepChunk, + MessageMetadataChunk, + ReasoningDeltaChunk, + ReasoningEndChunk, + ReasoningStartChunk, + SourceDocumentChunk, + SourceUrlChunk, + StartChunk, + StartStepChunk, + TextDeltaChunk, + TextEndChunk, + TextStartChunk, + ToolInputAvailableChunk, + ToolInputDeltaChunk, + ToolInputErrorChunk, + ToolInputStartChunk, + ToolOutputAvailableChunk, + ToolOutputErrorChunk, +) + +__all__ = [ + # Utilities + 'CamelBaseModel', + 'ProviderMetadata', + 'JSONValue', + 'VERCEL_AI_DSP_HEADERS', + # Request types + 'RequestData', + 'TextUIPart', + 'UIMessage', + 'ToolOutputAvailablePart', + 'UIPart', + 'SubmitMessage', + 'request_data_ta', + # Response types + 'AbstractSSEChunk', + 'TextStartChunk', + 'TextDeltaChunk', + 'TextEndChunk', + 'ReasoningStartChunk', + 'ReasoningDeltaChunk', + 'ReasoningEndChunk', + 'ErrorChunk', + 'ToolInputStartChunk', + 'ToolInputDeltaChunk', + 'ToolInputAvailableChunk', + 'ToolInputErrorChunk', + 'ToolOutputAvailableChunk', + 'ToolOutputErrorChunk', + 'SourceUrlChunk', + 'SourceDocumentChunk', + 'FileChunk', + 'DataUIMessageChunk', + 'StartStepChunk', + 'FinishStepChunk', + 'StartChunk', + 'FinishChunk', + 'AbortChunk', + 'MessageMetadataChunk', + 'DoneChunk', + # Event stream and adapter + 'VercelAIEventStream', + 'VercelAIAdapter', + 'protocol_messages_to_pai_messages', +] diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_utils.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_utils.py new file mode 100644 index 0000000000..d3dfb4e8cd --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_utils.py @@ -0,0 +1,26 @@ +"""Utilities for Vercel AI protocol. 
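+
+All wire models inherit from `CamelBaseModel`, so snake_case field names are
+serialized with camelCase aliases (e.g. `tool_call_id` becomes `toolCallId`).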
+ +Converted to Python from: +https://github.com/vercel/ai/blob/ai%405.0.34/packages/ai/src/ui/ui-messages.ts +""" + +from abc import ABC +from typing import Any + +from pydantic import BaseModel, ConfigDict +from pydantic.alias_generators import to_camel + +__all__ = ('CamelBaseModel', 'ProviderMetadata', 'JSONValue', 'VERCEL_AI_DSP_HEADERS') + +# See https://ai-sdk.dev/docs/ai-sdk-ui/stream-protocol#data-stream-protocol +VERCEL_AI_DSP_HEADERS = {'x-vercel-ai-ui-message-stream': 'v1'} + +# Technically this is recursive union of JSON types; for simplicity, we call it Any +JSONValue = Any +ProviderMetadata = dict[str, dict[str, JSONValue]] + + +class CamelBaseModel(BaseModel, ABC): + """Base model with camelCase aliases.""" + + model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True, extra='forbid') diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/adapter.py new file mode 100644 index 0000000000..c38b9e365c --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/adapter.py @@ -0,0 +1,149 @@ +"""Vercel AI adapter for handling requests.""" + +# pyright: reportGeneralTypeIssues=false + +from __future__ import annotations + +from collections.abc import AsyncIterator +from dataclasses import dataclass +from typing import TYPE_CHECKING, Any + +from ...tools import AgentDepsT +from .event_stream import VercelAIEventStream +from .request_types import RequestData, TextUIPart, UIMessage +from .response_types import AbstractSSEChunk, DoneChunk + +if TYPE_CHECKING: + from ...agent import Agent + +__all__ = ['VercelAIAdapter'] + + +@dataclass +class VercelAIAdapter: + """Adapter for handling Vercel AI protocol requests with Pydantic AI agents. + + This adapter provides a simplified interface for integrating Pydantic AI agents + with the Vercel AI protocol, handling request parsing, message conversion, + and event streaming. + + Example: + ```python + from pydantic_ai import Agent + from pydantic_ai.ui.vercel_ai import VercelAIAdapter + + agent = Agent('openai:gpt-4') + adapter = VercelAIAdapter(agent) + + async def handle_request(data: RequestData, deps=None): + async for chunk in adapter.run_stream(data, deps): + yield chunk.sse() + ``` + """ + + agent: Agent[AgentDepsT] + """The Pydantic AI agent to run.""" + + async def run_stream( + self, request: RequestData, deps: AgentDepsT | None = None + ) -> AsyncIterator[AbstractSSEChunk | DoneChunk]: + """Stream events from an agent run as Vercel AI protocol events. + + Args: + request: The Vercel AI request data. + deps: Optional dependencies to pass to the agent. + + Yields: + Vercel AI protocol events (AbstractSSEChunk or DoneChunk). + + Raises: + ValueError: If request validation fails. 
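+
+        Example (a minimal sketch; assumes `data` is an already-parsed `RequestData`):
+            ```python
+            async for chunk in adapter.run_stream(data):
+                sse_line = chunk.sse()  # SSE-encoded chunk, or '[DONE]' for DoneChunk
+            ```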
+        """
+        # Parse messages from request
+        messages = self.parse_request_messages(request)
+
+        # Extract prompt from last user message (for now, simple implementation)
+        if not messages:
+            raise ValueError('no messages provided')
+
+        message = messages[-1]
+        prompt_parts: list[str] = []
+        for part in message.parts:
+            if isinstance(part, TextUIPart):
+                prompt_parts.append(part.text)
+            else:
+                raise ValueError(f'Only text parts are supported so far, got {part}')
+
+        # Create event stream
+        event_stream = self.create_event_stream()
+
+        # Run agent and stream events
+        async for event in self.agent.run_stream_events('\n'.join(prompt_parts), deps=deps):
+            async for chunk in event_stream.agent_event_to_events(event):
+                yield chunk
+
+        # Emit after-stream events
+        async for chunk in event_stream.after_stream():
+            yield chunk
+
+    def create_event_stream(self) -> VercelAIEventStream[AgentDepsT]:
+        """Create a new Vercel AI event stream.
+
+        Returns:
+            A VercelAIEventStream instance.
+        """
+        return VercelAIEventStream[AgentDepsT]()
+
+    def parse_request_messages(self, request: RequestData) -> list[UIMessage]:
+        """Extract messages from the Vercel AI request.
+
+        Args:
+            request: The Vercel AI request data.
+
+        Returns:
+            List of UIMessage objects.
+        """
+        return request.messages
+
+    async def dispatch_request(self, request: Any, deps: AgentDepsT | None = None) -> Any:
+        """Handle a request and return a streamed SSE response.
+
+        Args:
+            request: The incoming Starlette/FastAPI request.
+            deps: The dependencies for the agent.
+
+        Returns:
+            A streamed SSE response.
+        """
+        try:
+            from starlette.requests import Request
+            from starlette.responses import JSONResponse
+        except ImportError as e:  # pragma: no cover
+            raise ImportError('Please install starlette to use dispatch_request') from e
+
+        try:
+            from sse_starlette.sse import EventSourceResponse
+        except ImportError as e:  # pragma: no cover
+            raise ImportError('Please install sse_starlette to use dispatch_request') from e
+
+        from pydantic import ValidationError
+
+        if not isinstance(request, Request):  # pragma: no cover
+            raise TypeError(f'Expected Starlette Request, got {type(request).__name__}')
+
+        from .request_types import request_data_ta
+
+        try:
+            # request.body() returns the raw bytes that validate_json expects
+            data = request_data_ta.validate_json(await request.body())
+
+            async def run_sse() -> AsyncIterator[str]:
+                async for chunk in self.run_stream(data, deps=deps):
+                    yield chunk.sse()
+
+            from ._utils import VERCEL_AI_DSP_HEADERS
+
+            return EventSourceResponse(run_sse(), headers=VERCEL_AI_DSP_HEADERS)
+        except ValidationError as e:
+            return JSONResponse({'errors': e.errors()}, status_code=422)
+        except Exception as e:
+            return JSONResponse({'errors': str(e)}, status_code=500)
diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/event_stream.py
new file mode 100644
index 0000000000..465a3bb7e8
--- /dev/null
+++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/event_stream.py
@@ -0,0 +1,235 @@
+"""Vercel AI event stream implementation."""
+
+# pyright: reportIncompatibleMethodOverride=false
+
+from __future__ import annotations
+
+from collections.abc import AsyncIterator
+from typing import Any
+
+from pydantic_core import to_json
+
+from ...messages import (
+    BuiltinToolCallEvent,  # type: ignore[reportDeprecated]
+    BuiltinToolCallPart,
+    BuiltinToolResultEvent,  # type: ignore[reportDeprecated]
+    BuiltinToolReturnPart,
+    FinalResultEvent,
+    FunctionToolCallEvent,
+    FunctionToolResultEvent,
+    ModelMessage,
+    RetryPromptPart,
+    TextPart,
+    TextPartDelta,
+ ThinkingPart, + ThinkingPartDelta, + ToolCallPart, + ToolCallPartDelta, + ToolReturnPart, +) +from ...run import AgentRunResultEvent +from ...tools import AgentDepsT +from .. import BaseEventStream +from .request_types import TextUIPart, UIMessage +from .response_types import ( + AbstractSSEChunk, + DoneChunk, + ErrorChunk, + FinishChunk, + ReasoningDeltaChunk, + ReasoningStartChunk, + TextDeltaChunk, + TextStartChunk, + ToolInputDeltaChunk, + ToolInputStartChunk, + ToolOutputAvailableChunk, +) + +__all__ = ['VercelAIEventStream', 'protocol_messages_to_pai_messages'] + + +def _json_dumps(obj: Any) -> str: + """Dump an object to JSON string.""" + return to_json(obj).decode('utf-8') + + +class VercelAIEventStream(BaseEventStream[AbstractSSEChunk | DoneChunk, AgentDepsT]): + """Transforms Pydantic AI agent events into Vercel AI protocol events. + + This class handles the stateful transformation of streaming agent events + into the Vercel AI protocol format, managing message IDs and final result tool tracking. + + Example: + ```python + event_stream = VercelAIEventStream() + async for vercel_event in event_stream.agent_event_to_events(pai_event): + print(vercel_event.sse()) + ``` + """ + + def __init__(self) -> None: + """Initialize Vercel AI event stream state.""" + super().__init__() + self.new_message_id() # Generate a message ID at initialization + self._final_result_tool_id: str | None = None + + # Granular handlers implementation + + async def handle_text_start(self, part: TextPart) -> AsyncIterator[AbstractSSEChunk]: + """Handle a TextPart at start.""" + yield TextStartChunk(id=self.message_id or self.new_message_id()) + if part.content: + yield TextDeltaChunk(id=self.message_id, delta=part.content) + + async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[AbstractSSEChunk]: + """Handle a TextPartDelta.""" + if delta.content_delta: + yield TextDeltaChunk(id=self.message_id, delta=delta.content_delta) + + async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[AbstractSSEChunk]: + """Handle a ThinkingPart at start.""" + if not self.message_id: + self.new_message_id() + yield ReasoningStartChunk(id=self.message_id) + if part.content: + yield ReasoningDeltaChunk(id=self.message_id, delta=part.content) + + async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[AbstractSSEChunk]: + """Handle a ThinkingPartDelta.""" + if delta.content_delta: + yield ReasoningDeltaChunk(id=self.message_id, delta=delta.content_delta) + + async def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[AbstractSSEChunk]: + """Handle a ToolCallPart or BuiltinToolCallPart at start.""" + yield ToolInputStartChunk(tool_call_id=part.tool_call_id, tool_name=part.tool_name) + if isinstance(part.args, str): + yield ToolInputDeltaChunk(tool_call_id=part.tool_call_id, input_text_delta=part.args) + elif part.args is not None: + yield ToolInputDeltaChunk(tool_call_id=part.tool_call_id, input_text_delta=_json_dumps(part.args)) + + async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[AbstractSSEChunk]: + """Handle a ToolCallPartDelta.""" + tool_call_id = delta.tool_call_id or '' + if isinstance(delta.args_delta, str): + yield ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=delta.args_delta) + elif delta.args_delta is not None: + yield ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=_json_dumps(delta.args_delta)) + + async def handle_builtin_tool_return(self, part: 
BuiltinToolReturnPart) -> AsyncIterator[AbstractSSEChunk]: + """Handle a BuiltinToolReturnPart.""" + yield ToolOutputAvailableChunk(tool_call_id=part.tool_call_id, output=part.content) + + async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[AbstractSSEChunk]: + """Handle a FunctionToolCallEvent. + + No Vercel AI events are emitted at this stage since tool calls are handled in PartStartEvent. + """ + return + yield # Make this an async generator + + async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[AbstractSSEChunk]: + """Handle a FunctionToolResultEvent, emitting tool result events.""" + result = event.result + if isinstance(result, ToolReturnPart): + yield ToolOutputAvailableChunk(tool_call_id=result.tool_call_id, output=result.content) + elif isinstance(result, RetryPromptPart): + # For retry prompts, emit the error content as tool output + yield ToolOutputAvailableChunk(tool_call_id=result.tool_call_id, output=result.content) + + async def handle_builtin_tool_call(self, event: BuiltinToolCallEvent) -> AsyncIterator[AbstractSSEChunk]: # type: ignore[reportDeprecated] + """Handle a BuiltinToolCallEvent, emitting tool input events.""" + part = event.part + yield ToolInputStartChunk(tool_call_id=part.tool_call_id, tool_name=part.tool_name) + if isinstance(part.args, str): + yield ToolInputDeltaChunk(tool_call_id=part.tool_call_id, input_text_delta=part.args) + elif part.args is not None: + yield ToolInputDeltaChunk(tool_call_id=part.tool_call_id, input_text_delta=_json_dumps(part.args)) + + async def handle_builtin_tool_result(self, event: BuiltinToolResultEvent) -> AsyncIterator[AbstractSSEChunk]: # type: ignore[reportDeprecated] + """Handle a BuiltinToolResultEvent, emitting tool output events.""" + result = event.result + yield ToolOutputAvailableChunk(tool_call_id=result.tool_call_id, output=result.content) + + async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[AbstractSSEChunk]: + """Handle a FinalResultEvent, tracking the final result tool.""" + if event.tool_call_id and event.tool_name: + self._final_result_tool_id = event.tool_call_id + yield ToolInputStartChunk(tool_call_id=event.tool_call_id, tool_name=event.tool_name) + + async def handle_run_result(self, event: AgentRunResultEvent) -> AsyncIterator[AbstractSSEChunk]: + """Handle an AgentRunResultEvent. + + No additional Vercel AI events are emitted at this stage. + """ + return + yield # Make this an async generator + + async def after_stream(self) -> AsyncIterator[AbstractSSEChunk | DoneChunk]: + """Yield events after agent streaming completes.""" + # Close the final result tool if there was one + if tool_call_id := self._final_result_tool_id: + yield ToolOutputAvailableChunk(tool_call_id=tool_call_id, output=None) + yield FinishChunk() + yield DoneChunk() + + async def on_validation_error(self, error: Exception) -> AsyncIterator[AbstractSSEChunk]: + """Handle validation errors before stream starts.""" + yield ErrorChunk(error_text=str(error)) + + async def on_stream_error(self, error: Exception) -> AsyncIterator[AbstractSSEChunk]: + """Handle errors during streaming.""" + yield ErrorChunk(error_text=str(error)) + + +def protocol_messages_to_pai_messages(messages: list[UIMessage]) -> list[ModelMessage]: + """Convert Vercel AI protocol messages to Pydantic AI messages. + + Args: + messages: List of Vercel AI UIMessage objects. + + Returns: + List of Pydantic AI ModelMessage objects. 
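+
+    Note:
+        Only text parts are currently reconstructed; tool call and tool output
+        parts are skipped (see the TODO in the assistant branch below).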
+ + Raises: + ValueError: If message format is not supported. + """ + from ...messages import ModelRequest, ModelResponse, SystemPromptPart, TextPart, UserPromptPart + + pai_messages: list[ModelMessage] = [] + + for msg in messages: + if msg.role == 'user': + # User message - extract text from parts + texts: list[str] = [] + for part in msg.parts: + if isinstance(part, TextUIPart): + texts.append(part.text) + else: + raise ValueError(f'Only text parts are supported for user messages, got {type(part).__name__}') + + if texts: + pai_messages.append(ModelRequest(parts=[UserPromptPart(content='\n'.join(texts))])) + + elif msg.role == 'assistant': + # Assistant message - for now, just extract text + # Full reconstruction of ModelResponse with tool calls would require more complex logic + texts: list[str] = [] + for part in msg.parts: + if isinstance(part, TextUIPart): + texts.append(part.text) + # TODO: Handle ToolOutputAvailablePart for full message history reconstruction + + if texts: + pai_messages.append(ModelResponse(parts=[TextPart(content='\n'.join(texts))])) + + elif msg.role == 'system': + # System message - not in standard Vercel AI protocol but might be custom + texts: list[str] = [] + for part in msg.parts: + if isinstance(part, TextUIPart): + texts.append(part.text) + + if texts: + pai_messages.append(ModelRequest(parts=[SystemPromptPart(content='\n'.join(texts))])) + + return pai_messages diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/request_types.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/request_types.py new file mode 100644 index 0000000000..ea30436378 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/request_types.py @@ -0,0 +1,70 @@ +"""Vercel AI request types (UI messages). + +Converted to Python from: +https://github.com/vercel/ai/blob/ai%405.0.34/packages/ai/src/ui/ui-messages.ts +""" + +from typing import Any, Literal + +from pydantic import TypeAdapter + +from ._utils import CamelBaseModel, ProviderMetadata + +__all__ = [ + 'TextUIPart', + 'ToolOutputAvailablePart', + 'UIPart', + 'UIMessage', + 'SubmitMessage', + 'RequestData', + 'request_data_ta', +] + + +class TextUIPart(CamelBaseModel): + """A text part of a message.""" + + type: Literal['text'] = 'text' + text: str + state: Literal['streaming', 'done'] | None = None + provider_metadata: ProviderMetadata | None = None + + +class ToolOutputAvailablePart(CamelBaseModel): + """Tool output available part.""" + + type: str # f"tool-{tool_name}" + tool_call_id: str + state: Literal['output-available'] = 'output-available' + input: Any + output: Any + provider_executed: bool | None = None + call_provider_metadata: ProviderMetadata | None = None + preliminary: bool | None = None + + +# Since ToolOutputAvailablePart has a dynamic type field, we can't use Discriminator +UIPart = TextUIPart | ToolOutputAvailablePart + + +class UIMessage(CamelBaseModel): + """A message in the UI protocol.""" + + id: str + role: Literal['user', 'assistant', 'system'] + metadata: dict[str, Any] | None = None + parts: list[UIPart] + + +class SubmitMessage(CamelBaseModel): + """Submit message request.""" + + trigger: Literal['submit-message'] = 'submit-message' + id: str + messages: list[UIMessage] + + +RequestData = SubmitMessage + +# Type adapter for parsing requests +request_data_ta: TypeAdapter[RequestData] = TypeAdapter(RequestData) diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai/response_types.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/response_types.py similarity index 81% rename from 
pydantic_ai_slim/pydantic_ai/vercel_ai/response_types.py rename to pydantic_ai_slim/pydantic_ai/ui/vercel_ai/response_types.py index 8538dd9680..09b82c3a99 100644 --- a/pydantic_ai_slim/pydantic_ai/vercel_ai/response_types.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/response_types.py @@ -1,19 +1,47 @@ -"""Convert to Python from. +"""Vercel AI response types (SSE chunks). +Converted to Python from: https://github.com/vercel/ai/blob/ai%405.0.34/packages/ai/src/ui/ui-messages.ts - -Mostly with Claude. """ from typing import Any, Literal from ._utils import CamelBaseModel, ProviderMetadata +__all__ = [ + 'AbstractSSEChunk', + 'TextStartChunk', + 'TextDeltaChunk', + 'TextEndChunk', + 'ReasoningStartChunk', + 'ReasoningDeltaChunk', + 'ReasoningEndChunk', + 'ErrorChunk', + 'ToolInputStartChunk', + 'ToolInputDeltaChunk', + 'ToolInputAvailableChunk', + 'ToolInputErrorChunk', + 'ToolOutputAvailableChunk', + 'ToolOutputErrorChunk', + 'SourceUrlChunk', + 'SourceDocumentChunk', + 'FileChunk', + 'DataUIMessageChunk', + 'StartStepChunk', + 'FinishStepChunk', + 'StartChunk', + 'FinishChunk', + 'AbortChunk', + 'MessageMetadataChunk', + 'DoneChunk', +] + class AbstractSSEChunk(CamelBaseModel): - """Abstract base class for response SSE even.""" + """Abstract base class for response SSE events.""" def sse(self) -> str: + """Encode as SSE format.""" return self.model_dump_json(exclude_none=True, by_alias=True) @@ -74,29 +102,22 @@ class ErrorChunk(AbstractSSEChunk): error_text: str -class ToolInputAvailableChunk(AbstractSSEChunk): - """Tool input available chunk.""" +class ToolInputStartChunk(AbstractSSEChunk): + """Tool input start chunk.""" - type: Literal['tool-input-available'] = 'tool-input-available' + type: Literal['tool-input-start'] = 'tool-input-start' tool_call_id: str tool_name: str - input: Any provider_executed: bool | None = None - provider_metadata: ProviderMetadata | None = None dynamic: bool | None = None -class ToolInputErrorChunk(AbstractSSEChunk): - """Tool input error chunk.""" +class ToolInputDeltaChunk(AbstractSSEChunk): + """Tool input delta chunk.""" - type: Literal['tool-input-error'] = 'tool-input-error' + type: Literal['tool-input-delta'] = 'tool-input-delta' tool_call_id: str - tool_name: str - input: Any - provider_executed: bool | None = None - provider_metadata: ProviderMetadata | None = None - dynamic: bool | None = None - error_text: str + input_text_delta: str class ToolOutputAvailableChunk(AbstractSSEChunk): @@ -110,35 +131,48 @@ class ToolOutputAvailableChunk(AbstractSSEChunk): preliminary: bool | None = None -class ToolOutputErrorChunk(AbstractSSEChunk): - """Tool output error chunk.""" +class FinishChunk(AbstractSSEChunk): + """Finish chunk.""" - type: Literal['tool-output-error'] = 'tool-output-error' + type: Literal['finish'] = 'finish' + message_metadata: Any | None = None + + +class ToolInputAvailableChunk(AbstractSSEChunk): + """Tool input available chunk.""" + + type: Literal['tool-input-available'] = 'tool-input-available' tool_call_id: str - error_text: str + tool_name: str + input: Any provider_executed: bool | None = None + provider_metadata: ProviderMetadata | None = None dynamic: bool | None = None -class ToolInputStartChunk(AbstractSSEChunk): - """Tool input start chunk.""" +class ToolInputErrorChunk(AbstractSSEChunk): + """Tool input error chunk.""" - type: Literal['tool-input-start'] = 'tool-input-start' + type: Literal['tool-input-error'] = 'tool-input-error' tool_call_id: str tool_name: str + input: Any provider_executed: bool | None = None + 
provider_metadata: ProviderMetadata | None = None dynamic: bool | None = None + error_text: str -class ToolInputDeltaChunk(AbstractSSEChunk): - """Tool input delta chunk.""" +class ToolOutputErrorChunk(AbstractSSEChunk): + """Tool output error chunk.""" - type: Literal['tool-input-delta'] = 'tool-input-delta' + type: Literal['tool-output-error'] = 'tool-output-error' tool_call_id: str - input_text_delta: str + error_text: str + provider_executed: bool | None = None + dynamic: bool | None = None -# Source chunk types class SourceUrlChunk(AbstractSSEChunk): """Source URL chunk.""" @@ -187,7 +221,6 @@ class FinishStepChunk(AbstractSSEChunk): type: Literal['finish-step'] = 'finish-step' -# Message lifecycle chunk types class StartChunk(AbstractSSEChunk): """Start chunk.""" @@ -196,13 +229,6 @@ class StartChunk(AbstractSSEChunk): message_metadata: Any | None = None -class FinishChunk(AbstractSSEChunk): - """Finish chunk.""" - - type: Literal['finish'] = 'finish' - message_metadata: Any | None = None - - class AbortChunk(AbstractSSEChunk): """Abort chunk.""" @@ -214,3 +240,17 @@ class MessageMetadataChunk(AbstractSSEChunk): type: Literal['message-metadata'] = 'message-metadata' message_metadata: Any + + +class DoneChunk: + """Special marker chunk to indicate the end of the SSE stream.""" + + def sse(self) -> str: + """Encode as SSE done marker.""" + return '[DONE]' + + def __str__(self) -> str: + return 'DoneChunk' + + def __eq__(self, other: Any) -> bool: + return isinstance(other, DoneChunk) diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai/__init__.py b/pydantic_ai_slim/pydantic_ai/vercel_ai/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai/_utils.py b/pydantic_ai_slim/pydantic_ai/vercel_ai/_utils.py deleted file mode 100644 index 6ef877a235..0000000000 --- a/pydantic_ai_slim/pydantic_ai/vercel_ai/_utils.py +++ /dev/null @@ -1,18 +0,0 @@ -from abc import ABC -from typing import Any - -from pydantic import BaseModel, ConfigDict -from pydantic.alias_generators import to_camel - -__all__ = 'ProviderMetadata', 'CamelBaseModel' - -# technically this is recursive union of JSON types -# for to simplify validation, we call it Any -JSONValue = Any - -# Provider metadata types -ProviderMetadata = dict[str, dict[str, JSONValue]] - - -class CamelBaseModel(BaseModel, ABC): - model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True, extra='forbid') diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai/request_types.py b/pydantic_ai_slim/pydantic_ai/vercel_ai/request_types.py deleted file mode 100644 index 4753ca90f5..0000000000 --- a/pydantic_ai_slim/pydantic_ai/vercel_ai/request_types.py +++ /dev/null @@ -1,274 +0,0 @@ -"""Convert to Python from. - -https://github.com/vercel/ai/blob/ai%405.0.34/packages/ai/src/ui/ui-messages.ts - -Mostly with Claude. 
-""" - -from typing import Annotated, Any, Literal - -from pydantic import Discriminator, TypeAdapter - -from ._utils import CamelBaseModel, ProviderMetadata - - -class TextUIPart(CamelBaseModel): - """A text part of a message.""" - - type: Literal['text'] = 'text' - - text: str - """The text content.""" - - state: Literal['streaming', 'done'] | None = None - """The state of the text part.""" - - provider_metadata: ProviderMetadata | None = None - """The provider metadata.""" - - -class ReasoningUIPart(CamelBaseModel): - """A reasoning part of a message.""" - - type: Literal['reasoning'] = 'reasoning' - - text: str - """The reasoning text.""" - - state: Literal['streaming', 'done'] | None = None - """The state of the reasoning part.""" - - provider_metadata: ProviderMetadata | None = None - """The provider metadata.""" - - -class SourceUrlUIPart(CamelBaseModel): - """A source part of a message.""" - - type: Literal['source-url'] = 'source-url' - source_id: str - url: str - title: str | None = None - provider_metadata: ProviderMetadata | None = None - - -class SourceDocumentUIPart(CamelBaseModel): - """A document source part of a message.""" - - type: Literal['source-document'] = 'source-document' - source_id: str - media_type: str - title: str - filename: str | None = None - provider_metadata: ProviderMetadata | None = None - - -class FileUIPart(CamelBaseModel): - """A file part of a message.""" - - type: Literal['file'] = 'file' - - media_type: str - """ - IANA media type of the file. - - @see https://www.iana.org/assignments/media-types/media-types.xhtml - """ - - filename: str | None = None - """Optional filename of the file.""" - - url: str - """ - The URL of the file. - It can either be a URL to a hosted file or a [Data URL](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs). 
- """ - - provider_metadata: ProviderMetadata | None = None - """The provider metadata.""" - - -class StepStartUIPart(CamelBaseModel): - """A step boundary part of a message.""" - - type: Literal['step-start'] = 'step-start' - - -class DataUIPart(CamelBaseModel): - """Data part with dynamic type based on data name.""" - - type: str # Will be f"data-{NAME}" - id: str | None = None - data: Any - - -# Tool part states as separate models -class ToolInputStreamingPart(CamelBaseModel): - """Tool part in input-streaming state.""" - - type: str # Will be f"tool-{NAME}" - tool_call_id: str - state: Literal['input-streaming'] = 'input-streaming' - input: Any | None = None - provider_executed: bool | None = None - - -class ToolInputAvailablePart(CamelBaseModel): - """Tool part in input-available state.""" - - type: str # Will be f"tool-{NAME}" - tool_call_id: str - state: Literal['input-available'] = 'input-available' - input: Any - provider_executed: bool | None = None - call_provider_metadata: ProviderMetadata | None = None - - -class ToolOutputAvailablePart(CamelBaseModel): - """Tool part in output-available state.""" - - type: str # Will be f"tool-{NAME}" - tool_call_id: str - state: Literal['output-available'] = 'output-available' - input: Any - output: Any - provider_executed: bool | None = None - call_provider_metadata: ProviderMetadata | None = None - preliminary: bool | None = None - - -class ToolOutputErrorPart(CamelBaseModel): - """Tool part in output-error state.""" - - type: str # Will be f"tool-{NAME}" - tool_call_id: str - state: Literal['output-error'] = 'output-error' - input: Any | None = None - raw_input: Any | None = None - error_text: str - provider_executed: bool | None = None - call_provider_metadata: ProviderMetadata | None = None - - -# Union of all tool part states -ToolUIPart = ToolInputStreamingPart | ToolInputAvailablePart | ToolOutputAvailablePart | ToolOutputErrorPart - - -# Dynamic tool part states as separate models -class DynamicToolInputStreamingPart(CamelBaseModel): - """Dynamic tool part in input-streaming state.""" - - type: Literal['dynamic-tool'] = 'dynamic-tool' - tool_name: str - tool_call_id: str - state: Literal['input-streaming'] = 'input-streaming' - input: Any | None = None - - -class DynamicToolInputAvailablePart(CamelBaseModel): - """Dynamic tool part in input-available state.""" - - type: Literal['dynamic-tool'] = 'dynamic-tool' - tool_name: str - tool_call_id: str - state: Literal['input-available'] = 'input-available' - input: Any - call_provider_metadata: ProviderMetadata | None = None - - -class DynamicToolOutputAvailablePart(CamelBaseModel): - """Dynamic tool part in output-available state.""" - - type: Literal['dynamic-tool'] = 'dynamic-tool' - tool_name: str - tool_call_id: str - state: Literal['output-available'] = 'output-available' - input: Any - output: Any - call_provider_metadata: ProviderMetadata | None = None - preliminary: bool | None = None - - -class DynamicToolOutputErrorPart(CamelBaseModel): - """Dynamic tool part in output-error state.""" - - type: Literal['dynamic-tool'] = 'dynamic-tool' - tool_name: str - tool_call_id: str - state: Literal['output-error'] = 'output-error' - input: Any - error_text: str - call_provider_metadata: ProviderMetadata | None = None - - -# Union of all dynamic tool part states -DynamicToolUIPart = ( - DynamicToolInputStreamingPart - | DynamicToolInputAvailablePart - | DynamicToolOutputAvailablePart - | DynamicToolOutputErrorPart -) - - -UIMessagePart = ( - TextUIPart - | ReasoningUIPart - | ToolUIPart - 
| DynamicToolUIPart - | SourceUrlUIPart - | SourceDocumentUIPart - | FileUIPart - | DataUIPart - | StepStartUIPart -) -"""Union of all message part types.""" - - -class UIMessage(CamelBaseModel): - """A message as displayed in the UI by Vercel AI Elements.""" - - id: str - """A unique identifier for the message.""" - - role: Literal['system', 'user', 'assistant'] - """The role of the message.""" - - metadata: Any | None = None - """The metadata of the message.""" - - parts: list[UIMessagePart] - """ - The parts of the message. Use this for rendering the message in the UI. - - System messages should be avoided (set the system prompt on the server instead). - They can have text parts. - - User messages can have text parts and file parts. - - Assistant messages can have text, reasoning, tool invocation, and file parts. - """ - - -class SubmitMessage(CamelBaseModel, extra='allow'): - """Submit a message to the agent.""" - - trigger: Literal['submit-message'] - id: str - messages: list[UIMessage] - - # TODO (DouweM): Update, make variable? I like `inference_params` from OpenAI ChatKit. - # model: str - # web_search: bool - - -class RegenerateMessage(CamelBaseModel): - """Ask the agent to regenerate a message.""" - - trigger: Literal['regenerate-message'] - id: str - messages: list[UIMessage] - message_id: str - - -RequestData = Annotated[SubmitMessage | RegenerateMessage, Discriminator('trigger')] -request_data_ta: TypeAdapter[RequestData] = TypeAdapter(RequestData) diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai/response_stream.py b/pydantic_ai_slim/pydantic_ai/vercel_ai/response_stream.py deleted file mode 100644 index b721192f03..0000000000 --- a/pydantic_ai_slim/pydantic_ai/vercel_ai/response_stream.py +++ /dev/null @@ -1,122 +0,0 @@ -from __future__ import annotations as _annotations - -from collections.abc import AsyncIterator -from dataclasses import dataclass, field -from typing import Any -from uuid import uuid4 - -from pydantic_core import to_json - -from .. import messages -from . import response_types as _t - -__all__ = 'VERCEL_AI_DSP_HEADERS', 'EventStreamer' - -# See https://ai-sdk.dev/docs/ai-sdk-ui/stream-protocol#data-stream-protocol -VERCEL_AI_DSP_HEADERS = {'x-vercel-ai-ui-message-stream': 'v1'} - - -@dataclass -class EventStreamer: - """Logic for mapping pydantic-ai events to Vercel AI Elements events which can be streamed to a client over SSE.""" - - message_id: str = field(default_factory=lambda: uuid4().hex) - _final_result_tool_id: str | None = field(default=None, init=False) - - async def event_to_chunks(self, event: messages.AgentStreamEvent) -> AsyncIterator[_t.AbstractSSEChunk]: # noqa C901 - """Convert pydantic-ai events to Vercel AI Elements events which can be streamed to a client over SSE. - - Args: - event: The pydantic-ai event to convert. - - Yields: - An async iterator of Vercel AI Elements events. 
- """ - match event: - case messages.PartStartEvent(part=part): - match part: - case messages.TextPart(content=content): - yield _t.TextStartChunk(id=self.message_id) - yield _t.TextDeltaChunk(id=self.message_id, delta=content) - case ( - messages.ToolCallPart(tool_name=tool_name, tool_call_id=tool_call_id, args=args) - | messages.BuiltinToolCallPart(tool_name=tool_name, tool_call_id=tool_call_id, args=args) - ): - yield _t.ToolInputStartChunk(tool_call_id=tool_call_id, tool_name=tool_name) - if isinstance(args, str): - yield _t.ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=args) - elif args is not None: - yield ( - _t.ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=_json_dumps(args)) - ) - - case messages.BuiltinToolReturnPart( - tool_name=tool_name, tool_call_id=tool_call_id, content=content - ): - yield _t.ToolOutputAvailableChunk(tool_call_id=tool_call_id, output=content) - - case messages.ThinkingPart(content=content): - yield _t.ReasoningStartChunk(id=self.message_id) - yield _t.ReasoningDeltaChunk(id=self.message_id, delta=content) - - case messages.PartDeltaEvent(delta=delta): - match delta: - case messages.TextPartDelta(content_delta=content_delta): - yield _t.TextDeltaChunk(id=self.message_id, delta=content_delta) - case messages.ThinkingPartDelta(content_delta=content_delta): - if content_delta: - yield _t.ReasoningDeltaChunk(id=self.message_id, delta=content_delta) - case messages.ToolCallPartDelta(args_delta=args, tool_call_id=tool_call_id): - tool_call_id = tool_call_id or '' - if isinstance(args, str): - yield _t.ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=args) - elif args is not None: - yield ( - _t.ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=_json_dumps(args)) - ) - case messages.FinalResultEvent(tool_name=tool_name, tool_call_id=tool_call_id): - if tool_call_id and tool_name: - self._final_result_tool_id = tool_call_id - yield _t.ToolInputStartChunk(tool_call_id=tool_call_id, tool_name=tool_name) - case messages.FunctionToolCallEvent(): - pass - # print(f'TODO FunctionToolCallEvent {part}') - case messages.FunctionToolResultEvent(result=result): - match result: - case messages.ToolReturnPart(tool_name=tool_name, tool_call_id=tool_call_id, content=content): - yield _t.ToolOutputAvailableChunk(tool_call_id=tool_call_id, output=content) - case messages.RetryPromptPart(tool_name=tool_name, tool_call_id=tool_call_id, content=content): - yield _t.ToolOutputAvailableChunk(tool_call_id=tool_call_id, output=content) - case messages.BuiltinToolCallEvent(part=part): - tool_call_id = part.tool_call_id - tool_name = part.tool_name - args = part.args - yield _t.ToolInputStartChunk(tool_call_id=tool_call_id, tool_name=tool_name) - if isinstance(args, str): - yield _t.ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=args) - elif args is not None: - yield _t.ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=_json_dumps(args)) - case messages.BuiltinToolResultEvent(result=result): - yield _t.ToolOutputAvailableChunk(tool_call_id=result.tool_call_id, output=result.content) - - async def finish(self) -> AsyncIterator[_t.AbstractSSEChunk | DoneChunk]: - """Send extra messages required to close off the stream.""" - if tool_call_id := self._final_result_tool_id: - yield _t.ToolOutputAvailableChunk(tool_call_id=tool_call_id, output=None) - yield _t.FinishChunk() - yield DoneChunk() - - -class DoneChunk: - def sse(self) -> str: - return '[DONE]' - - def __str__(self) -> str: - return 
'DoneChunk' - - def __eq__(self, other: Any) -> bool: - return isinstance(other, DoneChunk) - - -def _json_dumps(obj: Any) -> str: - return to_json(obj).decode('utf-8') diff --git a/pydantic_ai_slim/pydantic_ai/vercel_ai/starlette.py b/pydantic_ai_slim/pydantic_ai/vercel_ai/starlette.py deleted file mode 100644 index 62e0a68964..0000000000 --- a/pydantic_ai_slim/pydantic_ai/vercel_ai/starlette.py +++ /dev/null @@ -1,85 +0,0 @@ -from collections.abc import AsyncIterator -from dataclasses import dataclass -from typing import Generic - -from pydantic import ValidationError - -from ..agent import Agent -from ..run import AgentRunResultEvent -from ..tools import AgentDepsT -from .request_types import RequestData, TextUIPart, request_data_ta -from .response_stream import VERCEL_AI_DSP_HEADERS, DoneChunk, EventStreamer -from .response_types import AbstractSSEChunk - -try: - from sse_starlette.sse import EventSourceResponse - from starlette.requests import Request - from starlette.responses import JSONResponse, Response -except ImportError as e: - raise ImportError('To use Vercel AI Elements, please install starlette and sse_starlette') from e - - -@dataclass -class StarletteChat(Generic[AgentDepsT]): - """Starlette support for Pydantic AI's Vercel AI Elements integration. - - This can be used with either FastAPI or Starlette apps. - """ - - agent: Agent[AgentDepsT] - - async def dispatch_request(self, request: Request, deps: AgentDepsT) -> Response: - """Handle a request and return a streamed SSE response. - - Args: - request: The incoming Starlette/FastAPI request. - deps: The dependencies for the agent. - - Returns: - A streamed SSE response. - """ - try: - data = request_data_ta.validate_json(await request.json()) - - async def run_sse() -> AsyncIterator[str]: - async for chunk in self.run(data, deps=deps): - yield chunk.sse() - - return EventSourceResponse(run_sse(), headers=VERCEL_AI_DSP_HEADERS) - except ValidationError as e: - return JSONResponse({'errors': e.errors()}, status_code=422) - except Exception as e: - return JSONResponse({'errors': str(e)}, status_code=500) - - async def run(self, data: RequestData, deps: AgentDepsT = None) -> AsyncIterator[AbstractSSEChunk | DoneChunk]: - """Stream events from an agent run as Vercel AI Elements events. - - Args: - data: The data to run the agent with. - deps: The dependencies to pass to the agent. - - Yields: - An async iterator text lines to stream over SSE. 
- """ - # TODO (DouweM): Use .model and .builtin_tools - - # TODO: Use entire message history - - if not data.messages: - raise ValueError('no messages provided') - - message = data.messages[-1] - prompt: list[str] = [] - for part in message.parts: - if isinstance(part, TextUIPart): - prompt.append(part.text) - else: - raise ValueError(f'Only text parts are supported yet, got {part}') - - event_streamer = EventStreamer() - async for event in self.agent.run_stream_events('\n'.join(prompt), deps=deps): - if not isinstance(event, AgentRunResultEvent): - async for chunk in event_streamer.event_to_chunks(event): - yield chunk - async for chunk in event_streamer.finish(): - yield chunk diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index cf321274b7..bcb57d9158 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -2,7 +2,6 @@ from __future__ import annotations -import contextlib import json import uuid from collections.abc import AsyncIterator @@ -47,10 +46,11 @@ from pydantic_ai.output import OutputDataT from pydantic_ai.tools import AgentDepsT, ToolDefinition -from .conftest import IsDatetime, IsSameStr +from .conftest import IsDatetime, IsSameStr, try_import has_ag_ui: bool = False -with contextlib.suppress(ImportError): +with try_import() as imports_successful: + has_ag_ui = imports_successful() from ag_ui.core import ( AssistantMessage, CustomEvent, @@ -72,16 +72,14 @@ SSE_CONTENT_TYPE, OnCompleteFunc, StateDeps, - _messages_from_ag_ui, # type: ignore[reportPrivateUsage] run_ag_ui, ) - - has_ag_ui = True + from pydantic_ai.ui.ag_ui.event_stream import protocol_messages_to_pai_messages pytestmark = [ pytest.mark.anyio, - pytest.mark.skipif(not has_ag_ui, reason='ag-ui-protocol not installed'), + pytest.mark.skipif(not imports_successful, reason='ag-ui-protocol not installed'), pytest.mark.filterwarnings( 'ignore:`BuiltinToolCallEvent` is deprecated, look for `PartStartEvent` and `PartDeltaEvent` with `BuiltinToolCallPart` instead.:DeprecationWarning' ), @@ -1367,7 +1365,7 @@ def error_callback(run_result: AgentRunResult[Any]) -> None: assert any(event['type'] == 'RUN_ERROR' for event in events) -async def test_messages_from_ag_ui() -> None: +async def test_protocol_messages_to_pai_messages() -> None: messages = [ SystemMessage( id='msg_1', @@ -1450,7 +1448,7 @@ async def test_messages_from_ag_ui() -> None: ), ] - assert _messages_from_ag_ui(messages) == snapshot( + assert protocol_messages_to_pai_messages(messages) == snapshot( [ ModelRequest( parts=[ diff --git a/tests/test_vercel_ai.py b/tests/test_vercel_ai.py index 9ac0fdc908..be55bcccec 100644 --- a/tests/test_vercel_ai.py +++ b/tests/test_vercel_ai.py @@ -7,13 +7,15 @@ from pydantic_ai.builtin_tools import WebSearchTool from pydantic_ai.models.openai import OpenAIResponsesModel from pydantic_ai.providers.openai import OpenAIProvider -from pydantic_ai.vercel_ai.request_types import ( +from pydantic_ai.ui.vercel_ai import VercelAIAdapter +from pydantic_ai.ui.vercel_ai.request_types import ( SubmitMessage, TextUIPart, ToolOutputAvailablePart, UIMessage, ) -from pydantic_ai.vercel_ai.response_types import ( +from pydantic_ai.ui.vercel_ai.response_types import ( + DoneChunk, FinishChunk, ReasoningDeltaChunk, ReasoningStartChunk, @@ -23,7 +25,6 @@ ToolInputStartChunk, ToolOutputAvailableChunk, ) -from pydantic_ai.vercel_ai.starlette import DoneChunk, StarletteChat from .conftest import IsStr @@ -42,7 +43,7 @@ async def test_run(allow_model_requests: None, openai_api_key: str): model = OpenAIResponsesModel('gpt-5', 
provider=OpenAIProvider(api_key=openai_api_key)) agent = Agent(model=model, builtin_tools=[WebSearchTool()]) - chat = StarletteChat(agent) + adapter = VercelAIAdapter(agent) data = SubmitMessage( trigger='submit-message', @@ -151,7 +152,7 @@ async def test_run(allow_model_requests: None, openai_api_key: str): ], ) - events = [event async for event in chat.run(data, None)] + events = [event async for event in adapter.run_stream(data, None)] assert events == snapshot( [ ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), From 6cae960c68e0a5f7adb641d45040459fdc1e13f2 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Thu, 9 Oct 2025 16:30:43 +0000 Subject: [PATCH 05/46] Flesh out Adapter and EventStream --- examples/pydantic_ai_examples/chat_app.py | 7 +- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 3 + pydantic_ai_slim/pydantic_ai/ag_ui.py | 118 +- pydantic_ai_slim/pydantic_ai/ui/__init__.py | 451 +------ pydantic_ai_slim/pydantic_ai/ui/adapter.py | 370 ++++++ .../pydantic_ai/ui/ag_ui/__init__.py | 8 +- .../pydantic_ai/ui/ag_ui/_adapter.py | 248 ++++ .../pydantic_ai/ui/ag_ui/_event_stream.py | 274 +++++ .../pydantic_ai/ui/ag_ui/adapter.py | 334 ----- .../pydantic_ai/ui/ag_ui/event_stream.py | 548 --------- .../pydantic_ai/ui/event_stream.py | 377 ++++++ .../pydantic_ai/ui/vercel_ai/__init__.py | 17 +- .../pydantic_ai/ui/vercel_ai/_adapter.py | 183 +++ .../{event_stream.py => _event_stream.py} | 137 +-- .../{request_types.py => _request_types.py} | 0 .../{response_types.py => _response_types.py} | 69 +- .../pydantic_ai/ui/vercel_ai/adapter.py | 149 --- tests/test_ag_ui.py | 52 +- tests/test_vercel_ai.py | 1081 +++++++++-------- 19 files changed, 2153 insertions(+), 2273 deletions(-) create mode 100644 pydantic_ai_slim/pydantic_ai/ui/adapter.py create mode 100644 pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py create mode 100644 pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py delete mode 100644 pydantic_ai_slim/pydantic_ai/ui/ag_ui/adapter.py delete mode 100644 pydantic_ai_slim/pydantic_ai/ui/ag_ui/event_stream.py create mode 100644 pydantic_ai_slim/pydantic_ai/ui/event_stream.py create mode 100644 pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py rename pydantic_ai_slim/pydantic_ai/ui/vercel_ai/{event_stream.py => _event_stream.py} (61%) rename pydantic_ai_slim/pydantic_ai/ui/vercel_ai/{request_types.py => _request_types.py} (100%) rename pydantic_ai_slim/pydantic_ai/ui/vercel_ai/{response_types.py => _response_types.py} (74%) delete mode 100644 pydantic_ai_slim/pydantic_ai/ui/vercel_ai/adapter.py diff --git a/examples/pydantic_ai_examples/chat_app.py b/examples/pydantic_ai_examples/chat_app.py index 4e2f29ddf7..ea37a576e5 100644 --- a/examples/pydantic_ai_examples/chat_app.py +++ b/examples/pydantic_ai_examples/chat_app.py @@ -16,7 +16,7 @@ from fastapi import Depends, Request, Response from pydantic_ai import Agent, RunContext -from pydantic_ai.vercel_ai.starlette import StarletteChat +from pydantic_ai.ui.vercel_ai import VercelAIAdapter from .sqlite_database import Database @@ -80,7 +80,6 @@ async def retrieve_memories(ctx: RunContext[Deps], memory_contains: str) -> str: return '\n'.join([row[0] for row in rows]) -starlette_chat = StarletteChat(chat_agent) app = fastapi.FastAPI(lifespan=lifespan) logfire.instrument_fastapi(app) @@ -96,7 +95,9 @@ def options_chat(): @app.post('/api/chat') async def get_chat(request: Request, database: Database = Depends(get_db)) -> Response: - return await starlette_chat.dispatch_request(request, deps=Deps(database, 123)) + return await 
VercelAIAdapter[Deps].dispatch_request( + chat_agent, request, deps=Deps(database, 123) + ) if __name__ == '__main__': diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index d7a54c5c71..fc883eee9b 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -243,6 +243,9 @@ async def run( # noqa: C901 if self.user_prompt is not None: parts.append(_messages.UserPromptPart(self.user_prompt)) + if not parts: + raise exceptions.UserError('No messages provided') + next_message = _messages.ModelRequest(parts=parts) next_message.instructions = await ctx.deps.get_instructions(run_context) diff --git a/pydantic_ai_slim/pydantic_ai/ag_ui.py b/pydantic_ai_slim/pydantic_ai/ag_ui.py index 0ae9211bbc..b4c32d8a12 100644 --- a/pydantic_ai_slim/pydantic_ai/ag_ui.py +++ b/pydantic_ai_slim/pydantic_ai/ag_ui.py @@ -6,19 +6,12 @@ from __future__ import annotations -import json -from collections.abc import AsyncIterator, Awaitable, Callable, Mapping, Sequence -from http import HTTPStatus -from typing import ( - Any, - Final, - Generic, - TypeAlias, -) +from collections.abc import AsyncIterator, Callable, Mapping, Sequence +from typing import Any, Generic -from pydantic import ValidationError - -from .agent import AbstractAgent, AgentRunResult +from . import DeferredToolResults +from .agent import AbstractAgent +from .messages import ModelMessage from .models import KnownModelName, Model from .output import OutputDataT, OutputSpec from .settings import ModelSettings @@ -27,11 +20,13 @@ from .usage import RunUsage, UsageLimits try: + from ag_ui.core.types import RunAgentInput + + from .ui import OnCompleteFunc, StateDeps, StateHandler from .ui.ag_ui import ( - StateDeps, - StateHandler, + SSE_CONTENT_TYPE, + AGUIAdapter, ) - from .ui.ag_ui.event_stream import RunAgentInput, _RunError # type: ignore[reportPrivateUsage] except ImportError as e: # pragma: no cover raise ImportError( 'Please install the `ag-ui-protocol` package to use `Agent.to_ag_ui()` method, ' @@ -42,7 +37,7 @@ from starlette.applications import Starlette from starlette.middleware import Middleware from starlette.requests import Request - from starlette.responses import Response, StreamingResponse + from starlette.responses import Response from starlette.routing import BaseRoute from starlette.types import ExceptionHandler, Lifespan except ImportError as e: # pragma: no cover @@ -62,14 +57,6 @@ 'run_ag_ui', ] -SSE_CONTENT_TYPE: Final[str] = 'text/event-stream' -"""Content type header value for Server-Sent Events (SSE).""" - -OnCompleteFunc: TypeAlias = Callable[[AgentRunResult[Any]], None] | Callable[[AgentRunResult[Any]], Awaitable[None]] -"""Callback function type that receives the `AgentRunResult` of the completed run. Can be sync or async.""" - -_BUILTIN_TOOL_CALL_ID_PREFIX: Final[str] = 'pyd_ai_builtin' - class AGUIApp(Generic[AgentDepsT, OutputDataT], Starlette): """ASGI application for running Pydantic AI agents with AG-UI protocol support.""" @@ -199,32 +186,18 @@ async def handle_ag_ui_request( Returns: A streaming Starlette response with AG-UI protocol events. 
""" - accept = request.headers.get('accept', SSE_CONTENT_TYPE) - try: - input_data = RunAgentInput.model_validate(await request.json()) - except ValidationError as e: # pragma: no cover - return Response( - content=json.dumps(e.json()), - media_type='application/json', - status_code=HTTPStatus.UNPROCESSABLE_ENTITY, - ) - - return StreamingResponse( - run_ag_ui( - agent, - input_data, - accept, - output_type=output_type, - model=model, - deps=deps, - model_settings=model_settings, - usage_limits=usage_limits, - usage=usage, - infer_name=infer_name, - toolsets=toolsets, - on_complete=on_complete, - ), - media_type=accept, + return await AGUIAdapter.dispatch_request( + agent, + request, + deps=deps, + output_type=output_type, + model=model, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + on_complete=on_complete, ) @@ -234,6 +207,8 @@ async def run_ag_ui( accept: str = SSE_CONTENT_TYPE, *, output_type: OutputSpec[Any] | None = None, + message_history: Sequence[ModelMessage] | None = None, + deferred_tool_results: DeferredToolResults | None = None, model: Model | KnownModelName | str | None = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, @@ -265,32 +240,21 @@ async def run_ag_ui( Yields: Streaming event chunks encoded as strings according to the accept header value. """ - from .ui.ag_ui import AGUIAdapter - - adapter = AGUIAdapter(agent=agent) - async for event_str in adapter.run_stream_sse( - request=run_input, + adapter = AGUIAdapter(agent=agent, request=run_input) + async for event in adapter.encode_stream( + adapter.run_stream( + output_type=output_type, + message_history=message_history, + deferred_tool_results=deferred_tool_results, + model=model, + deps=deps, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + on_complete=on_complete, + ), accept=accept, - output_type=output_type, - model=model, - deps=deps, - model_settings=model_settings, - usage_limits=usage_limits, - usage=usage, - infer_name=infer_name, - toolsets=toolsets, - on_complete=on_complete, ): - yield event_str - - -# _ToolCallNotFoundError is defined here (not in ui/ag_ui) since it's specific to this module -class _ToolCallNotFoundError(_RunError, ValueError): - """Exception raised when an tool result is present without a matching call.""" - - def __init__(self, tool_call_id: str) -> None: - """Initialize the exception with the tool call ID.""" - super().__init__( # pragma: no cover - message=f'Tool call with ID {tool_call_id} not found in the history.', - code='tool_call_not_found', - ) + yield event diff --git a/pydantic_ai_slim/pydantic_ai/ui/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/__init__.py index dec4830efe..44c2611879 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/ui/__init__.py @@ -4,454 +4,15 @@ that transform Pydantic AI agent events into protocol-specific events (e.g., AG-UI, Vercel AI). 
""" -# pyright: reportIncompatibleMethodOverride=false, reportUnknownVariableType=false, reportGeneralTypeIssues=false - from __future__ import annotations -from abc import ABC, abstractmethod -from collections.abc import AsyncIterator -from typing import Any, Generic, Protocol, TypeVar, runtime_checkable -from uuid import uuid4 - -from ..messages import ( - AgentStreamEvent, - BuiltinToolCallEvent, # type: ignore[reportDeprecated] - BuiltinToolCallPart, - BuiltinToolResultEvent, # type: ignore[reportDeprecated] - BuiltinToolReturnPart, - FilePart, - FinalResultEvent, - FunctionToolCallEvent, - FunctionToolResultEvent, - ModelMessage, - PartDeltaEvent, - PartStartEvent, - TextPart, - TextPartDelta, - ThinkingPart, - ThinkingPartDelta, - ToolCallPart, - ToolCallPartDelta, -) -from ..run import AgentRunResultEvent -from ..tools import AgentDepsT +from .adapter import BaseAdapter, OnCompleteFunc, StateDeps, StateHandler +from .event_stream import BaseEventStream __all__ = [ - 'SSEEvent', - 'BaseEventStream', 'BaseAdapter', + 'BaseEventStream', + 'StateDeps', + 'StateHandler', + 'OnCompleteFunc', ] - -EventT = TypeVar('EventT', bound='SSEEvent') -"""Type variable for protocol-specific event types.""" - - -@runtime_checkable -class SSEEvent(Protocol): - """Protocol for events that can be encoded as Server-Sent Events (SSE).""" - - def sse(self) -> str: - """Encode event as a Server-Sent Event string. - - Returns: - The SSE-formatted string representation of the event. - """ - ... - - -class BaseEventStream(ABC, Generic[EventT, AgentDepsT]): - """Base class for transforming pAI agent events into protocol-specific events. - - This class provides a granular method-per-part-type pattern that makes it easy to customize - specific event transformations by overriding individual handler methods. - - Subclasses should: - 1. Initialize state attributes in `__init__` if needed - 2. Implement all abstract `handle_*` methods for event transformation - 3. Implement error handling methods - 4. Optionally override lifecycle hooks (`before_stream`, `after_stream`) - - Example: - ```python - class MyEventStream(BaseEventStream[MyEvent, MyDeps]): - def __init__(self): - super().__init__() - self.custom_state = {} - - async def handle_text_start(self, part: TextPart): - yield MyTextStartEvent(id=self.new_message_id(), text=part.content) - ``` - """ - - def __init__(self) -> None: - """Initialize event stream state. - - Subclasses can add additional state attributes for tracking streaming context. - """ - self.message_id: str = '' - - def new_message_id(self) -> str: - """Generate and store a new message ID. - - Returns: - A new UUID-based message ID. - """ - self.message_id = str(uuid4()) - return self.message_id - - async def agent_event_to_events(self, event: AgentStreamEvent | AgentRunResultEvent) -> AsyncIterator[EventT]: # noqa: C901 - """Transform a pAI agent event into protocol-specific events. - - This method dispatches to specific `handle_*` methods based on event and part type. - Subclasses should implement the individual handler methods rather than overriding this. - - Args: - event: The pAI agent event to transform. - - Yields: - Protocol-specific events. 
- """ - match event: - case PartStartEvent(part=part): - # Dispatch based on part type - match part: - case TextPart(): - async for e in self.handle_text_start(part): - yield e - case ThinkingPart(): - async for e in self.handle_thinking_start(part): - yield e - case ToolCallPart() | BuiltinToolCallPart(): - async for e in self.handle_tool_call_start(part): - yield e - case BuiltinToolReturnPart(): - async for e in self.handle_builtin_tool_return(part): - yield e - case FilePart(): - # FilePart is not currently handled by UI protocols - pass - case PartDeltaEvent(delta=delta): - # Dispatch based on delta type - match delta: - case TextPartDelta(): - async for e in self.handle_text_delta(delta): - yield e - case ThinkingPartDelta(): - async for e in self.handle_thinking_delta(delta): - yield e - case ToolCallPartDelta(): - async for e in self.handle_tool_call_delta(delta): - yield e - case FunctionToolCallEvent(): - async for e in self.handle_function_tool_call(event): - yield e - case FunctionToolResultEvent(): - async for e in self.handle_function_tool_result(event): - yield e - case BuiltinToolCallEvent(): # type: ignore[reportDeprecated] - async for e in self.handle_builtin_tool_call(event): - yield e - case BuiltinToolResultEvent(): # type: ignore[reportDeprecated] - async for e in self.handle_builtin_tool_result(event): - yield e - case FinalResultEvent(): - async for e in self.handle_final_result(event): - yield e - case AgentRunResultEvent(): - async for e in self.handle_run_result(event): - yield e - - # Granular part handlers (abstract - must implement) - - @abstractmethod - async def handle_text_start(self, part: TextPart) -> AsyncIterator[EventT]: - """Handle a TextPart at start. - - Args: - part: The TextPart. - - Yields: - Protocol-specific events. - """ - - @abstractmethod - async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[EventT]: - """Handle a TextPartDelta. - - Args: - delta: The TextPartDelta. - - Yields: - Protocol-specific events. - """ - - @abstractmethod - async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[EventT]: - """Handle a ThinkingPart at start. - - Args: - part: The ThinkingPart. - - Yields: - Protocol-specific events. - """ - - @abstractmethod - async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[EventT]: - """Handle a ThinkingPartDelta. - - Args: - delta: The ThinkingPartDelta. - - Yields: - Protocol-specific events. - """ - - @abstractmethod - async def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[EventT]: - """Handle a ToolCallPart or BuiltinToolCallPart at start. - - Args: - part: The tool call part. - - Yields: - Protocol-specific events. - """ - - @abstractmethod - async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[EventT]: - """Handle a ToolCallPartDelta. - - Args: - delta: The ToolCallPartDelta. - - Yields: - Protocol-specific events. - """ - - @abstractmethod - async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[EventT]: - """Handle a BuiltinToolReturnPart. - - Args: - part: The BuiltinToolReturnPart. - - Yields: - Protocol-specific events. - """ - - # Tool event handlers (abstract - must implement) - - @abstractmethod - async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[EventT]: - """Handle a FunctionToolCallEvent. - - Args: - event: The function tool call event. - - Yields: - Protocol-specific events. 
- """ - - @abstractmethod - async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[EventT]: - """Handle a FunctionToolResultEvent. - - Args: - event: The function tool result event. - - Yields: - Protocol-specific events. - """ - - @abstractmethod - async def handle_builtin_tool_call(self, event: BuiltinToolCallEvent) -> AsyncIterator[EventT]: # type: ignore[reportDeprecated] - """Handle a BuiltinToolCallEvent. - - Args: - event: The builtin tool call event. - - Yields: - Protocol-specific events. - """ - - @abstractmethod - async def handle_builtin_tool_result(self, event: BuiltinToolResultEvent) -> AsyncIterator[EventT]: # type: ignore[reportDeprecated] - """Handle a BuiltinToolResultEvent. - - Args: - event: The builtin tool result event. - - Yields: - Protocol-specific events. - """ - - @abstractmethod - async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[EventT]: - """Handle a FinalResultEvent. - - Args: - event: The final result event. - - Yields: - Protocol-specific events. - """ - - @abstractmethod - async def handle_run_result(self, event: AgentRunResultEvent) -> AsyncIterator[EventT]: - """Handle an AgentRunResultEvent (final event with result/usage). - - Args: - event: The agent run result event. - - Yields: - Protocol-specific events. - """ - - # Lifecycle hooks (optional overrides) - - async def before_stream(self) -> AsyncIterator[EventT]: - """Yield events before agent streaming starts. - - This hook is called before any agent events are processed. - Override this to inject custom events at the start of the stream. - - Yields: - Protocol-specific events to emit before streaming. - """ - return - yield # Make this an async generator - - async def after_stream(self) -> AsyncIterator[EventT]: - """Yield events after agent streaming completes. - - This hook is called after all agent events have been processed. - Override this to inject custom events at the end of the stream. - - Yields: - Protocol-specific events to emit after streaming. - """ - return - yield # Make this an async generator - - # Error handling (must implement) - - @abstractmethod - async def on_validation_error(self, error: Exception) -> AsyncIterator[EventT]: - """Handle validation errors that occur before streaming starts. - - Args: - error: The validation error that occurred. - - Yields: - Protocol-specific error events. - """ - - @abstractmethod - async def on_stream_error(self, error: Exception) -> AsyncIterator[EventT]: - """Handle errors that occur during streaming (after stream has started). - - Args: - error: The error that occurred during streaming. - - Yields: - Protocol-specific error events. - """ - - -RequestT = TypeVar('RequestT') -"""Type variable for protocol-specific request types.""" - -MessageT = TypeVar('MessageT') -"""Type variable for protocol-specific message types.""" - - -class BaseAdapter(ABC, Generic[RequestT, MessageT, EventT, AgentDepsT]): - """Base adapter for handling UI protocol requests and streaming responses. - - This class provides a unified interface for request/response handling across different - UI protocols (AG-UI, Vercel AI, etc.). 
It handles: - - Request parsing and validation - - Message format conversion (protocol messages → pAI messages) - - Agent execution and event streaming - - Error handling (validation errors vs streaming errors) - - SSE encoding - - Type Parameters: - RequestT: Protocol-specific request type (e.g., RunAgentInput, RequestData) - MessageT: Protocol-specific message type (e.g., ag_ui.Message, UIMessage) - EventT: Protocol-specific event type (e.g., ag_ui.BaseEvent, AbstractSSEChunk) - AgentDepsT: Agent dependencies type - - Example: - ```python - class MyAdapter(BaseAdapter[MyRequest, MyMessage, MyEvent, MyDeps]): - def create_event_stream(self) -> BaseEventStream[MyEvent, MyDeps]: - return MyEventStream() - - def parse_request_messages(self, request: MyRequest) -> list[MyMessage]: - return request.messages - - def protocol_messages_to_pai_messages(self, messages: list[MyMessage]) -> list[ModelMessage]: - # Convert protocol messages to pAI messages - ... - ``` - """ - - @abstractmethod - def create_event_stream(self) -> BaseEventStream[EventT, AgentDepsT]: - """Create a new event stream for this protocol. - - Returns: - A protocol-specific event stream instance. - """ - - @abstractmethod - def parse_request_messages(self, request: RequestT) -> list[MessageT]: - """Extract messages from the protocol request. - - Args: - request: The protocol-specific request. - - Returns: - List of protocol-specific messages. - """ - - @abstractmethod - def protocol_messages_to_pai_messages(self, messages: list[MessageT]) -> list[ModelMessage]: - """Convert protocol messages to Pydantic AI messages. - - Args: - messages: List of protocol-specific messages. - - Returns: - List of Pydantic AI ModelMessage objects. - """ - - @abstractmethod - def encode_event(self, event: EventT) -> str: - """Encode a protocol event as an SSE string. - - Args: - event: The protocol-specific event. - - Returns: - SSE-formatted string. - """ - - @abstractmethod - async def dispatch_request(self, request: Any, deps: AgentDepsT | None = None) -> Any: - """Handle a request and return a response. - - This method should handle the full request/response cycle: - - Parse and validate the request - - Run the agent with the request data - - Return an appropriate response (e.g., StreamingResponse, EventSourceResponse) - - Args: - request: The protocol-specific request object (e.g., Starlette Request). - deps: Optional dependencies to pass to the agent. - - Returns: - A protocol-specific response object. - """ diff --git a/pydantic_ai_slim/pydantic_ai/ui/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/adapter.py new file mode 100644 index 0000000000..00b8afa311 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/adapter.py @@ -0,0 +1,370 @@ +"""Base classes for UI event stream protocols. + +This module provides abstract base classes for implementing UI event stream adapters +that transform Pydantic AI agent events into protocol-specific events (e.g., AG-UI, Vercel AI). +""" + +from __future__ import annotations + +from abc import ABC, abstractmethod +from collections.abc import AsyncIterator, Awaitable, Callable, Sequence +from dataclasses import Field, dataclass, replace +from functools import cached_property +from http import HTTPStatus +from typing import ( + Any, + ClassVar, + Generic, + Protocol, + TypeAlias, + TypeVar, + runtime_checkable, +) + +from pydantic import BaseModel, ValidationError +from starlette.responses import StreamingResponse + +from .. 
import DeferredToolRequests, DeferredToolResults, _utils
+from ..agent import AbstractAgent, AgentDepsT, AgentRunResult
+from ..exceptions import UserError
+from ..messages import ModelMessage
+from ..models import KnownModelName, Model
+from ..output import OutputSpec
+from ..settings import ModelSettings
+from ..toolsets import AbstractToolset
+from ..usage import RunUsage, UsageLimits
+from .event_stream import BaseEventStream, SourceEvent
+
+try:
+    from starlette.requests import Request
+    from starlette.responses import Response
+except ImportError as e:  # pragma: no cover
+    raise ImportError(
+        'Please install the `starlette` package to use UI adapters, '
+        'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`'
+    ) from e
+
+
+__all__ = [
+    'BaseAdapter',
+    'OnCompleteFunc',
+    'StateDeps',
+    'StateHandler',
+]
+
+EventT = TypeVar('EventT')
+"""Type variable for protocol-specific event types."""
+
+
+RequestT = TypeVar('RequestT')
+"""Type variable for protocol-specific request types."""
+
+MessageT = TypeVar('MessageT')
+"""Type variable for protocol-specific message types."""
+
+OnCompleteFunc: TypeAlias = Callable[[AgentRunResult[Any]], None] | Callable[[AgentRunResult[Any]], Awaitable[None]]
+"""Callback function type that receives the `AgentRunResult` of the completed run. Can be sync or async."""
+
+
+# State management types
+
+StateT = TypeVar('StateT', bound=BaseModel)
+"""Type variable for the state type, which must be a subclass of `BaseModel`."""
+
+
+@runtime_checkable
+class StateHandler(Protocol):
+    """Protocol for state handlers in agent runs. Requires the class to be a dataclass with a `state` field."""
+
+    # Has to be a dataclass so we can use `replace` to update the state.
+    # From https://github.com/python/typeshed/blob/9ab7fde0a0cd24ed7a72837fcb21093b811b80d8/stdlib/_typeshed/__init__.pyi#L352
+    __dataclass_fields__: ClassVar[dict[str, Field[Any]]]
+
+    @property
+    def state(self) -> Any:
+        """Get the current state of the agent run."""
+        ...
+
+    @state.setter
+    def state(self, state: Any) -> None:
+        """Set the state of the agent run.
+
+        This method is called to update the state of the agent run with the
+        provided state.
+
+        Args:
+            state: The run state.
+
+        Raises:
+            ValidationError: If `state` does not match the expected model.
+        """
+        ...
+
+
+@dataclass
+class StateDeps(Generic[StateT]):
+    """Provides UI protocol state management.
+
+    This class is used to manage the state of an agent run. It allows setting
+    the state of the agent run with a specific type of state model, which must
+    be a subclass of `BaseModel`.
+
+    The state is set using the `state` setter by the `Adapter` when the run starts.
+
+    Implements the `StateHandler` protocol.
+    """
+
+    state: StateT
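For illustration, a minimal sketch of a dependencies type that satisfies `StateHandler` by subclassing `StateDeps`; the `ChatState` and `MyDeps` names are assumptions, and the import path follows the `pydantic_ai.ui` exports above:

```python
from dataclasses import dataclass

from pydantic import BaseModel

from pydantic_ai.ui import StateDeps, StateHandler


class ChatState(BaseModel):
    """Illustrative state model; any BaseModel subclass works."""

    counter: int = 0


@dataclass
class MyDeps(StateDeps[ChatState]):
    # StateDeps already declares the required `state` field, so this
    # dataclass satisfies the StateHandler protocol.
    api_key: str = ''


deps = MyDeps(state=ChatState())
assert isinstance(deps, StateHandler)  # runtime_checkable protocol
```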
+
+
+@dataclass
+class BaseAdapter(ABC, Generic[RequestT, MessageT, EventT, AgentDepsT]):
+    """Base adapter for handling UI protocol requests and streaming responses.
+
+    This class provides a unified interface for request/response handling across different
+    UI protocols (AG-UI, Vercel AI, etc.). It handles:
+    - Request parsing and validation
+    - Message format conversion (protocol messages → pAI messages)
+    - Agent execution and event streaming
+    - Error handling (validation errors vs streaming errors)
+    - SSE encoding
+
+    Type Parameters:
+        RequestT: Protocol-specific request type (e.g., RunAgentInput, RequestData)
+        MessageT: Protocol-specific message type (e.g., ag_ui.Message, UIMessage)
+        EventT: Protocol-specific event type (e.g., ag_ui.BaseEvent, BaseChunk)
+        AgentDepsT: Agent dependencies type
+
+    Example:
+        ```python
+        class MyAdapter(BaseAdapter[MyRequest, MyMessage, MyEvent, MyDeps]):
+            @cached_property
+            def messages(self) -> list[ModelMessage]:
+                # Convert protocol messages to pAI messages
+                ...
+        ```
+    """
+
+    agent: AbstractAgent[AgentDepsT]
+    """The Pydantic AI agent to run."""
+
+    request: RequestT
+    """The protocol-specific request object."""
+
+    @abstractmethod
+    def create_event_stream(self) -> BaseEventStream[RequestT, EventT, AgentDepsT]:
+        """Create an event stream for the adapter."""
+
+    @cached_property
+    @abstractmethod
+    def messages(self) -> list[ModelMessage]:
+        """Convert the protocol messages from the request to Pydantic AI messages.
+
+        Returns:
+            List of Pydantic AI ModelMessage objects.
+        """
+
+    @cached_property
+    def toolset(self) -> AbstractToolset[AgentDepsT] | None:
+        """Get the toolset for the adapter."""
+        return None
+
+    @cached_property
+    def raw_state(self) -> dict[str, Any]:
+        """Get the state of the agent run."""
+        return {}
+
+    def deps_with_state(self, deps: AgentDepsT) -> AgentDepsT:
+        """Return a copy of `deps` with the request state injected, if `deps` implements `StateHandler`.
+
+        Raises:
+            UserError: If state is provided but `deps` does not implement the `StateHandler` protocol.
+        """
+        raw_state: dict[str, Any] = self.raw_state
+        if isinstance(deps, StateHandler):
+            if isinstance(deps.state, BaseModel):
+                state = type(deps.state).model_validate(raw_state)
+            else:
+                state = raw_state
+
+            return replace(deps, state=state)
+        elif raw_state:
+            raise UserError(
+                f'State is provided but `deps` of type `{type(deps).__name__}` does not implement the `StateHandler` protocol: it needs to be a dataclass with a non-optional `state` field.'
+            )
+        return deps
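The state injection that `deps_with_state` performs boils down to a validate-then-replace on the deps dataclass; a self-contained sketch, with `ChatState` and `MyDeps` as illustrative stand-ins:

```python
from dataclasses import dataclass, replace

from pydantic import BaseModel


class ChatState(BaseModel):
    counter: int = 0


@dataclass
class MyDeps:
    state: ChatState


deps = MyDeps(state=ChatState())
raw_state = {'counter': 5}  # as parsed from the incoming request

# The same validate-then-replace dance `deps_with_state` performs:
new_deps = replace(deps, state=type(deps.state).model_validate(raw_state))
assert new_deps.state.counter == 5 and deps.state.counter == 0
```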
+ """ + event_stream = self.create_event_stream() + async for event in event_stream.handle_stream(stream): + yield event + + try: + result = event_stream.result + if on_complete is not None and result is not None: + if _utils.is_async_callable(on_complete): + await on_complete(result) + else: + await _utils.run_in_executor(on_complete, result) + except Exception as e: + async for event in event_stream.on_error(e): + yield event + + async def run_stream( + self, + *, + output_type: OutputSpec[Any] | None = None, + message_history: Sequence[ModelMessage] | None = None, + deferred_tool_results: DeferredToolResults | None = None, + model: Model | KnownModelName | str | None = None, + deps: AgentDepsT = None, + model_settings: ModelSettings | None = None, + usage_limits: UsageLimits | None = None, + usage: RunUsage | None = None, + infer_name: bool = True, + toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, + on_complete: OnCompleteFunc | None = None, + ) -> AsyncIterator[EventT]: + """Run the agent with the AG-UI run input and stream AG-UI protocol events. + + Args: + agent: The agent to run. + run_input: The AG-UI run input containing thread_id, run_id, messages, etc. + accept: The accept header value for the run. + + output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no + output validators since output validators would expect an argument that matches the agent's output type. + model: Optional model to use for this run, required if `model` was not set when creating the agent. + deps: Optional dependencies to use for this run. + model_settings: Optional settings to use for this model's request. + usage_limits: Optional limits on model request count or token usage. + usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. + infer_name: Whether to try to infer the agent name from the call frame if it's not set. + toolsets: Optional additional toolsets for this run. + on_complete: Optional callback function called when the agent run completes successfully. + The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can access `all_messages()` and other result data. + + Yields: + Streaming event chunks encoded as strings according to the accept header value. 
+ """ + message_history = [*(message_history or []), *self.messages] + + toolset = self.toolset + if toolset: + output_type = [output_type or self.agent.output_type, DeferredToolRequests] + toolsets = [*toolsets, toolset] if toolsets else [toolset] + + deps = self.deps_with_state(deps) + + async for event in self.process_stream( + self.agent.run_stream_events( + user_prompt=None, + output_type=output_type, + message_history=message_history, + deferred_tool_results=deferred_tool_results, + model=model, + deps=deps, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + ), + on_complete=on_complete, + ): + yield event + + @classmethod + async def dispatch_request( + cls, + agent: AbstractAgent[AgentDepsT, Any], + request: Request, + *, + message_history: Sequence[ModelMessage] | None = None, + deferred_tool_results: DeferredToolResults | None = None, + model: Model | KnownModelName | str | None = None, + deps: AgentDepsT = None, + output_type: OutputSpec[Any] | None = None, + model_settings: ModelSettings | None = None, + usage_limits: UsageLimits | None = None, + usage: RunUsage | None = None, + infer_name: bool = True, + toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, + on_complete: OnCompleteFunc | None = None, + ) -> Response: + """Handle an AG-UI request and return a streaming response. + + Args: + request: The incoming Starlette/FastAPI request. + deps: Optional dependencies to pass to the agent. + output_type: Custom output type for this run. + model: Optional model to use for this run. + model_settings: Optional settings for the model's request. + usage_limits: Optional limits on model request count or token usage. + usage: Optional usage to start with. + infer_name: Whether to infer the agent name from the call frame. + toolsets: Optional additional toolsets for this run. + on_complete: Optional callback called when the agent run completes. + + Returns: + A streaming Starlette response with AG-UI protocol events. 
+ """ + try: + request_data = await cls.validate_request(request) + except ValidationError as e: # pragma: no cover + return Response( + content=e.json(), + media_type='application/json', + status_code=HTTPStatus.UNPROCESSABLE_ENTITY, + ) + + adapter = cls(agent=agent, request=request_data) + + return StreamingResponse( + adapter.encode_stream( + adapter.run_stream( + message_history=message_history, + deferred_tool_results=deferred_tool_results, + deps=deps, + output_type=output_type, + model=model, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + on_complete=on_complete, + ), + accept=request.headers.get('accept'), + ) + ) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py index 620b74cd28..53784efb51 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py @@ -1,12 +1,10 @@ """AG-UI protocol integration for Pydantic AI agents.""" -from .adapter import AGUIAdapter -from .event_stream import AGUIEventStream, StateDeps, StateHandler, protocol_messages_to_pai_messages +from ._adapter import SSE_CONTENT_TYPE, AGUIAdapter +from ._event_stream import AGUIEventStream __all__ = [ 'AGUIAdapter', 'AGUIEventStream', - 'StateHandler', - 'StateDeps', - 'protocol_messages_to_pai_messages', + 'SSE_CONTENT_TYPE', ] diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py new file mode 100644 index 0000000000..0681caa2c4 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -0,0 +1,248 @@ +"""AG-UI adapter for handling requests.""" + +from __future__ import annotations + +from functools import cached_property +from typing import ( + TYPE_CHECKING, + Any, + Final, +) + +from ... import ExternalToolset, ToolDefinition +from ...agent import AgentDepsT +from ...messages import ( + BuiltinToolCallPart, + BuiltinToolReturnPart, + ModelMessage, + ModelRequest, + ModelRequestPart, + ModelResponse, + ModelResponsePart, + SystemPromptPart, + TextPart, + ToolCallPart, + ToolReturnPart, + UserPromptPart, +) +from ...toolsets import AbstractToolset + +try: + from ag_ui.core import ( + AssistantMessage, + BaseEvent, + DeveloperMessage, + Message, + RunAgentInput, + SystemMessage, + Tool as AGUITool, + ToolMessage, + UserMessage, + ) + from ag_ui.encoder import EventEncoder + + from ..adapter import BaseAdapter + from ..event_stream import BaseEventStream + from ._event_stream import BUILTIN_TOOL_CALL_ID_PREFIX, AGUIEventStream +except ImportError as e: # pragma: no cover + raise ImportError( + 'Please install the `ag-ui-protocol` package to use AG-UI integration, ' + 'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`' + ) from e + +try: + from starlette.requests import Request +except ImportError as e: # pragma: no cover + raise ImportError( + 'Please install the `starlette` package to use `Agent.to_ag_ui()` method, ' + 'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`' + ) from e + +if TYPE_CHECKING: + pass + +__all__ = ['AGUIAdapter'] + + +SSE_CONTENT_TYPE: Final[str] = 'text/event-stream' +"""Content type header value for Server-Sent Events (SSE).""" + +# Frontend toolset + + +class _AGUIFrontendToolset(ExternalToolset[AgentDepsT]): + """Toolset for AG-UI frontend tools.""" + + def __init__(self, tools: list[AGUITool]): + """Initialize the toolset with AG-UI tools. 
+
+
+class AGUIAdapter(BaseAdapter[RunAgentInput, Message, BaseEvent, AgentDepsT]):
+    """Adapter for handling AG-UI protocol requests with Pydantic AI agents.
+
+    This adapter provides an interface for integrating Pydantic AI agents
+    with the AG-UI protocol, handling request parsing, message conversion,
+    and event streaming.
+
+    Example:
+        ```python
+        from pydantic_ai import Agent
+        from pydantic_ai.ui.ag_ui import AGUIAdapter
+
+        agent = Agent('openai:gpt-4')
+
+        async def handle_request(request_data, deps=None):
+            adapter = AGUIAdapter(agent=agent, request=request_data)
+            async for event_str in adapter.encode_stream(adapter.run_stream(deps=deps)):
+                yield event_str
+        ```
+    """
+
+    def create_event_stream(self) -> BaseEventStream[RunAgentInput, BaseEvent, AgentDepsT]:
+        """Create an event stream for the adapter."""
+        return AGUIEventStream(self.request)
+
+    @classmethod
+    async def validate_request(cls, request: Request) -> RunAgentInput:
+        """Validate the request and return the validated request."""
+        return RunAgentInput.model_validate(await request.json())
+
+    @cached_property
+    def toolset(self) -> AbstractToolset[AgentDepsT] | None:
+        """Get the toolset for the adapter."""
+        if self.request.tools:
+            return _AGUIFrontendToolset[AgentDepsT](self.request.tools)
+        return None
+
+    @cached_property
+    def raw_state(self) -> dict[str, Any]:
+        """Get the state of the agent run."""
+        return self.request.state or {}
+
+    def encode_event(self, event: BaseEvent, accept: str | None = None) -> str:
+        """Encode an AG-UI event as SSE.
+
+        Args:
+            event: The AG-UI event to encode.
+            accept: The accept header value for encoding format.
+
+        Returns:
+            The SSE-formatted string.
+        """
+        encoder = EventEncoder(accept=accept or SSE_CONTENT_TYPE)
+        return encoder.encode(event)
+
+    @cached_property
+    def messages(self) -> list[ModelMessage]:
+        """Convert the AG-UI messages from the request to Pydantic AI messages.
+
+        Returns:
+            List of Pydantic AI ModelMessage objects.
+        """
+        result: list[ModelMessage] = []
+        tool_calls: dict[str, str] = {}  # Tool call ID to tool name mapping.
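+        # Walk the AG-UI history once: consecutive user/system/developer messages
+        # (and results of frontend tool calls) accumulate into a ModelRequest, while
+        # assistant messages (and builtin tool results) accumulate into a ModelResponse,
+        # preserving the original conversation order.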
+ request_parts: list[ModelRequestPart] | None = None + response_parts: list[ModelResponsePart] | None = None + + for msg in self.request.messages: + if isinstance(msg, UserMessage | SystemMessage | DeveloperMessage) or ( + isinstance(msg, ToolMessage) and not msg.tool_call_id.startswith(BUILTIN_TOOL_CALL_ID_PREFIX) + ): + if request_parts is None: + request_parts = [] + result.append(ModelRequest(parts=request_parts)) + response_parts = None + + if isinstance(msg, UserMessage): + request_parts.append(UserPromptPart(content=msg.content)) + elif isinstance(msg, SystemMessage | DeveloperMessage): + request_parts.append(SystemPromptPart(content=msg.content)) + else: + tool_call_id = msg.tool_call_id + tool_name = tool_calls.get(tool_call_id) + if tool_name is None: # pragma: no cover + raise ValueError(f'Tool call with ID {tool_call_id} not found in the history.') + + request_parts.append( + ToolReturnPart( + tool_name=tool_name, + content=msg.content, + tool_call_id=tool_call_id, + ) + ) + + elif isinstance(msg, AssistantMessage) or ( # pragma: no branch + isinstance(msg, ToolMessage) and msg.tool_call_id.startswith(BUILTIN_TOOL_CALL_ID_PREFIX) + ): + if response_parts is None: + response_parts = [] + result.append(ModelResponse(parts=response_parts)) + request_parts = None + + if isinstance(msg, AssistantMessage): + if msg.content: + response_parts.append(TextPart(content=msg.content)) + + if msg.tool_calls: + for tool_call in msg.tool_calls: + tool_call_id = tool_call.id + tool_name = tool_call.function.name + tool_calls[tool_call_id] = tool_name + + if tool_call_id.startswith(BUILTIN_TOOL_CALL_ID_PREFIX): + _, provider_name, tool_call_id = tool_call_id.split('|', 2) + response_parts.append( + BuiltinToolCallPart( + tool_name=tool_name, + args=tool_call.function.arguments, + tool_call_id=tool_call_id, + provider_name=provider_name, + ) + ) + else: + response_parts.append( + ToolCallPart( + tool_name=tool_name, + tool_call_id=tool_call_id, + args=tool_call.function.arguments, + ) + ) + else: + tool_call_id = msg.tool_call_id + tool_name = tool_calls.get(tool_call_id) + if tool_name is None: # pragma: no cover + raise ValueError(f'Tool call with ID {tool_call_id} not found in the history.') + _, provider_name, tool_call_id = tool_call_id.split('|', 2) + + response_parts.append( + BuiltinToolReturnPart( + tool_name=tool_name, + content=msg.content, + tool_call_id=tool_call_id, + provider_name=provider_name, + ) + ) + + return result diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py new file mode 100644 index 0000000000..e472bc204f --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -0,0 +1,274 @@ +"""AG-UI protocol adapter for Pydantic AI agents. + +This module provides classes for integrating Pydantic AI agents with the AG-UI protocol, +enabling streaming event-based communication for interactive AI applications. +""" + +from __future__ import annotations + +import json +from collections.abc import AsyncIterator, Iterable +from typing import TYPE_CHECKING, Final + +from ...messages import ( + BuiltinToolCallPart, + BuiltinToolReturnPart, + FinalResultEvent, + FunctionToolCallEvent, + FunctionToolResultEvent, + TextPart, + TextPartDelta, + ThinkingPart, + ThinkingPartDelta, + ToolCallPart, + ToolCallPartDelta, + ToolReturnPart, +) +from ...tools import AgentDepsT +from .. 
import BaseEventStream
+
+if TYPE_CHECKING:
+    pass  # Agent type is not actually used in this module
+
+try:
+    from ag_ui.core import (
+        BaseEvent,
+        EventType,
+        RunAgentInput,
+        RunErrorEvent,
+        RunFinishedEvent,
+        RunStartedEvent,
+        TextMessageContentEvent,
+        TextMessageEndEvent,
+        TextMessageStartEvent,
+        ThinkingEndEvent,
+        ThinkingStartEvent,
+        ThinkingTextMessageContentEvent,
+        ThinkingTextMessageEndEvent,
+        ThinkingTextMessageStartEvent,
+        ToolCallArgsEvent,
+        ToolCallEndEvent,
+        ToolCallResultEvent,
+        ToolCallStartEvent,
+    )
+except ImportError as e:  # pragma: no cover
+    raise ImportError(
+        'Please install the `ag-ui-protocol` package to use AG-UI integration, '
+        'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`'
+    ) from e
+
+__all__ = [
+    'AGUIEventStream',
+    'RunAgentInput',
+    'RunStartedEvent',
+    'RunFinishedEvent',
+]
+
+BUILTIN_TOOL_CALL_ID_PREFIX: Final[str] = 'pyd_ai_builtin'
+
+
+class AGUIEventStream(BaseEventStream[RunAgentInput, BaseEvent, AgentDepsT]):
+    """Transforms Pydantic AI agent events into AG-UI protocol events.
+
+    This class handles the stateful transformation of streaming agent events
+    into the AG-UI protocol format, managing message IDs, thinking mode state,
+    and tool call ID mappings for builtin tools.
+
+    Example:
+        ```python
+        event_stream = AGUIEventStream(request)
+        async for ag_ui_event in event_stream.handle_stream(pai_events):
+            print(ag_ui_event)
+        ```
+    """
+
+    def __init__(self, request: RunAgentInput) -> None:
+        """Initialize AG-UI event stream state."""
+        super().__init__(request)
+        self.part_end: BaseEvent | None = None
+        self.thinking: bool = False
+        self.builtin_tool_call_ids: dict[str, str] = {}
+
+    async def before_stream(self) -> AsyncIterator[BaseEvent]:
+        """Yield events before agent streaming starts."""
+        yield RunStartedEvent(
+            thread_id=self.request.thread_id,
+            run_id=self.request.run_id,
+        )
+
+    async def after_stream(self) -> AsyncIterator[BaseEvent]:
+        """Yield events after agent streaming completes, cleaning up any pending state."""
+        # Emit any pending part end event
+        if self.part_end:  # pragma: no branch
+            yield self.part_end
+            self.part_end = None
+
+        # End thinking mode if still active
+        if self.thinking:
+            yield ThinkingEndEvent(
+                type=EventType.THINKING_END,
+            )
+            self.thinking = False
+
+        # Emit finish event
+        yield RunFinishedEvent(
+            thread_id=self.request.thread_id,
+            run_id=self.request.run_id,
+        )
+
+    async def on_error(self, error: Exception) -> AsyncIterator[BaseEvent]:
+        """Handle errors during streaming."""
+        # Try to get code from exception if it has one, otherwise use class name
+        code = getattr(error, 'code', error.__class__.__name__)
+        yield RunErrorEvent(message=str(error), code=code)
+
+    # Granular handlers implementation
+
+    async def handle_text_start(self, part: TextPart) -> AsyncIterator[BaseEvent]:
+        """Handle a TextPart at start."""
+        if self.part_end:
+            yield self.part_end
+            self.part_end = None
+
+        if self.thinking:
+            yield ThinkingEndEvent(type=EventType.THINKING_END)
+            self.thinking = False
+
+        message_id = self.new_message_id()
+        yield TextMessageStartEvent(message_id=message_id)
+        if part.content:  # pragma: no branch
+            yield TextMessageContentEvent(message_id=message_id, delta=part.content)
+        self.part_end = TextMessageEndEvent(message_id=message_id)
+
+    async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[BaseEvent]:
+        """Handle a TextPartDelta."""
+        if delta.content_delta:  # pragma: no branch
+            yield TextMessageContentEvent(message_id=self.message_id,
delta=delta.content_delta) + + async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[BaseEvent]: + """Handle a ThinkingPart at start.""" + if self.part_end: + yield self.part_end + self.part_end = None + + if not self.thinking: + yield ThinkingStartEvent(type=EventType.THINKING_START) + self.thinking = True + + if part.content: + yield ThinkingTextMessageStartEvent(type=EventType.THINKING_TEXT_MESSAGE_START) + yield ThinkingTextMessageContentEvent(type=EventType.THINKING_TEXT_MESSAGE_CONTENT, delta=part.content) + self.part_end = ThinkingTextMessageEndEvent(type=EventType.THINKING_TEXT_MESSAGE_END) + + async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[BaseEvent]: + """Handle a ThinkingPartDelta.""" + if delta.content_delta: # pragma: no branch + if not isinstance(self.part_end, ThinkingTextMessageEndEvent): + yield ThinkingTextMessageStartEvent(type=EventType.THINKING_TEXT_MESSAGE_START) + self.part_end = ThinkingTextMessageEndEvent(type=EventType.THINKING_TEXT_MESSAGE_END) + + yield ThinkingTextMessageContentEvent( + type=EventType.THINKING_TEXT_MESSAGE_CONTENT, delta=delta.content_delta + ) + + async def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: + """Handle a ToolCallPart or BuiltinToolCallPart at start.""" + if self.part_end: + yield self.part_end + self.part_end = None + + if self.thinking: + yield ThinkingEndEvent(type=EventType.THINKING_END) + self.thinking = False + + tool_call_id = part.tool_call_id + if isinstance(part, BuiltinToolCallPart): + builtin_tool_call_id = '|'.join([BUILTIN_TOOL_CALL_ID_PREFIX, part.provider_name or '', tool_call_id]) + self.builtin_tool_call_ids[tool_call_id] = builtin_tool_call_id + tool_call_id = builtin_tool_call_id + + message_id = self.message_id or self.new_message_id() + yield ToolCallStartEvent(tool_call_id=tool_call_id, tool_call_name=part.tool_name, parent_message_id=message_id) + if part.args: + yield ToolCallArgsEvent(tool_call_id=tool_call_id, delta=part.args_as_json_str()) + self.part_end = ToolCallEndEvent(tool_call_id=tool_call_id) + + def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: + """Handle a BuiltinToolCallPart at start.""" + return self.handle_tool_call_start(part) + + async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[BaseEvent]: + """Handle a ToolCallPartDelta.""" + tool_call_id = delta.tool_call_id + assert tool_call_id, '`ToolCallPartDelta.tool_call_id` must be set' + if tool_call_id in self.builtin_tool_call_ids: + tool_call_id = self.builtin_tool_call_ids[tool_call_id] + yield ToolCallArgsEvent( + tool_call_id=tool_call_id, + delta=delta.args_delta if isinstance(delta.args_delta, str) else json.dumps(delta.args_delta), + ) + + async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[BaseEvent]: + """Handle a BuiltinToolReturnPart.""" + # Emit any pending part_end event (e.g., TOOL_CALL_END) before the result + if self.part_end: + yield self.part_end + self.part_end = None + + tool_call_id = self.builtin_tool_call_ids[part.tool_call_id] + yield ToolCallResultEvent( + message_id=self.new_message_id(), + type=EventType.TOOL_CALL_RESULT, + role='tool', + tool_call_id=tool_call_id, + content=part.model_response_str(), + ) + + async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[BaseEvent]: + """Handle a FunctionToolCallEvent. 
+ + This event is emitted when a function tool is called, but no AG-UI events + are needed at this stage since tool calls are handled in PartStartEvent. + """ + return + yield # Make this an async generator + + async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[BaseEvent]: + """Handle a FunctionToolResultEvent, emitting tool result events.""" + result = event.result + if not isinstance(result, ToolReturnPart): + return + + # Emit any pending part_end event (e.g., TOOL_CALL_END) before the result + if self.part_end: + yield self.part_end + self.part_end = None + + yield ToolCallResultEvent( + message_id=self.new_message_id(), + type=EventType.TOOL_CALL_RESULT, + role='tool', + tool_call_id=result.tool_call_id, + content=result.model_response_str(), + ) + + # Check for AG-UI events returned by tool calls. + possible_event = result.metadata or result.content + if isinstance(possible_event, BaseEvent): + yield possible_event + elif isinstance(possible_event, str | bytes): # pragma: no branch + # Avoid iterable check for strings and bytes. + pass + elif isinstance(possible_event, Iterable): # pragma: no branch + for item in possible_event: # type: ignore[reportUnknownMemberType] + if isinstance(item, BaseEvent): # pragma: no branch + yield item + + async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[BaseEvent]: + """Handle a FinalResultEvent. + + This event is emitted when the agent produces a final result, but no AG-UI events + are needed at this stage. + """ + return + yield # Make this an async generator diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/adapter.py deleted file mode 100644 index e6ffa784f3..0000000000 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/adapter.py +++ /dev/null @@ -1,334 +0,0 @@ -"""AG-UI adapter for handling requests.""" - -# pyright: reportGeneralTypeIssues=false, reportInvalidTypeArguments=false - -from __future__ import annotations - -import json -from dataclasses import dataclass -from http import HTTPStatus -from typing import TYPE_CHECKING, Any - -from pydantic import BaseModel, ValidationError - -from ...tools import AgentDepsT -from .event_stream import ( - AGUIEventStream, - RunAgentInput, - StateHandler, - _AGUIFrontendToolset, # type: ignore[reportPrivateUsage] - _InvalidStateError, # type: ignore[reportPrivateUsage] - _NoMessagesError, # type: ignore[reportPrivateUsage] - _RunError, # type: ignore[reportPrivateUsage] - protocol_messages_to_pai_messages, -) - -if TYPE_CHECKING: - from ...agent import Agent - -__all__ = ['AGUIAdapter'] - - -@dataclass -class AGUIAdapter: - """Adapter for handling AG-UI protocol requests with Pydantic AI agents. - - This adapter provides an interface for integrating Pydantic AI agents - with the AG-UI protocol, handling request parsing, message conversion, - and event streaming. 
- - Example: - ```python - from pydantic_ai import Agent - from pydantic_ai.ui.ag_ui import AGUIAdapter - - agent = Agent('openai:gpt-4') - adapter = AGUIAdapter(agent) - - async def handle_request(request: RunAgentInput, deps=None): - async for event_str in adapter.run_stream_sse(request, deps): - yield event_str - ``` - """ - - agent: Agent[AgentDepsT] - """The Pydantic AI agent to run.""" - - async def run_stream( # noqa: C901 - self, - request: RunAgentInput, - deps: AgentDepsT | None = None, - *, - output_type: Any = None, - model: Any = None, - model_settings: Any = None, - usage_limits: Any = None, - usage: Any = None, - infer_name: bool = True, - toolsets: Any = None, - on_complete: Any = None, - ): - """Stream events from an agent run as AG-UI protocol events. - - This method provides a complete implementation with all AG-UI features including: - - Frontend tools handling - - State injection - - Error handling (validation vs stream errors) - - on_complete callback - - RunStarted and RunFinished events - - Args: - request: The AG-UI request data. - deps: Optional dependencies to pass to the agent. - output_type: Custom output type for this run. - model: Optional model to use for this run. - model_settings: Optional settings for the model's request. - usage_limits: Optional limits on model request count or token usage. - usage: Optional usage to start with. - infer_name: Whether to infer the agent name from the call frame. - toolsets: Optional additional toolsets for this run. - on_complete: Optional callback called when the agent run completes. - - Yields: - AG-UI protocol events (BaseEvent subclasses). - - Raises: - _RunError: If request validation fails or other errors occur. - """ - from ... import _utils - from ...exceptions import UserError - from ...tools import DeferredToolRequests - from .event_stream import RunFinishedEvent, RunStartedEvent - - # Create event stream - event_stream = self.create_event_stream() - stream_started = False - - # Handle frontend tools - if request.tools: - toolset = _AGUIFrontendToolset[AgentDepsT](request.tools) - toolsets = [*toolsets, toolset] if toolsets else [toolset] - - try: - # Emit start event - yield RunStartedEvent( - thread_id=request.thread_id, - run_id=request.run_id, - ) - stream_started = True - - if not request.messages: - raise _NoMessagesError - - # Handle state injection - raw_state: dict[str, Any] = request.state or {} - if isinstance(deps, StateHandler): - if isinstance(deps.state, BaseModel): - try: - state = type(deps.state).model_validate(raw_state) - except ValidationError as e: # pragma: no cover - raise _InvalidStateError from e - else: - state = raw_state - - from dataclasses import replace - - deps = replace(deps, state=state) - elif raw_state: - raise UserError( - f'AG-UI state is provided but `deps` of type `{type(deps).__name__}` does not implement the `StateHandler` protocol: it needs to be a dataclass with a non-optional `state` field.' 
- ) - - # Convert AG-UI messages to pAI messages - messages = protocol_messages_to_pai_messages(request.messages) - - # Run agent and stream events - result = None - async for event in self.agent.run_stream_events( - user_prompt=None, - output_type=[output_type or self.agent.output_type, DeferredToolRequests], - message_history=messages, - model=model, - deps=deps, - model_settings=model_settings, - usage_limits=usage_limits, - usage=usage, - infer_name=infer_name, - toolsets=toolsets, - ): - from ...run import AgentRunResultEvent - - # Capture result for on_complete callback - if isinstance(event, AgentRunResultEvent): - result = event.result - - # Transform pAI events to AG-UI events - async for ag_ui_event in event_stream.agent_event_to_events(event): # type: ignore[arg-type] - yield ag_ui_event - - # Call on_complete callback - if on_complete is not None and result is not None: - if _utils.is_async_callable(on_complete): - await on_complete(result) - else: - await _utils.run_in_executor(on_complete, result) - - except _RunError as e: - if stream_started: - async for error_event in event_stream.on_stream_error(e): - yield error_event - else: - async for error_event in event_stream.on_validation_error(e): - yield error_event - raise - except Exception as e: - if stream_started: - async for error_event in event_stream.on_stream_error(e): - yield error_event - else: - async for error_event in event_stream.on_validation_error(e): - yield error_event - raise - else: - # Emit finish event - yield RunFinishedEvent( - thread_id=request.thread_id, - run_id=request.run_id, - ) - - async def run_stream_sse( - self, - request: RunAgentInput, - accept: str, - *, - output_type: Any = None, - model: Any = None, - deps: AgentDepsT | None = None, - model_settings: Any = None, - usage_limits: Any = None, - usage: Any = None, - infer_name: bool = True, - toolsets: Any = None, - on_complete: Any = None, - ): - """Stream SSE-encoded events from an agent run. - - This method wraps `run_stream` and encodes the events as SSE strings. - - Args: - request: The AG-UI request data. - accept: The accept header value for encoding. - output_type: Custom output type for this run. - model: Optional model to use for this run. - deps: Optional dependencies to pass to the agent. - model_settings: Optional settings for the model's request. - usage_limits: Optional limits on model request count or token usage. - usage: Optional usage to start with. - infer_name: Whether to infer the agent name from the call frame. - toolsets: Optional additional toolsets for this run. - on_complete: Optional callback called when the agent run completes. - - Yields: - SSE-formatted strings. 
- """ - from ag_ui.encoder import EventEncoder - - encoder = EventEncoder(accept=accept) - - try: - async for event in self.run_stream( - request=request, - deps=deps, - output_type=output_type, - model=model, - model_settings=model_settings, - usage_limits=usage_limits, - usage=usage, - infer_name=infer_name, - toolsets=toolsets, - on_complete=on_complete, - ): - yield encoder.encode(event) - except _RunError: - # Error events are already yielded by run_stream - # This shouldn't actually be reached since run_stream yields error events before raising - pass - except Exception: - # Let other exceptions propagate - raise - - async def dispatch_request( - self, - request: Any, - deps: AgentDepsT | None = None, - *, - output_type: Any = None, - model: Any = None, - model_settings: Any = None, - usage_limits: Any = None, - usage: Any = None, - infer_name: bool = True, - toolsets: Any = None, - on_complete: Any = None, - ) -> Any: - """Handle an AG-UI request and return a streaming response. - - Args: - request: The incoming Starlette/FastAPI request. - deps: Optional dependencies to pass to the agent. - output_type: Custom output type for this run. - model: Optional model to use for this run. - model_settings: Optional settings for the model's request. - usage_limits: Optional limits on model request count or token usage. - usage: Optional usage to start with. - infer_name: Whether to infer the agent name from the call frame. - toolsets: Optional additional toolsets for this run. - on_complete: Optional callback called when the agent run completes. - - Returns: - A streaming Starlette response with AG-UI protocol events. - """ - try: - from starlette.requests import Request - from starlette.responses import Response, StreamingResponse - except ImportError as e: # pragma: no cover - raise ImportError('Please install starlette to use dispatch_request') from e - - if not isinstance(request, Request): # pragma: no cover - raise TypeError(f'Expected Starlette Request, got {type(request).__name__}') - - accept = request.headers.get('accept', 'text/event-stream') - - try: - input_data = RunAgentInput.model_validate(await request.json()) - except ValidationError as e: # pragma: no cover - return Response( - content=json.dumps(e.json()), - media_type='application/json', - status_code=HTTPStatus.UNPROCESSABLE_ENTITY, - ) - - return StreamingResponse( - self.run_stream_sse( - request=input_data, - accept=accept, - deps=deps, - output_type=output_type, - model=model, - model_settings=model_settings, - usage_limits=usage_limits, - usage=usage, - infer_name=infer_name, - toolsets=toolsets, - on_complete=on_complete, - ), - media_type=accept, - ) - - def create_event_stream(self) -> AGUIEventStream[AgentDepsT]: - """Create a new AG-UI event stream. - - Returns: - An AGUIEventStream instance. - """ - return AGUIEventStream[AgentDepsT]() diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/event_stream.py deleted file mode 100644 index 5ce69abfda..0000000000 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/event_stream.py +++ /dev/null @@ -1,548 +0,0 @@ -"""AG-UI protocol adapter for Pydantic AI agents. - -This module provides classes for integrating Pydantic AI agents with the AG-UI protocol, -enabling streaming event-based communication for interactive AI applications. 
-""" - -# pyright: reportIncompatibleMethodOverride=false, reportUnusedClass=false, reportGeneralTypeIssues=false, reportInvalidTypeArguments=false - -from __future__ import annotations - -import json -from collections.abc import AsyncIterator, Iterable -from dataclasses import Field, dataclass -from typing import TYPE_CHECKING, Any, ClassVar, Final, Generic, Protocol, TypeVar, runtime_checkable - -if TYPE_CHECKING: - pass # Agent type is not actually used in this module - -from pydantic import BaseModel, ValidationError - -from ...messages import ( - BuiltinToolCallEvent, # type: ignore[reportDeprecated] - BuiltinToolCallPart, - BuiltinToolResultEvent, # type: ignore[reportDeprecated] - BuiltinToolReturnPart, - FinalResultEvent, - FunctionToolCallEvent, - FunctionToolResultEvent, - ModelMessage, - TextPart, - TextPartDelta, - ThinkingPart, - ThinkingPartDelta, - ToolCallPart, - ToolCallPartDelta, - ToolReturnPart, -) -from ...run import AgentRunResultEvent -from ...tools import AgentDepsT, ToolDefinition -from ...toolsets.external import ExternalToolset -from .. import BaseEventStream - -try: - from ag_ui.core import ( - AssistantMessage, - BaseEvent, - DeveloperMessage, - EventType, - Message, - RunAgentInput, - RunErrorEvent, - RunFinishedEvent, - RunStartedEvent, - SystemMessage, - TextMessageContentEvent, - TextMessageEndEvent, - TextMessageStartEvent, - ThinkingEndEvent, - ThinkingStartEvent, - ThinkingTextMessageContentEvent, - ThinkingTextMessageEndEvent, - ThinkingTextMessageStartEvent, - Tool as AGUITool, - ToolCallArgsEvent, - ToolCallEndEvent, - ToolCallResultEvent, - ToolCallStartEvent, - ToolMessage, - UserMessage, - ) -except ImportError as e: # pragma: no cover - raise ImportError( - 'Please install the `ag-ui-protocol` package to use AG-UI integration, ' - 'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`' - ) from e - -__all__ = [ - 'AGUIEventStream', - 'StateHandler', - 'StateDeps', - 'protocol_messages_to_pai_messages', - '_AGUIFrontendToolset', - '_NoMessagesError', - '_InvalidStateError', - '_RunError', - 'RunAgentInput', - 'RunStartedEvent', - 'RunFinishedEvent', -] - -_BUILTIN_TOOL_CALL_ID_PREFIX: Final[str] = 'pyd_ai_builtin' - - -# State management types - -StateT = TypeVar('StateT', bound=BaseModel) -"""Type variable for the state type, which must be a subclass of `BaseModel`.""" - - -@runtime_checkable -class StateHandler(Protocol): - """Protocol for state handlers in agent runs. Requires the class to be a dataclass with a `state` field.""" - - # Has to be a dataclass so we can use `replace` to update the state. - # From https://github.com/python/typeshed/blob/9ab7fde0a0cd24ed7a72837fcb21093b811b80d8/stdlib/_typeshed/__init__.pyi#L352 - __dataclass_fields__: ClassVar[dict[str, Field[Any]]] - - @property - def state(self) -> Any: - """Get the current state of the agent run.""" - ... - - @state.setter - def state(self, state: Any) -> None: - """Set the state of the agent run. - - This method is called to update the state of the agent run with the - provided state. - - Args: - state: The run state. - - Raises: - InvalidStateError: If `state` does not match the expected model. - """ - ... - - -@dataclass -class StateDeps(Generic[StateT]): - """Provides AG-UI state management. - - This class is used to manage the state of an agent run. It allows setting - the state of the agent run with a specific type of state model, which must - be a subclass of `BaseModel`. 
- - The state is set using the `state` setter by the `Adapter` when the run starts. - - Implements the `StateHandler` protocol. - """ - - state: StateT - - -# Error types - - -@dataclass -class _RunError(Exception): - """Exception raised for errors during agent runs.""" - - message: str - code: str - - def __str__(self) -> str: # pragma: no cover - return self.message - - -@dataclass -class _NoMessagesError(_RunError): - """Exception raised when no messages are found in the input.""" - - message: str = 'no messages found in the input' - code: str = 'no_messages' - - -@dataclass -class _InvalidStateError(_RunError, ValidationError): - """Exception raised when an invalid state is provided.""" - - message: str = 'invalid state provided' - code: str = 'invalid_state' - - -class _ToolCallNotFoundError(_RunError, ValueError): - """Exception raised when an tool result is present without a matching call.""" - - def __init__(self, tool_call_id: str) -> None: - """Initialize the exception with the tool call ID.""" - super().__init__( # pragma: no cover - message=f'Tool call with ID {tool_call_id} not found in the history.', - code='tool_call_not_found', - ) - - -# Frontend toolset - - -class _AGUIFrontendToolset(ExternalToolset[AgentDepsT]): - """Toolset for AG-UI frontend tools.""" - - def __init__(self, tools: list[AGUITool]): - """Initialize the toolset with AG-UI tools. - - Args: - tools: List of AG-UI tool definitions. - """ - super().__init__( - [ - ToolDefinition( - name=tool.name, - description=tool.description, - parameters_json_schema=tool.parameters, - ) - for tool in tools - ] - ) - - @property - def label(self) -> str: - """Return the label for this toolset.""" - return 'the AG-UI frontend tools' # pragma: no cover - - -class AGUIEventStream(BaseEventStream[BaseEvent, AgentDepsT]): - """Transforms Pydantic AI agent events into AG-UI protocol events. - - This class handles the stateful transformation of streaming agent events - into the AG-UI protocol format, managing message IDs, thinking mode state, - and tool call ID mappings for builtin tools. 
- - Example: - ```python - event_stream = AGUIEventStream() - async for ag_ui_event in event_stream.agent_event_to_events(pai_event): - print(ag_ui_event) - ``` - """ - - def __init__(self) -> None: - """Initialize AG-UI event stream state.""" - super().__init__() - self.part_end: BaseEvent | None = None - self.thinking: bool = False - self.builtin_tool_call_ids: dict[str, str] = {} - - # Granular handlers implementation - - async def handle_text_start(self, part: TextPart) -> AsyncIterator[BaseEvent]: - """Handle a TextPart at start.""" - if self.part_end: - yield self.part_end - self.part_end = None - - if self.thinking: - yield ThinkingEndEvent(type=EventType.THINKING_END) - self.thinking = False - - message_id = self.new_message_id() - yield TextMessageStartEvent(message_id=message_id) - if part.content: # pragma: no branch - yield TextMessageContentEvent(message_id=message_id, delta=part.content) - self.part_end = TextMessageEndEvent(message_id=message_id) - - async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[BaseEvent]: - """Handle a TextPartDelta.""" - if delta.content_delta: # pragma: no branch - yield TextMessageContentEvent(message_id=self.message_id, delta=delta.content_delta) - - async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[BaseEvent]: - """Handle a ThinkingPart at start.""" - if self.part_end: - yield self.part_end - self.part_end = None - - if not self.thinking: - yield ThinkingStartEvent(type=EventType.THINKING_START) - self.thinking = True - - if part.content: - yield ThinkingTextMessageStartEvent(type=EventType.THINKING_TEXT_MESSAGE_START) - yield ThinkingTextMessageContentEvent(type=EventType.THINKING_TEXT_MESSAGE_CONTENT, delta=part.content) - self.part_end = ThinkingTextMessageEndEvent(type=EventType.THINKING_TEXT_MESSAGE_END) - - async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[BaseEvent]: - """Handle a ThinkingPartDelta.""" - if delta.content_delta: # pragma: no branch - if not isinstance(self.part_end, ThinkingTextMessageEndEvent): - yield ThinkingTextMessageStartEvent(type=EventType.THINKING_TEXT_MESSAGE_START) - self.part_end = ThinkingTextMessageEndEvent(type=EventType.THINKING_TEXT_MESSAGE_END) - - yield ThinkingTextMessageContentEvent( - type=EventType.THINKING_TEXT_MESSAGE_CONTENT, delta=delta.content_delta - ) - - async def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: - """Handle a ToolCallPart or BuiltinToolCallPart at start.""" - if self.part_end: - yield self.part_end - self.part_end = None - - if self.thinking: - yield ThinkingEndEvent(type=EventType.THINKING_END) - self.thinking = False - - tool_call_id = part.tool_call_id - if isinstance(part, BuiltinToolCallPart): - builtin_tool_call_id = '|'.join([_BUILTIN_TOOL_CALL_ID_PREFIX, part.provider_name or '', tool_call_id]) - self.builtin_tool_call_ids[tool_call_id] = builtin_tool_call_id - tool_call_id = builtin_tool_call_id - - message_id = self.message_id or self.new_message_id() - yield ToolCallStartEvent(tool_call_id=tool_call_id, tool_call_name=part.tool_name, parent_message_id=message_id) - if part.args: - yield ToolCallArgsEvent(tool_call_id=tool_call_id, delta=part.args_as_json_str()) - self.part_end = ToolCallEndEvent(tool_call_id=tool_call_id) - - async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[BaseEvent]: - """Handle a ToolCallPartDelta.""" - tool_call_id = delta.tool_call_id - assert tool_call_id, 
'`ToolCallPartDelta.tool_call_id` must be set' - if tool_call_id in self.builtin_tool_call_ids: - tool_call_id = self.builtin_tool_call_ids[tool_call_id] - yield ToolCallArgsEvent( - tool_call_id=tool_call_id, - delta=delta.args_delta if isinstance(delta.args_delta, str) else json.dumps(delta.args_delta), - ) - - async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[BaseEvent]: - """Handle a BuiltinToolReturnPart.""" - # Emit any pending part_end event (e.g., TOOL_CALL_END) before the result - if self.part_end: - yield self.part_end - self.part_end = None - - tool_call_id = self.builtin_tool_call_ids[part.tool_call_id] - yield ToolCallResultEvent( - message_id=self.new_message_id(), - type=EventType.TOOL_CALL_RESULT, - role='tool', - tool_call_id=tool_call_id, - content=part.model_response_str(), - ) - - async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[BaseEvent]: - """Handle a FunctionToolCallEvent. - - This event is emitted when a function tool is called, but no AG-UI events - are needed at this stage since tool calls are handled in PartStartEvent. - """ - return - yield # Make this an async generator - - async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[BaseEvent]: - """Handle a FunctionToolResultEvent, emitting tool result events.""" - result = event.result - if not isinstance(result, ToolReturnPart): - return - - # Emit any pending part_end event (e.g., TOOL_CALL_END) before the result - if self.part_end: - yield self.part_end - self.part_end = None - - yield ToolCallResultEvent( - message_id=self.new_message_id(), - type=EventType.TOOL_CALL_RESULT, - role='tool', - tool_call_id=result.tool_call_id, - content=result.model_response_str(), - ) - - # Check for AG-UI events returned by tool calls. - possible_event = result.metadata or result.content - if isinstance(possible_event, BaseEvent): - yield possible_event - elif isinstance(possible_event, str | bytes): # pragma: no branch - # Avoid iterable check for strings and bytes. - pass - elif isinstance(possible_event, Iterable): # pragma: no branch - for item in possible_event: # type: ignore[reportUnknownMemberType] - if isinstance(item, BaseEvent): # pragma: no branch - yield item - - async def handle_builtin_tool_call(self, event: BuiltinToolCallEvent) -> AsyncIterator[BaseEvent]: # type: ignore[reportDeprecated] - """Handle a BuiltinToolCallEvent. - - This event is emitted when a builtin tool is called, but no AG-UI events - are needed at this stage since builtin tool calls are handled in PartStartEvent. - """ - return - yield # Make this an async generator - - async def handle_builtin_tool_result(self, event: BuiltinToolResultEvent) -> AsyncIterator[BaseEvent]: # type: ignore[reportDeprecated] - """Handle a BuiltinToolResultEvent. - - This event is emitted when a builtin tool returns. We need to emit any pending - part_end event (TOOL_CALL_END) before the result is shown in handle_builtin_tool_return. - """ - # Emit any pending part_end event (e.g., TOOL_CALL_END) before the result - if self.part_end: - yield self.part_end - self.part_end = None - - async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[BaseEvent]: - """Handle a FinalResultEvent. - - This event is emitted when the agent produces a final result, but no AG-UI events - are needed at this stage. 
- """ - return - yield # Make this an async generator - - async def handle_run_result(self, event: AgentRunResultEvent) -> AsyncIterator[BaseEvent]: - """Handle an AgentRunResultEvent, cleaning up any pending state.""" - # Emit any pending part end event - if self.part_end: # pragma: no branch - yield self.part_end - self.part_end = None - - # End thinking mode if still active - if self.thinking: - yield ThinkingEndEvent( - type=EventType.THINKING_END, - ) - self.thinking = False - - async def on_validation_error(self, error: Exception) -> AsyncIterator[BaseEvent]: - """Handle validation errors before stream starts.""" - # Try to get code from exception if it has one, otherwise use class name - code = getattr(error, 'code', error.__class__.__name__) - yield RunErrorEvent(message=str(error), code=code) - - async def on_stream_error(self, error: Exception) -> AsyncIterator[BaseEvent]: - """Handle errors during streaming.""" - # Try to get code from exception if it has one, otherwise use class name - code = getattr(error, 'code', error.__class__.__name__) - yield RunErrorEvent(message=str(error), code=code) - - def encode_event(self, event: BaseEvent, accept: str) -> str: - """Encode an AG-UI event as SSE. - - Args: - event: The AG-UI event to encode. - accept: The accept header value for encoding format. - - Returns: - The SSE-formatted string. - """ - from ag_ui.encoder import EventEncoder - - encoder = EventEncoder(accept=accept) - return encoder.encode(event) - - -def protocol_messages_to_pai_messages(messages: list[Message]) -> list[ModelMessage]: - """Convert AG-UI messages to Pydantic AI messages. - - Args: - messages: List of AG-UI messages. - - Returns: - List of Pydantic AI ModelMessage objects. - """ - from ...messages import ( - ModelRequest, - ModelRequestPart, - ModelResponse, - ModelResponsePart, - SystemPromptPart, - UserPromptPart, - ) - - result: list[ModelMessage] = [] - tool_calls: dict[str, str] = {} # Tool call ID to tool name mapping. 
- request_parts: list[ModelRequestPart] | None = None - response_parts: list[ModelResponsePart] | None = None - - for msg in messages: - if isinstance(msg, UserMessage | SystemMessage | DeveloperMessage) or ( - isinstance(msg, ToolMessage) and not msg.tool_call_id.startswith(_BUILTIN_TOOL_CALL_ID_PREFIX) - ): - if request_parts is None: - request_parts = [] - result.append(ModelRequest(parts=request_parts)) - response_parts = None - - if isinstance(msg, UserMessage): - request_parts.append(UserPromptPart(content=msg.content)) - elif isinstance(msg, SystemMessage | DeveloperMessage): - request_parts.append(SystemPromptPart(content=msg.content)) - else: - tool_call_id = msg.tool_call_id - tool_name = tool_calls.get(tool_call_id) - if tool_name is None: # pragma: no cover - raise ValueError(f'Tool call with ID {tool_call_id} not found in the history.') - - request_parts.append( - ToolReturnPart( - tool_name=tool_name, - content=msg.content, - tool_call_id=tool_call_id, - ) - ) - - elif isinstance(msg, AssistantMessage) or ( # pragma: no branch - isinstance(msg, ToolMessage) and msg.tool_call_id.startswith(_BUILTIN_TOOL_CALL_ID_PREFIX) - ): - if response_parts is None: - response_parts = [] - result.append(ModelResponse(parts=response_parts)) - request_parts = None - - if isinstance(msg, AssistantMessage): - if msg.content: - response_parts.append(TextPart(content=msg.content)) - - if msg.tool_calls: - for tool_call in msg.tool_calls: - tool_call_id = tool_call.id - tool_name = tool_call.function.name - tool_calls[tool_call_id] = tool_name - - if tool_call_id.startswith(_BUILTIN_TOOL_CALL_ID_PREFIX): - _, provider_name, tool_call_id = tool_call_id.split('|', 2) - response_parts.append( - BuiltinToolCallPart( - tool_name=tool_name, - args=tool_call.function.arguments, - tool_call_id=tool_call_id, - provider_name=provider_name, - ) - ) - else: - response_parts.append( - ToolCallPart( - tool_name=tool_name, - tool_call_id=tool_call_id, - args=tool_call.function.arguments, - ) - ) - else: - tool_call_id = msg.tool_call_id - tool_name = tool_calls.get(tool_call_id) - if tool_name is None: # pragma: no cover - raise ValueError(f'Tool call with ID {tool_call_id} not found in the history.') - _, provider_name, tool_call_id = tool_call_id.split('|', 2) - - response_parts.append( - BuiltinToolReturnPart( - tool_name=tool_name, - content=msg.content, - tool_call_id=tool_call_id, - provider_name=provider_name, - ) - ) - - return result diff --git a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py new file mode 100644 index 0000000000..927e910409 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py @@ -0,0 +1,377 @@ +"""Base classes for UI event stream protocols. + +This module provides abstract base classes for implementing UI event stream adapters +that transform Pydantic AI agent events into protocol-specific events (e.g., AG-UI, Vercel AI). 
+""" + +from __future__ import annotations + +from abc import ABC +from collections.abc import AsyncIterator +from dataclasses import dataclass, field +from typing import Generic, TypeVar +from uuid import uuid4 + +from ..messages import ( + AgentStreamEvent, + BuiltinToolCallPart, + BuiltinToolReturnPart, + FilePart, + FinalResultEvent, + FunctionToolCallEvent, + FunctionToolResultEvent, + PartDeltaEvent, + PartStartEvent, + TextPart, + TextPartDelta, + ThinkingPart, + ThinkingPartDelta, + ToolCallPart, + ToolCallPartDelta, +) +from ..run import AgentRunResult, AgentRunResultEvent +from ..tools import AgentDepsT + +__all__ = [ + 'BaseEventStream', +] + +EventT = TypeVar('EventT') +"""Type variable for protocol-specific event types.""" + +RequestT = TypeVar('RequestT') +"""Type variable for request types.""" + +SourceEvent = AgentStreamEvent | AgentRunResultEvent + + +@dataclass +class BaseEventStream(ABC, Generic[RequestT, EventT, AgentDepsT]): + """Base class for transforming pAI agent events into protocol-specific events. + + This class provides a granular method-per-part-type pattern that makes it easy to customize + specific event transformations by overriding individual handler methods. + + Subclasses should: + 1. Initialize state attributes in `__init__` if needed + 2. Implement all abstract `handle_*` methods for event transformation + 3. Implement error handling methods + 4. Optionally override lifecycle hooks (`before_stream`, `after_stream`) + + Example: + ```python + class MyEventStream(BaseEventStream[MyEvent, MyDeps]): + def __init__(self): + super().__init__() + self.custom_state = {} + + async def handle_text_start(self, part: TextPart): + yield MyTextStartEvent(id=self.new_message_id(), text=part.content) + ``` + """ + + request: RequestT + + result: AgentRunResult | None = None + + message_id: str = field(default_factory=lambda: str(uuid4())) + + def new_message_id(self) -> str: + """Generate and store a new message ID. + + Returns: + A new UUID-based message ID. + """ + self.message_id = str(uuid4()) + return self.message_id + + async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterator[EventT]: + """Handle a stream of agent events. + + Args: + stream: The stream of agent events to handle. + + Yields: + Protocol-specific events. + """ + async for e in self.before_stream(): + yield e + + try: + async for event in stream: + async for e in self.handle_event(event): + yield e + except Exception as e: + async for e in self.on_error(e): + yield e + else: + async for e in self.after_stream(): + yield e + + async def handle_event(self, event: SourceEvent) -> AsyncIterator[EventT]: # noqa: C901 + """Transform a pAI agent event into protocol-specific events. + + This method dispatches to specific `handle_*` methods based on event and part type. + Subclasses should implement the individual handler methods rather than overriding this. + + Args: + event: The pAI agent event to transform. + + Yields: + Protocol-specific events. 
+ """ + async for e in self.before_event(event): + yield e + + match event: + case PartStartEvent(part=part): + # Dispatch based on part type + match part: + case TextPart(): + async for e in self.handle_text_start(part): + yield e + case ThinkingPart(): + async for e in self.handle_thinking_start(part): + yield e + case ToolCallPart(): + async for e in self.handle_tool_call_start(part): + yield e + case BuiltinToolCallPart(): + async for e in self.handle_builtin_tool_call_start(part): + yield e + case BuiltinToolReturnPart(): + async for e in self.handle_builtin_tool_return(part): + yield e + case FilePart(): + # FilePart is not currently handled by UI protocols + pass + case PartDeltaEvent(delta=delta): + # Dispatch based on delta type + match delta: + case TextPartDelta(): + async for e in self.handle_text_delta(delta): + yield e + case ThinkingPartDelta(): + async for e in self.handle_thinking_delta(delta): + yield e + case ToolCallPartDelta(): + async for e in self.handle_tool_call_delta(delta): + yield e + case FunctionToolCallEvent(): + async for e in self.handle_function_tool_call(event): + yield e + case FunctionToolResultEvent(): + async for e in self.handle_function_tool_result(event): + yield e + case FinalResultEvent(): + async for e in self.handle_final_result(event): + yield e + case AgentRunResultEvent(): + self.result = event.result + async for e in self.handle_run_result(event): + yield e + case _: + pass + + async for e in self.after_event(event): + yield e + + async def handle_text_start(self, part: TextPart) -> AsyncIterator[EventT]: + """Handle a TextPart at start. + + Args: + part: The TextPart. + + Yields: + Protocol-specific events. + """ + return + yield # Make this an async generator + + async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[EventT]: + """Handle a TextPartDelta. + + Args: + delta: The TextPartDelta. + + Yields: + Protocol-specific events. + """ + return + yield # Make this an async generator + + async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[EventT]: + """Handle a ThinkingPart at start. + + Args: + part: The ThinkingPart. + + Yields: + Protocol-specific events. + """ + return + yield # Make this an async generator + + async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[EventT]: + """Handle a ThinkingPartDelta. + + Args: + delta: The ThinkingPartDelta. + + Yields: + Protocol-specific events. + """ + return + yield # Make this an async generator + + async def handle_tool_call_start(self, part: ToolCallPart) -> AsyncIterator[EventT]: + """Handle a ToolCallPart at start. + + Args: + part: The tool call part. + + Yields: + Protocol-specific events. + """ + return + yield # Make this an async generator + + async def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIterator[EventT]: + """Handle a BuiltinToolCallPart at start. + + Args: + part: The tool call part. + + Yields: + Protocol-specific events. + """ + return + yield # Make this an async generator + + async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[EventT]: + """Handle a ToolCallPartDelta. + + Args: + delta: The ToolCallPartDelta. + + Yields: + Protocol-specific events. + """ + return + yield # Make this an async generator + + async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[EventT]: + """Handle a BuiltinToolReturnPart. + + Args: + part: The BuiltinToolReturnPart. + + Yields: + Protocol-specific events. 
+ """ + return + yield # Make this an async generator + + async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[EventT]: + """Handle a FunctionToolCallEvent. + + Args: + event: The function tool call event. + + Yields: + Protocol-specific events. + """ + return + yield # Make this an async generator + + async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[EventT]: + """Handle a FunctionToolResultEvent. + + Args: + event: The function tool result event. + + Yields: + Protocol-specific events. + """ + return + yield # Make this an async generator + + async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[EventT]: + """Handle a FinalResultEvent. + + Args: + event: The final result event. + + Yields: + Protocol-specific events. + """ + return + yield # Make this an async generator + + async def handle_run_result(self, event: AgentRunResultEvent) -> AsyncIterator[EventT]: + """Handle an AgentRunResultEvent (final event with result/usage). + + Args: + event: The agent run result event. + + Yields: + Protocol-specific events. + """ + return + yield # Make this an async generator + + async def before_event(self, event: SourceEvent) -> AsyncIterator[EventT]: + """Handle an event before it is processed. + + Args: + event: The event to handle. + """ + return + yield # Make this an async generator + + async def after_event(self, event: SourceEvent) -> AsyncIterator[EventT]: + """Handle an event after it is processed. + + Args: + event: The event to handle. + """ + return + yield # Make this an async generator + + # Lifecycle hooks (optional overrides) + + async def before_stream(self) -> AsyncIterator[EventT]: + """Yield events before agent streaming starts. + + This hook is called before any agent events are processed. + Override this to inject custom events at the start of the stream. + + Yields: + Protocol-specific events to emit before streaming. + """ + return + yield # Make this an async generator + + async def after_stream(self) -> AsyncIterator[EventT]: + """Yield events after agent streaming completes. + + This hook is called after all agent events have been processed. + Override this to inject custom events at the end of the stream. + + Yields: + Protocol-specific events to emit after streaming. + """ + return + yield # Make this an async generator + + async def on_error(self, error: Exception) -> AsyncIterator[EventT]: + """Handle errors that occur during streaming (after stream has started). + + Args: + error: The error that occurred during streaming. + + Yields: + Protocol-specific error events. 
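+
+        Example:
+            A sketch of a typical override; `MyErrorEvent` stands in for a
+            protocol-specific error event type:
+
+            ```python
+            async def on_error(self, error: Exception) -> AsyncIterator[MyErrorEvent]:
+                yield MyErrorEvent(message=str(error))
+            ```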
+        """
+        return
+        yield  # Make this an async generator
diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py
index f004e15579..e0788a1404 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py
@@ -7,10 +7,9 @@
 https://github.com/vercel/ai/blob/ai%405.0.34/packages/ai/src/ui/ui-messages.ts
 """
 
-from ._utils import VERCEL_AI_DSP_HEADERS, CamelBaseModel, JSONValue, ProviderMetadata
-from .adapter import VercelAIAdapter
-from .event_stream import VercelAIEventStream, protocol_messages_to_pai_messages
-from .request_types import (
+from ._adapter import VercelAIAdapter
+from ._event_stream import VercelAIEventStream
+from ._request_types import (
     RequestData,
     SubmitMessage,
     TextUIPart,
@@ -19,11 +18,10 @@
     UIPart,
     request_data_ta,
 )
-from .response_types import (
+from ._response_types import (
     AbortChunk,
-    AbstractSSEChunk,
+    BaseChunk,
     DataUIMessageChunk,
-    DoneChunk,
     ErrorChunk,
     FileChunk,
     FinishChunk,
@@ -46,6 +44,7 @@
     ToolOutputAvailableChunk,
     ToolOutputErrorChunk,
 )
+from ._utils import VERCEL_AI_DSP_HEADERS, CamelBaseModel, JSONValue, ProviderMetadata
 
 __all__ = [
     # Utilities
@@ -62,7 +61,7 @@
     'SubmitMessage',
     'request_data_ta',
     # Response types
-    'AbstractSSEChunk',
+    'BaseChunk',
     'TextStartChunk',
     'TextDeltaChunk',
     'TextEndChunk',
@@ -86,9 +85,7 @@
     'FinishChunk',
     'AbortChunk',
     'MessageMetadataChunk',
-    'DoneChunk',
     # Event stream and adapter
     'VercelAIEventStream',
     'VercelAIAdapter',
-    'protocol_messages_to_pai_messages',
 ]
diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py
new file mode 100644
index 0000000000..8f3053ff89
--- /dev/null
+++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py
@@ -0,0 +1,183 @@
+"""Vercel AI adapter for handling requests."""
+
+from __future__ import annotations
+
+from collections.abc import Sequence
+from dataclasses import dataclass
+from functools import cached_property
+from typing import Any
+
+from ... import DeferredToolResults
+from ...agent import AbstractAgent, AgentDepsT
+from ...messages import (
+    ModelMessage,
+    ModelRequest,
+    ModelResponse,
+    SystemPromptPart,
+    TextPart,
+    UserPromptPart,
+)
+from ...models import KnownModelName, Model
+from ...output import OutputSpec
+from ...settings import ModelSettings
+from ...toolsets import AbstractToolset
+from ...usage import RunUsage, UsageLimits
+from ..adapter import BaseAdapter, OnCompleteFunc
+from ..event_stream import BaseEventStream
+from ._event_stream import VercelAIEventStream
+from ._request_types import RequestData, TextUIPart, UIMessage, request_data_ta
+from ._response_types import BaseChunk
+from ._utils import VERCEL_AI_DSP_HEADERS
+
+try:
+    from starlette.requests import Request
+    from starlette.responses import Response
+except ImportError as e:  # pragma: no cover
+    raise ImportError(
+        'Please install the `starlette` package to use the Vercel AI adapter, '
+        'for example with `pip install starlette`'
+    ) from e
+
+
+__all__ = ['VercelAIAdapter']
+
+
+@dataclass
+class VercelAIAdapter(BaseAdapter[RequestData, UIMessage, BaseChunk, AgentDepsT]):
+    """Adapter for handling Vercel AI protocol requests with Pydantic AI agents.
+
+    This adapter provides a simplified interface for integrating Pydantic AI agents
+    with the Vercel AI protocol, handling request parsing, message conversion,
+    and event streaming.
+
+    Example:
+    ```python
+    from pydantic_ai import Agent
+    from pydantic_ai.ui.vercel_ai import VercelAIAdapter
+
+    agent = Agent('openai:gpt-4')
+
+    async def handle_request(data: RequestData):
+        adapter = VercelAIAdapter(agent, request=data)
+        async for chunk in adapter.encode_stream(adapter.run_stream()):
+            yield chunk
+    ```
+    """
+
+    def create_event_stream(self) -> BaseEventStream[RequestData, BaseChunk, AgentDepsT]:
+        """Create a Vercel AI event stream for the current request."""
+        return VercelAIEventStream(self.request)
+
+    def encode_event(self, event: BaseChunk, accept: str | None = None) -> str:
+        """Encode a chunk as a Server-Sent Events `data:` line."""
+        return f'data: {event.model_dump_json(by_alias=True, exclude_none=True)}\n\n'
+
+    @classmethod
+    async def validate_request(cls, request: Request) -> RequestData:
+        """Validate a Vercel AI request."""
+        return request_data_ta.validate_json(await request.json())
+
+    @classmethod
+    async def dispatch_request(
+        cls,
+        agent: AbstractAgent[AgentDepsT, Any],
+        request: Request,
+        *,
+        message_history: Sequence[ModelMessage] | None = None,
+        deferred_tool_results: DeferredToolResults | None = None,
+        model: Model | KnownModelName | str | None = None,
+        deps: AgentDepsT = None,
+        output_type: OutputSpec[Any] | None = None,
+        model_settings: ModelSettings | None = None,
+        usage_limits: UsageLimits | None = None,
+        usage: RunUsage | None = None,
+        infer_name: bool = True,
+        toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None,
+        on_complete: OnCompleteFunc | None = None,
+    ) -> Response:
+        """Handle a Vercel AI request and return a streaming response.
+
+        Args:
+            agent: The agent to run.
+            request: The incoming Starlette/FastAPI request.
+            message_history: History of the conversation so far.
+            deferred_tool_results: Optional results for deferred tool calls in the message history.
+            deps: Optional dependencies to pass to the agent.
+            output_type: Custom output type for this run.
+            model: Optional model to use for this run.
+            model_settings: Optional settings for the model's request.
+            usage_limits: Optional limits on model request count or token usage.
+            usage: Optional usage to start with.
+            infer_name: Whether to infer the agent name from the call frame.
+            toolsets: Optional additional toolsets for this run.
+            on_complete: Optional callback called when the agent run completes.
+
+        Returns:
+            A streaming Starlette response with Vercel AI protocol events.
+        """
+        response = await super().dispatch_request(
+            agent,
+            request,
+            message_history=message_history,
+            deferred_tool_results=deferred_tool_results,
+            model=model,
+            deps=deps,
+            output_type=output_type,
+            model_settings=model_settings,
+            usage_limits=usage_limits,
+            usage=usage,
+            infer_name=infer_name,
+            toolsets=toolsets,
+            on_complete=on_complete,
+        )
+        response.headers.update(VERCEL_AI_DSP_HEADERS)
+        return response
+
+    @cached_property
+    def messages(self) -> list[ModelMessage]:
+        """Convert the Vercel AI messages from the request into Pydantic AI messages.
+
+        Returns:
+            List of Pydantic AI ModelMessage objects.
+
+        Raises:
+            ValueError: If the message format is not supported.
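+
+        Example:
+            A minimal sketch; `data` is assumed to be a validated `RequestData`
+            payload for this adapter:
+
+            ```python
+            adapter = VercelAIAdapter(agent, request=data)
+            history = adapter.messages  # list[ModelMessage] built from data.messages
+            ```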
+ """ + pai_messages: list[ModelMessage] = [] + + for msg in self.request.messages: + if msg.role == 'user': + # User message - extract text from parts + texts: list[str] = [] + for part in msg.parts: + if isinstance(part, TextUIPart): + texts.append(part.text) + else: + raise ValueError(f'Only text parts are supported for user messages, got {type(part).__name__}') + + if texts: + pai_messages.append(ModelRequest(parts=[UserPromptPart(content='\n'.join(texts))])) + + elif msg.role == 'assistant': + # Assistant message - for now, just extract text + # Full reconstruction of ModelResponse with tool calls would require more complex logic + texts: list[str] = [] + for part in msg.parts: + if isinstance(part, TextUIPart): + texts.append(part.text) + # TODO: Handle ToolOutputAvailablePart for full message history reconstruction + + if texts: + pai_messages.append(ModelResponse(parts=[TextPart(content='\n'.join(texts))])) + + elif msg.role == 'system': + # System message - not in standard Vercel AI protocol but might be custom + texts: list[str] = [] + for part in msg.parts: + if isinstance(part, TextUIPart): + texts.append(part.text) + + if texts: + pai_messages.append(ModelRequest(parts=[SystemPromptPart(content='\n'.join(texts))])) + + return pai_messages diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py similarity index 61% rename from pydantic_ai_slim/pydantic_ai/ui/vercel_ai/event_stream.py rename to pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py index 465a3bb7e8..26cab17894 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py @@ -17,7 +17,6 @@ FinalResultEvent, FunctionToolCallEvent, FunctionToolResultEvent, - ModelMessage, RetryPromptPart, TextPart, TextPartDelta, @@ -27,13 +26,11 @@ ToolCallPartDelta, ToolReturnPart, ) -from ...run import AgentRunResultEvent from ...tools import AgentDepsT from .. import BaseEventStream -from .request_types import TextUIPart, UIMessage -from .response_types import ( - AbstractSSEChunk, - DoneChunk, +from ._request_types import RequestData +from ._response_types import ( + BaseChunk, ErrorChunk, FinishChunk, ReasoningDeltaChunk, @@ -45,7 +42,7 @@ ToolOutputAvailableChunk, ) -__all__ = ['VercelAIEventStream', 'protocol_messages_to_pai_messages'] +__all__ = ['VercelAIEventStream'] def _json_dumps(obj: Any) -> str: @@ -53,7 +50,7 @@ def _json_dumps(obj: Any) -> str: return to_json(obj).decode('utf-8') -class VercelAIEventStream(BaseEventStream[AbstractSSEChunk | DoneChunk, AgentDepsT]): +class VercelAIEventStream(BaseEventStream[RequestData, BaseChunk, AgentDepsT]): """Transforms Pydantic AI agent events into Vercel AI protocol events. 
This class handles the stateful transformation of streaming agent events @@ -62,31 +59,41 @@ class VercelAIEventStream(BaseEventStream[AbstractSSEChunk | DoneChunk, AgentDep Example: ```python event_stream = VercelAIEventStream() - async for vercel_event in event_stream.agent_event_to_events(pai_event): + async for vercel_event in event_stream.handle_event(pai_event): print(vercel_event.sse()) ``` """ - def __init__(self) -> None: + def __init__(self, request: RequestData) -> None: """Initialize Vercel AI event stream state.""" - super().__init__() - self.new_message_id() # Generate a message ID at initialization + super().__init__(request) self._final_result_tool_id: str | None = None + async def after_stream(self) -> AsyncIterator[BaseChunk]: + """Yield events after agent streaming completes.""" + # Close the final result tool if there was one + if tool_call_id := self._final_result_tool_id: + yield ToolOutputAvailableChunk(tool_call_id=tool_call_id, output=None) + yield FinishChunk() + + async def on_error(self, error: Exception) -> AsyncIterator[BaseChunk]: + """Handle errors during streaming.""" + yield ErrorChunk(error_text=str(error)) + # Granular handlers implementation - async def handle_text_start(self, part: TextPart) -> AsyncIterator[AbstractSSEChunk]: + async def handle_text_start(self, part: TextPart) -> AsyncIterator[BaseChunk]: """Handle a TextPart at start.""" yield TextStartChunk(id=self.message_id or self.new_message_id()) if part.content: yield TextDeltaChunk(id=self.message_id, delta=part.content) - async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[AbstractSSEChunk]: + async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[BaseChunk]: """Handle a TextPartDelta.""" if delta.content_delta: yield TextDeltaChunk(id=self.message_id, delta=delta.content_delta) - async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[AbstractSSEChunk]: + async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[BaseChunk]: """Handle a ThinkingPart at start.""" if not self.message_id: self.new_message_id() @@ -94,12 +101,12 @@ async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[Abstr if part.content: yield ReasoningDeltaChunk(id=self.message_id, delta=part.content) - async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[AbstractSSEChunk]: + async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[BaseChunk]: """Handle a ThinkingPartDelta.""" if delta.content_delta: yield ReasoningDeltaChunk(id=self.message_id, delta=delta.content_delta) - async def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[AbstractSSEChunk]: + async def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseChunk]: """Handle a ToolCallPart or BuiltinToolCallPart at start.""" yield ToolInputStartChunk(tool_call_id=part.tool_call_id, tool_name=part.tool_name) if isinstance(part.args, str): @@ -107,7 +114,10 @@ async def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) elif part.args is not None: yield ToolInputDeltaChunk(tool_call_id=part.tool_call_id, input_text_delta=_json_dumps(part.args)) - async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[AbstractSSEChunk]: + def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseChunk]: + return self.handle_tool_call_start(part) + + async def handle_tool_call_delta(self, 
delta: ToolCallPartDelta) -> AsyncIterator[BaseChunk]: """Handle a ToolCallPartDelta.""" tool_call_id = delta.tool_call_id or '' if isinstance(delta.args_delta, str): @@ -115,11 +125,11 @@ async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterato elif delta.args_delta is not None: yield ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=_json_dumps(delta.args_delta)) - async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[AbstractSSEChunk]: + async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[BaseChunk]: """Handle a BuiltinToolReturnPart.""" yield ToolOutputAvailableChunk(tool_call_id=part.tool_call_id, output=part.content) - async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[AbstractSSEChunk]: + async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[BaseChunk]: """Handle a FunctionToolCallEvent. No Vercel AI events are emitted at this stage since tool calls are handled in PartStartEvent. @@ -127,7 +137,7 @@ async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> Async return yield # Make this an async generator - async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[AbstractSSEChunk]: + async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[BaseChunk]: """Handle a FunctionToolResultEvent, emitting tool result events.""" result = event.result if isinstance(result, ToolReturnPart): @@ -136,7 +146,7 @@ async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> A # For retry prompts, emit the error content as tool output yield ToolOutputAvailableChunk(tool_call_id=result.tool_call_id, output=result.content) - async def handle_builtin_tool_call(self, event: BuiltinToolCallEvent) -> AsyncIterator[AbstractSSEChunk]: # type: ignore[reportDeprecated] + async def handle_builtin_tool_call(self, event: BuiltinToolCallEvent) -> AsyncIterator[BaseChunk]: # type: ignore[reportDeprecated] """Handle a BuiltinToolCallEvent, emitting tool input events.""" part = event.part yield ToolInputStartChunk(tool_call_id=part.tool_call_id, tool_name=part.tool_name) @@ -145,91 +155,14 @@ async def handle_builtin_tool_call(self, event: BuiltinToolCallEvent) -> AsyncIt elif part.args is not None: yield ToolInputDeltaChunk(tool_call_id=part.tool_call_id, input_text_delta=_json_dumps(part.args)) - async def handle_builtin_tool_result(self, event: BuiltinToolResultEvent) -> AsyncIterator[AbstractSSEChunk]: # type: ignore[reportDeprecated] + async def handle_builtin_tool_result(self, event: BuiltinToolResultEvent) -> AsyncIterator[BaseChunk]: # type: ignore[reportDeprecated] """Handle a BuiltinToolResultEvent, emitting tool output events.""" result = event.result yield ToolOutputAvailableChunk(tool_call_id=result.tool_call_id, output=result.content) - async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[AbstractSSEChunk]: + async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[BaseChunk]: """Handle a FinalResultEvent, tracking the final result tool.""" if event.tool_call_id and event.tool_name: self._final_result_tool_id = event.tool_call_id + # TODO (DouweM): Stream output tool result once it's ready yield ToolInputStartChunk(tool_call_id=event.tool_call_id, tool_name=event.tool_name) - - async def handle_run_result(self, event: AgentRunResultEvent) -> 
AsyncIterator[AbstractSSEChunk]: - """Handle an AgentRunResultEvent. - - No additional Vercel AI events are emitted at this stage. - """ - return - yield # Make this an async generator - - async def after_stream(self) -> AsyncIterator[AbstractSSEChunk | DoneChunk]: - """Yield events after agent streaming completes.""" - # Close the final result tool if there was one - if tool_call_id := self._final_result_tool_id: - yield ToolOutputAvailableChunk(tool_call_id=tool_call_id, output=None) - yield FinishChunk() - yield DoneChunk() - - async def on_validation_error(self, error: Exception) -> AsyncIterator[AbstractSSEChunk]: - """Handle validation errors before stream starts.""" - yield ErrorChunk(error_text=str(error)) - - async def on_stream_error(self, error: Exception) -> AsyncIterator[AbstractSSEChunk]: - """Handle errors during streaming.""" - yield ErrorChunk(error_text=str(error)) - - -def protocol_messages_to_pai_messages(messages: list[UIMessage]) -> list[ModelMessage]: - """Convert Vercel AI protocol messages to Pydantic AI messages. - - Args: - messages: List of Vercel AI UIMessage objects. - - Returns: - List of Pydantic AI ModelMessage objects. - - Raises: - ValueError: If message format is not supported. - """ - from ...messages import ModelRequest, ModelResponse, SystemPromptPart, TextPart, UserPromptPart - - pai_messages: list[ModelMessage] = [] - - for msg in messages: - if msg.role == 'user': - # User message - extract text from parts - texts: list[str] = [] - for part in msg.parts: - if isinstance(part, TextUIPart): - texts.append(part.text) - else: - raise ValueError(f'Only text parts are supported for user messages, got {type(part).__name__}') - - if texts: - pai_messages.append(ModelRequest(parts=[UserPromptPart(content='\n'.join(texts))])) - - elif msg.role == 'assistant': - # Assistant message - for now, just extract text - # Full reconstruction of ModelResponse with tool calls would require more complex logic - texts: list[str] = [] - for part in msg.parts: - if isinstance(part, TextUIPart): - texts.append(part.text) - # TODO: Handle ToolOutputAvailablePart for full message history reconstruction - - if texts: - pai_messages.append(ModelResponse(parts=[TextPart(content='\n'.join(texts))])) - - elif msg.role == 'system': - # System message - not in standard Vercel AI protocol but might be custom - texts: list[str] = [] - for part in msg.parts: - if isinstance(part, TextUIPart): - texts.append(part.text) - - if texts: - pai_messages.append(ModelRequest(parts=[SystemPromptPart(content='\n'.join(texts))])) - - return pai_messages diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/request_types.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_request_types.py similarity index 100% rename from pydantic_ai_slim/pydantic_ai/ui/vercel_ai/request_types.py rename to pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_request_types.py diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/response_types.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_response_types.py similarity index 74% rename from pydantic_ai_slim/pydantic_ai/ui/vercel_ai/response_types.py rename to pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_response_types.py index 09b82c3a99..c2d6ab2055 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/response_types.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_response_types.py @@ -9,7 +9,7 @@ from ._utils import CamelBaseModel, ProviderMetadata __all__ = [ - 'AbstractSSEChunk', + 'BaseChunk', 'TextStartChunk', 'TextDeltaChunk', 'TextEndChunk', @@ -33,19 +33,14 @@ 
'FinishChunk', 'AbortChunk', 'MessageMetadataChunk', - 'DoneChunk', ] -class AbstractSSEChunk(CamelBaseModel): +class BaseChunk(CamelBaseModel): """Abstract base class for response SSE events.""" - def sse(self) -> str: - """Encode as SSE format.""" - return self.model_dump_json(exclude_none=True, by_alias=True) - -class TextStartChunk(AbstractSSEChunk): +class TextStartChunk(BaseChunk): """Text start chunk.""" type: Literal['text-start'] = 'text-start' @@ -53,7 +48,7 @@ class TextStartChunk(AbstractSSEChunk): provider_metadata: ProviderMetadata | None = None -class TextDeltaChunk(AbstractSSEChunk): +class TextDeltaChunk(BaseChunk): """Text delta chunk.""" type: Literal['text-delta'] = 'text-delta' @@ -62,7 +57,7 @@ class TextDeltaChunk(AbstractSSEChunk): provider_metadata: ProviderMetadata | None = None -class TextEndChunk(AbstractSSEChunk): +class TextEndChunk(BaseChunk): """Text end chunk.""" type: Literal['text-end'] = 'text-end' @@ -70,7 +65,7 @@ class TextEndChunk(AbstractSSEChunk): provider_metadata: ProviderMetadata | None = None -class ReasoningStartChunk(AbstractSSEChunk): +class ReasoningStartChunk(BaseChunk): """Reasoning start chunk.""" type: Literal['reasoning-start'] = 'reasoning-start' @@ -78,7 +73,7 @@ class ReasoningStartChunk(AbstractSSEChunk): provider_metadata: ProviderMetadata | None = None -class ReasoningDeltaChunk(AbstractSSEChunk): +class ReasoningDeltaChunk(BaseChunk): """Reasoning delta chunk.""" type: Literal['reasoning-delta'] = 'reasoning-delta' @@ -87,7 +82,7 @@ class ReasoningDeltaChunk(AbstractSSEChunk): provider_metadata: ProviderMetadata | None = None -class ReasoningEndChunk(AbstractSSEChunk): +class ReasoningEndChunk(BaseChunk): """Reasoning end chunk.""" type: Literal['reasoning-end'] = 'reasoning-end' @@ -95,14 +90,14 @@ class ReasoningEndChunk(AbstractSSEChunk): provider_metadata: ProviderMetadata | None = None -class ErrorChunk(AbstractSSEChunk): +class ErrorChunk(BaseChunk): """Error chunk.""" type: Literal['error'] = 'error' error_text: str -class ToolInputStartChunk(AbstractSSEChunk): +class ToolInputStartChunk(BaseChunk): """Tool input start chunk.""" type: Literal['tool-input-start'] = 'tool-input-start' @@ -112,7 +107,7 @@ class ToolInputStartChunk(AbstractSSEChunk): dynamic: bool | None = None -class ToolInputDeltaChunk(AbstractSSEChunk): +class ToolInputDeltaChunk(BaseChunk): """Tool input delta chunk.""" type: Literal['tool-input-delta'] = 'tool-input-delta' @@ -120,7 +115,7 @@ class ToolInputDeltaChunk(AbstractSSEChunk): input_text_delta: str -class ToolOutputAvailableChunk(AbstractSSEChunk): +class ToolOutputAvailableChunk(BaseChunk): """Tool output available chunk.""" type: Literal['tool-output-available'] = 'tool-output-available' @@ -131,14 +126,14 @@ class ToolOutputAvailableChunk(AbstractSSEChunk): preliminary: bool | None = None -class FinishChunk(AbstractSSEChunk): +class FinishChunk(BaseChunk): """Finish chunk.""" type: Literal['finish'] = 'finish' message_metadata: Any | None = None -class ToolInputAvailableChunk(AbstractSSEChunk): +class ToolInputAvailableChunk(BaseChunk): """Tool input available chunk.""" type: Literal['tool-input-available'] = 'tool-input-available' @@ -150,7 +145,7 @@ class ToolInputAvailableChunk(AbstractSSEChunk): dynamic: bool | None = None -class ToolInputErrorChunk(AbstractSSEChunk): +class ToolInputErrorChunk(BaseChunk): """Tool input error chunk.""" type: Literal['tool-input-error'] = 'tool-input-error' @@ -163,7 +158,7 @@ class ToolInputErrorChunk(AbstractSSEChunk): error_text: str -class 
ToolOutputErrorChunk(AbstractSSEChunk): +class ToolOutputErrorChunk(BaseChunk): """Tool output error chunk.""" type: Literal['tool-output-error'] = 'tool-output-error' @@ -173,7 +168,7 @@ class ToolOutputErrorChunk(AbstractSSEChunk): dynamic: bool | None = None -class SourceUrlChunk(AbstractSSEChunk): +class SourceUrlChunk(BaseChunk): """Source URL chunk.""" type: Literal['source-url'] = 'source-url' @@ -183,7 +178,7 @@ class SourceUrlChunk(AbstractSSEChunk): provider_metadata: ProviderMetadata | None = None -class SourceDocumentChunk(AbstractSSEChunk): +class SourceDocumentChunk(BaseChunk): """Source document chunk.""" type: Literal['source-document'] = 'source-document' @@ -194,7 +189,7 @@ class SourceDocumentChunk(AbstractSSEChunk): provider_metadata: ProviderMetadata | None = None -class FileChunk(AbstractSSEChunk): +class FileChunk(BaseChunk): """File chunk.""" type: Literal['file'] = 'file' @@ -202,26 +197,26 @@ class FileChunk(AbstractSSEChunk): media_type: str -class DataUIMessageChunk(AbstractSSEChunk): +class DataUIMessageChunk(BaseChunk): """Data UI message chunk with dynamic type.""" type: str # Will be f"data-{NAME}" data: Any -class StartStepChunk(AbstractSSEChunk): +class StartStepChunk(BaseChunk): """Start step chunk.""" type: Literal['start-step'] = 'start-step' -class FinishStepChunk(AbstractSSEChunk): +class FinishStepChunk(BaseChunk): """Finish step chunk.""" type: Literal['finish-step'] = 'finish-step' -class StartChunk(AbstractSSEChunk): +class StartChunk(BaseChunk): """Start chunk.""" type: Literal['start'] = 'start' @@ -229,28 +224,14 @@ class StartChunk(AbstractSSEChunk): message_metadata: Any | None = None -class AbortChunk(AbstractSSEChunk): +class AbortChunk(BaseChunk): """Abort chunk.""" type: Literal['abort'] = 'abort' -class MessageMetadataChunk(AbstractSSEChunk): +class MessageMetadataChunk(BaseChunk): """Message metadata chunk.""" type: Literal['message-metadata'] = 'message-metadata' message_metadata: Any - - -class DoneChunk: - """Special marker chunk to indicate the end of the SSE stream.""" - - def sse(self) -> str: - """Encode as SSE done marker.""" - return '[DONE]' - - def __str__(self) -> str: - return 'DoneChunk' - - def __eq__(self, other: Any) -> bool: - return isinstance(other, DoneChunk) diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/adapter.py deleted file mode 100644 index c38b9e365c..0000000000 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/adapter.py +++ /dev/null @@ -1,149 +0,0 @@ -"""Vercel AI adapter for handling requests.""" - -# pyright: reportGeneralTypeIssues=false - -from __future__ import annotations - -from collections.abc import AsyncIterator -from dataclasses import dataclass -from typing import TYPE_CHECKING, Any - -from ...tools import AgentDepsT -from .event_stream import VercelAIEventStream -from .request_types import RequestData, TextUIPart, UIMessage -from .response_types import AbstractSSEChunk, DoneChunk - -if TYPE_CHECKING: - from ...agent import Agent - -__all__ = ['VercelAIAdapter'] - - -@dataclass -class VercelAIAdapter: - """Adapter for handling Vercel AI protocol requests with Pydantic AI agents. - - This adapter provides a simplified interface for integrating Pydantic AI agents - with the Vercel AI protocol, handling request parsing, message conversion, - and event streaming. 
- - Example: - ```python - from pydantic_ai import Agent - from pydantic_ai.ui.vercel_ai import VercelAIAdapter - - agent = Agent('openai:gpt-4') - adapter = VercelAIAdapter(agent) - - async def handle_request(data: RequestData, deps=None): - async for chunk in adapter.run_stream(data, deps): - yield chunk.sse() - ``` - """ - - agent: Agent[AgentDepsT] - """The Pydantic AI agent to run.""" - - async def run_stream( - self, request: RequestData, deps: AgentDepsT | None = None - ) -> AsyncIterator[AbstractSSEChunk | DoneChunk]: - """Stream events from an agent run as Vercel AI protocol events. - - Args: - request: The Vercel AI request data. - deps: Optional dependencies to pass to the agent. - - Yields: - Vercel AI protocol events (AbstractSSEChunk or DoneChunk). - - Raises: - ValueError: If request validation fails. - """ - # Parse messages from request - messages = self.parse_request_messages(request) - - # Extract prompt from last user message (for now, simple implementation) - if not messages: - raise ValueError('no messages provided') - - message = messages[-1] - prompt_parts: list[str] = [] - for part in message.parts: - if isinstance(part, TextUIPart): - prompt_parts.append(part.text) - else: - raise ValueError(f'Only text parts are supported yet, got {part}') - - # Create event stream - event_stream = self.create_event_stream() - - # Run agent and stream events - async for event in self.agent.run_stream_events('\n'.join(prompt_parts), deps=deps): - async for chunk in event_stream.agent_event_to_events(event): - yield chunk - - # Emit after-stream events - async for chunk in event_stream.after_stream(): - yield chunk - - def create_event_stream(self) -> VercelAIEventStream[AgentDepsT]: - """Create a new Vercel AI event stream. - - Returns: - A VercelAIEventStream instance. - """ - return VercelAIEventStream[AgentDepsT]() - - def parse_request_messages(self, request: RequestData) -> list[UIMessage]: - """Extract messages from the Vercel AI request. - - Args: - request: The Vercel AI request data. - - Returns: - List of UIMessage objects. - """ - return request.messages - - async def dispatch_request(self, request: Any, deps: AgentDepsT | None = None) -> Any: - """Handle a request and return a streamed SSE response. - - Args: - request: The incoming Starlette/FastAPI request. - deps: The dependencies for the agent. - - Returns: - A streamed SSE response. 
- """ - try: - from starlette.requests import Request - from starlette.responses import JSONResponse - except ImportError as e: # pragma: no cover - raise ImportError('Please install starlette to use dispatch_request') from e - - try: - from sse_starlette.sse import EventSourceResponse - except ImportError as e: # pragma: no cover - raise ImportError('Please install sse_starlette to use dispatch_request') from e - - from pydantic import ValidationError - - if not isinstance(request, Request): # pragma: no cover - raise TypeError(f'Expected Starlette Request, got {type(request).__name__}') - - from .request_types import request_data_ta - - try: - data = request_data_ta.validate_json(await request.json()) - - async def run_sse() -> AsyncIterator[str]: - async for chunk in self.run_stream(data, deps=deps): - yield chunk.sse() - - from ._utils import VERCEL_AI_DSP_HEADERS - - return EventSourceResponse(run_sse(), headers=VERCEL_AI_DSP_HEADERS) - except ValidationError as e: - return JSONResponse({'errors': e.errors()}, status_code=422) - except Exception as e: - return JSONResponse({'errors': str(e)}, status_code=500) diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index bcb57d9158..e7e0be7746 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -48,9 +48,7 @@ from .conftest import IsDatetime, IsSameStr, try_import -has_ag_ui: bool = False with try_import() as imports_successful: - has_ag_ui = imports_successful() from ag_ui.core import ( AssistantMessage, CustomEvent, @@ -70,16 +68,16 @@ from pydantic_ai.ag_ui import ( SSE_CONTENT_TYPE, + AGUIAdapter, OnCompleteFunc, StateDeps, run_ag_ui, ) - from pydantic_ai.ui.ag_ui.event_stream import protocol_messages_to_pai_messages pytestmark = [ pytest.mark.anyio, - pytest.mark.skipif(not imports_successful, reason='ag-ui-protocol not installed'), + pytest.mark.skipif(not imports_successful(), reason='ag-ui-protocol not installed'), pytest.mark.filterwarnings( 'ignore:`BuiltinToolCallEvent` is deprecated, look for `PartStartEvent` and `PartDeltaEvent` with `BuiltinToolCallPart` instead.:DeprecationWarning' ), @@ -257,7 +255,7 @@ async def stream_function( 'threadId': IsStr(), 'runId': IsStr(), }, - {'type': 'RUN_ERROR', 'message': 'no messages found in the input', 'code': 'no_messages'}, + {'type': 'RUN_ERROR', 'message': 'No messages provided', 'code': 'UserError'}, ] ) @@ -1097,7 +1095,7 @@ async def store_state( agent: Agent[StateDeps[StateInt], str] = Agent( model=FunctionModel(stream_function=simple_stream), - deps_type=StateDeps[StateInt], # type: ignore[reportUnknownArgumentType] + deps_type=StateDeps[StateInt], prepare_tools=store_state, ) @@ -1197,7 +1195,7 @@ async def test_concurrent_runs() -> None: agent: Agent[StateDeps[StateInt], str] = Agent( model=TestModel(), - deps_type=StateDeps[StateInt], # type: ignore[reportUnknownArgumentType] + deps_type=StateDeps[StateInt], ) @agent.tool @@ -1342,30 +1340,7 @@ async def async_callback(run_result: AgentRunResult[Any]) -> None: assert events[-1]['type'] == 'RUN_FINISHED' -async def test_callback_with_error() -> None: - """Test that callbacks are not called when errors occur.""" - - captured_results: list[AgentRunResult[Any]] = [] - - def error_callback(run_result: AgentRunResult[Any]) -> None: - captured_results.append(run_result) # pragma: no cover - - agent = Agent(TestModel()) - # Empty messages should cause an error - run_input = create_input() # No messages will cause _NoMessagesError - - events = await run_and_collect_events(agent, run_input, on_complete=error_callback) - - 
# Verify callback was not called due to error - assert len(captured_results) == 0 - - # Verify error event was sent - assert len(events) > 0 - assert events[0]['type'] == 'RUN_STARTED' - assert any(event['type'] == 'RUN_ERROR' for event in events) - - -async def test_protocol_messages_to_pai_messages() -> None: +async def test_messages() -> None: messages = [ SystemMessage( id='msg_1', @@ -1448,7 +1423,20 @@ async def test_protocol_messages_to_pai_messages() -> None: ), ] - assert protocol_messages_to_pai_messages(messages) == snapshot( + adapter = AGUIAdapter( + Agent(TestModel()), + # TODO (DouweM): Have a convenience method so the useless args aren't necessary + request=RunAgentInput( + messages=messages, + thread_id='test_thread', + run_id='test_run', + state={}, + tools=[], + context=[], + forwarded_props={}, + ), + ) + assert adapter.messages == snapshot( [ ModelRequest( parts=[ diff --git a/tests/test_vercel_ai.py b/tests/test_vercel_ai.py index be55bcccec..93109970a6 100644 --- a/tests/test_vercel_ai.py +++ b/tests/test_vercel_ai.py @@ -1,5 +1,7 @@ from __future__ import annotations +import json + import pytest from inline_snapshot import snapshot @@ -8,23 +10,12 @@ from pydantic_ai.models.openai import OpenAIResponsesModel from pydantic_ai.providers.openai import OpenAIProvider from pydantic_ai.ui.vercel_ai import VercelAIAdapter -from pydantic_ai.ui.vercel_ai.request_types import ( +from pydantic_ai.ui.vercel_ai._request_types import ( SubmitMessage, TextUIPart, ToolOutputAvailablePart, UIMessage, ) -from pydantic_ai.ui.vercel_ai.response_types import ( - DoneChunk, - FinishChunk, - ReasoningDeltaChunk, - ReasoningStartChunk, - TextDeltaChunk, - TextStartChunk, - ToolInputDeltaChunk, - ToolInputStartChunk, - ToolOutputAvailableChunk, -) from .conftest import IsStr @@ -43,7 +34,6 @@ async def test_run(allow_model_requests: None, openai_api_key: str): model = OpenAIResponsesModel('gpt-5', provider=OpenAIProvider(api_key=openai_api_key)) agent = Agent(model=model, builtin_tools=[WebSearchTool()]) - adapter = VercelAIAdapter(agent) data = SubmitMessage( trigger='submit-message', @@ -152,670 +142,713 @@ async def test_run(allow_model_requests: None, openai_api_key: str): ], ) - events = [event async for event in adapter.run_stream(data, None)] + adapter = VercelAIAdapter(agent, request=data) + events = [json.loads(event.removeprefix('data: ')) async for event in adapter.encode_stream(adapter.run_stream())] assert events == snapshot( [ - ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), - ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), - ToolInputStartChunk( - tool_call_id='ws_00e767404995b9950068e647f909248191bfe8d05eeed67645', tool_name='web_search' - ), - ToolInputDeltaChunk( - tool_call_id='ws_00e767404995b9950068e647f909248191bfe8d05eeed67645', - input_text_delta='{"query":"OpenTelemetry FastAPI instrumentation capture request and response body","type":"search"}', - ), - ToolOutputAvailableChunk( - tool_call_id='ws_00e767404995b9950068e647f909248191bfe8d05eeed67645', output={'status': 'completed'} - ), - ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), - ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), - ToolInputStartChunk( - tool_call_id='ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c', tool_name='web_search' - ), - ToolInputDeltaChunk( - tool_call_id='ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c', - input_text_delta='{"query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY Python","type":"search"}', - ), - 
ToolOutputAvailableChunk( - tool_call_id='ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c', output={'status': 'completed'} - ), - ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), - ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), - ToolInputStartChunk( - tool_call_id='ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba', tool_name='web_search' - ), - ToolInputDeltaChunk( - tool_call_id='ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba', - input_text_delta='{"query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY opentelemetry python","type":"search"}', - ), - ToolOutputAvailableChunk( - tool_call_id='ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba', output={'status': 'completed'} - ), - ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), - ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), - ToolInputStartChunk( - tool_call_id='ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2', tool_name='web_search' - ), - ToolInputDeltaChunk( - tool_call_id='ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2', - input_text_delta='{"query":"site:github.com open-telemetry/opentelemetry-python-contrib OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY","type":"search"}', - ), - ToolOutputAvailableChunk( - tool_call_id='ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2', output={'status': 'completed'} - ), - ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), - ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), - ToolInputStartChunk( - tool_call_id='ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911', tool_name='web_search' - ), - ToolInputDeltaChunk( - tool_call_id='ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911', - input_text_delta='{"query":null,"type":"search"}', - ), - ToolOutputAvailableChunk( - tool_call_id='ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911', output={'status': 'completed'} - ), - ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), - ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), - ToolInputStartChunk( - tool_call_id='ws_00e767404995b9950068e6480e11208191834104e1aaab1148', tool_name='web_search' - ), - ToolInputDeltaChunk( - tool_call_id='ws_00e767404995b9950068e6480e11208191834104e1aaab1148', - input_text_delta='{"query":null,"type":"search"}', - ), - ToolOutputAvailableChunk( - tool_call_id='ws_00e767404995b9950068e6480e11208191834104e1aaab1148', output={'status': 'completed'} - ), - ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), - ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), - ToolInputStartChunk( - tool_call_id='ws_00e767404995b9950068e648118bf88191aa7f804637c45b32', tool_name='web_search' - ), - ToolInputDeltaChunk( - tool_call_id='ws_00e767404995b9950068e648118bf88191aa7f804637c45b32', - input_text_delta='{"query":"OTEL_PYTHON_LOG_CORRELATION environment variable","type":"search"}', - ), - ToolOutputAvailableChunk( - tool_call_id='ws_00e767404995b9950068e648118bf88191aa7f804637c45b32', output={'status': 'completed'} - ), - ReasoningStartChunk(id='d775971d84c848228275a25a097b6409'), - ReasoningDeltaChunk(id='d775971d84c848228275a25a097b6409', delta=''), - TextStartChunk(id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + {'type': 'reasoning-start', 'id': IsStr()}, + { + 'type': 'tool-input-start', + 'toolCallId': IsStr(), + 'toolName': 'web_search', + }, + { + 'type': 'tool-input-delta', + 'toolCallId': IsStr(), + 'inputTextDelta': '{"query":"OpenTelemetry FastAPI instrumentation 
capture request and response body","type":"search"}', + }, + { + 'type': 'tool-output-available', + 'toolCallId': IsStr(), + 'output': {'status': 'completed'}, + }, + {'type': 'reasoning-start', 'id': IsStr()}, + { + 'type': 'tool-input-start', + 'toolCallId': IsStr(), + 'toolName': 'web_search', + }, + { + 'type': 'tool-input-delta', + 'toolCallId': IsStr(), + 'inputTextDelta': '{"query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY Python","type":"search"}', + }, + { + 'type': 'tool-output-available', + 'toolCallId': IsStr(), + 'output': {'status': 'completed'}, + }, + {'type': 'reasoning-start', 'id': IsStr()}, + { + 'type': 'tool-input-start', + 'toolCallId': IsStr(), + 'toolName': 'web_search', + }, + { + 'type': 'tool-input-delta', + 'toolCallId': IsStr(), + 'inputTextDelta': '{"query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY opentelemetry python","type":"search"}', + }, + { + 'type': 'tool-output-available', + 'toolCallId': IsStr(), + 'output': {'status': 'completed'}, + }, + {'type': 'reasoning-start', 'id': IsStr()}, + { + 'type': 'tool-input-start', + 'toolCallId': IsStr(), + 'toolName': 'web_search', + }, + { + 'type': 'tool-input-delta', + 'toolCallId': IsStr(), + 'inputTextDelta': '{"query":"site:github.com open-telemetry/opentelemetry-python-contrib OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY","type":"search"}', + }, + { + 'type': 'tool-output-available', + 'toolCallId': IsStr(), + 'output': {'status': 'completed'}, + }, + {'type': 'reasoning-start', 'id': IsStr()}, + { + 'type': 'tool-input-start', + 'toolCallId': IsStr(), + 'toolName': 'web_search', + }, + { + 'type': 'tool-input-delta', + 'toolCallId': IsStr(), + 'inputTextDelta': '{"query":null,"type":"search"}', + }, + { + 'type': 'tool-output-available', + 'toolCallId': IsStr(), + 'output': {'status': 'completed'}, + }, + {'type': 'reasoning-start', 'id': IsStr()}, + { + 'type': 'tool-input-start', + 'toolCallId': IsStr(), + 'toolName': 'web_search', + }, + { + 'type': 'tool-input-delta', + 'toolCallId': IsStr(), + 'inputTextDelta': '{"query":null,"type":"search"}', + }, + { + 'type': 'tool-output-available', + 'toolCallId': IsStr(), + 'output': {'status': 'completed'}, + }, + {'type': 'reasoning-start', 'id': IsStr()}, + { + 'type': 'tool-input-start', + 'toolCallId': IsStr(), + 'toolName': 'web_search', + }, + { + 'type': 'tool-input-delta', + 'toolCallId': IsStr(), + 'inputTextDelta': '{"query":"OTEL_PYTHON_LOG_CORRELATION environment variable","type":"search"}', + }, + { + 'type': 'tool-output-available', + 'toolCallId': IsStr(), + 'output': {'status': 'completed'}, + }, + {'type': 'reasoning-start', 'id': IsStr()}, + {'type': 'text-start', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ Short answer: - Default\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' FastAPI/OpenTelemetry', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' instrumentation already records method', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='/route/status', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' FastAPI/OpenTelemetry', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': ' instrumentation already records method', + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': '/route/status', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ . 
- To also\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' include HTTP headers', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=', set', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' the capture-', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='headers env', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' include HTTP headers', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ', set', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' the capture-', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'headers env', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ vars. -\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' To include request', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='/response bodies', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=', use the', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' FastAPI', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='/ASGI', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' request/response', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' hooks and add', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' the', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' payload to', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' the span yourself', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' (with red', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='action/size', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' To include request', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '/response bodies', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ', use the', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' FastAPI', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '/ASGI', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' request/response', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' hooks and add', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' the', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' payload to', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' the span yourself', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' (with red', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'action/size', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ limits). 
How\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' to do it', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' to do it', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ 1)\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' Enable header capture', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' (server side', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' Enable header capture', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' (server side', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ ) - Choose\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' just the', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' headers you need; avoid', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' sensitive ones or sanitize', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' just the', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' headers you need; avoid', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': ' sensitive ones or sanitize', + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': """\ them. export OTEL\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='_INSTRUMENTATION_HTTP_CAPTURE', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='_HEADERS_SERVER_REQUEST="content', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='-type,user', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='-agent"\n', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='export OTEL_INSTRUMENTATION', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='_HTTP_CAPTURE_HEADERS', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='_SERVER_RESPONSE="content-type"\n', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='export OTEL_INSTRUMENTATION_HTTP', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': '_INSTRUMENTATION_HTTP_CAPTURE', + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': '_HEADERS_SERVER_REQUEST="content', + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': '-type,user', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '-agent"\n', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': 'export OTEL_INSTRUMENTATION', + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': '_HTTP_CAPTURE_HEADERS', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': '_SERVER_RESPONSE="content-type"\n', + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': 'export OTEL_INSTRUMENTATION_HTTP', + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': """\ _CAPTURE_HEADERS_SANITIZE_FIELDS="authorization,set-cookie" This makes headers appear on spans as http.request.header.* and http.response.header.*. 
([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html)) 2)\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' Add hooks to capture request', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='/response bodies', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': ' Add hooks to capture request', + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': '/response bodies', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ Note:\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=IsStr(), id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' a built-in Python', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' env', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' var to', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' auto-capture', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' HTTP bodies for Fast', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='API/AS', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='GI. Use', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' hooks to look at', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' ASGI receive', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='/send events and', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' attach (tr', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='uncated) bodies', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' as span attributes', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': IsStr(), 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' a built-in Python', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' env', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' var to', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' auto-capture', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' HTTP bodies for Fast', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'API/AS', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'GI. Use', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' hooks to look at', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' ASGI receive', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '/send events and', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' attach (tr', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'uncated) bodies', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' as span attributes', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ . 
from\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' fastapi import', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' FastAPI', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' fastapi import', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' FastAPI', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ from opente\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='lemetry.trace', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' import Span', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': 'lemetry.trace', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' import Span', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ from opente\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='lemetry.instrument', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='ation.fastapi import', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' FastAPIInstrument', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': 'lemetry.instrument', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'ation.fastapi import', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' FastAPIInstrument', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ or MAX\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='_BYTES = ', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='2048 ', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' # keep this', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' small in prod', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': '_BYTES = ', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '2048 ', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' # keep this', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' small in prod', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ def client\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='_request_hook(span', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=': Span,', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' scope: dict', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=', message:', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': '_request_hook(span', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ': Span,', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' scope: dict', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ', message:', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ dict): \ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' if span and', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' span.is_record', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='ing() and', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' message.get("', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='type") ==', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' "http.request', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ 
+ 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' if span and', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' span.is_record', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'ing() and', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' message.get("', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'type") ==', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' "http.request', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ ": body\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' = message.get', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='("body")', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' or b"', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' = message.get', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '("body")', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' or b"', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ " if\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': """\ body: \ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' span.set_attribute', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' span.set_attribute', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ ( "\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='http.request.body', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': 'http.request.body', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ ", body\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='[:MAX_BYTES', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='].decode("', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='utf-8', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='", "replace', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': '[:MAX_BYTES', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '].decode("', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'utf-8', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '", "replace', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ "), ) """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': """\ def client_response\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='_hook(span:', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' Span, scope', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=': dict,', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' message: dict', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': '_hook(span:', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' Span, scope', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ': dict,', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' message: dict', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ ): if\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' span and span', 
id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='.is_recording', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='() and message', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='.get("type', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='") == "', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='http.response.body', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' span and span', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '.is_recording', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '() and message', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '.get("type', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '") == "', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'http.response.body', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ ": body\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' = message.get', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='("body")', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' or b"', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' = message.get', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '("body")', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' or b"', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ " if\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': """\ body: \ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' span.set_attribute', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' span.set_attribute', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ ( "\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='http.response.body', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': 'http.response.body', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ ", body\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='[:MAX_BYTES', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='].decode("', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='utf-8', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='", "replace', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': '[:MAX_BYTES', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '].decode("', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'utf-8', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '", "replace', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ "), ) """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': """\ app = Fast\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': """\ API() Fast\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='APIInstrumentor', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='.instrument_app(', id='d775971d84c848228275a25a097b6409'), - 
TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': 'APIInstrumentor', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '.instrument_app(', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ app,\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': """\ client_request\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='_hook=client', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': '_hook=client', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ _request_hook, \ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' client_response_hook', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='=client_response', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' client_response_hook', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '=client_response', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ _hook, ) """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': """\ - The hooks\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' receive the AS', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='GI event dict', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='s: http', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='.request (with', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' body/more', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='_body) and', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' http.response.body', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='. If your', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' bodies can be', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' chunked,', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' you may need', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' to accumulate across', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' calls when message', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='.get("more', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='_body") is', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' True. ', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta='([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html)', - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=')', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' receive the AS', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'GI event dict', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 's: http', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '.request (with', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' body/more', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '_body) and', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' http.response.body', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '. 
If your', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' bodies can be', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' chunked,', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' you may need', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' to accumulate across', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' calls when message', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '.get("more', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '_body") is', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' True. ', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': '([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html)', + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ')', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ 3)\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' Be careful with', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' PII and', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' Be careful with', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' PII and', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ size -\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' Always limit size', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' and consider redaction', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' before putting payloads', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' Always limit size', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' and consider redaction', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' before putting payloads', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ on spans. -\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' Use the sanitize', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' env var above', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' for sensitive headers', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='. ', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta='([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n', - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' Use the sanitize', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' env var above', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' for sensitive headers', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '. 
', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': '([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/fastapi/fastapi.html))\n', + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': """\ Optional: correlate logs\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': """\ with traces -\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' If you also want', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' request/response', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' details in logs with', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' trace IDs, enable', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' Python log correlation:\n', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' If you also want', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' request/response', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' details in logs with', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' trace IDs, enable', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' Python log correlation:\n', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ export OTEL_P\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='YTHON_LOG_COR', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='RELATION=true', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': 'YTHON_LOG_COR', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'RELATION=true', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ or programmatically\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': """\ : from opente\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='lemetry.instrumentation', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='.logging import LoggingInstrument', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': 'lemetry.instrumentation', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': '.logging import LoggingInstrument', + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': """\ or LoggingInstrument\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='or().instrument(set', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta='_logging_format=True)\n', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': 'or().instrument(set', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '_logging_format=True)\n', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': """\ This injects trace\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta='_id/span_id into', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' log records so you', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' can line up logs', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' with the span that', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' carries the HTTP payload', 
id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' attributes. ', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk( - delta='([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/logging/logging.html?utm_source=openai))\n', - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk( - delta="""\ + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': '_id/span_id into', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' log records so you', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' can line up logs', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' with the span that', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' carries the HTTP payload', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' attributes. ', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': '([opentelemetry-python-contrib.readthedocs.io](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/logging/logging.html?utm_source=openai))\n', + 'id': IsStr(), + }, + { + 'type': 'text-delta', + 'delta': """\ Want me to tailor\ """, - id='d775971d84c848228275a25a097b6409', - ), - TextDeltaChunk(delta=' the hook to only', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' capture JSON bodies,', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' skip binary content,', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' or accumulate chunked', id='d775971d84c848228275a25a097b6409'), - TextDeltaChunk(delta=' bodies safely?', id='d775971d84c848228275a25a097b6409'), - ToolInputStartChunk( - tool_call_id='ws_00e767404995b9950068e647f909248191bfe8d05eeed67645', tool_name='web_search' - ), - ToolInputDeltaChunk( - tool_call_id='ws_00e767404995b9950068e647f909248191bfe8d05eeed67645', - input_text_delta='{"query":"OpenTelemetry FastAPI instrumentation capture request and response body","type":"search"}', - ), - ToolOutputAvailableChunk( - tool_call_id='ws_00e767404995b9950068e647f909248191bfe8d05eeed67645', output={'status': 'completed'} - ), - ToolInputStartChunk( - tool_call_id='ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c', tool_name='web_search' - ), - ToolInputDeltaChunk( - tool_call_id='ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c', - input_text_delta='{"query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY Python","type":"search"}', - ), - ToolOutputAvailableChunk( - tool_call_id='ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c', output={'status': 'completed'} - ), - ToolInputStartChunk( - tool_call_id='ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba', tool_name='web_search' - ), - ToolInputDeltaChunk( - tool_call_id='ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba', - input_text_delta='{"query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY opentelemetry python","type":"search"}', - ), - ToolOutputAvailableChunk( - tool_call_id='ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba', output={'status': 'completed'} - ), - ToolInputStartChunk( - tool_call_id='ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2', tool_name='web_search' - ), - ToolInputDeltaChunk( - tool_call_id='ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2', - input_text_delta='{"query":"site:github.com open-telemetry/opentelemetry-python-contrib OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY","type":"search"}', - ), - ToolOutputAvailableChunk( - tool_call_id='ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2', output={'status': 'completed'} - ), - 
ToolInputStartChunk( - tool_call_id='ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911', tool_name='web_search' - ), - ToolInputDeltaChunk( - tool_call_id='ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911', - input_text_delta='{"query":null,"type":"search"}', - ), - ToolOutputAvailableChunk( - tool_call_id='ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911', output={'status': 'completed'} - ), - ToolInputStartChunk( - tool_call_id='ws_00e767404995b9950068e6480e11208191834104e1aaab1148', tool_name='web_search' - ), - ToolInputDeltaChunk( - tool_call_id='ws_00e767404995b9950068e6480e11208191834104e1aaab1148', - input_text_delta='{"query":null,"type":"search"}', - ), - ToolOutputAvailableChunk( - tool_call_id='ws_00e767404995b9950068e6480e11208191834104e1aaab1148', output={'status': 'completed'} - ), - ToolInputStartChunk( - tool_call_id='ws_00e767404995b9950068e648118bf88191aa7f804637c45b32', tool_name='web_search' - ), - ToolInputDeltaChunk( - tool_call_id='ws_00e767404995b9950068e648118bf88191aa7f804637c45b32', - input_text_delta='{"query":"OTEL_PYTHON_LOG_CORRELATION environment variable","type":"search"}', - ), - ToolOutputAvailableChunk( - tool_call_id='ws_00e767404995b9950068e648118bf88191aa7f804637c45b32', output={'status': 'completed'} - ), - FinishChunk(), - DoneChunk(), + 'id': IsStr(), + }, + {'type': 'text-delta', 'delta': ' the hook to only', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' capture JSON bodies,', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' skip binary content,', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' or accumulate chunked', 'id': IsStr()}, + {'type': 'text-delta', 'delta': ' bodies safely?', 'id': IsStr()}, + {'type': 'finish'}, ] ) From 2acc1c3865d0aca9339183fbf62043d099a2c5ae Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Fri, 10 Oct 2025 08:46:54 +0000 Subject: [PATCH 06/46] fix typecheck, tests, linter --- pydantic_ai_slim/pydantic_ai/_agent_graph.py | 6 +- pydantic_ai_slim/pydantic_ai/ag_ui.py | 14 +++- pydantic_ai_slim/pydantic_ai/ui/adapter.py | 81 +++++++------------ .../pydantic_ai/ui/ag_ui/_adapter.py | 24 +----- .../pydantic_ai/ui/ag_ui/_event_stream.py | 14 +--- .../pydantic_ai/ui/event_stream.py | 27 +------ .../pydantic_ai/ui/vercel_ai/_adapter.py | 44 +++++----- .../pydantic_ai/ui/vercel_ai/_event_stream.py | 13 +-- tests/test_ag_ui.py | 8 +- tests/test_agent.py | 8 +- 10 files changed, 82 insertions(+), 157 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index fc883eee9b..bd6c863228 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -243,13 +243,13 @@ async def run( # noqa: C901 if self.user_prompt is not None: parts.append(_messages.UserPromptPart(self.user_prompt)) - if not parts: - raise exceptions.UserError('No messages provided') - next_message = _messages.ModelRequest(parts=parts) next_message.instructions = await ctx.deps.get_instructions(run_context) + if not messages and not next_message.parts and not next_message.instructions: + raise exceptions.UserError('No message history, user prompt, or instructions provided') + return ModelRequestNode[DepsT, NodeRunEndT](request=next_message) async def _handle_deferred_tool_results( # noqa: C901 diff --git a/pydantic_ai_slim/pydantic_ai/ag_ui.py b/pydantic_ai_slim/pydantic_ai/ag_ui.py index b4c32d8a12..df03646ae9 100644 --- a/pydantic_ai_slim/pydantic_ai/ag_ui.py +++ b/pydantic_ai_slim/pydantic_ai/ag_ui.py @@ -67,6 +67,8 @@ def 
__init__( *, # Agent.iter parameters. output_type: OutputSpec[Any] | None = None, + message_history: Sequence[ModelMessage] | None = None, + deferred_tool_results: DeferredToolResults | None = None, model: Model | KnownModelName | str | None = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, @@ -97,6 +99,8 @@ def __init__( output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no output validators since output validators would expect an argument that matches the agent's output type. + message_history: History of the conversation so far. + deferred_tool_results: Optional results for deferred tool calls in the message history. model: Optional model to use for this run, required if `model` was not set when creating the agent. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. @@ -139,6 +143,8 @@ async def endpoint(request: Request) -> Response: agent, request, output_type=output_type, + message_history=message_history, + deferred_tool_results=deferred_tool_results, model=model, deps=deps, model_settings=model_settings, @@ -156,6 +162,8 @@ async def handle_ag_ui_request( request: Request, *, output_type: OutputSpec[Any] | None = None, + message_history: Sequence[ModelMessage] | None = None, + deferred_tool_results: DeferredToolResults | None = None, model: Model | KnownModelName | str | None = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, @@ -173,6 +181,8 @@ async def handle_ag_ui_request( output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no output validators since output validators would expect an argument that matches the agent's output type. + message_history: History of the conversation so far. + deferred_tool_results: Optional results for deferred tool calls in the message history. model: Optional model to use for this run, required if `model` was not set when creating the agent. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. @@ -186,7 +196,7 @@ async def handle_ag_ui_request( Returns: A streaming Starlette response with AG-UI protocol events. """ - return await AGUIAdapter.dispatch_request( + return await AGUIAdapter[AgentDepsT].dispatch_request( agent, request, deps=deps, @@ -227,6 +237,8 @@ async def run_ag_ui( output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no output validators since output validators would expect an argument that matches the agent's output type. + message_history: History of the conversation so far. + deferred_tool_results: Optional results for deferred tool calls in the message history. model: Optional model to use for this run, required if `model` was not set when creating the agent. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. diff --git a/pydantic_ai_slim/pydantic_ai/ui/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/adapter.py index 00b8afa311..0fe6d1f006 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/adapter.py @@ -116,30 +116,7 @@ class StateDeps(Generic[StateT]): @dataclass class BaseAdapter(ABC, Generic[RequestT, MessageT, EventT, AgentDepsT]): - """Base adapter for handling UI protocol requests and streaming responses. 
-
-    This class provides a unified interface for request/response handling across different
-    UI protocols (AG-UI, Vercel AI, etc.). It handles:
-    - Request parsing and validation
-    - Message format conversion (protocol messages → pAI messages)
-    - Agent execution and event streaming
-    - Error handling (validation errors vs streaming errors)
-    - SSE encoding
-
-    Type Parameters:
-        RequestT: Protocol-specific request type (e.g., RunAgentInput, RequestData)
-        MessageT: Protocol-specific message type (e.g., ag_ui.Message, UIMessage)
-        EventT: Protocol-specific event type (e.g., ag_ui.BaseEvent, BaseChunk)
-        AgentDepsT: Agent dependencies type
-
-    Example:
-        ```python
-        class MyAdapter(BaseAdapter[MyRequest, MyMessage, MyEvent, MyDeps]):
-            def messages(self, messages: list[MyMessage]) -> list[ModelMessage]:
-                # Convert protocol messages to pAI messages
-                ...
-        ```
-    """
+    """TODO (DouweM): Docstring."""
 
     agent: AbstractAgent[AgentDepsT]
     """The Pydantic AI agent to run."""
@@ -169,23 +146,9 @@ def toolset(self) -> AbstractToolset[AgentDepsT] | None:
         return None
 
     @cached_property
-    def raw_state(self) -> dict[str, Any]:
+    def raw_state(self) -> dict[str, Any] | None:
         """Get the state of the agent run."""
-        return {}
-
-    def deps_with_state(self, deps: AgentDepsT) -> AgentDepsT:
-        raw_state: dict[str, Any] = self.raw_state
-        if isinstance(deps, StateHandler):
-            if isinstance(deps.state, BaseModel):
-                state = type(deps.state).model_validate(raw_state)
-            else:
-                state = raw_state
-
-            return replace(deps, state=state)
-        elif raw_state:
-            raise UserError(
-                f'AG-UI state is provided but `deps` of type `{type(deps).__name__}` does not implement the `StateHandler` protocol: it needs to be a dataclass with a non-optional `state` field.'
-            )
+        return None
 
     @abstractmethod
     def encode_event(self, event: EventT, accept: str | None = None) -> str:
@@ -258,12 +221,10 @@ async def run_stream(
         """Run the agent with the AG-UI run input and stream AG-UI protocol events.
 
         Args:
-            agent: The agent to run.
-            run_input: The AG-UI run input containing thread_id, run_id, messages, etc.
-            accept: The accept header value for the run.
-
             output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no
                 output validators since output validators would expect an argument that matches the agent's output type.
+            message_history: History of the conversation so far.
+            deferred_tool_results: Optional results for deferred tool calls in the message history.
             model: Optional model to use for this run, required if `model` was not set when creating the agent.
             deps: Optional dependencies to use for this run.
             model_settings: Optional settings to use for this model's request.
@@ -284,7 +245,18 @@ async def run_stream(
         output_type = [output_type or self.agent.output_type, DeferredToolRequests]
         toolsets = [*toolsets, toolset] if toolsets else [toolset]
 
-        deps = self.deps_with_state(deps)
+        if isinstance(deps, StateHandler):
+            raw_state = self.raw_state or {}
+            if isinstance(deps.state, BaseModel):
+                state = type(deps.state).model_validate(raw_state)
+            else:
+                state = raw_state
+
+            deps = replace(deps, state=state)
+        elif self.raw_state:
+            raise UserError(
+                f'State is provided but `deps` of type `{type(deps).__name__}` does not implement the `StateHandler` protocol: it needs to be a dataclass with a non-optional `state` field.'
+            )
 
         async for event in self.process_stream(
             self.agent.run_stream_events(
@@ -325,16 +297,21 @@ async def dispatch_request(
         """Handle an AG-UI request and return a streaming response. 
        Args:
+            agent: The agent to run.
             request: The incoming Starlette/FastAPI request.
-            deps: Optional dependencies to pass to the agent.
-            output_type: Custom output type for this run.
-            model: Optional model to use for this run.
-            model_settings: Optional settings for the model's request.
+            output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no
+                output validators since output validators would expect an argument that matches the agent's output type.
+            message_history: History of the conversation so far.
+            deferred_tool_results: Optional results for deferred tool calls in the message history.
+            model: Optional model to use for this run, required if `model` was not set when creating the agent.
+            deps: Optional dependencies to use for this run.
+            model_settings: Optional settings to use for this model's request.
             usage_limits: Optional limits on model request count or token usage.
-            usage: Optional usage to start with.
-            infer_name: Whether to infer the agent name from the call frame.
+            usage: Optional usage to start with, useful for resuming a conversation or agents used in tools.
+            infer_name: Whether to try to infer the agent name from the call frame if it's not set.
             toolsets: Optional additional toolsets for this run.
-            on_complete: Optional callback called when the agent run completes.
+            on_complete: Optional callback function called when the agent run completes successfully.
+                The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can access `all_messages()` and other result data.
 
         Returns:
             A streaming Starlette response with AG-UI protocol events.
diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py
index 0681caa2c4..e3694dd2b7 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py
@@ -97,25 +97,7 @@ def label(self) -> str:
 
 
 class AGUIAdapter(BaseAdapter[RunAgentInput, Message, BaseEvent, AgentDepsT]):
-    """Adapter for handling AG-UI protocol requests with Pydantic AI agents.
-
-    This adapter provides an interface for integrating Pydantic AI agents
-    with the AG-UI protocol, handling request parsing, message conversion,
-    and event streaming.
-
-    Example:
-        ```python
-        from pydantic_ai import Agent
-        from pydantic_ai.ui.ag_ui import AGUIAdapter
-
-        agent = Agent('openai:gpt-4')
-        adapter = AGUIAdapter(agent)
-
-        async def handle_request(request: RunAgentInput, deps=None):
-            async for event_str in adapter.run_stream_encoded(request, deps):
-                yield event_str
-        ```
-    """
+    """TODO (DouweM): Docstring."""
 
     def create_event_stream(self) -> BaseEventStream[RunAgentInput, BaseEvent, AgentDepsT]:
         """Create an event stream for the adapter."""
@@ -134,9 +116,9 @@ def toolset(self) -> AbstractToolset[AgentDepsT] | None:
         return None
 
     @cached_property
-    def raw_state(self) -> dict[str, Any]:
+    def raw_state(self) -> dict[str, Any] | None:
         """Get the state of the agent run."""
-        return self.request.state or {}
+        return self.request.state
 
     def encode_event(self, event: BaseEvent, accept: str | None = None) -> str:
         """Encode an AG-UI event as SSE. 
diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py
index e472bc204f..59ffec19a0 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py
@@ -68,19 +68,7 @@
 
 
 class AGUIEventStream(BaseEventStream[RunAgentInput, BaseEvent, AgentDepsT]):
-    """Transforms Pydantic AI agent events into AG-UI protocol events.
-
-    This class handles the stateful transformation of streaming agent events
-    into the AG-UI protocol format, managing message IDs, thinking mode state,
-    and tool call ID mappings for builtin tools.
-
-    Example:
-        ```python
-        event_stream = AGUIEventStream()
-        async for ag_ui_event in event_stream.handle_event(pai_event):
-            print(ag_ui_event)
-        ```
-    """
+    """TODO (DouweM): Docstring."""
 
     def __init__(self, request: RunAgentInput) -> None:
         """Initialize AG-UI event stream state."""
diff --git a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py
index 927e910409..7f94135143 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py
@@ -47,28 +47,7 @@
 
 @dataclass
 class BaseEventStream(ABC, Generic[RequestT, EventT, AgentDepsT]):
-    """Base class for transforming pAI agent events into protocol-specific events.
-
-    This class provides a granular method-per-part-type pattern that makes it easy to customize
-    specific event transformations by overriding individual handler methods.
-
-    Subclasses should:
-    1. Initialize state attributes in `__init__` if needed
-    2. Implement all abstract `handle_*` methods for event transformation
-    3. Implement error handling methods
-    4. Optionally override lifecycle hooks (`before_stream`, `after_stream`)
-
-    Example:
-        ```python
-        class MyEventStream(BaseEventStream[MyEvent, MyDeps]):
-            def __init__(self):
-                super().__init__()
-                self.custom_state = {}
-
-            async def handle_text_start(self, part: TextPart):
-                yield MyTextStartEvent(id=self.new_message_id(), text=part.content)
-        ```
-    """
+    """TODO (DouweM): Docstring."""
 
     request: RequestT
 
@@ -109,13 +88,13 @@ async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterat
             yield e
 
     async def handle_event(self, event: SourceEvent) -> AsyncIterator[EventT]:  # noqa: C901
-        """Transform a pAI agent event into protocol-specific events.
+        """Transform a Pydantic AI agent event into protocol-specific events.
 
         This method dispatches to specific `handle_*` methods based on event and part type.
         Subclasses should implement the individual handler methods rather than overriding this.
 
         Args:
-            event: The pAI agent event to transform.
+            event: The Pydantic AI agent event to transform.
 
         Yields:
             Protocol-specific events.
diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py
index 8f3053ff89..77d66ba4b6 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py
@@ -46,25 +46,7 @@
 
 @dataclass
 class VercelAIAdapter(BaseAdapter[RequestData, UIMessage, BaseChunk, AgentDepsT]):
-    """Adapter for handling Vercel AI protocol requests with Pydantic AI agents.
-
-    This adapter provides a simplified interface for integrating Pydantic AI agents
-    with the Vercel AI protocol, handling request parsing, message conversion,
-    and event streaming. 
-
-    Example:
-        ```python
-        from pydantic_ai import Agent
-        from pydantic_ai.ui.vercel_ai import VercelAIAdapter
-
-        agent = Agent('openai:gpt-4')
-        adapter = VercelAIAdapter(agent)
-
-        async def handle_request(data: RequestData, deps=None):
-            async for chunk in adapter.run_stream(data, deps):
-                yield chunk.sse()
-        ```
-    """
+    """TODO (DouweM): Docstring."""
 
     def create_event_stream(self) -> BaseEventStream[RequestData, BaseChunk, AgentDepsT]:
         return VercelAIEventStream(self.request)
@@ -98,16 +80,22 @@ async def dispatch_request(
         """Handle a Vercel AI request and return a streaming response.
 
         Args:
+            agent: The agent to run.
             request: The incoming Starlette/FastAPI request.
-            deps: Optional dependencies to pass to the agent.
-            output_type: Custom output type for this run.
-            model: Optional model to use for this run.
-            model_settings: Optional settings for the model's request.
+
+            output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no
+                output validators since output validators would expect an argument that matches the agent's output type.
+            message_history: History of the conversation so far.
+            deferred_tool_results: Optional results for deferred tool calls in the message history.
+            model: Optional model to use for this run, required if `model` was not set when creating the agent.
+            deps: Optional dependencies to use for this run.
+            model_settings: Optional settings to use for this model's request.
             usage_limits: Optional limits on model request count or token usage.
-            usage: Optional usage to start with.
-            infer_name: Whether to infer the agent name from the call frame.
+            usage: Optional usage to start with, useful for resuming a conversation or agents used in tools.
+            infer_name: Whether to try to infer the agent name from the call frame if it's not set.
             toolsets: Optional additional toolsets for this run.
-            on_complete: Optional callback called when the agent run completes.
+            on_complete: Optional callback function called when the agent run completes successfully.
+                The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can access `all_messages()` and other result data.
 
         Returns:
             A streaming Starlette response with Vercel AI protocol events.
@@ -181,3 +169,7 @@ def messages(self) -> list[ModelMessage]:
             pai_messages.append(ModelRequest(parts=[SystemPromptPart(content='\n'.join(texts))]))
 
         return pai_messages
+
+    # TODO (DouweM): model, builtin_tools?
+
+    # TODO (DouweM): static load_messages, dump_messages
diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py
index 26cab17894..9fc938a4d7 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py
@@ -51,18 +51,7 @@ def _json_dumps(obj: Any) -> str:
 
 
 class VercelAIEventStream(BaseEventStream[RequestData, BaseChunk, AgentDepsT]):
-    """Transforms Pydantic AI agent events into Vercel AI protocol events.
-
-    This class handles the stateful transformation of streaming agent events
-    into the Vercel AI protocol format, managing message IDs and final result tool tracking. 
-
-    Example:
-        ```python
-        event_stream = VercelAIEventStream()
-        async for vercel_event in event_stream.handle_event(pai_event):
-            print(vercel_event.sse())
-        ```
-    """
+    """TODO (DouweM): Docstring."""
 
     def __init__(self, request: RequestData) -> None:
         """Initialize Vercel AI event stream state."""
diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py
index e7e0be7746..ea8962abda 100644
--- a/tests/test_ag_ui.py
+++ b/tests/test_ag_ui.py
@@ -255,7 +255,11 @@ async def stream_function(
                 'threadId': IsStr(),
                 'runId': IsStr(),
             },
-            {'type': 'RUN_ERROR', 'message': 'No messages provided', 'code': 'UserError'},
+            {
+                'type': 'RUN_ERROR',
+                'message': 'No message history, user prompt, or instructions provided',
+                'code': 'UserError',
+            },
         ]
     )
 
@@ -1152,7 +1156,7 @@ async def test_request_with_state_without_handler() -> None:
 
     with pytest.raises(
         UserError,
-        match='AG-UI state is provided but `deps` of type `NoneType` does not implement the `StateHandler` protocol: it needs to be a dataclass with a non-optional `state` field.',
+        match='State is provided but `deps` of type `NoneType` does not implement the `StateHandler` protocol: it needs to be a dataclass with a non-optional `state` field.',
     ):
         async for _ in run_ag_ui(agent, run_input):
             pass
diff --git a/tests/test_agent.py b/tests/test_agent.py
index b27e815651..fc6b97ace7 100644
--- a/tests/test_agent.py
+++ b/tests/test_agent.py
@@ -4562,7 +4562,7 @@ async def call_tools_parallel(messages: list[ModelMessage], info: AgentInfo) ->
     server = MCPServerStdio('python', ['-m', 'tests.mcp_server'])
     agent = Agent(FunctionModel(call_tools_parallel), toolsets=[server])
 
-    result = agent.run_sync()
+    result = agent.run_sync('call tools in parallel')
     assert result.output == snapshot('finished')
 
 
@@ -4620,11 +4620,13 @@ def call_second():
         FunctionModel(call_tools_sequential), toolsets=[sequential_toolset], output_type=[str, DeferredToolRequests]
     )
 
+    user_prompt = 'call a lot of tools'
+
     if mode == 'contextmanager':
         with agent.sequential_tool_calls():
-            result = agent.run_sync()
+            result = agent.run_sync(user_prompt)
     else:
-        result = agent.run_sync()
+        result = agent.run_sync(user_prompt)
 
     assert result.output == snapshot(
         DeferredToolRequests(approvals=[ToolCallPart(tool_name='requires_approval', tool_call_id=IsStr())])
 
From 2d7c781b7c3fe114a1656950987ed69d4a88b751 Mon Sep 17 00:00:00 2001
From: Douwe Maan
Date: Fri, 10 Oct 2025 09:30:22 +0000
Subject: [PATCH 07/46] Fix Vercel

---
 .../pydantic_ai/ui/vercel_ai/_adapter.py       |  2 +-
 .../pydantic_ai/ui/vercel_ai/_request_types.py | 17 +++++++++++++----
 tests/test_vercel_ai.py                        | 10 +++++++---
 3 files changed, 21 insertions(+), 8 deletions(-)

diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py
index 77d66ba4b6..7cb064e8cb 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py
@@ -57,7 +57,7 @@ def encode_event(self, event: BaseChunk, accept: str | None = None) -> str:
     @classmethod
     async def validate_request(cls, request: Request) -> RequestData:
         """Validate a Vercel AI request."""
-        return request_data_ta.validate_json(await request.json())
+        return request_data_ta.validate_json(await request.body())
 
     @classmethod
     async def dispatch_request(
diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_request_types.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_request_types.py
index ea30436378..c2aa43f506 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_request_types.py
+++ 
b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_request_types.py @@ -4,9 +4,9 @@ https://github.com/vercel/ai/blob/ai%405.0.34/packages/ai/src/ui/ui-messages.ts """ -from typing import Any, Literal +from typing import Annotated, Any, Literal -from pydantic import TypeAdapter +from pydantic import Discriminator, TypeAdapter from ._utils import CamelBaseModel, ProviderMetadata @@ -56,7 +56,7 @@ class UIMessage(CamelBaseModel): parts: list[UIPart] -class SubmitMessage(CamelBaseModel): +class SubmitMessage(CamelBaseModel, extra='allow'): """Submit message request.""" trigger: Literal['submit-message'] = 'submit-message' @@ -64,7 +64,16 @@ class SubmitMessage(CamelBaseModel): messages: list[UIMessage] -RequestData = SubmitMessage +class RegenerateMessage(CamelBaseModel, extra='allow'): + """Ask the agent to regenerate a message.""" + + trigger: Literal['regenerate-message'] + id: str + messages: list[UIMessage] + message_id: str + + +RequestData = Annotated[SubmitMessage | RegenerateMessage, Discriminator('trigger')] # Type adapter for parsing requests request_data_ta: TypeAdapter[RequestData] = TypeAdapter(RequestData) diff --git a/tests/test_vercel_ai.py b/tests/test_vercel_ai.py index 93109970a6..0f61684697 100644 --- a/tests/test_vercel_ai.py +++ b/tests/test_vercel_ai.py @@ -7,8 +7,6 @@ from pydantic_ai import Agent from pydantic_ai.builtin_tools import WebSearchTool -from pydantic_ai.models.openai import OpenAIResponsesModel -from pydantic_ai.providers.openai import OpenAIProvider from pydantic_ai.ui.vercel_ai import VercelAIAdapter from pydantic_ai.ui.vercel_ai._request_types import ( SubmitMessage, @@ -17,7 +15,12 @@ UIMessage, ) -from .conftest import IsStr +from .conftest import IsStr, try_import + +with try_import() as openai_import_successful: + from pydantic_ai.models.openai import OpenAIResponsesModel + from pydantic_ai.providers.openai import OpenAIProvider + pytestmark = [ pytest.mark.anyio, @@ -31,6 +34,7 @@ ] +@pytest.mark.skipif(not openai_import_successful(), reason='OpenAI not installed') async def test_run(allow_model_requests: None, openai_api_key: str): model = OpenAIResponsesModel('gpt-5', provider=OpenAIProvider(api_key=openai_api_key)) agent = Agent(model=model, builtin_tools=[WebSearchTool()]) From 03862a5750d6c89cc3c585acfccd5782c82061d9 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Fri, 10 Oct 2025 10:03:24 +0000 Subject: [PATCH 08/46] cleanup --- pydantic_ai_slim/pydantic_ai/ui/adapter.py | 58 +++++----- .../pydantic_ai/ui/ag_ui/__init__.py | 4 +- .../pydantic_ai/ui/ag_ui/_adapter.py | 40 +++---- .../pydantic_ai/ui/ag_ui/_event_stream.py | 19 ++++ .../pydantic_ai/ui/event_stream.py | 28 ++++- .../pydantic_ai/ui/vercel_ai/__init__.py | 5 +- .../pydantic_ai/ui/vercel_ai/_adapter.py | 106 +++++------------- .../pydantic_ai/ui/vercel_ai/_event_stream.py | 3 + .../pydantic_ai/ui/vercel_ai/_utils.py | 5 +- 9 files changed, 125 insertions(+), 143 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/adapter.py index 0fe6d1f006..a75d3d03da 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/adapter.py @@ -7,7 +7,7 @@ from __future__ import annotations from abc import ABC, abstractmethod -from collections.abc import AsyncIterator, Awaitable, Callable, Sequence +from collections.abc import AsyncIterator, Awaitable, Callable, Mapping, Sequence from dataclasses import Field, dataclass, replace from functools import cached_property from http import HTTPStatus @@ -53,7 +53,7 @@ """Type 
variable for protocol-specific event types.""" -RequestT = TypeVar('RequestT') +RunRequestT = TypeVar('RunRequestT') """Type variable for protocol-specific request types.""" MessageT = TypeVar('MessageT') @@ -115,17 +115,32 @@ class StateDeps(Generic[StateT]): @dataclass -class BaseAdapter(ABC, Generic[RequestT, MessageT, EventT, AgentDepsT]): +class BaseAdapter(ABC, Generic[RunRequestT, MessageT, EventT, AgentDepsT]): """TODO (DouwM): Docstring.""" agent: AbstractAgent[AgentDepsT] """The Pydantic AI agent to run.""" - request: RequestT + request: RunRequestT # TODO (DouweM): rename """The protocol-specific request object.""" + @classmethod + async def validate_request(cls, request: Request) -> RunRequestT: + """Validate the request and return the validated request.""" + raise NotImplementedError('validate_request is not implemented') + + @classmethod + @abstractmethod + def load_messages(cls, messages: Sequence[MessageT]) -> list[ModelMessage]: + """Load messages from the request and return the loaded messages.""" + @abstractmethod - def create_event_stream(self) -> BaseEventStream[RequestT, EventT, AgentDepsT]: + def dump_messages(self, messages: Sequence[ModelMessage]) -> list[MessageT]: + """Dump messages to the request and return the dumped messages.""" + + @cached_property + @abstractmethod + def event_stream(self) -> BaseEventStream[RunRequestT, EventT, AgentDepsT]: """Create an event stream for the adapter.""" @cached_property @@ -150,32 +165,24 @@ def raw_state(self) -> dict[str, Any] | None: """Get the state of the agent run.""" return None - @abstractmethod - def encode_event(self, event: EventT, accept: str | None = None) -> str: - """Encode a protocol event as an SSE string. - - Args: - event: The protocol-specific event. - accept: The accept header value for encoding format. + @property + def result(self) -> AgentRunResult | None: + """Get the result of the agent run.""" + return self.event_stream.result - Returns: - Formatted string. - """ + @property + def response_headers(self) -> Mapping[str, str] | None: + """Get the response headers for the adapter.""" + return None - async def encode_stream(self, stream: AsyncIterator[EventT], accept: str | None = None) -> AsyncIterator[str]: + def encode_stream(self, stream: AsyncIterator[EventT], accept: str | None = None) -> AsyncIterator[str]: """Encode a stream of events as SSE strings. Args: stream: The stream of events to encode. accept: The accept header value for encoding format. """ - async for event in stream: - yield self.encode_event(event, accept) - - @classmethod - async def validate_request(cls, request: Request) -> RequestT: - """Validate the request and return the validated request.""" - raise NotImplementedError('validate_request is not implemented') + return self.event_stream.encode_stream(stream, accept) async def process_stream( self, @@ -188,7 +195,7 @@ async def process_stream( stream: The stream of events to process. on_complete: Optional callback function called when the agent run completes successfully. 
""" - event_stream = self.create_event_stream() + event_stream = self.event_stream async for event in event_stream.handle_stream(stream): yield event @@ -343,5 +350,6 @@ async def dispatch_request( on_complete=on_complete, ), accept=request.headers.get('accept'), - ) + ), + headers=adapter.response_headers, ) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py index 53784efb51..eccd37ed89 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py @@ -1,7 +1,7 @@ """AG-UI protocol integration for Pydantic AI agents.""" -from ._adapter import SSE_CONTENT_TYPE, AGUIAdapter -from ._event_stream import AGUIEventStream +from ._adapter import AGUIAdapter +from ._event_stream import SSE_CONTENT_TYPE, AGUIEventStream __all__ = [ 'AGUIAdapter', diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index e3694dd2b7..b4d6c98b98 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -2,11 +2,11 @@ from __future__ import annotations +from collections.abc import Sequence from functools import cached_property from typing import ( TYPE_CHECKING, Any, - Final, ) from ... import ExternalToolset, ToolDefinition @@ -39,7 +39,6 @@ ToolMessage, UserMessage, ) - from ag_ui.encoder import EventEncoder from ..adapter import BaseAdapter from ..event_stream import BaseEventStream @@ -64,9 +63,6 @@ __all__ = ['AGUIAdapter'] -SSE_CONTENT_TYPE: Final[str] = 'text/event-stream' -"""Content type header value for Server-Sent Events (SSE).""" - # Frontend toolset @@ -99,15 +95,21 @@ def label(self) -> str: class AGUIAdapter(BaseAdapter[RunAgentInput, Message, BaseEvent, AgentDepsT]): """TODO (DouwM): Docstring.""" - def create_event_stream(self) -> BaseEventStream[RunAgentInput, BaseEvent, AgentDepsT]: - """Create an event stream for the adapter.""" - return AGUIEventStream(self.request) - @classmethod async def validate_request(cls, request: Request) -> RunAgentInput: """Validate the request and return the validated request.""" return RunAgentInput.model_validate(await request.json()) + def dump_messages(self, messages: Sequence[ModelMessage]) -> list[Message]: + """Dump messages to the request and return the dumped messages.""" + # TODO (DouweM): bring in from https://github.com/pydantic/pydantic-ai/pull/3068 + raise NotImplementedError + + @cached_property + def event_stream(self) -> BaseEventStream[RunAgentInput, BaseEvent, AgentDepsT]: + """Create an event stream for the adapter.""" + return AGUIEventStream(self.request) + @cached_property def toolset(self) -> AbstractToolset[AgentDepsT] | None: """Get the toolset for the adapter.""" @@ -120,19 +122,6 @@ def raw_state(self) -> dict[str, Any] | None: """Get the state of the agent run.""" return self.request.state - def encode_event(self, event: BaseEvent, accept: str | None = None) -> str: - """Encode an AG-UI event as SSE. - - Args: - event: The AG-UI event to encode. - accept: The accept header value for encoding format. - - Returns: - The SSE-formatted string. - """ - encoder = EventEncoder(accept=accept or SSE_CONTENT_TYPE) - return encoder.encode(event) - @cached_property def messages(self) -> list[ModelMessage]: """Convert AG-UI messages to Pydantic AI messages. @@ -143,12 +132,17 @@ def messages(self) -> list[ModelMessage]: Returns: List of Pydantic AI ModelMessage objects. 
""" + return self.load_messages(self.request.messages) + + @classmethod + def load_messages(cls, messages: Sequence[Message]) -> list[ModelMessage]: + """Load messages from the request and return the loaded messages.""" result: list[ModelMessage] = [] tool_calls: dict[str, str] = {} # Tool call ID to tool name mapping. request_parts: list[ModelRequestPart] | None = None response_parts: list[ModelResponsePart] | None = None - for msg in self.request.messages: + for msg in messages: if isinstance(msg, UserMessage | SystemMessage | DeveloperMessage) or ( isinstance(msg, ToolMessage) and not msg.tool_call_id.startswith(BUILTIN_TOOL_CALL_ID_PREFIX) ): diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index 59ffec19a0..097f2f253f 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -51,6 +51,8 @@ ToolCallResultEvent, ToolCallStartEvent, ) + from ag_ui.encoder import EventEncoder + except ImportError as e: # pragma: no cover raise ImportError( 'Please install the `ag-ui-protocol` package to use AG-UI integration, ' @@ -64,6 +66,10 @@ 'RunFinishedEvent', ] +SSE_CONTENT_TYPE: Final[str] = 'text/event-stream' +"""Content type header value for Server-Sent Events (SSE).""" + + BUILTIN_TOOL_CALL_ID_PREFIX: Final[str] = 'pyd_ai_builtin' @@ -77,6 +83,19 @@ def __init__(self, request: RunAgentInput) -> None: self.thinking: bool = False self.builtin_tool_call_ids: dict[str, str] = {} + def encode_event(self, event: BaseEvent, accept: str | None = None) -> str: + """Encode an AG-UI event as SSE. + + Args: + event: The AG-UI event to encode. + accept: The accept header value for encoding format. + + Returns: + The SSE-formatted string. + """ + encoder = EventEncoder(accept=accept or SSE_CONTENT_TYPE) + return encoder.encode(event) + async def before_stream(self) -> AsyncIterator[BaseEvent]: """Yield events before agent streaming starts.""" yield RunStartedEvent( diff --git a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py index 7f94135143..68d77342dc 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py @@ -6,7 +6,7 @@ from __future__ import annotations -from abc import ABC +from abc import ABC, abstractmethod from collections.abc import AsyncIterator from dataclasses import dataclass, field from typing import Generic, TypeVar @@ -39,17 +39,17 @@ EventT = TypeVar('EventT') """Type variable for protocol-specific event types.""" -RequestT = TypeVar('RequestT') +RunRequestT = TypeVar('RunRequestT') """Type variable for request types.""" SourceEvent = AgentStreamEvent | AgentRunResultEvent @dataclass -class BaseEventStream(ABC, Generic[RequestT, EventT, AgentDepsT]): +class BaseEventStream(ABC, Generic[RunRequestT, EventT, AgentDepsT]): """TODO (DouwM): Docstring.""" - request: RequestT + request: RunRequestT result: AgentRunResult | None = None @@ -64,6 +64,26 @@ def new_message_id(self) -> str: self.message_id = str(uuid4()) return self.message_id + @abstractmethod + def encode_event(self, event: EventT, accept: str | None = None) -> str: + """Encode an event as a string. + + Args: + event: The event to encode. + accept: The accept header value for encoding format. 
+ """ + raise NotImplementedError + + async def encode_stream(self, stream: AsyncIterator[EventT], accept: str | None = None) -> AsyncIterator[str]: + """Encode a stream of events as SSE strings. + + Args: + stream: The stream of events to encode. + accept: The accept header value for encoding format. + """ + async for event in stream: + yield self.encode_event(event, accept) + async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterator[EventT]: """Handle a stream of agent events. diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py index e0788a1404..511bd68336 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py @@ -44,14 +44,11 @@ ToolOutputAvailableChunk, ToolOutputErrorChunk, ) -from ._utils import VERCEL_AI_DSP_HEADERS, CamelBaseModel, JSONValue, ProviderMetadata +from ._utils import ProviderMetadata __all__ = [ # Utilities - 'CamelBaseModel', 'ProviderMetadata', - 'JSONValue', - 'VERCEL_AI_DSP_HEADERS', # Request types 'RequestData', 'TextUIPart', diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py index 7cb064e8cb..382db45181 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py @@ -2,15 +2,11 @@ from __future__ import annotations -from collections.abc import Sequence +from collections.abc import Mapping, Sequence from dataclasses import dataclass from functools import cached_property -from typing import ( - Any, -) -from ... import DeferredToolResults -from ...agent import AbstractAgent, AgentDepsT +from ...agent import AgentDepsT from ...messages import ( ModelMessage, ModelRequest, @@ -19,27 +15,23 @@ TextPart, UserPromptPart, ) -from ...models import KnownModelName, Model -from ...output import OutputSpec -from ...settings import ModelSettings -from ...toolsets import AbstractToolset -from ...usage import RunUsage, UsageLimits -from ..adapter import BaseAdapter, OnCompleteFunc +from ..adapter import BaseAdapter from ..event_stream import BaseEventStream from ._event_stream import VercelAIEventStream from ._request_types import RequestData, TextUIPart, UIMessage, request_data_ta from ._response_types import BaseChunk -from ._utils import VERCEL_AI_DSP_HEADERS try: from starlette.requests import Request - from starlette.responses import Response except ImportError as e: # pragma: no cover raise ImportError( 'Please install the `starlette` package to use `Agent.to_ag_ui()` method, ' 'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`' ) from e +# See https://ai-sdk.dev/docs/ai-sdk-ui/stream-protocol#data-stream-protocol +VERCEL_AI_DSP_HEADERS = {'x-vercel-ai-ui-message-stream': 'v1'} + __all__ = ['VercelAIAdapter'] @@ -48,75 +40,24 @@ class VercelAIAdapter(BaseAdapter[RequestData, UIMessage, BaseChunk, AgentDepsT]): """TODO (DouwM): Docstring.""" - def create_event_stream(self) -> BaseEventStream[RequestData, BaseChunk, AgentDepsT]: - return VercelAIEventStream(self.request) - - def encode_event(self, event: BaseChunk, accept: str | None = None) -> str: - return f'data: {event.model_dump_json(by_alias=True, exclude_none=True)}\n\n' - @classmethod async def validate_request(cls, request: Request) -> RequestData: """Validate a Vercel AI request.""" return request_data_ta.validate_json(await request.body()) - @classmethod - async def 
dispatch_request( - cls, - agent: AbstractAgent[AgentDepsT, Any], - request: Request, - *, - message_history: Sequence[ModelMessage] | None = None, - deferred_tool_results: DeferredToolResults | None = None, - model: Model | KnownModelName | str | None = None, - deps: AgentDepsT = None, - output_type: OutputSpec[Any] | None = None, - model_settings: ModelSettings | None = None, - usage_limits: UsageLimits | None = None, - usage: RunUsage | None = None, - infer_name: bool = True, - toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, - on_complete: OnCompleteFunc | None = None, - ) -> Response: - """Handle an AG-UI request and return a streaming response. + def dump_messages(self, messages: Sequence[ModelMessage]) -> list[UIMessage]: + """Dump messages to the request and return the dumped messages.""" + # TODO (DouweM): implement + raise NotImplementedError - Args: - agent: The agent to run. - request: The incoming Starlette/FastAPI request. - - output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no - output validators since output validators would expect an argument that matches the agent's output type. - message_history: History of the conversation so far. - deferred_tool_results: Optional results for deferred tool calls in the message history. - model: Optional model to use for this run, required if `model` was not set when creating the agent. - deps: Optional dependencies to use for this run. - model_settings: Optional settings to use for this model's request. - usage_limits: Optional limits on model request count or token usage. - usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. - infer_name: Whether to try to infer the agent name from the call frame if it's not set. - toolsets: Optional additional toolsets for this run. - on_complete: Optional callback function called when the agent run completes successfully. - The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can access `all_messages()` and other result data. + @cached_property + def event_stream(self) -> BaseEventStream[RequestData, BaseChunk, AgentDepsT]: + return VercelAIEventStream(self.request) - Returns: - A streaming Starlette response with AG-UI protocol events. - """ - response = await super().dispatch_request( - agent, - request, - message_history=message_history, - deferred_tool_results=deferred_tool_results, - model=model, - deps=deps, - output_type=output_type, - model_settings=model_settings, - usage_limits=usage_limits, - usage=usage, - infer_name=infer_name, - toolsets=toolsets, - on_complete=on_complete, - ) - response.headers.update(VERCEL_AI_DSP_HEADERS) - return response + @property + def response_headers(self) -> Mapping[str, str] | None: + """Get the response headers for the adapter.""" + return VERCEL_AI_DSP_HEADERS @cached_property def messages(self) -> list[ModelMessage]: @@ -131,9 +72,16 @@ def messages(self) -> list[ModelMessage]: Raises: ValueError: If message format is not supported. """ + return self.load_messages(self.request.messages) + + # TODO (DouweM): model, builtin_tools? 
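+
+    # Usage sketch (assuming a Starlette `request` and an `Agent` instance
+    # named `agent` are in scope):
+    #
+    #   data = await VercelAIAdapter.validate_request(request)
+    #   adapter = VercelAIAdapter(agent=agent, request=data)
+    #   history = adapter.messages  # list[ModelMessage]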
+ + @classmethod + def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: + """Load messages from the request and return the loaded messages.""" pai_messages: list[ModelMessage] = [] - for msg in self.request.messages: + for msg in messages: if msg.role == 'user': # User message - extract text from parts texts: list[str] = [] @@ -169,7 +117,3 @@ def messages(self) -> list[ModelMessage]: pai_messages.append(ModelRequest(parts=[SystemPromptPart(content='\n'.join(texts))])) return pai_messages - - # TODO (DouweM): model, builtin_tools? - - # TODO (DouweM): static load_messages, dump_messages diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py index 9fc938a4d7..cd4dc0a097 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py @@ -58,6 +58,9 @@ def __init__(self, request: RequestData) -> None: super().__init__(request) self._final_result_tool_id: str | None = None + def encode_event(self, event: BaseChunk, accept: str | None = None) -> str: + return f'data: {event.model_dump_json(by_alias=True, exclude_none=True)}\n\n' + async def after_stream(self) -> AsyncIterator[BaseChunk]: """Yield events after agent streaming completes.""" # Close the final result tool if there was one diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_utils.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_utils.py index d3dfb4e8cd..b517a7c23d 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_utils.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_utils.py @@ -10,10 +10,7 @@ from pydantic import BaseModel, ConfigDict from pydantic.alias_generators import to_camel -__all__ = ('CamelBaseModel', 'ProviderMetadata', 'JSONValue', 'VERCEL_AI_DSP_HEADERS') - -# See https://ai-sdk.dev/docs/ai-sdk-ui/stream-protocol#data-stream-protocol -VERCEL_AI_DSP_HEADERS = {'x-vercel-ai-ui-message-stream': 'v1'} +__all__ = ('CamelBaseModel', 'ProviderMetadata', 'JSONValue') # Technically this is recursive union of JSON types; for simplicity, we call it Any JSONValue = Any From 6f510535665ab11c45530f176e42dae8f224adf3 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Fri, 10 Oct 2025 12:32:52 +0000 Subject: [PATCH 09/46] Refactor AG-UI streaming --- pydantic_ai_slim/pydantic_ai/ui/adapter.py | 54 ++-- .../pydantic_ai/ui/ag_ui/_adapter.py | 2 +- .../pydantic_ai/ui/ag_ui/_event_stream.py | 158 +++++------ .../pydantic_ai/ui/event_stream.py | 256 ++++++++++++++---- .../pydantic_ai/ui/vercel_ai/_adapter.py | 2 +- .../pydantic_ai/ui/vercel_ai/_event_stream.py | 9 +- tests/test_ag_ui.py | 12 +- 7 files changed, 316 insertions(+), 177 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/adapter.py index a75d3d03da..7ff0f800ca 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/adapter.py @@ -138,7 +138,7 @@ def load_messages(cls, messages: Sequence[MessageT]) -> list[ModelMessage]: def dump_messages(self, messages: Sequence[ModelMessage]) -> list[MessageT]: """Dump messages to the request and return the dumped messages.""" - @cached_property + @property @abstractmethod def event_stream(self) -> BaseEventStream[RunRequestT, EventT, AgentDepsT]: """Create an event stream for the adapter.""" @@ -165,11 +165,6 @@ def raw_state(self) -> dict[str, Any] | None: """Get the state of the agent run.""" return None - @property - def result(self) -> AgentRunResult | None: - """Get 
the result of the agent run.""" - return self.event_stream.result - @property def response_headers(self) -> Mapping[str, str] | None: """Get the response headers for the adapter.""" @@ -283,6 +278,21 @@ async def run_stream( ): yield event + async def stream_response(self, stream: AsyncIterator[EventT], accept: str | None = None) -> Response: + """Stream a response to the client. + + Args: + stream: The stream of events to encode. + accept: The accept header value for encoding format. + """ + return StreamingResponse( + self.encode_stream( + stream, + accept=accept, + ), + headers=self.response_headers, + ) + @classmethod async def dispatch_request( cls, @@ -334,22 +344,18 @@ async def dispatch_request( adapter = cls(agent=agent, request=request_data) - return StreamingResponse( - adapter.encode_stream( - adapter.run_stream( - message_history=message_history, - deferred_tool_results=deferred_tool_results, - deps=deps, - output_type=output_type, - model=model, - model_settings=model_settings, - usage_limits=usage_limits, - usage=usage, - infer_name=infer_name, - toolsets=toolsets, - on_complete=on_complete, - ), - accept=request.headers.get('accept'), - ), - headers=adapter.response_headers, + run_stream = adapter.run_stream( + message_history=message_history, + deferred_tool_results=deferred_tool_results, + deps=deps, + output_type=output_type, + model=model, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + on_complete=on_complete, ) + + return await adapter.stream_response(run_stream, accept=request.headers.get('accept')) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index b4d6c98b98..8f8e23a6b2 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -105,7 +105,7 @@ def dump_messages(self, messages: Sequence[ModelMessage]) -> list[Message]: # TODO (DouweM): bring in from https://github.com/pydantic/pydantic-ai/pull/3068 raise NotImplementedError - @cached_property + @property def event_stream(self) -> BaseEventStream[RunAgentInput, BaseEvent, AgentDepsT]: """Create an event stream for the adapter.""" return AGUIEventStream(self.request) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index 097f2f253f..7e5fbf23dd 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -13,9 +13,8 @@ from ...messages import ( BuiltinToolCallPart, BuiltinToolReturnPart, - FinalResultEvent, - FunctionToolCallEvent, FunctionToolResultEvent, + ModelResponsePart, TextPart, TextPartDelta, ThinkingPart, @@ -79,8 +78,7 @@ class AGUIEventStream(BaseEventStream[RunAgentInput, BaseEvent, AgentDepsT]): def __init__(self, request: RunAgentInput) -> None: """Initialize AG-UI event stream state.""" super().__init__(request) - self.part_end: BaseEvent | None = None - self.thinking: bool = False + self.thinking_text = False self.builtin_tool_call_ids: dict[str, str] = {} def encode_event(self, event: BaseEvent, accept: str | None = None) -> str: @@ -105,19 +103,6 @@ async def before_stream(self) -> AsyncIterator[BaseEvent]: async def after_stream(self) -> AsyncIterator[BaseEvent]: """Handle an AgentRunResultEvent, cleaning up any pending state.""" - # Emit any pending part end event - if self.part_end: # pragma: no branch - yield self.part_end - self.part_end = None 
- - # End thinking mode if still active - if self.thinking: - yield ThinkingEndEvent( - type=EventType.THINKING_END, - ) - self.thinking = False - - # Emit finish event yield RunFinishedEvent( thread_id=self.request.thread_id, run_id=self.request.run_id, @@ -125,84 +110,92 @@ async def after_stream(self) -> AsyncIterator[BaseEvent]: async def on_error(self, error: Exception) -> AsyncIterator[BaseEvent]: """Handle errors during streaming.""" - # Try to get code from exception if it has one, otherwise use class name - code = getattr(error, 'code', error.__class__.__name__) - yield RunErrorEvent(message=str(error), code=code) + yield RunErrorEvent(message=str(error)) - # Granular handlers implementation - - async def handle_text_start(self, part: TextPart) -> AsyncIterator[BaseEvent]: + async def handle_text_start( + self, part: TextPart, previous_part: ModelResponsePart | None = None + ) -> AsyncIterator[BaseEvent]: """Handle a TextPart at start.""" - if self.part_end: - yield self.part_end - self.part_end = None - - if self.thinking: - yield ThinkingEndEvent(type=EventType.THINKING_END) - self.thinking = False + if isinstance(previous_part, TextPart): + message_id = previous_part.message_id + else: + message_id = self.new_message_id() + yield TextMessageStartEvent(message_id=message_id) - message_id = self.new_message_id() - yield TextMessageStartEvent(message_id=message_id) if part.content: # pragma: no branch yield TextMessageContentEvent(message_id=message_id, delta=part.content) - self.part_end = TextMessageEndEvent(message_id=message_id) async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[BaseEvent]: """Handle a TextPartDelta.""" if delta.content_delta: # pragma: no branch yield TextMessageContentEvent(message_id=self.message_id, delta=delta.content_delta) - async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[BaseEvent]: - """Handle a ThinkingPart at start.""" - if self.part_end: - yield self.part_end - self.part_end = None + async def handle_text_end( + self, part: TextPart, next_part: ModelResponsePart | None = None + ) -> AsyncIterator[BaseEvent]: + """Handle a TextPart at end.""" + if not isinstance(next_part, TextPart): + yield TextMessageEndEvent(message_id=self.message_id) - if not self.thinking: + async def handle_thinking_start( + self, part: ThinkingPart, previous_part: ModelResponsePart | None = None + ) -> AsyncIterator[BaseEvent]: + """Handle a ThinkingPart at start.""" + if not isinstance(previous_part, ThinkingPart): yield ThinkingStartEvent(type=EventType.THINKING_START) - self.thinking = True if part.content: yield ThinkingTextMessageStartEvent(type=EventType.THINKING_TEXT_MESSAGE_START) yield ThinkingTextMessageContentEvent(type=EventType.THINKING_TEXT_MESSAGE_CONTENT, delta=part.content) - self.part_end = ThinkingTextMessageEndEvent(type=EventType.THINKING_TEXT_MESSAGE_END) + self.thinking_text = True async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[BaseEvent]: """Handle a ThinkingPartDelta.""" - if delta.content_delta: # pragma: no branch - if not isinstance(self.part_end, ThinkingTextMessageEndEvent): - yield ThinkingTextMessageStartEvent(type=EventType.THINKING_TEXT_MESSAGE_START) - self.part_end = ThinkingTextMessageEndEvent(type=EventType.THINKING_TEXT_MESSAGE_END) + if not delta.content_delta: + return - yield ThinkingTextMessageContentEvent( - type=EventType.THINKING_TEXT_MESSAGE_CONTENT, delta=delta.content_delta - ) + if not self.thinking_text: + yield 
ThinkingTextMessageStartEvent(type=EventType.THINKING_TEXT_MESSAGE_START) + self.thinking_text = True - async def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: - """Handle a ToolCallPart or BuiltinToolCallPart at start.""" - if self.part_end: - yield self.part_end - self.part_end = None + yield ThinkingTextMessageContentEvent(type=EventType.THINKING_TEXT_MESSAGE_CONTENT, delta=delta.content_delta) - if self.thinking: + async def handle_thinking_end( + self, part: ThinkingPart, next_part: ModelResponsePart | None = None + ) -> AsyncIterator[BaseEvent]: + """Handle a ThinkingPart at end.""" + if self.thinking_text: + yield ThinkingTextMessageEndEvent(type=EventType.THINKING_TEXT_MESSAGE_END) + self.thinking_text = False + + if not isinstance(next_part, ThinkingPart): yield ThinkingEndEvent(type=EventType.THINKING_END) - self.thinking = False + async def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: + """Handle a ToolCallPart or BuiltinToolCallPart at start.""" + async for e in self._handle_tool_call_start(part): + yield e + + async def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: + """Handle a BuiltinToolCallPart at start.""" tool_call_id = part.tool_call_id - if isinstance(part, BuiltinToolCallPart): - builtin_tool_call_id = '|'.join([BUILTIN_TOOL_CALL_ID_PREFIX, part.provider_name or '', tool_call_id]) - self.builtin_tool_call_ids[tool_call_id] = builtin_tool_call_id - tool_call_id = builtin_tool_call_id + builtin_tool_call_id = '|'.join([BUILTIN_TOOL_CALL_ID_PREFIX, part.provider_name or '', tool_call_id]) + self.builtin_tool_call_ids[tool_call_id] = builtin_tool_call_id + tool_call_id = builtin_tool_call_id + async for e in self._handle_tool_call_start(part, tool_call_id): + yield e + + async def _handle_tool_call_start( + self, part: ToolCallPart | BuiltinToolCallPart, tool_call_id: str | None = None + ) -> AsyncIterator[BaseEvent]: + """Handle a ToolCallPart or BuiltinToolCallPart at start.""" + tool_call_id = tool_call_id or part.tool_call_id message_id = self.message_id or self.new_message_id() + yield ToolCallStartEvent(tool_call_id=tool_call_id, tool_call_name=part.tool_name, parent_message_id=message_id) if part.args: yield ToolCallArgsEvent(tool_call_id=tool_call_id, delta=part.args_as_json_str()) - self.part_end = ToolCallEndEvent(tool_call_id=tool_call_id) - - def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: - """Handle a BuiltinToolCallPart at start.""" - return self.handle_tool_call_start(part) async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[BaseEvent]: """Handle a ToolCallPartDelta.""" @@ -215,13 +208,16 @@ async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterato delta=delta.args_delta if isinstance(delta.args_delta, str) else json.dumps(delta.args_delta), ) + async def handle_tool_call_end(self, part: ToolCallPart) -> AsyncIterator[BaseEvent]: + """Handle a ToolCallPart at end.""" + yield ToolCallEndEvent(tool_call_id=part.tool_call_id) + + async def handle_builtin_tool_call_end(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: + """Handle a BuiltinToolCallPart at end.""" + yield ToolCallEndEvent(tool_call_id=self.builtin_tool_call_ids[part.tool_call_id]) + async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[BaseEvent]: """Handle a BuiltinToolReturnPart.""" - # Emit 
any pending part_end event (e.g., TOOL_CALL_END) before the result - if self.part_end: - yield self.part_end - self.part_end = None - tool_call_id = self.builtin_tool_call_ids[part.tool_call_id] yield ToolCallResultEvent( message_id=self.new_message_id(), @@ -231,26 +227,13 @@ async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> Async content=part.model_response_str(), ) - async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[BaseEvent]: - """Handle a FunctionToolCallEvent. - - This event is emitted when a function tool is called, but no AG-UI events - are needed at this stage since tool calls are handled in PartStartEvent. - """ - return - yield # Make this an async generator - async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[BaseEvent]: """Handle a FunctionToolResultEvent, emitting tool result events.""" result = event.result if not isinstance(result, ToolReturnPart): + # TODO (DouweM): Stream RetryPromptParts to the frontend as well? return - # Emit any pending part_end event (e.g., TOOL_CALL_END) before the result - if self.part_end: - yield self.part_end - self.part_end = None - yield ToolCallResultEvent( message_id=self.new_message_id(), type=EventType.TOOL_CALL_RESULT, @@ -271,11 +254,4 @@ async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> A if isinstance(item, BaseEvent): # pragma: no branch yield item - async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[BaseEvent]: - """Handle a FinalResultEvent. - - This event is emitted when the agent produces a final result, but no AG-UI events - are needed at this stage. - """ - return - yield # Make this an async generator + # TODO (DouweM): Stream ToolCallResultEvent.content as user parts? diff --git a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py index 68d77342dc..92ef862894 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py @@ -9,7 +9,7 @@ from abc import ABC, abstractmethod from collections.abc import AsyncIterator from dataclasses import dataclass, field -from typing import Generic, TypeVar +from typing import Generic, Literal, TypeVar from uuid import uuid4 from ..messages import ( @@ -20,6 +20,7 @@ FinalResultEvent, FunctionToolCallEvent, FunctionToolResultEvent, + ModelResponsePart, PartDeltaEvent, PartStartEvent, TextPart, @@ -50,7 +51,6 @@ class BaseEventStream(ABC, Generic[RunRequestT, EventT, AgentDepsT]): """TODO (DouwM): Docstring.""" request: RunRequestT - result: AgentRunResult | None = None message_id: str = field(default_factory=lambda: str(uuid4())) @@ -84,7 +84,7 @@ async def encode_stream(self, stream: AsyncIterator[EventT], accept: str | None async for event in stream: yield self.encode_event(event, accept) - async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterator[EventT]: + async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterator[EventT]: # noqa: C901 """Handle a stream of agent events. Args: @@ -96,9 +96,49 @@ async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterat async for e in self.before_stream(): yield e + part: ModelResponsePart | None = None + turn: Literal['request', 'response'] | None = None try: async for event in stream: - async for e in self.handle_event(event): + # TODO (DouweM): Should this be a TurnChange event? 
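+                # The turn/part bookkeeping below works as follows: settle the
+                # outgoing (turn, part) pair first, emitting handle_part_end and
+                # the after_request/after_response hooks, then open the incoming
+                # pair via before_request/before_response, and only then let
+                # handle_event process the event itself.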
+ previous_part = part + next_turn = turn + next_part = part + if isinstance(event, PartStartEvent): + next_turn = 'request' + next_part = event.part + elif isinstance(event, FunctionToolCallEvent): + next_turn = 'response' + next_part = None + elif isinstance(event, AgentRunResultEvent): + next_turn = None + next_part = None + + if next_part != part: + if part: + async for e in self.handle_part_end(part, next_part): + yield e + + part = next_part + + if turn != next_turn: + if turn == 'request': + async for e in self.after_request(): + yield e + elif turn == 'response': + async for e in self.after_response(): + yield e + + turn = next_turn + + if turn == 'request': + async for e in self.before_request(): + yield e + elif turn == 'response': + async for e in self.before_response(): + yield e + + async for e in self.handle_event(event, previous_part): yield e except Exception as e: async for e in self.on_error(e): @@ -107,7 +147,9 @@ async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterat async for e in self.after_stream(): yield e - async def handle_event(self, event: SourceEvent) -> AsyncIterator[EventT]: # noqa: C901 + async def handle_event( + self, event: SourceEvent, previous_part: ModelResponsePart | None = None + ) -> AsyncIterator[EventT]: """Transform a Pydantic AI agent event into protocol-specific events. This method dispatches to specific `handle_*` methods based on event and part type. @@ -115,6 +157,7 @@ async def handle_event(self, event: SourceEvent) -> AsyncIterator[EventT]: # no Args: event: The Pydantic AI agent event to transform. + previous_part: The previous part. Yields: Protocol-specific events. @@ -123,48 +166,21 @@ async def handle_event(self, event: SourceEvent) -> AsyncIterator[EventT]: # no yield e match event: - case PartStartEvent(part=part): - # Dispatch based on part type - match part: - case TextPart(): - async for e in self.handle_text_start(part): - yield e - case ThinkingPart(): - async for e in self.handle_thinking_start(part): - yield e - case ToolCallPart(): - async for e in self.handle_tool_call_start(part): - yield e - case BuiltinToolCallPart(): - async for e in self.handle_builtin_tool_call_start(part): - yield e - case BuiltinToolReturnPart(): - async for e in self.handle_builtin_tool_return(part): - yield e - case FilePart(): - # FilePart is not currently handled by UI protocols - pass - case PartDeltaEvent(delta=delta): - # Dispatch based on delta type - match delta: - case TextPartDelta(): - async for e in self.handle_text_delta(delta): - yield e - case ThinkingPartDelta(): - async for e in self.handle_thinking_delta(delta): - yield e - case ToolCallPartDelta(): - async for e in self.handle_tool_call_delta(delta): - yield e + case PartStartEvent(): + async for e in self.handle_part_start(event, previous_part): + yield e + case PartDeltaEvent(): + async for e in self.handle_part_delta(event): + yield e + case FinalResultEvent(): + async for e in self.handle_final_result(event): + yield e case FunctionToolCallEvent(): async for e in self.handle_function_tool_call(event): yield e case FunctionToolResultEvent(): async for e in self.handle_function_tool_result(event): yield e - case FinalResultEvent(): - async for e in self.handle_final_result(event): - yield e case AgentRunResultEvent(): self.result = event.result async for e in self.handle_run_result(event): @@ -175,11 +191,86 @@ async def handle_event(self, event: SourceEvent) -> AsyncIterator[EventT]: # no async for e in self.after_event(event): yield e - async def 
handle_text_start(self, part: TextPart) -> AsyncIterator[EventT]: + async def handle_part_start( + self, event: PartStartEvent, previous_part: ModelResponsePart | None = None + ) -> AsyncIterator[EventT]: + """Handle a PartStartEvent. + + Args: + event: The PartStartEvent. + previous_part: The previous part. + """ + part = event.part + match part: + case TextPart(): + async for e in self.handle_text_start(part, previous_part): + yield e + case ThinkingPart(): + async for e in self.handle_thinking_start(part, previous_part): + yield e + case ToolCallPart(): + async for e in self.handle_tool_call_start(part): + yield e + case BuiltinToolCallPart(): + async for e in self.handle_builtin_tool_call_start(part): + yield e + case BuiltinToolReturnPart(): + async for e in self.handle_builtin_tool_return(part): + yield e + case FilePart(): + async for e in self.handle_file(part): + yield e + + async def handle_part_delta(self, event: PartDeltaEvent) -> AsyncIterator[EventT]: + """Handle a PartDeltaEvent. + + Args: + event: The PartDeltaEvent. + """ + delta = event.delta + match delta: + case TextPartDelta(): + async for e in self.handle_text_delta(delta): + yield e + case ThinkingPartDelta(): + async for e in self.handle_thinking_delta(delta): + yield e + case ToolCallPartDelta(): + async for e in self.handle_tool_call_delta(delta): + yield e + + async def handle_part_end( + self, part: ModelResponsePart, next_part: ModelResponsePart | None = None + ) -> AsyncIterator[EventT]: + """Handle the end of a part. + + Args: + part: The part that ended. + next_part: The new part that started. + """ + # TODO (DouweM): Make this a proper event. Figure out a proper way to do context manager style wrapping + match part: + case TextPart(): + async for e in self.handle_text_end(part, next_part): + yield e + case ThinkingPart(): + async for e in self.handle_thinking_end(part, next_part): + yield e + case ToolCallPart(): + async for e in self.handle_tool_call_end(part): + yield e + case BuiltinToolCallPart(): + async for e in self.handle_builtin_tool_call_end(part): + yield e + + async def handle_text_start( + self, part: TextPart, previous_part: ModelResponsePart | None = None + ) -> AsyncIterator[EventT]: """Handle a TextPart at start. Args: part: The TextPart. + previous_part: The previous part. Yields: Protocol-specific events. @@ -199,11 +290,14 @@ async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[EventT] return yield # Make this an async generator - async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[EventT]: + async def handle_thinking_start( + self, part: ThinkingPart, previous_part: ModelResponsePart | None = None + ) -> AsyncIterator[EventT]: """Handle a ThinkingPart at start. Args: part: The ThinkingPart. + previous_part: The previous part. Yields: Protocol-specific events. @@ -271,11 +365,11 @@ async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> Async return yield # Make this an async generator - async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[EventT]: - """Handle a FunctionToolCallEvent. + async def handle_file(self, part: FilePart) -> AsyncIterator[EventT]: + """Handle a FilePart. Args: - event: The function tool call event. + part: The FilePart. Yields: Protocol-specific events. 
@@ -283,11 +377,35 @@ async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> Async
         return
         yield  # Make this an async generator
 
-    async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[EventT]:
-        """Handle a FunctionToolResultEvent.
+    async def handle_text_end(
+        self, part: TextPart, next_part: ModelResponsePart | None = None
+    ) -> AsyncIterator[EventT]:
+        """Handle the end of a TextPart."""
+        return
+        yield  # Make this an async generator
+
+    async def handle_thinking_end(
+        self, part: ThinkingPart, next_part: ModelResponsePart | None = None
+    ) -> AsyncIterator[EventT]:
+        """Handle the end of a ThinkingPart."""
+        return
+        yield  # Make this an async generator
+
+    async def handle_tool_call_end(self, part: ToolCallPart) -> AsyncIterator[EventT]:
+        """Handle the end of a ToolCallPart."""
+        return
+        yield  # Make this an async generator
+
+    async def handle_builtin_tool_call_end(self, part: BuiltinToolCallPart) -> AsyncIterator[EventT]:
+        """Handle the end of a BuiltinToolCallPart."""
+        return
+        yield  # Make this an async generator
+
+    async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[EventT]:
+        """Handle a FinalResultEvent.
 
         Args:
-            event: The function tool result event.
+            event: The final result event.
 
         Yields:
             Protocol-specific events.
@@ -295,11 +413,23 @@ async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> A
         return
         yield  # Make this an async generator
 
-    async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[EventT]:
-        """Handle a FinalResultEvent.
+    async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[EventT]:
+        """Handle a FunctionToolCallEvent.
 
         Args:
-            event: The final result event.
+            event: The function tool call event.
+
+        Yields:
+            Protocol-specific events.
+        """
+        return
+        yield  # Make this an async generator
+
+    async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[EventT]:
+        """Handle a FunctionToolResultEvent.
+
+        Args:
+            event: The function tool result event.
 
         Yields:
             Protocol-specific events.
@@ -319,6 +449,8 @@ async def handle_run_result(self, event: AgentRunResultEvent) -> AsyncIterator[E
         return
         yield  # Make this an async generator
 
+    # Lifecycle hooks (optional overrides)
+
     async def before_event(self, event: SourceEvent) -> AsyncIterator[EventT]:
         """Handle an event before it is processed.
 
@@ -337,7 +469,25 @@ async def after_event(self, event: SourceEvent) -> AsyncIterator[EventT]:
         return
         yield  # Make this an async generator
 
-    # Lifecycle hooks (optional overrides)
+    async def before_request(self) -> AsyncIterator[EventT]:
+        """Handle a request before it is processed."""
+        return
+        yield  # Make this an async generator
+
+    async def after_request(self) -> AsyncIterator[EventT]:
+        """Handle a request after it is processed."""
+        return
+        yield  # Make this an async generator
+
+    async def before_response(self) -> AsyncIterator[EventT]:
+        """Handle a response before it is processed."""
+        return
+        yield  # Make this an async generator
+
+    async def after_response(self) -> AsyncIterator[EventT]:
+        """Handle a response after it is processed."""
+        return
+        yield  # Make this an async generator
 
     async def before_stream(self) -> AsyncIterator[EventT]:
         """Yield events before agent streaming starts.
diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py index 382db45181..dcb1c404ef 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py @@ -50,7 +50,7 @@ def dump_messages(self, messages: Sequence[ModelMessage]) -> list[UIMessage]: # TODO (DouweM): implement raise NotImplementedError - @cached_property + @property def event_stream(self) -> BaseEventStream[RequestData, BaseChunk, AgentDepsT]: return VercelAIEventStream(self.request) diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py index cd4dc0a097..c0cb11c015 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py @@ -17,6 +17,7 @@ FinalResultEvent, FunctionToolCallEvent, FunctionToolResultEvent, + ModelResponsePart, RetryPromptPart, TextPart, TextPartDelta, @@ -74,7 +75,9 @@ async def on_error(self, error: Exception) -> AsyncIterator[BaseChunk]: # Granular handlers implementation - async def handle_text_start(self, part: TextPart) -> AsyncIterator[BaseChunk]: + async def handle_text_start( + self, part: TextPart, previous_part: ModelResponsePart | None = None + ) -> AsyncIterator[BaseChunk]: """Handle a TextPart at start.""" yield TextStartChunk(id=self.message_id or self.new_message_id()) if part.content: @@ -85,7 +88,9 @@ async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[BaseChu if delta.content_delta: yield TextDeltaChunk(id=self.message_id, delta=delta.content_delta) - async def handle_thinking_start(self, part: ThinkingPart) -> AsyncIterator[BaseChunk]: + async def handle_thinking_start( + self, part: ThinkingPart, previous_part: ModelResponsePart | None = None + ) -> AsyncIterator[BaseChunk]: """Handle a ThinkingPart at start.""" if not self.message_id: self.new_message_id() diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index ea8962abda..d7e5bd7573 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -255,11 +255,7 @@ async def stream_function( 'threadId': IsStr(), 'runId': IsStr(), }, - { - 'type': 'RUN_ERROR', - 'message': 'No message history, user prompt, or instructions provided', - 'code': 'UserError', - }, + {'type': 'RUN_ERROR', 'message': 'No message history, user prompt, or instructions provided'}, ] ) @@ -887,6 +883,7 @@ async def stream_function( yield {0: DeltaThinkingPart(content='')} yield "Let's do some thinking" yield '' + yield ' and some more' yield {1: DeltaThinkingPart(content='Thinking ')} yield {1: DeltaThinkingPart(content='about the weather')} yield {2: DeltaThinkingPart(content='')} @@ -922,6 +919,11 @@ async def stream_function( 'messageId': message_id, 'delta': "Let's do some thinking", }, + { + 'type': 'TEXT_MESSAGE_CONTENT', + 'messageId': message_id, + 'delta': ' and some more', + }, {'type': 'TEXT_MESSAGE_END', 'messageId': message_id}, {'type': 'THINKING_START'}, {'type': 'THINKING_TEXT_MESSAGE_START'}, From 013c43bcd57a8025d7c7b03de90231414b852064 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Fri, 10 Oct 2025 13:30:07 +0000 Subject: [PATCH 10/46] Start fixing up Vercel events --- .../pydantic_ai/ui/ag_ui/_event_stream.py | 93 +++++------ .../pydantic_ai/ui/event_stream.py | 31 ++-- .../pydantic_ai/ui/vercel_ai/_event_stream.py | 152 +++++++++++------- tests/test_ag_ui.py | 11 ++ tests/test_vercel_ai.py | 10 ++ 5 files changed, 169 insertions(+), 128 
deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index 7e5fbf23dd..92b563e53c 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -8,13 +8,13 @@ import json from collections.abc import AsyncIterator, Iterable -from typing import TYPE_CHECKING, Final +from typing import Final from ...messages import ( BuiltinToolCallPart, BuiltinToolReturnPart, FunctionToolResultEvent, - ModelResponsePart, + RetryPromptPart, TextPart, TextPartDelta, ThinkingPart, @@ -26,9 +26,6 @@ from ...tools import AgentDepsT from .. import BaseEventStream -if TYPE_CHECKING: - pass # Agent type is not actually used in this module - try: from ag_ui.core import ( BaseEvent, @@ -78,8 +75,9 @@ class AGUIEventStream(BaseEventStream[RunAgentInput, BaseEvent, AgentDepsT]): def __init__(self, request: RunAgentInput) -> None: """Initialize AG-UI event stream state.""" super().__init__(request) - self.thinking_text = False - self.builtin_tool_call_ids: dict[str, str] = {} + self._thinking_text = False + self._builtin_tool_call_ids: dict[str, str] = {} + self._final_result_tool_id: str | None = None def encode_event(self, event: BaseEvent, accept: str | None = None) -> str: """Encode an AG-UI event as SSE. @@ -112,12 +110,10 @@ async def on_error(self, error: Exception) -> AsyncIterator[BaseEvent]: """Handle errors during streaming.""" yield RunErrorEvent(message=str(error)) - async def handle_text_start( - self, part: TextPart, previous_part: ModelResponsePart | None = None - ) -> AsyncIterator[BaseEvent]: + async def handle_text_start(self, part: TextPart, follows_text: bool = False) -> AsyncIterator[BaseEvent]: """Handle a TextPart at start.""" - if isinstance(previous_part, TextPart): - message_id = previous_part.message_id + if follows_text: + message_id = self.message_id else: message_id = self.new_message_id() yield TextMessageStartEvent(message_id=message_id) @@ -130,61 +126,57 @@ async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[BaseEve if delta.content_delta: # pragma: no branch yield TextMessageContentEvent(message_id=self.message_id, delta=delta.content_delta) - async def handle_text_end( - self, part: TextPart, next_part: ModelResponsePart | None = None - ) -> AsyncIterator[BaseEvent]: + async def handle_text_end(self, part: TextPart, followed_by_text: bool = False) -> AsyncIterator[BaseEvent]: """Handle a TextPart at end.""" - if not isinstance(next_part, TextPart): + if not followed_by_text: yield TextMessageEndEvent(message_id=self.message_id) async def handle_thinking_start( - self, part: ThinkingPart, previous_part: ModelResponsePart | None = None + self, part: ThinkingPart, follows_thinking: bool = False ) -> AsyncIterator[BaseEvent]: """Handle a ThinkingPart at start.""" - if not isinstance(previous_part, ThinkingPart): + if not follows_thinking: yield ThinkingStartEvent(type=EventType.THINKING_START) if part.content: yield ThinkingTextMessageStartEvent(type=EventType.THINKING_TEXT_MESSAGE_START) yield ThinkingTextMessageContentEvent(type=EventType.THINKING_TEXT_MESSAGE_CONTENT, delta=part.content) - self.thinking_text = True + self._thinking_text = True async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[BaseEvent]: """Handle a ThinkingPartDelta.""" if not delta.content_delta: return - if not self.thinking_text: + if not self._thinking_text: yield 
ThinkingTextMessageStartEvent(type=EventType.THINKING_TEXT_MESSAGE_START) - self.thinking_text = True + self._thinking_text = True yield ThinkingTextMessageContentEvent(type=EventType.THINKING_TEXT_MESSAGE_CONTENT, delta=delta.content_delta) async def handle_thinking_end( - self, part: ThinkingPart, next_part: ModelResponsePart | None = None + self, part: ThinkingPart, followed_by_thinking: bool = False ) -> AsyncIterator[BaseEvent]: """Handle a ThinkingPart at end.""" - if self.thinking_text: + if self._thinking_text: yield ThinkingTextMessageEndEvent(type=EventType.THINKING_TEXT_MESSAGE_END) - self.thinking_text = False + self._thinking_text = False - if not isinstance(next_part, ThinkingPart): + if not followed_by_thinking: yield ThinkingEndEvent(type=EventType.THINKING_END) - async def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: + def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: """Handle a ToolCallPart or BuiltinToolCallPart at start.""" - async for e in self._handle_tool_call_start(part): - yield e + return self._handle_tool_call_start(part) - async def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: + def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: """Handle a BuiltinToolCallPart at start.""" tool_call_id = part.tool_call_id builtin_tool_call_id = '|'.join([BUILTIN_TOOL_CALL_ID_PREFIX, part.provider_name or '', tool_call_id]) - self.builtin_tool_call_ids[tool_call_id] = builtin_tool_call_id + self._builtin_tool_call_ids[tool_call_id] = builtin_tool_call_id tool_call_id = builtin_tool_call_id - async for e in self._handle_tool_call_start(part, tool_call_id): - yield e + return self._handle_tool_call_start(part, tool_call_id) async def _handle_tool_call_start( self, part: ToolCallPart | BuiltinToolCallPart, tool_call_id: str | None = None @@ -201,8 +193,8 @@ async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterato """Handle a ToolCallPartDelta.""" tool_call_id = delta.tool_call_id assert tool_call_id, '`ToolCallPartDelta.tool_call_id` must be set' - if tool_call_id in self.builtin_tool_call_ids: - tool_call_id = self.builtin_tool_call_ids[tool_call_id] + if tool_call_id in self._builtin_tool_call_ids: + tool_call_id = self._builtin_tool_call_ids[tool_call_id] yield ToolCallArgsEvent( tool_call_id=tool_call_id, delta=delta.args_delta if isinstance(delta.args_delta, str) else json.dumps(delta.args_delta), @@ -214,11 +206,11 @@ async def handle_tool_call_end(self, part: ToolCallPart) -> AsyncIterator[BaseEv async def handle_builtin_tool_call_end(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: """Handle a BuiltinToolCallPart at end.""" - yield ToolCallEndEvent(tool_call_id=self.builtin_tool_call_ids[part.tool_call_id]) + yield ToolCallEndEvent(tool_call_id=self._builtin_tool_call_ids[part.tool_call_id]) async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[BaseEvent]: """Handle a BuiltinToolReturnPart.""" - tool_call_id = self.builtin_tool_call_ids[part.tool_call_id] + tool_call_id = self._builtin_tool_call_ids[part.tool_call_id] yield ToolCallResultEvent( message_id=self.new_message_id(), type=EventType.TOOL_CALL_RESULT, @@ -230,28 +222,27 @@ async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> Async async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> 
AsyncIterator[BaseEvent]: """Handle a FunctionToolResultEvent, emitting tool result events.""" result = event.result - if not isinstance(result, ToolReturnPart): - # TODO (DouweM): Stream RetryPromptParts to the frontend as well? - return + output = result.model_response() if isinstance(result, RetryPromptPart) else result.model_response_str() yield ToolCallResultEvent( message_id=self.new_message_id(), type=EventType.TOOL_CALL_RESULT, role='tool', tool_call_id=result.tool_call_id, - content=result.model_response_str(), + content=output, ) - # Check for AG-UI events returned by tool calls. - possible_event = result.metadata or result.content - if isinstance(possible_event, BaseEvent): - yield possible_event - elif isinstance(possible_event, str | bytes): # pragma: no branch - # Avoid iterable check for strings and bytes. - pass - elif isinstance(possible_event, Iterable): # pragma: no branch - for item in possible_event: # type: ignore[reportUnknownMemberType] - if isinstance(item, BaseEvent): # pragma: no branch - yield item + if isinstance(result, ToolReturnPart): + # Check for AG-UI events returned by tool calls. + possible_event = result.metadata or result.content + if isinstance(possible_event, BaseEvent): + yield possible_event + elif isinstance(possible_event, str | bytes): # pragma: no branch + # Avoid iterable check for strings and bytes. + pass + elif isinstance(possible_event, Iterable): # pragma: no branch + for item in possible_event: # type: ignore[reportUnknownMemberType] + if isinstance(item, BaseEvent): # pragma: no branch + yield item # TODO (DouweM): Stream ToolCallResultEvent.content as user parts? diff --git a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py index 92ef862894..b9bafef105 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py @@ -203,10 +203,12 @@ async def handle_part_start( part = event.part match part: case TextPart(): - async for e in self.handle_text_start(part, previous_part): + async for e in self.handle_text_start(part, follows_text=isinstance(previous_part, TextPart)): yield e case ThinkingPart(): - async for e in self.handle_thinking_start(part, previous_part): + async for e in self.handle_thinking_start( + part, follows_thinking=isinstance(previous_part, ThinkingPart) + ): yield e case ToolCallPart(): async for e in self.handle_tool_call_start(part): @@ -251,10 +253,10 @@ async def handle_part_end( # TODO (DouweM): Make this a proper event. Figure out a proper way to do context manager style wrapping match part: case TextPart(): - async for e in self.handle_text_end(part, next_part): + async for e in self.handle_text_end(part, followed_by_text=isinstance(next_part, TextPart)): yield e case ThinkingPart(): - async for e in self.handle_thinking_end(part, next_part): + async for e in self.handle_thinking_end(part, followed_by_thinking=isinstance(next_part, ThinkingPart)): yield e case ToolCallPart(): async for e in self.handle_tool_call_end(part): @@ -262,15 +264,16 @@ async def handle_part_end( case BuiltinToolCallPart(): async for e in self.handle_builtin_tool_call_end(part): yield e + case BuiltinToolReturnPart() | FilePart(): + # These don't have deltas, so they don't need to be ended. 
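+                # (Illustrative contrast, not part of the protocol: a streamed TextPart
+                # goes through handle_text_start -> handle_text_delta* -> handle_text_end,
+                # so it does need an explicit end, while these one-shot parts do not.)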
+ pass - async def handle_text_start( - self, part: TextPart, previous_part: ModelResponsePart | None = None - ) -> AsyncIterator[EventT]: + async def handle_text_start(self, part: TextPart, follows_text: bool = False) -> AsyncIterator[EventT]: """Handle a TextPart at start. Args: part: The TextPart. - previous_part: The previous part. + follows_text: Whether the part follows a text part. Yields: Protocol-specific events. @@ -290,14 +293,12 @@ async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[EventT] return yield # Make this an async generator - async def handle_thinking_start( - self, part: ThinkingPart, previous_part: ModelResponsePart | None = None - ) -> AsyncIterator[EventT]: + async def handle_thinking_start(self, part: ThinkingPart, follows_thinking: bool = False) -> AsyncIterator[EventT]: """Handle a ThinkingPart at start. Args: part: The ThinkingPart. - previous_part: The previous part. + follows_thinking: Whether the part follows a thinking part. Yields: Protocol-specific events. @@ -377,15 +378,13 @@ async def handle_file(self, part: FilePart) -> AsyncIterator[EventT]: return yield # Make this an async generator - async def handle_text_end( - self, part: TextPart, next_part: ModelResponsePart | None = None - ) -> AsyncIterator[EventT]: + async def handle_text_end(self, part: TextPart, followed_by_text: bool = False) -> AsyncIterator[EventT]: """Handle the end of a TextPart.""" return yield # Make this an async generator async def handle_thinking_end( - self, part: ThinkingPart, next_part: ModelResponsePart | None = None + self, part: ThinkingPart, followed_by_thinking: bool = False ) -> AsyncIterator[EventT]: """Handle the end of a ThinkingPart.""" return diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py index c0cb11c015..a5840eeda8 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py @@ -1,23 +1,17 @@ """Vercel AI event stream implementation.""" -# pyright: reportIncompatibleMethodOverride=false - from __future__ import annotations from collections.abc import AsyncIterator -from typing import Any +from typing import Any, Final from pydantic_core import to_json from ...messages import ( - BuiltinToolCallEvent, # type: ignore[reportDeprecated] BuiltinToolCallPart, - BuiltinToolResultEvent, # type: ignore[reportDeprecated] BuiltinToolReturnPart, FinalResultEvent, - FunctionToolCallEvent, FunctionToolResultEvent, - ModelResponsePart, RetryPromptPart, TextPart, TextPartDelta, @@ -25,7 +19,6 @@ ThinkingPartDelta, ToolCallPart, ToolCallPartDelta, - ToolReturnPart, ) from ...tools import AgentDepsT from .. 
import BaseEventStream

@@ -35,8 +28,11 @@
     ErrorChunk,
     FinishChunk,
     ReasoningDeltaChunk,
+    ReasoningEndChunk,
     ReasoningStartChunk,
+    StartChunk,
     TextDeltaChunk,
+    TextEndChunk,
     TextStartChunk,
     ToolInputDeltaChunk,
     ToolInputStartChunk,
@@ -51,6 +47,9 @@ def _json_dumps(obj: Any) -> str:
     return to_json(obj).decode('utf-8')
 
 
+BUILTIN_TOOL_CALL_ID_PREFIX: Final[str] = 'pyd_ai_builtin'
+
+
 class VercelAIEventStream(BaseEventStream[RequestData, BaseChunk, AgentDepsT]):
     """TODO (DouweM): Docstring."""
 
@@ -58,108 +57,139 @@ def __init__(self, request: RequestData) -> None:
         """Initialize Vercel AI event stream state."""
         super().__init__(request)
         self._final_result_tool_id: str | None = None
+        self._builtin_tool_call_ids: dict[str, str] = {}
 
     def encode_event(self, event: BaseChunk, accept: str | None = None) -> str:
         return f'data: {event.model_dump_json(by_alias=True, exclude_none=True)}\n\n'
 
+    async def before_stream(self) -> AsyncIterator[BaseChunk]:
+        """Yield events before agent streaming starts."""
+        yield StartChunk()
+
     async def after_stream(self) -> AsyncIterator[BaseChunk]:
         """Yield events after agent streaming completes."""
-        # Close the final result tool if there was one
-        if tool_call_id := self._final_result_tool_id:
-            yield ToolOutputAvailableChunk(tool_call_id=tool_call_id, output=None)
         yield FinishChunk()
 
     async def on_error(self, error: Exception) -> AsyncIterator[BaseChunk]:
         """Handle errors during streaming."""
         yield ErrorChunk(error_text=str(error))
 
-    # Granular handlers implementation
+    async def after_response(self) -> AsyncIterator[BaseChunk]:
+        """Yield events after agent response completes."""
+        # Close the final result tool if there was one
+        if tool_call_id := self._final_result_tool_id:
+            yield ToolOutputAvailableChunk(tool_call_id=tool_call_id, output=None)
 
-    async def handle_text_start(
-        self, part: TextPart, previous_part: ModelResponsePart | None = None
-    ) -> AsyncIterator[BaseChunk]:
+    async def handle_text_start(self, part: TextPart, follows_text: bool = False) -> AsyncIterator[BaseChunk]:
         """Handle a TextPart at start."""
-        yield TextStartChunk(id=self.message_id or self.new_message_id())
+        if follows_text:
+            message_id = self.message_id
+        else:
+            message_id = self.new_message_id()
+        yield TextStartChunk(id=message_id)
+
         if part.content:
-            yield TextDeltaChunk(id=self.message_id, delta=part.content)
+            yield TextDeltaChunk(id=message_id, delta=part.content)
 
     async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[BaseChunk]:
         """Handle a TextPartDelta."""
         if delta.content_delta:
             yield TextDeltaChunk(id=self.message_id, delta=delta.content_delta)
 
+    async def handle_text_end(self, part: TextPart, followed_by_text: bool = False) -> AsyncIterator[BaseChunk]:
+        """Handle a TextPart at end."""
+        if not followed_by_text:
+            yield TextEndChunk(id=self.message_id)
+
     async def handle_thinking_start(
-        self, part: ThinkingPart, previous_part: ModelResponsePart | None = None
+        self, part: ThinkingPart, follows_thinking: bool = False
     ) -> AsyncIterator[BaseChunk]:
         """Handle a ThinkingPart at start."""
-        if not self.message_id:
-            self.new_message_id()
-        yield ReasoningStartChunk(id=self.message_id)
+        message_id = self.new_message_id()
+        yield ReasoningStartChunk(id=message_id)
         if part.content:
-            yield ReasoningDeltaChunk(id=self.message_id, delta=part.content)
+            yield ReasoningDeltaChunk(id=message_id, delta=part.content)
 
     async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[BaseChunk]:
         """Handle a ThinkingPartDelta."""
         if delta.content_delta:
             yield ReasoningDeltaChunk(id=self.message_id, delta=delta.content_delta)
 
-    async def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseChunk]:
+    async def handle_thinking_end(
+        self, part: ThinkingPart, followed_by_thinking: bool = False
+    ) -> AsyncIterator[BaseChunk]:
+        """Handle a ThinkingPart at end."""
+        yield ReasoningEndChunk(id=self.message_id)
+
+    def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseChunk]:
         """Handle a ToolCallPart or BuiltinToolCallPart at start."""
-        yield ToolInputStartChunk(tool_call_id=part.tool_call_id, tool_name=part.tool_name)
-        if isinstance(part.args, str):
-            yield ToolInputDeltaChunk(tool_call_id=part.tool_call_id, input_text_delta=part.args)
-        elif part.args is not None:
-            yield ToolInputDeltaChunk(tool_call_id=part.tool_call_id, input_text_delta=_json_dumps(part.args))
+        return self._handle_tool_call_start(part)
 
     def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseChunk]:
-        return self.handle_tool_call_start(part)
+        """Handle a BuiltinToolCallEvent, emitting tool input events."""
+        # TODO (DouweM): Reduce duplication with AGUIEventStream
+        tool_call_id = part.tool_call_id
+        builtin_tool_call_id = '|'.join([BUILTIN_TOOL_CALL_ID_PREFIX, part.provider_name or '', tool_call_id])
+        self._builtin_tool_call_ids[tool_call_id] = builtin_tool_call_id
+        tool_call_id = builtin_tool_call_id
+
+        return self._handle_tool_call_start(part)
+
+    async def _handle_tool_call_start(
+        self, part: ToolCallPart | BuiltinToolCallPart, tool_call_id: str | None = None
+    ) -> AsyncIterator[BaseChunk]:
+        """Handle a ToolCallPart or BuiltinToolCallPart at start."""
+        tool_call_id = tool_call_id or part.tool_call_id
+        yield ToolInputStartChunk(tool_call_id=tool_call_id, tool_name=part.tool_name)
+        if part.args:
+            yield ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=part.args_as_json_str())
 
     async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[BaseChunk]:
         """Handle a ToolCallPartDelta."""
         tool_call_id = delta.tool_call_id or ''
-        if isinstance(delta.args_delta, str):
-            yield ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=delta.args_delta)
-        elif delta.args_delta is not None:
-            yield ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=_json_dumps(delta.args_delta))
+        assert tool_call_id, '`ToolCallPartDelta.tool_call_id` must be set'
+        if tool_call_id in self._builtin_tool_call_ids:
+            tool_call_id = self._builtin_tool_call_ids[tool_call_id]
+        yield ToolInputDeltaChunk(
+            tool_call_id=tool_call_id,
+            input_text_delta=delta.args_delta if isinstance(delta.args_delta, str) else _json_dumps(delta.args_delta),
+        )
+
+    async def handle_tool_call_end(self, part: ToolCallPart) -> AsyncIterator[BaseChunk]:
+        """Handle a ToolCallPart at end."""
+        # TODO (DouweM): We don't have the full args available here,
+        # and we don't seem to need to send this anyway if we've already sent deltas
+        # yield ToolInputAvailableChunk(
+        #     tool_call_id=part.tool_call_id,
+        #     tool_name=part.tool_name,
+        #     input=part.args,
+        #     provider_executed=True,
+        #     dynamic=False,
+        # )
+        # TODO (DouweM): What are ToolInputAvailableChunk.provider_executed and dynamic?
+        # Likely used for builtin and external tools.
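+        # For reference, such a chunk would be encoded by encode_event as an SSE line
+        # (shape assumed from the response types and the test snapshots):
+        #   data: {"type":"tool-input-available","toolCallId":"...","toolName":"...","input":{...}}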
+ return + yield # Make this an async generator + + # async def handle_builtin_tool_call_end(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseChunk]: + # """Handle a BuiltinToolCallPart at end.""" + # pass async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[BaseChunk]: """Handle a BuiltinToolReturnPart.""" yield ToolOutputAvailableChunk(tool_call_id=part.tool_call_id, output=part.content) - async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[BaseChunk]: - """Handle a FunctionToolCallEvent. - - No Vercel AI events are emitted at this stage since tool calls are handled in PartStartEvent. - """ - return - yield # Make this an async generator - async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[BaseChunk]: """Handle a FunctionToolResultEvent, emitting tool result events.""" result = event.result - if isinstance(result, ToolReturnPart): - yield ToolOutputAvailableChunk(tool_call_id=result.tool_call_id, output=result.content) - elif isinstance(result, RetryPromptPart): - # For retry prompts, emit the error content as tool output - yield ToolOutputAvailableChunk(tool_call_id=result.tool_call_id, output=result.content) + output = result.model_response() if isinstance(result, RetryPromptPart) else result.content + yield ToolOutputAvailableChunk(tool_call_id=result.tool_call_id, output=output) - async def handle_builtin_tool_call(self, event: BuiltinToolCallEvent) -> AsyncIterator[BaseChunk]: # type: ignore[reportDeprecated] - """Handle a BuiltinToolCallEvent, emitting tool input events.""" - part = event.part - yield ToolInputStartChunk(tool_call_id=part.tool_call_id, tool_name=part.tool_name) - if isinstance(part.args, str): - yield ToolInputDeltaChunk(tool_call_id=part.tool_call_id, input_text_delta=part.args) - elif part.args is not None: - yield ToolInputDeltaChunk(tool_call_id=part.tool_call_id, input_text_delta=_json_dumps(part.args)) - - async def handle_builtin_tool_result(self, event: BuiltinToolResultEvent) -> AsyncIterator[BaseChunk]: # type: ignore[reportDeprecated] - """Handle a BuiltinToolResultEvent, emitting tool output events.""" - result = event.result - yield ToolOutputAvailableChunk(tool_call_id=result.tool_call_id, output=result.content) + # TODO (DouweM): Stream ToolCallResultEvent.content as user parts? async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[BaseChunk]: """Handle a FinalResultEvent, tracking the final result tool.""" if event.tool_call_id and event.tool_name: self._final_result_tool_id = event.tool_call_id - # TODO (DouweM): Stream output tool result once it's ready - yield ToolInputStartChunk(tool_call_id=event.tool_call_id, tool_name=event.tool_name) + return + yield # Make this an async generator diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index d7e5bd7573..14eac2e3f4 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -639,6 +639,17 @@ async def stream_function( }, {'type': 'TOOL_CALL_ARGS', 'toolCallId': tool_call_id, 'delta': 'Paris"}'}, {'type': 'TOOL_CALL_END', 'toolCallId': tool_call_id}, + { + 'type': 'TOOL_CALL_RESULT', + 'messageId': IsStr(), + 'toolCallId': tool_call_id, + 'content': """\ +Unknown tool name: 'get_weather'. 
Available tools: 'get_weather_parts' + +Fix the errors and try again.\ +""", + 'role': 'tool', + }, {'type': 'TEXT_MESSAGE_START', 'messageId': (message_id := IsSameStr()), 'role': 'assistant'}, { 'type': 'TEXT_MESSAGE_CONTENT', diff --git a/tests/test_vercel_ai.py b/tests/test_vercel_ai.py index 0f61684697..10a333be94 100644 --- a/tests/test_vercel_ai.py +++ b/tests/test_vercel_ai.py @@ -150,7 +150,9 @@ async def test_run(allow_model_requests: None, openai_api_key: str): events = [json.loads(event.removeprefix('data: ')) async for event in adapter.encode_stream(adapter.run_stream())] assert events == snapshot( [ + {'type': 'start'}, {'type': 'reasoning-start', 'id': IsStr()}, + {'type': 'reasoning-end', 'id': IsStr()}, { 'type': 'tool-input-start', 'toolCallId': IsStr(), @@ -167,6 +169,7 @@ async def test_run(allow_model_requests: None, openai_api_key: str): 'output': {'status': 'completed'}, }, {'type': 'reasoning-start', 'id': IsStr()}, + {'type': 'reasoning-end', 'id': IsStr()}, { 'type': 'tool-input-start', 'toolCallId': IsStr(), @@ -183,6 +186,7 @@ async def test_run(allow_model_requests: None, openai_api_key: str): 'output': {'status': 'completed'}, }, {'type': 'reasoning-start', 'id': IsStr()}, + {'type': 'reasoning-end', 'id': IsStr()}, { 'type': 'tool-input-start', 'toolCallId': IsStr(), @@ -199,6 +203,7 @@ async def test_run(allow_model_requests: None, openai_api_key: str): 'output': {'status': 'completed'}, }, {'type': 'reasoning-start', 'id': IsStr()}, + {'type': 'reasoning-end', 'id': IsStr()}, { 'type': 'tool-input-start', 'toolCallId': IsStr(), @@ -215,6 +220,7 @@ async def test_run(allow_model_requests: None, openai_api_key: str): 'output': {'status': 'completed'}, }, {'type': 'reasoning-start', 'id': IsStr()}, + {'type': 'reasoning-end', 'id': IsStr()}, { 'type': 'tool-input-start', 'toolCallId': IsStr(), @@ -231,6 +237,7 @@ async def test_run(allow_model_requests: None, openai_api_key: str): 'output': {'status': 'completed'}, }, {'type': 'reasoning-start', 'id': IsStr()}, + {'type': 'reasoning-end', 'id': IsStr()}, { 'type': 'tool-input-start', 'toolCallId': IsStr(), @@ -247,6 +254,7 @@ async def test_run(allow_model_requests: None, openai_api_key: str): 'output': {'status': 'completed'}, }, {'type': 'reasoning-start', 'id': IsStr()}, + {'type': 'reasoning-end', 'id': IsStr()}, { 'type': 'tool-input-start', 'toolCallId': IsStr(), @@ -263,6 +271,7 @@ async def test_run(allow_model_requests: None, openai_api_key: str): 'output': {'status': 'completed'}, }, {'type': 'reasoning-start', 'id': IsStr()}, + {'type': 'reasoning-end', 'id': IsStr()}, {'type': 'text-start', 'id': IsStr()}, { 'type': 'text-delta', @@ -853,6 +862,7 @@ def client_response\ {'type': 'text-delta', 'delta': ' skip binary content,', 'id': IsStr()}, {'type': 'text-delta', 'delta': ' or accumulate chunked', 'id': IsStr()}, {'type': 'text-delta', 'delta': ' bodies safely?', 'id': IsStr()}, + {'type': 'text-end', 'id': IsStr()}, {'type': 'finish'}, ] ) From 7d4b187345b4e7f8acde1847333ff802ce94296d Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Tue, 14 Oct 2025 09:35:17 +0000 Subject: [PATCH 11/46] Improvements --- pydantic_ai_slim/pydantic_ai/ui/adapter.py | 12 +- .../pydantic_ai/ui/ag_ui/_adapter.py | 2 +- .../pydantic_ai/ui/ag_ui/_event_stream.py | 22 ++ .../pydantic_ai/ui/event_stream.py | 2 +- .../pydantic_ai/ui/vercel_ai/__init__.py | 8 +- .../pydantic_ai/ui/vercel_ai/_adapter.py | 162 ++++++++++--- .../pydantic_ai/ui/vercel_ai/_event_stream.py | 55 +++-- .../ui/vercel_ai/_request_types.py | 228 
++++++++++++
 .../ui/vercel_ai/_response_types.py   |  28 ++-
 tests/test_vercel_ai.py               | 206 +++++++++++++---
 10 files changed, 593 insertions(+), 132 deletions(-)

diff --git a/pydantic_ai_slim/pydantic_ai/ui/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/adapter.py
index 7ff0f800ca..3d65a575a1 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/adapter.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/adapter.py
@@ -49,9 +49,6 @@
     'BaseAdapter',
 ]
 
-EventT = TypeVar('EventT')
-"""Type variable for protocol-specific event types."""
-
 RunRequestT = TypeVar('RunRequestT')
 """Type variable for protocol-specific request types."""
 
@@ -59,6 +56,9 @@
 MessageT = TypeVar('MessageT')
 """Type variable for protocol-specific message types."""
 
+EventT = TypeVar('EventT')
+"""Type variable for protocol-specific event types."""
+
 OnCompleteFunc: TypeAlias = Callable[[AgentRunResult[Any]], None] | Callable[[AgentRunResult[Any]], Awaitable[None]]
 """Callback function type that receives the `AgentRunResult` of the completed run. Can be sync or async."""
 
@@ -161,7 +161,7 @@ def toolset(self) -> AbstractToolset[AgentDepsT] | None:
         return None
 
     @cached_property
-    def raw_state(self) -> dict[str, Any] | None:
+    def state(self) -> dict[str, Any] | None:
         """Get the state of the agent run."""
         return None
 
@@ -248,14 +248,14 @@ async def run_stream(
             toolsets = [*toolsets, toolset] if toolsets else [toolset]
 
         if isinstance(deps, StateHandler):
-            raw_state = self.raw_state or {}
+            raw_state = self.state or {}
             if isinstance(deps.state, BaseModel):
                 state = type(deps.state).model_validate(raw_state)
             else:
                 state = raw_state
 
             deps = replace(deps, state=state)
-        elif self.raw_state:
+        elif self.state:
             raise UserError(
                 f'State is provided but `deps` of type `{type(deps).__name__}` does not implement the `StateHandler` protocol: it needs to be a dataclass with a non-optional `state` field.'
             )
diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py
index 8f8e23a6b2..67e3ecddbf 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py
@@ -118,7 +118,7 @@ def toolset(self) -> AbstractToolset[AgentDepsT] | None:
         return None
 
     @cached_property
-    def raw_state(self) -> dict[str, Any] | None:
+    def state(self) -> dict[str, Any] | None:
         """Get the state of the agent run."""
         return self.request.state
 
diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py
index 92b563e53c..a3155eb976 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py
@@ -13,6 +13,7 @@
 from ...messages import (
     BuiltinToolCallPart,
     BuiltinToolReturnPart,
+    FinalResultEvent,
     FunctionToolResultEvent,
     RetryPromptPart,
     TextPart,
@@ -110,6 +111,20 @@ async def on_error(self, error: Exception) -> AsyncIterator[BaseEvent]:
         """Handle errors during streaming."""
         yield RunErrorEvent(message=str(error))
 
+    async def after_response(self) -> AsyncIterator[BaseEvent]:
+        """Yield events after agent response completes."""
+        # Close the final result tool if there was one
+        # TODO (DouweM): Both AG-UI and Vercel AI do this to wrap up output tools, so consider if it should be a separate event,
+        # or if the tool ID should at least be on the AgentRunResult(Event).
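+        # Illustrative: for an output tool call, this emits an event that serializes roughly as
+        #   {"type": "TOOL_CALL_RESULT", "messageId": "...", "toolCallId": "...", "content": "", "role": "tool"}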
+ if tool_call_id := self._final_result_tool_id: + yield ToolCallResultEvent( + message_id=self.new_message_id(), + type=EventType.TOOL_CALL_RESULT, + role='tool', + tool_call_id=tool_call_id, + content='', + ) + async def handle_text_start(self, part: TextPart, follows_text: bool = False) -> AsyncIterator[BaseEvent]: """Handle a TextPart at start.""" if follows_text: @@ -246,3 +261,10 @@ async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> A yield item # TODO (DouweM): Stream ToolCallResultEvent.content as user parts? + + async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[BaseEvent]: + """Handle a FinalResultEvent, tracking the final result tool.""" + if event.tool_call_id and event.tool_name: + self._final_result_tool_id = event.tool_call_id + return + yield # Make this an async generator diff --git a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py index b9bafef105..baf6b9e654 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py @@ -100,7 +100,7 @@ async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterat turn: Literal['request', 'response'] | None = None try: async for event in stream: - # TODO (DouweM): Should this be a TurnChange event? + # TODO (DouweM): Introduce PartEndEvent, possible MessageStartEvent, MessageEndEvent with ModelRequest/Response previous_part = part next_turn = turn next_part = part diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py index 511bd68336..efb5a4b7d6 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py @@ -15,13 +15,13 @@ TextUIPart, ToolOutputAvailablePart, UIMessage, - UIPart, + UIMessagePart, request_data_ta, ) from ._response_types import ( AbortChunk, BaseChunk, - DataUIMessageChunk, + DataChunk, ErrorChunk, FileChunk, FinishChunk, @@ -54,7 +54,7 @@ 'TextUIPart', 'UIMessage', 'ToolOutputAvailablePart', - 'UIPart', + 'UIMessagePart', 'SubmitMessage', 'request_data_ta', # Response types @@ -75,7 +75,7 @@ 'SourceUrlChunk', 'SourceDocumentChunk', 'FileChunk', - 'DataUIMessageChunk', + 'DataChunk', 'StartStepChunk', 'FinishStepChunk', 'StartChunk', diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py index dcb1c404ef..71c2933e40 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py @@ -8,17 +8,43 @@ from ...agent import AgentDepsT from ...messages import ( + AudioUrl, + BinaryContent, + BuiltinToolCallPart, + BuiltinToolReturnPart, + DocumentUrl, + FilePart, + ImageUrl, ModelMessage, ModelRequest, + ModelRequestPart, ModelResponse, + ModelResponsePart, + RetryPromptPart, SystemPromptPart, TextPart, + ThinkingPart, + ToolCallPart, + ToolReturnPart, UserPromptPart, + VideoUrl, ) from ..adapter import BaseAdapter from ..event_stream import BaseEventStream from ._event_stream import VercelAIEventStream -from ._request_types import RequestData, TextUIPart, UIMessage, request_data_ta +from ._request_types import ( + DataUIPart, + DynamicToolUIPart, + FileUIPart, + ReasoningUIPart, + RequestData, + TextUIPart, + ToolOutputAvailablePart, + ToolOutputErrorPart, + ToolUIPart, + UIMessage, + request_data_ta, +) from ._response_types import BaseChunk try: @@ -74,46 +100,116 @@ def messages(self) -> 
list[ModelMessage]: """ return self.load_messages(self.request.messages) - # TODO (DouweM): model, builtin_tools? - @classmethod - def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: + def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: # noqa: C901 """Load messages from the request and return the loaded messages.""" - pai_messages: list[ModelMessage] = [] + result: list[ModelMessage] = [] + request_parts: list[ModelRequestPart] | None = None + response_parts: list[ModelResponsePart] | None = None for msg in messages: - if msg.role == 'user': - # User message - extract text from parts - texts: list[str] = [] + if msg.role in ('system', 'user'): + if request_parts is None: + request_parts = [] + result.append(ModelRequest(parts=request_parts)) + response_parts = None + for part in msg.parts: if isinstance(part, TextUIPart): - texts.append(part.text) - else: - raise ValueError(f'Only text parts are supported for user messages, got {type(part).__name__}') - - if texts: - pai_messages.append(ModelRequest(parts=[UserPromptPart(content='\n'.join(texts))])) + if msg.role == 'system': + request_parts.append(SystemPromptPart(content=part.text)) + else: + request_parts.append(UserPromptPart(content=part.text)) + elif isinstance(part, FileUIPart): + try: + file = BinaryContent.from_data_uri(part.url) + except ValueError: + media_type_prefix = part.media_type.split('/', 1)[0] + match media_type_prefix: + case 'image': + file = ImageUrl(url=part.url, media_type=part.media_type) + case 'video': + file = VideoUrl(url=part.url, media_type=part.media_type) + case 'audio': + file = AudioUrl(url=part.url, media_type=part.media_type) + case _: + file = DocumentUrl(url=part.url, media_type=part.media_type) + request_parts.append(UserPromptPart(content=[file])) elif msg.role == 'assistant': - # Assistant message - for now, just extract text - # Full reconstruction of ModelResponse with tool calls would require more complex logic - texts: list[str] = [] for part in msg.parts: - if isinstance(part, TextUIPart): - texts.append(part.text) - # TODO: Handle ToolOutputAvailablePart for full message history reconstruction + if response_parts is None: + response_parts = [] + result.append(ModelResponse(parts=response_parts)) + request_parts = None - if texts: - pai_messages.append(ModelResponse(parts=[TextPart(content='\n'.join(texts))])) - - elif msg.role == 'system': - # System message - not in standard Vercel AI protocol but might be custom - texts: list[str] = [] - for part in msg.parts: if isinstance(part, TextUIPart): - texts.append(part.text) - - if texts: - pai_messages.append(ModelRequest(parts=[SystemPromptPart(content='\n'.join(texts))])) - - return pai_messages + response_parts.append(TextPart(content=part.text)) + elif isinstance(part, ReasoningUIPart): + response_parts.append(ThinkingPart(content=part.text)) + elif isinstance(part, FileUIPart): + try: + file = BinaryContent.from_data_uri(part.url) + except ValueError as e: + # TODO (DouweM): handle this better + raise ValueError( + 'Vercel AI integration can currently only handle assistant file parts with data URIs.' 
+ ) from e + response_parts.append(FilePart(content=file)) + elif isinstance(part, DataUIPart): + # Not currently supported + pass + elif isinstance(part, ToolUIPart | DynamicToolUIPart): + if isinstance(part, DynamicToolUIPart): + tool_name = part.tool_name + builtin_tool = False + else: + tool_name = part.type.removeprefix('tool-') + builtin_tool = part.provider_executed + + tool_call_id = part.tool_call_id + args = part.input + + if builtin_tool: + call_part = BuiltinToolCallPart(tool_name=tool_name, tool_call_id=tool_call_id, args=args) + response_parts.append(call_part) + + if isinstance(part, ToolOutputAvailablePart | ToolOutputErrorPart): + if part.state == 'output-available': + output = part.output + else: + output = part.error_text + + provider_name = ( + (part.call_provider_metadata or {}).get('pydantic_ai', {}).get('provider_name') + ) + call_part.provider_name = provider_name + + return_part = BuiltinToolReturnPart( + tool_name=tool_name, + tool_call_id=tool_call_id, + content=output, + provider_name=provider_name, + ) + response_parts.append(return_part) + else: + call_part = ToolCallPart(tool_name=tool_name, tool_call_id=tool_call_id, args=args) + response_parts.append(call_part) + + result_part: ToolReturnPart | RetryPromptPart | None = None + if part.state == 'output-available': + result_part = ToolReturnPart( + tool_name=tool_name, tool_call_id=tool_call_id, content=part.output + ) + elif part.state == 'output-error': + result_part = RetryPromptPart( + tool_name=tool_name, tool_call_id=tool_call_id, content=part.error_text + ) + + if result_part: + # TODO (DouweM): Combine consecutive tool calls and results + request_parts = [result_part] + result.append(ModelRequest(parts=request_parts)) + response_parts = None + + return result diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py index a5840eeda8..68c5842dd1 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py @@ -10,6 +10,7 @@ from ...messages import ( BuiltinToolCallPart, BuiltinToolReturnPart, + FilePart, FinalResultEvent, FunctionToolResultEvent, RetryPromptPart, @@ -26,6 +27,7 @@ from ._response_types import ( BaseChunk, ErrorChunk, + FileChunk, FinishChunk, ReasoningDeltaChunk, ReasoningEndChunk, @@ -34,9 +36,11 @@ TextDeltaChunk, TextEndChunk, TextStartChunk, + ToolInputAvailableChunk, ToolInputDeltaChunk, ToolInputStartChunk, ToolOutputAvailableChunk, + ToolOutputErrorChunk, ) __all__ = ['VercelAIEventStream'] @@ -133,14 +137,21 @@ def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIter self._builtin_tool_call_ids[tool_call_id] = builtin_tool_call_id tool_call_id = builtin_tool_call_id - return self._handle_tool_call_start(part) + return self._handle_tool_call_start(part, provider_executed=True) async def _handle_tool_call_start( - self, part: ToolCallPart | BuiltinToolCallPart, tool_call_id: str | None = None + self, + part: ToolCallPart | BuiltinToolCallPart, + tool_call_id: str | None = None, + provider_executed: bool | None = None, ) -> AsyncIterator[BaseChunk]: """Handle a ToolCallPart or BuiltinToolCallPart at start.""" tool_call_id = tool_call_id or part.tool_call_id - yield ToolInputStartChunk(tool_call_id=tool_call_id, tool_name=part.tool_name) + yield ToolInputStartChunk( + tool_call_id=tool_call_id, + tool_name=part.tool_name, + provider_executed=provider_executed, + ) if part.args: yield 
ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=part.args_as_json_str()) @@ -159,31 +170,39 @@ async def handle_tool_call_end(self, part: ToolCallPart) -> AsyncIterator[BaseCh """Handle a ToolCallPart at end.""" # TODO (DouweM): We don't have the full args available here, # and we don't seem to need to send this anyway if we've already sent deltas - # yield ToolInputAvailableChunk( - # tool_call_id=part.tool_call_id, - # tool_name=part.tool_name, - # input=part.args, - # provider_executed=True, - # dynamic=False, - # ) - # TODO (DouweM): What are ToolInputAvailableChunk.provider_executed and dynamic? - # Likely use for builtin and external tools. return yield # Make this an async generator - # async def handle_builtin_tool_call_end(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseChunk]: - # """Handle a BuiltinToolCallPart at end.""" - # pass + async def handle_builtin_tool_call_end(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseChunk]: + """Handle a BuiltinToolCallPart at end.""" + yield ToolInputAvailableChunk( + tool_call_id=part.tool_call_id, + tool_name=part.tool_name, + input=part.args, # TODO (DouweM): This should match the full tool input + provider_executed=True, + provider_metadata={'pydantic_ai': {'provider_name': part.provider_name}}, + ) async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[BaseChunk]: """Handle a BuiltinToolReturnPart.""" - yield ToolOutputAvailableChunk(tool_call_id=part.tool_call_id, output=part.content) + yield ToolOutputAvailableChunk( + tool_call_id=part.tool_call_id, + output=part.content, + provider_executed=True, + ) + + async def handle_file(self, part: FilePart) -> AsyncIterator[BaseChunk]: + """Handle a FilePart.""" + file = part.content + yield FileChunk(url=file.data_uri, media_type=file.media_type) async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[BaseChunk]: """Handle a FunctionToolResultEvent, emitting tool result events.""" result = event.result - output = result.model_response() if isinstance(result, RetryPromptPart) else result.content - yield ToolOutputAvailableChunk(tool_call_id=result.tool_call_id, output=output) + if isinstance(result, RetryPromptPart): + yield ToolOutputErrorChunk(tool_call_id=result.tool_call_id, error_text=result.model_response()) + else: + yield ToolOutputAvailableChunk(tool_call_id=result.tool_call_id, output=result.content) # TODO (DouweM): Stream ToolCallResultEvent.content as user parts? diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_request_types.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_request_types.py index c2aa43f506..3aa83abde0 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_request_types.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_request_types.py @@ -1,39 +1,131 @@ """Vercel AI request types (UI messages). 
Converted to Python from: -https://github.com/vercel/ai/blob/ai%405.0.34/packages/ai/src/ui/ui-messages.ts +https://github.com/vercel/ai/blob/ai%405.0.59/packages/ai/src/ui/ui-messages.ts """ from typing import Annotated, Any, Literal -from pydantic import Discriminator, TypeAdapter +from pydantic import Discriminator, Field, TypeAdapter from ._utils import CamelBaseModel, ProviderMetadata -__all__ = [ - 'TextUIPart', - 'ToolOutputAvailablePart', - 'UIPart', - 'UIMessage', - 'SubmitMessage', - 'RequestData', - 'request_data_ta', -] - class TextUIPart(CamelBaseModel): """A text part of a message.""" type: Literal['text'] = 'text' + + text: str + """The text content.""" + + state: Literal['streaming', 'done'] | None = None + """The state of the text part.""" + + provider_metadata: ProviderMetadata | None = None + """The provider metadata.""" + + +class ReasoningUIPart(CamelBaseModel): + """A reasoning part of a message.""" + + type: Literal['reasoning'] = 'reasoning' + text: str + """The reasoning text.""" + state: Literal['streaming', 'done'] | None = None + """The state of the reasoning part.""" + + provider_metadata: ProviderMetadata | None = None + """The provider metadata.""" + + +class SourceUrlUIPart(CamelBaseModel): + """A source part of a message.""" + + type: Literal['source-url'] = 'source-url' + source_id: str + url: str + title: str | None = None provider_metadata: ProviderMetadata | None = None +class SourceDocumentUIPart(CamelBaseModel): + """A document source part of a message.""" + + type: Literal['source-document'] = 'source-document' + source_id: str + media_type: str + title: str + filename: str | None = None + provider_metadata: ProviderMetadata | None = None + + +class FileUIPart(CamelBaseModel): + """A file part of a message.""" + + type: Literal['file'] = 'file' + + media_type: str + """ + IANA media type of the file. + @see https://www.iana.org/assignments/media-types/media-types.xhtml + """ + + filename: str | None = None + """Optional filename of the file.""" + + url: str + """ + The URL of the file. + It can either be a URL to a hosted file or a [Data URL](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs). 
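+    For example (illustrative values): "https://example.com/report.pdf" or
+    "data:image/png;base64,iVBORw0...".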
+ """ + + provider_metadata: ProviderMetadata | None = None + """The provider metadata.""" + + +class StepStartUIPart(CamelBaseModel): + """A step boundary part of a message.""" + + type: Literal['step-start'] = 'step-start' + + +class DataUIPart(CamelBaseModel): + """Data part with dynamic type based on data name.""" + + type: Annotated[str, Field(pattern=r'^data-')] + id: str | None = None + data: Any + + +# Tool part states as separate models +class ToolInputStreamingPart(CamelBaseModel): + """Tool part in input-streaming state.""" + + type: Annotated[str, Field(pattern=r'^tool-')] + tool_call_id: str + state: Literal['input-streaming'] = 'input-streaming' + input: Any | None = None + provider_executed: bool | None = None + + +class ToolInputAvailablePart(CamelBaseModel): + """Tool part in input-available state.""" + + type: Annotated[str, Field(pattern=r'^tool-')] + tool_call_id: str + state: Literal['input-available'] = 'input-available' + input: Any + provider_executed: bool | None = None + call_provider_metadata: ProviderMetadata | None = None + + class ToolOutputAvailablePart(CamelBaseModel): - """Tool output available part.""" + """Tool part in output-available state.""" - type: str # f"tool-{tool_name}" + type: Annotated[str, Field(pattern=r'^tool-')] tool_call_id: str state: Literal['output-available'] = 'output-available' input: Any @@ -43,17 +135,113 @@ class ToolOutputAvailablePart(CamelBaseModel): preliminary: bool | None = None -# Since ToolOutputAvailablePart has a dynamic type field, we can't use Discriminator -UIPart = TextUIPart | ToolOutputAvailablePart +class ToolOutputErrorPart(CamelBaseModel): + """Tool part in output-error state.""" + + type: Annotated[str, Field(pattern=r'^tool-')] + tool_call_id: str + state: Literal['output-error'] = 'output-error' + input: Any | None = None + raw_input: Any | None = None + error_text: str + provider_executed: bool | None = None + call_provider_metadata: ProviderMetadata | None = None + + +# Union of all tool part states +ToolUIPart = ToolInputStreamingPart | ToolInputAvailablePart | ToolOutputAvailablePart | ToolOutputErrorPart + + +# Dynamic tool part states as separate models +class DynamicToolInputStreamingPart(CamelBaseModel): + """Dynamic tool part in input-streaming state.""" + + type: Literal['dynamic-tool'] = 'dynamic-tool' + tool_name: str + tool_call_id: str + state: Literal['input-streaming'] = 'input-streaming' + input: Any | None = None + + +class DynamicToolInputAvailablePart(CamelBaseModel): + """Dynamic tool part in input-available state.""" + + type: Literal['dynamic-tool'] = 'dynamic-tool' + tool_name: str + tool_call_id: str + state: Literal['input-available'] = 'input-available' + input: Any + call_provider_metadata: ProviderMetadata | None = None + + +class DynamicToolOutputAvailablePart(CamelBaseModel): + """Dynamic tool part in output-available state.""" + + type: Literal['dynamic-tool'] = 'dynamic-tool' + tool_name: str + tool_call_id: str + state: Literal['output-available'] = 'output-available' + input: Any + output: Any + call_provider_metadata: ProviderMetadata | None = None + preliminary: bool | None = None + + +class DynamicToolOutputErrorPart(CamelBaseModel): + """Dynamic tool part in output-error state.""" + + type: Literal['dynamic-tool'] = 'dynamic-tool' + tool_name: str + tool_call_id: str + state: Literal['output-error'] = 'output-error' + input: Any + error_text: str + call_provider_metadata: ProviderMetadata | None = None + + +# Union of all dynamic tool part states +DynamicToolUIPart = ( + 
DynamicToolInputStreamingPart + | DynamicToolInputAvailablePart + | DynamicToolOutputAvailablePart + | DynamicToolOutputErrorPart +) + + +UIMessagePart = ( + TextUIPart + | ReasoningUIPart + | ToolUIPart + | DynamicToolUIPart + | SourceUrlUIPart + | SourceDocumentUIPart + | FileUIPart + | DataUIPart + | StepStartUIPart +) +"""Union of all message part types.""" class UIMessage(CamelBaseModel): - """A message in the UI protocol.""" + """A message as displayed in the UI by Vercel AI Elements.""" id: str - role: Literal['user', 'assistant', 'system'] - metadata: dict[str, Any] | None = None - parts: list[UIPart] + """A unique identifier for the message.""" + + role: Literal['system', 'user', 'assistant'] + """The role of the message.""" + + metadata: Any | None = None + """The metadata of the message.""" + + parts: list[UIMessagePart] + """ + The parts of the message. Use this for rendering the message in the UI. + System messages should be avoided (set the system prompt on the server instead). + They can have text parts. + User messages can have text parts and file parts. + Assistant messages can have text, reasoning, tool invocation, and file parts. + """ class SubmitMessage(CamelBaseModel, extra='allow'): diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_response_types.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_response_types.py index c2d6ab2055..a1c1187f1f 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_response_types.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_response_types.py @@ -1,10 +1,12 @@ """Vercel AI response types (SSE chunks). Converted to Python from: -https://github.com/vercel/ai/blob/ai%405.0.34/packages/ai/src/ui/ui-messages.ts +https://github.com/vercel/ai/blob/ai%405.0.59/packages/ai/src/ui/ui-messages.ts """ -from typing import Any, Literal +from typing import Annotated, Any, Literal + +from pydantic import Field from ._utils import CamelBaseModel, ProviderMetadata @@ -26,7 +28,7 @@ 'SourceUrlChunk', 'SourceDocumentChunk', 'FileChunk', - 'DataUIMessageChunk', + 'DataChunk', 'StartStepChunk', 'FinishStepChunk', 'StartChunk', @@ -126,13 +128,6 @@ class ToolOutputAvailableChunk(BaseChunk): preliminary: bool | None = None -class FinishChunk(BaseChunk): - """Finish chunk.""" - - type: Literal['finish'] = 'finish' - message_metadata: Any | None = None - - class ToolInputAvailableChunk(BaseChunk): """Tool input available chunk.""" @@ -197,10 +192,10 @@ class FileChunk(BaseChunk): media_type: str -class DataUIMessageChunk(BaseChunk): - """Data UI message chunk with dynamic type.""" +class DataChunk(BaseChunk): + """Data chunk with dynamic type.""" - type: str # Will be f"data-{NAME}" + type: Annotated[str, Field(pattern=r'^data-')] data: Any @@ -224,6 +219,13 @@ class StartChunk(BaseChunk): message_metadata: Any | None = None +class FinishChunk(BaseChunk): + """Finish chunk.""" + + type: Literal['finish'] = 'finish' + message_metadata: Any | None = None + + class AbortChunk(BaseChunk): """Abort chunk.""" diff --git a/tests/test_vercel_ai.py b/tests/test_vercel_ai.py index 10a333be94..02f7f5fe54 100644 --- a/tests/test_vercel_ai.py +++ b/tests/test_vercel_ai.py @@ -7,6 +7,7 @@ from pydantic_ai import Agent from pydantic_ai.builtin_tools import WebSearchTool +from pydantic_ai.messages import ModelRequest, ModelResponse, TextPart, ToolCallPart, ToolReturnPart, UserPromptPart from pydantic_ai.ui.vercel_ai import VercelAIAdapter from pydantic_ai.ui.vercel_ai._request_types import ( SubmitMessage, @@ -15,7 +16,7 @@ UIMessage, ) -from .conftest import IsStr, 
try_import +from .conftest import IsDatetime, IsStr, try_import with try_import() as openai_import_successful: from pydantic_ai.models.openai import OpenAIResponsesModel @@ -147,128 +148,261 @@ async def test_run(allow_model_requests: None, openai_api_key: str): ) adapter = VercelAIAdapter(agent, request=data) + assert adapter.messages == snapshot( + [ + ModelRequest( + parts=[ + UserPromptPart( + content="""\ +Use a tool + + \ +""", + timestamp=IsDatetime(), + ) + ] + ), + ModelResponse( + parts=[ + TextPart( + content="""\ +I'd be happy to help you use a tool! However, I need more information about what you'd like to do. I have access to tools for searching and retrieving documentation for two products: + + 1. **Pydantic AI** (pydantic-ai) - an open source agent framework library + 2. **Pydantic Logfire** (logfire) - an observability platform + + I can help you with: + - Searching the documentation for specific topics or questions + - Getting the table of contents to see what documentation is available + - Retrieving specific documentation files + + What would you like to learn about or search for? Please let me know: + - Which product you're interested in (Pydantic AI or Logfire) + - What specific topic, feature, or question you have + + For example, you could ask something like "How do I get started with Pydantic AI?" or "Show me the table of contents for Logfire documentation."\ +""" + ) + ], + timestamp=IsDatetime(), + ), + ModelRequest( + parts=[ + UserPromptPart( + content='Give me the ToCs', + timestamp=IsDatetime(), + ) + ] + ), + ModelResponse( + parts=[ + TextPart(content="I'll get the table of contents for both repositories."), + ToolCallPart( + tool_name='get_table_of_contents', + args={'repo': 'pydantic-ai'}, + tool_call_id='toolu_01XX3rjFfG77h3KCbVHoYJMQ', + ), + ], + timestamp=IsDatetime(), + ), + ModelRequest( + parts=[ + ToolReturnPart( + tool_name='get_table_of_contents', + content="[Scrubbed due to 'API Key']", + tool_call_id='toolu_01XX3rjFfG77h3KCbVHoYJMQ', + timestamp=IsDatetime(), + ) + ] + ), + ModelResponse( + parts=[ + ToolCallPart( + tool_name='get_table_of_contents', + args={'repo': 'logfire'}, + tool_call_id='toolu_01W2yGpGQcMx7pXV2zZ4sz9g', + ) + ], + timestamp=IsDatetime(), + ), + ModelRequest( + parts=[ + ToolReturnPart( + tool_name='get_table_of_contents', + content="[Scrubbed due to 'Auth']", + tool_call_id='toolu_01W2yGpGQcMx7pXV2zZ4sz9g', + timestamp=IsDatetime(), + ) + ] + ), + ModelResponse( + parts=[ + TextPart( + content='Here are the Table of Contents for both repositories:... Both products are designed to work together - Pydantic AI for building AI agents and Logfire for observing and monitoring them in production.' 
+ ) + ], + timestamp=IsDatetime(), + ), + ModelRequest( + parts=[ + UserPromptPart( + content='How do I get FastAPI instrumentation to include the HTTP request and response', + timestamp=IsDatetime(), + ) + ] + ), + ] + ) events = [json.loads(event.removeprefix('data: ')) async for event in adapter.encode_stream(adapter.run_stream())] assert events == snapshot( [ {'type': 'start'}, {'type': 'reasoning-start', 'id': IsStr()}, {'type': 'reasoning-end', 'id': IsStr()}, - { - 'type': 'tool-input-start', - 'toolCallId': IsStr(), - 'toolName': 'web_search', - }, + {'type': 'tool-input-start', 'toolCallId': IsStr(), 'toolName': 'web_search', 'providerExecuted': True}, { 'type': 'tool-input-delta', 'toolCallId': IsStr(), 'inputTextDelta': '{"query":"OpenTelemetry FastAPI instrumentation capture request and response body","type":"search"}', }, + { + 'type': 'tool-input-available', + 'toolCallId': 'ws_00e767404995b9950068e647f909248191bfe8d05eeed67645', + 'toolName': 'web_search', + 'providerExecuted': True, + 'providerMetadata': {'pydantic_ai': {'provider_name': 'openai'}}, + }, { 'type': 'tool-output-available', 'toolCallId': IsStr(), 'output': {'status': 'completed'}, + 'providerExecuted': True, }, {'type': 'reasoning-start', 'id': IsStr()}, {'type': 'reasoning-end', 'id': IsStr()}, - { - 'type': 'tool-input-start', - 'toolCallId': IsStr(), - 'toolName': 'web_search', - }, + {'type': 'tool-input-start', 'toolCallId': IsStr(), 'toolName': 'web_search', 'providerExecuted': True}, { 'type': 'tool-input-delta', 'toolCallId': IsStr(), 'inputTextDelta': '{"query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY Python","type":"search"}', }, + { + 'type': 'tool-input-available', + 'toolCallId': 'ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c', + 'toolName': 'web_search', + 'providerExecuted': True, + 'providerMetadata': {'pydantic_ai': {'provider_name': 'openai'}}, + }, { 'type': 'tool-output-available', 'toolCallId': IsStr(), 'output': {'status': 'completed'}, + 'providerExecuted': True, }, {'type': 'reasoning-start', 'id': IsStr()}, {'type': 'reasoning-end', 'id': IsStr()}, - { - 'type': 'tool-input-start', - 'toolCallId': IsStr(), - 'toolName': 'web_search', - }, + {'type': 'tool-input-start', 'toolCallId': IsStr(), 'toolName': 'web_search', 'providerExecuted': True}, { 'type': 'tool-input-delta', 'toolCallId': IsStr(), 'inputTextDelta': '{"query":"OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY opentelemetry python","type":"search"}', }, + { + 'type': 'tool-input-available', + 'toolCallId': 'ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba', + 'toolName': 'web_search', + 'providerExecuted': True, + 'providerMetadata': {'pydantic_ai': {'provider_name': 'openai'}}, + }, { 'type': 'tool-output-available', 'toolCallId': IsStr(), 'output': {'status': 'completed'}, + 'providerExecuted': True, }, {'type': 'reasoning-start', 'id': IsStr()}, {'type': 'reasoning-end', 'id': IsStr()}, - { - 'type': 'tool-input-start', - 'toolCallId': IsStr(), - 'toolName': 'web_search', - }, + {'type': 'tool-input-start', 'toolCallId': IsStr(), 'toolName': 'web_search', 'providerExecuted': True}, { 'type': 'tool-input-delta', 'toolCallId': IsStr(), 'inputTextDelta': '{"query":"site:github.com open-telemetry/opentelemetry-python-contrib OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY","type":"search"}', }, + { + 'type': 'tool-input-available', + 'toolCallId': 'ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2', + 'toolName': 'web_search', + 'providerExecuted': True, + 'providerMetadata': {'pydantic_ai': {'provider_name': 'openai'}}, + }, 
{ 'type': 'tool-output-available', 'toolCallId': IsStr(), 'output': {'status': 'completed'}, + 'providerExecuted': True, }, {'type': 'reasoning-start', 'id': IsStr()}, {'type': 'reasoning-end', 'id': IsStr()}, - { - 'type': 'tool-input-start', - 'toolCallId': IsStr(), - 'toolName': 'web_search', - }, + {'type': 'tool-input-start', 'toolCallId': IsStr(), 'toolName': 'web_search', 'providerExecuted': True}, { 'type': 'tool-input-delta', 'toolCallId': IsStr(), 'inputTextDelta': '{"query":null,"type":"search"}', }, + { + 'type': 'tool-input-available', + 'toolCallId': 'ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911', + 'toolName': 'web_search', + 'providerExecuted': True, + 'providerMetadata': {'pydantic_ai': {'provider_name': 'openai'}}, + }, { 'type': 'tool-output-available', 'toolCallId': IsStr(), 'output': {'status': 'completed'}, + 'providerExecuted': True, }, {'type': 'reasoning-start', 'id': IsStr()}, {'type': 'reasoning-end', 'id': IsStr()}, - { - 'type': 'tool-input-start', - 'toolCallId': IsStr(), - 'toolName': 'web_search', - }, + {'type': 'tool-input-start', 'toolCallId': IsStr(), 'toolName': 'web_search', 'providerExecuted': True}, { 'type': 'tool-input-delta', 'toolCallId': IsStr(), 'inputTextDelta': '{"query":null,"type":"search"}', }, + { + 'type': 'tool-input-available', + 'toolCallId': 'ws_00e767404995b9950068e6480e11208191834104e1aaab1148', + 'toolName': 'web_search', + 'providerExecuted': True, + 'providerMetadata': {'pydantic_ai': {'provider_name': 'openai'}}, + }, { 'type': 'tool-output-available', 'toolCallId': IsStr(), 'output': {'status': 'completed'}, + 'providerExecuted': True, }, {'type': 'reasoning-start', 'id': IsStr()}, {'type': 'reasoning-end', 'id': IsStr()}, - { - 'type': 'tool-input-start', - 'toolCallId': IsStr(), - 'toolName': 'web_search', - }, + {'type': 'tool-input-start', 'toolCallId': IsStr(), 'toolName': 'web_search', 'providerExecuted': True}, { 'type': 'tool-input-delta', 'toolCallId': IsStr(), 'inputTextDelta': '{"query":"OTEL_PYTHON_LOG_CORRELATION environment variable","type":"search"}', }, + { + 'type': 'tool-input-available', + 'toolCallId': 'ws_00e767404995b9950068e648118bf88191aa7f804637c45b32', + 'toolName': 'web_search', + 'providerExecuted': True, + 'providerMetadata': {'pydantic_ai': {'provider_name': 'openai'}}, + }, { 'type': 'tool-output-available', 'toolCallId': IsStr(), 'output': {'status': 'completed'}, + 'providerExecuted': True, }, {'type': 'reasoning-start', 'id': IsStr()}, {'type': 'reasoning-end', 'id': IsStr()}, From 365f14fed97b0365100bc6827a2da0c51aad9bea Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Tue, 14 Oct 2025 10:18:54 +0000 Subject: [PATCH 12/46] misc --- pydantic_ai_slim/pydantic_ai/ui/adapter.py | 7 +++++++ pydantic_ai_slim/pydantic_ai/ui/event_stream.py | 1 + .../pydantic_ai/ui/vercel_ai/_event_stream.py | 16 ++-------------- .../pydantic_ai/ui/vercel_ai/_request_types.py | 6 +++--- 4 files changed, 13 insertions(+), 17 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/adapter.py index 3d65a575a1..1aeb71e21f 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/adapter.py @@ -26,6 +26,7 @@ from .. 
import DeferredToolRequests, DeferredToolResults, _utils from ..agent import AbstractAgent, AgentDepsT, AgentRunResult +from ..builtin_tools import AbstractBuiltinTool from ..exceptions import UserError from ..messages import ModelMessage from ..models import KnownModelName, Model @@ -218,6 +219,7 @@ async def run_stream( usage: RunUsage | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, + builtin_tools: Sequence[AbstractBuiltinTool] | None = None, on_complete: OnCompleteFunc | None = None, ) -> AsyncIterator[EventT]: """Run the agent with the AG-UI run input and stream AG-UI protocol events. @@ -234,6 +236,7 @@ async def run_stream( usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. toolsets: Optional additional toolsets for this run. + builtin_tools: Optional additional builtin tools to use for this run. on_complete: Optional callback function called when the agent run completes successfully. The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can access `all_messages()` and other result data. @@ -273,6 +276,7 @@ async def run_stream( usage=usage, infer_name=infer_name, toolsets=toolsets, + builtin_tools=builtin_tools, ), on_complete=on_complete, ): @@ -309,6 +313,7 @@ async def dispatch_request( usage: RunUsage | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, + builtin_tools: Sequence[AbstractBuiltinTool] | None = None, on_complete: OnCompleteFunc | None = None, ) -> Response: """Handle an AG-UI request and return a streaming response. @@ -327,6 +332,7 @@ async def dispatch_request( usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. toolsets: Optional additional toolsets for this run. + builtin_tools: Optional additional builtin tools to use for this run. on_complete: Optional callback function called when the agent run completes successfully. The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can access `all_messages()` and other result data. @@ -355,6 +361,7 @@ async def dispatch_request( usage=usage, infer_name=infer_name, toolsets=toolsets, + builtin_tools=builtin_tools, on_complete=on_complete, ) diff --git a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py index baf6b9e654..b722490c30 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py @@ -82,6 +82,7 @@ async def encode_stream(self, stream: AsyncIterator[EventT], accept: str | None accept: The accept header value for encoding format. 
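
        Yields:
            Serialized events, e.g. SSE strings of the form 'data: <json>' for SSE-based protocols.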
""" async for event in stream: + print(event) yield self.encode_event(event, accept) async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterator[EventT]: # noqa: C901 diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py index 68c5842dd1..4b2807dc66 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py @@ -3,7 +3,7 @@ from __future__ import annotations from collections.abc import AsyncIterator -from typing import Any, Final +from typing import Any from pydantic_core import to_json @@ -51,9 +51,6 @@ def _json_dumps(obj: Any) -> str: return to_json(obj).decode('utf-8') -BUILTIN_TOOL_CALL_ID_PREFIX: Final[str] = 'pyd_ai_builtin' - - class VercelAIEventStream(BaseEventStream[RequestData, BaseChunk, AgentDepsT]): """TODO (DouwM): Docstring.""" @@ -61,7 +58,6 @@ def __init__(self, request: RequestData) -> None: """Initialize Vercel AI event stream state.""" super().__init__(request) self._final_result_tool_id: str | None = None - self._builtin_tool_call_ids: dict[str, str] = {} def encode_event(self, event: BaseChunk, accept: str | None = None) -> str: return f'data: {event.model_dump_json(by_alias=True, exclude_none=True)}\n\n' @@ -131,12 +127,6 @@ def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> As def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseChunk]: """Handle a BuiltinToolCallEvent, emitting tool input events.""" - # TODO (DouweM): Reduce duplication with AGUIEventStream - tool_call_id = part.tool_call_id - builtin_tool_call_id = '|'.join([BUILTIN_TOOL_CALL_ID_PREFIX, part.provider_name or '', tool_call_id]) - self._builtin_tool_call_ids[tool_call_id] = builtin_tool_call_id - tool_call_id = builtin_tool_call_id - return self._handle_tool_call_start(part, provider_executed=True) async def _handle_tool_call_start( @@ -159,8 +149,6 @@ async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterato """Handle a ToolCallPartDelta.""" tool_call_id = delta.tool_call_id or '' assert tool_call_id, '`ToolCallPartDelta.tool_call_id` must be set' - if tool_call_id in self._builtin_tool_call_ids: - tool_call_id = self._builtin_tool_call_ids[tool_call_id] yield ToolInputDeltaChunk( tool_call_id=tool_call_id, input_text_delta=delta.args_delta if isinstance(delta.args_delta, str) else _json_dumps(delta.args_delta), @@ -178,7 +166,7 @@ async def handle_builtin_tool_call_end(self, part: BuiltinToolCallPart) -> Async yield ToolInputAvailableChunk( tool_call_id=part.tool_call_id, tool_name=part.tool_name, - input=part.args, # TODO (DouweM): This should match the full tool input + input=part.args, # TODO (DouweM): This should match the full tool input, now erases the input from the UI! 
provider_executed=True, provider_metadata={'pydantic_ai': {'provider_name': part.provider_name}}, ) diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_request_types.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_request_types.py index 3aa83abde0..9df592b7f1 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_request_types.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_request_types.py @@ -117,7 +117,7 @@ class ToolInputAvailablePart(CamelBaseModel): type: Annotated[str, Field(pattern=r'^tool-')] tool_call_id: str state: Literal['input-available'] = 'input-available' - input: Any + input: Any | None = None provider_executed: bool | None = None call_provider_metadata: ProviderMetadata | None = None @@ -128,8 +128,8 @@ class ToolOutputAvailablePart(CamelBaseModel): type: Annotated[str, Field(pattern=r'^tool-')] tool_call_id: str state: Literal['output-available'] = 'output-available' - input: Any - output: Any + input: Any | None = None + output: Any | None = None provider_executed: bool | None = None call_provider_metadata: ProviderMetadata | None = None preliminary: bool | None = None From 45e757e5dd6da4e2a8178a38062a9125d5483e42 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Tue, 14 Oct 2025 12:25:25 +0000 Subject: [PATCH 13/46] Add PartEndEvent --- pydantic_ai_slim/pydantic_ai/messages.py | 30 ++++- .../pydantic_ai/models/__init__.py | 43 +++++- .../pydantic_ai/ui/ag_ui/_event_stream.py | 23 ---- .../pydantic_ai/ui/event_stream.py | 127 ++++++------------ .../pydantic_ai/ui/vercel_ai/_event_stream.py | 22 +-- tests/test_vercel_ai.py | 13 ++ 6 files changed, 125 insertions(+), 133 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index 6e91b78bda..d00c82aee5 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -1612,6 +1612,11 @@ class PartStartEvent: part: ModelResponsePart """The newly started `ModelResponsePart`.""" + previous_part_kind: ( + Literal['text', 'thinking', 'tool-call', 'builtin-tool-call', 'builtin-tool-return', 'file'] | None + ) = None + """The kind of the previous part, if known.""" + event_kind: Literal['part_start'] = 'part_start' """Event type identifier, used as a discriminator.""" @@ -1634,6 +1639,27 @@ class PartDeltaEvent: __repr__ = _utils.dataclasses_no_defaults_repr +@dataclass(repr=False, kw_only=True) +class PartEndEvent: + """An event indicating that a part is complete.""" + + index: int + """The index of the part within the overall response parts list.""" + + part: ModelResponsePart + """The complete `ModelResponsePart`.""" + + next_part_kind: ( + Literal['text', 'thinking', 'tool-call', 'builtin-tool-call', 'builtin-tool-return', 'file'] | None + ) = None + """The kind of the next part, if known.""" + + event_kind: Literal['part_end'] = 'part_end' + """Event type identifier, used as a discriminator.""" + + __repr__ = _utils.dataclasses_no_defaults_repr + + @dataclass(repr=False, kw_only=True) class FinalResultEvent: """An event indicating the response to the current model request matches the output schema and will produce a result.""" @@ -1649,9 +1675,9 @@ class FinalResultEvent: ModelResponseStreamEvent = Annotated[ - PartStartEvent | PartDeltaEvent | FinalResultEvent, pydantic.Discriminator('event_kind') + PartStartEvent | PartDeltaEvent | PartEndEvent | FinalResultEvent, pydantic.Discriminator('event_kind') ] -"""An event in the model response stream, starting a new part, applying a delta to an existing one, or indicating the 
final result.""" +"""An event in the model response stream, starting a new part, applying a delta to an existing one, indicating a part is complete, or indicating the final result.""" @dataclass(repr=False) diff --git a/pydantic_ai_slim/pydantic_ai/models/__init__.py b/pydantic_ai_slim/pydantic_ai/models/__init__.py index 7169e21447..6d07c58b75 100644 --- a/pydantic_ai_slim/pydantic_ai/models/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/models/__init__.py @@ -27,6 +27,7 @@ from ..builtin_tools import AbstractBuiltinTool from ..exceptions import UserError from ..messages import ( + BaseToolCallPart, BinaryImage, FilePart, FileUrl, @@ -35,9 +36,12 @@ ModelMessage, ModelRequest, ModelResponse, + ModelResponsePart, ModelResponseStreamEvent, + PartEndEvent, PartStartEvent, TextPart, + ThinkingPart, ToolCallPart, VideoUrl, ) @@ -539,7 +543,44 @@ async def iterator_with_final_event( async for event in iterator: yield event - self._event_iterator = iterator_with_final_event(self._get_event_iterator()) + async def iterator_with_part_end( + iterator: AsyncIterator[ModelResponseStreamEvent], + ) -> AsyncIterator[ModelResponseStreamEvent]: + last_start_event: PartStartEvent | None = None + + def part_end_event(next_part: ModelResponsePart | None = None) -> PartEndEvent | None: + if not last_start_event: + return None + + index = last_start_event.index + part = self._parts_manager.get_parts()[index] + if not isinstance(part, TextPart | ThinkingPart | BaseToolCallPart): + # Parts other than these 3 don't have deltas, so don't need an end part. + return None + + return PartEndEvent( + index=index, + part=part, + next_part_kind=next_part.part_kind if next_part else None, + ) + + async for event in iterator: + if isinstance(event, PartStartEvent): + if last_start_event: + end_event = part_end_event(event.part) + if end_event: + yield end_event + + event.previous_part_kind = last_start_event.part.part_kind + last_start_event = event + + yield event + + end_event = part_end_event() + if end_event: + yield end_event + + self._event_iterator = iterator_with_part_end(iterator_with_final_event(self._get_event_iterator())) return self._event_iterator @abstractmethod diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index a3155eb976..c576b5be6a 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -13,7 +13,6 @@ from ...messages import ( BuiltinToolCallPart, BuiltinToolReturnPart, - FinalResultEvent, FunctionToolResultEvent, RetryPromptPart, TextPart, @@ -78,7 +77,6 @@ def __init__(self, request: RunAgentInput) -> None: super().__init__(request) self._thinking_text = False self._builtin_tool_call_ids: dict[str, str] = {} - self._final_result_tool_id: str | None = None def encode_event(self, event: BaseEvent, accept: str | None = None) -> str: """Encode an AG-UI event as SSE. @@ -111,20 +109,6 @@ async def on_error(self, error: Exception) -> AsyncIterator[BaseEvent]: """Handle errors during streaming.""" yield RunErrorEvent(message=str(error)) - async def after_response(self) -> AsyncIterator[BaseEvent]: - """Yield events after agent response completes.""" - # Close the final result tool if there was one - # TODO (DouweM): Both AG-UI and Vercel AI do this to wrap up output tools, so consider if it should be a separate event, - # or if the tool ID should at least on the AgentRunResult(Event). 
- if tool_call_id := self._final_result_tool_id: - yield ToolCallResultEvent( - message_id=self.new_message_id(), - type=EventType.TOOL_CALL_RESULT, - role='tool', - tool_call_id=tool_call_id, - content='', - ) - async def handle_text_start(self, part: TextPart, follows_text: bool = False) -> AsyncIterator[BaseEvent]: """Handle a TextPart at start.""" if follows_text: @@ -261,10 +245,3 @@ async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> A yield item # TODO (DouweM): Stream ToolCallResultEvent.content as user parts? - - async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[BaseEvent]: - """Handle a FinalResultEvent, tracking the final result tool.""" - if event.tool_call_id and event.tool_name: - self._final_result_tool_id = event.tool_call_id - return - yield # Make this an async generator diff --git a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py index b722490c30..cbcb8af293 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py @@ -9,7 +9,7 @@ from abc import ABC, abstractmethod from collections.abc import AsyncIterator from dataclasses import dataclass, field -from typing import Generic, Literal, TypeVar +from typing import Generic, TypeVar from uuid import uuid4 from ..messages import ( @@ -20,8 +20,8 @@ FinalResultEvent, FunctionToolCallEvent, FunctionToolResultEvent, - ModelResponsePart, PartDeltaEvent, + PartEndEvent, PartStartEvent, TextPart, TextPartDelta, @@ -29,6 +29,7 @@ ThinkingPartDelta, ToolCallPart, ToolCallPartDelta, + ToolReturnPart, ) from ..run import AgentRunResult, AgentRunResultEvent from ..tools import AgentDepsT @@ -55,6 +56,8 @@ class BaseEventStream(ABC, Generic[RunRequestT, EventT, AgentDepsT]): message_id: str = field(default_factory=lambda: str(uuid4())) + _final_result_event: FinalResultEvent | None = None + def new_message_id(self) -> str: """Generate and store a new message ID. @@ -82,10 +85,9 @@ async def encode_stream(self, stream: AsyncIterator[EventT], accept: str | None accept: The accept header value for encoding format. """ async for event in stream: - print(event) yield self.encode_event(event, accept) - async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterator[EventT]: # noqa: C901 + async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterator[EventT]: """Handle a stream of agent events. 
Args: @@ -97,49 +99,9 @@ async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterat async for e in self.before_stream(): yield e - part: ModelResponsePart | None = None - turn: Literal['request', 'response'] | None = None try: async for event in stream: - # TODO (DouweM): Introduce PartEndEvent, possible MessageStartEvent, MessageEndEvent with ModelRequest/Response - previous_part = part - next_turn = turn - next_part = part - if isinstance(event, PartStartEvent): - next_turn = 'request' - next_part = event.part - elif isinstance(event, FunctionToolCallEvent): - next_turn = 'response' - next_part = None - elif isinstance(event, AgentRunResultEvent): - next_turn = None - next_part = None - - if next_part != part: - if part: - async for e in self.handle_part_end(part, next_part): - yield e - - part = next_part - - if turn != next_turn: - if turn == 'request': - async for e in self.after_request(): - yield e - elif turn == 'response': - async for e in self.after_response(): - yield e - - turn = next_turn - - if turn == 'request': - async for e in self.before_request(): - yield e - elif turn == 'response': - async for e in self.before_response(): - yield e - - async for e in self.handle_event(event, previous_part): + async for e in self.handle_event(event): yield e except Exception as e: async for e in self.on_error(e): @@ -148,9 +110,7 @@ async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterat async for e in self.after_stream(): yield e - async def handle_event( - self, event: SourceEvent, previous_part: ModelResponsePart | None = None - ) -> AsyncIterator[EventT]: + async def handle_event(self, event: SourceEvent) -> AsyncIterator[EventT]: # noqa: C901 """Transform a Pydantic AI agent event into protocol-specific events. This method dispatches to specific `handle_*` methods based on event and part type. @@ -158,7 +118,6 @@ async def handle_event( Args: event: The Pydantic AI agent event to transform. - previous_part: The previous part. Yields: Protocol-specific events. 
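 
         For example, a `PartStartEvent` carrying a `TextPart` is dispatched to
         `handle_text_start`, which a protocol subclass such as `VercelAIEventStream`
         turns into its own start-of-text chunk (a sketch of the flow, not an
         exhaustive mapping).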
@@ -168,12 +127,16 @@ async def handle_event( match event: case PartStartEvent(): - async for e in self.handle_part_start(event, previous_part): + async for e in self.handle_part_start(event): yield e case PartDeltaEvent(): async for e in self.handle_part_delta(event): yield e + case PartEndEvent(): + async for e in self.handle_part_end(event): + yield e case FinalResultEvent(): + self._final_result_event = event async for e in self.handle_final_result(event): yield e case FunctionToolCallEvent(): @@ -183,6 +146,22 @@ async def handle_event( async for e in self.handle_function_tool_result(event): yield e case AgentRunResultEvent(): + if ( + self._final_result_event + and (tool_call_id := self._final_result_event.tool_call_id) + and (tool_name := self._final_result_event.tool_name) + ): + self._final_result_event = None + output_tool_result_event = FunctionToolResultEvent( + result=ToolReturnPart( + tool_call_id=tool_call_id, + tool_name=tool_name, + content='Final result processed.', + ) + ) + async for e in self.handle_function_tool_result(output_tool_result_event): + yield e + self.result = event.result async for e in self.handle_run_result(event): yield e @@ -192,24 +171,20 @@ async def handle_event( async for e in self.after_event(event): yield e - async def handle_part_start( - self, event: PartStartEvent, previous_part: ModelResponsePart | None = None - ) -> AsyncIterator[EventT]: + async def handle_part_start(self, event: PartStartEvent) -> AsyncIterator[EventT]: """Handle a PartStartEvent. Args: event: The PartStartEvent. - previous_part: The previous part. """ part = event.part + previous_part_kind = event.previous_part_kind match part: case TextPart(): - async for e in self.handle_text_start(part, follows_text=isinstance(previous_part, TextPart)): + async for e in self.handle_text_start(part, follows_text=previous_part_kind == 'text'): yield e case ThinkingPart(): - async for e in self.handle_thinking_start( - part, follows_thinking=isinstance(previous_part, ThinkingPart) - ): + async for e in self.handle_thinking_start(part, follows_thinking=previous_part_kind == 'thinking'): yield e case ToolCallPart(): async for e in self.handle_tool_call_start(part): @@ -242,22 +217,20 @@ async def handle_part_delta(self, event: PartDeltaEvent) -> AsyncIterator[EventT async for e in self.handle_tool_call_delta(delta): yield e - async def handle_part_end( - self, part: ModelResponsePart, next_part: ModelResponsePart | None = None - ) -> AsyncIterator[EventT]: - """Handle the end of a part. + async def handle_part_end(self, event: PartEndEvent) -> AsyncIterator[EventT]: + """Handle a PartEndEvent. Args: - part: The part that ended. - next_part: The new part that started. + event: The PartEndEvent. """ - # TODO (DouweM): Make this a proper event. 
Figure out a proper way to do context manager style wrapping + part = event.part + next_part_kind = event.next_part_kind match part: case TextPart(): - async for e in self.handle_text_end(part, followed_by_text=isinstance(next_part, TextPart)): + async for e in self.handle_text_end(part, followed_by_text=next_part_kind == 'text'): yield e case ThinkingPart(): - async for e in self.handle_thinking_end(part, followed_by_thinking=isinstance(next_part, ThinkingPart)): + async for e in self.handle_thinking_end(part, followed_by_thinking=next_part_kind == 'thinking'): yield e case ToolCallPart(): async for e in self.handle_tool_call_end(part): @@ -469,26 +442,6 @@ async def after_event(self, event: SourceEvent) -> AsyncIterator[EventT]: return yield # Make this an async generator - async def before_request(self) -> AsyncIterator[EventT]: - """Handle a request before it is processed.""" - return - yield # Make this an async generator - - async def after_request(self) -> AsyncIterator[EventT]: - """Handle a request after it is processed.""" - return - yield # Make this an async generator - - async def before_response(self) -> AsyncIterator[EventT]: - """Handle a response before it is processed.""" - return - yield # Make this an async generator - - async def after_response(self) -> AsyncIterator[EventT]: - """Yield events after agent streaming completes.""" - return - yield # Make this an async generator - async def before_stream(self) -> AsyncIterator[EventT]: """Yield events before agent streaming starts. diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py index 4b2807dc66..7fa57be7f0 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py @@ -11,7 +11,6 @@ BuiltinToolCallPart, BuiltinToolReturnPart, FilePart, - FinalResultEvent, FunctionToolResultEvent, RetryPromptPart, TextPart, @@ -57,7 +56,6 @@ class VercelAIEventStream(BaseEventStream[RequestData, BaseChunk, AgentDepsT]): def __init__(self, request: RequestData) -> None: """Initialize Vercel AI event stream state.""" super().__init__(request) - self._final_result_tool_id: str | None = None def encode_event(self, event: BaseChunk, accept: str | None = None) -> str: return f'data: {event.model_dump_json(by_alias=True, exclude_none=True)}\n\n' @@ -74,12 +72,6 @@ async def on_error(self, error: Exception) -> AsyncIterator[BaseChunk]: """Handle errors during streaming.""" yield ErrorChunk(error_text=str(error)) - async def after_response(self) -> AsyncIterator[BaseChunk]: - """Yield events after agent response completes.""" - # Close the final result tool if there was one - if tool_call_id := self._final_result_tool_id: - yield ToolOutputAvailableChunk(tool_call_id=tool_call_id, output=None) - async def handle_text_start(self, part: TextPart, follows_text: bool = False) -> AsyncIterator[BaseChunk]: """Handle a TextPart at start.""" if follows_text: @@ -156,17 +148,14 @@ async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterato async def handle_tool_call_end(self, part: ToolCallPart) -> AsyncIterator[BaseChunk]: """Handle a ToolCallPart at end.""" - # TODO (DouweM): We don't have the full args available here, - # and we don't seem to need to send this anyway if we've already sent deltas - return - yield # Make this an async generator + yield ToolInputAvailableChunk(tool_call_id=part.tool_call_id, tool_name=part.tool_name, input=part.args) async def 
handle_builtin_tool_call_end(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseChunk]: """Handle a BuiltinToolCallPart at end.""" yield ToolInputAvailableChunk( tool_call_id=part.tool_call_id, tool_name=part.tool_name, - input=part.args, # TODO (DouweM): This should match the full tool input, now erases the input from the UI! + input=part.args, provider_executed=True, provider_metadata={'pydantic_ai': {'provider_name': part.provider_name}}, ) @@ -193,10 +182,3 @@ async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> A yield ToolOutputAvailableChunk(tool_call_id=result.tool_call_id, output=result.content) # TODO (DouweM): Stream ToolCallResultEvent.content as user parts? - - async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[BaseChunk]: - """Handle a FinalResultEvent, tracking the final result tool.""" - if event.tool_call_id and event.tool_name: - self._final_result_tool_id = event.tool_call_id - return - yield # Make this an async generator diff --git a/tests/test_vercel_ai.py b/tests/test_vercel_ai.py index 02f7f5fe54..286c42a55f 100644 --- a/tests/test_vercel_ai.py +++ b/tests/test_vercel_ai.py @@ -269,6 +269,10 @@ async def test_run(allow_model_requests: None, openai_api_key: str): 'type': 'tool-input-available', 'toolCallId': 'ws_00e767404995b9950068e647f909248191bfe8d05eeed67645', 'toolName': 'web_search', + 'input': { + 'query': 'OpenTelemetry FastAPI instrumentation capture request and response body', + 'type': 'search', + }, 'providerExecuted': True, 'providerMetadata': {'pydantic_ai': {'provider_name': 'openai'}}, }, @@ -290,6 +294,7 @@ async def test_run(allow_model_requests: None, openai_api_key: str): 'type': 'tool-input-available', 'toolCallId': 'ws_00e767404995b9950068e647fb73c48191b0bdb147c3a0d22c', 'toolName': 'web_search', + 'input': {'query': 'OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY Python', 'type': 'search'}, 'providerExecuted': True, 'providerMetadata': {'pydantic_ai': {'provider_name': 'openai'}}, }, @@ -311,6 +316,7 @@ async def test_run(allow_model_requests: None, openai_api_key: str): 'type': 'tool-input-available', 'toolCallId': 'ws_00e767404995b9950068e647fee97c8191919865e0c0a78bba', 'toolName': 'web_search', + 'input': {'query': 'OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY opentelemetry python', 'type': 'search'}, 'providerExecuted': True, 'providerMetadata': {'pydantic_ai': {'provider_name': 'openai'}}, }, @@ -332,6 +338,10 @@ async def test_run(allow_model_requests: None, openai_api_key: str): 'type': 'tool-input-available', 'toolCallId': 'ws_00e767404995b9950068e64803f27c81918a39ce50cb8dfbc2', 'toolName': 'web_search', + 'input': { + 'query': 'site:github.com open-telemetry/opentelemetry-python-contrib OTEL_INSTRUMENTATION_HTTP_CAPTURE_BODY', + 'type': 'search', + }, 'providerExecuted': True, 'providerMetadata': {'pydantic_ai': {'provider_name': 'openai'}}, }, @@ -353,6 +363,7 @@ async def test_run(allow_model_requests: None, openai_api_key: str): 'type': 'tool-input-available', 'toolCallId': 'ws_00e767404995b9950068e6480ac0888191a7897231e6ca9911', 'toolName': 'web_search', + 'input': {'query': None, 'type': 'search'}, 'providerExecuted': True, 'providerMetadata': {'pydantic_ai': {'provider_name': 'openai'}}, }, @@ -374,6 +385,7 @@ async def test_run(allow_model_requests: None, openai_api_key: str): 'type': 'tool-input-available', 'toolCallId': 'ws_00e767404995b9950068e6480e11208191834104e1aaab1148', 'toolName': 'web_search', + 'input': {'query': None, 'type': 'search'}, 'providerExecuted': True, 
'providerMetadata': {'pydantic_ai': {'provider_name': 'openai'}}, }, @@ -395,6 +407,7 @@ async def test_run(allow_model_requests: None, openai_api_key: str): 'type': 'tool-input-available', 'toolCallId': 'ws_00e767404995b9950068e648118bf88191aa7f804637c45b32', 'toolName': 'web_search', + 'input': {'query': 'OTEL_PYTHON_LOG_CORRELATION environment variable', 'type': 'search'}, 'providerExecuted': True, 'providerMetadata': {'pydantic_ai': {'provider_name': 'openai'}}, }, From 47a396b5cff17a350eded1616959e24ae7c9b574 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Tue, 14 Oct 2025 13:27:00 +0000 Subject: [PATCH 14/46] Properly finish Vercel steps and messages --- .../pydantic_ai/ui/event_stream.py | 58 ++++++++++++++++++- .../pydantic_ai/ui/vercel_ai/_event_stream.py | 22 +++++++ .../ui/vercel_ai/_response_types.py | 6 ++ tests/test_vercel_ai.py | 8 ++- 4 files changed, 91 insertions(+), 3 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py index cbcb8af293..6ae6d079f9 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py @@ -9,7 +9,7 @@ from abc import ABC, abstractmethod from collections.abc import AsyncIterator from dataclasses import dataclass, field -from typing import Generic, TypeVar +from typing import Generic, Literal, TypeVar from uuid import uuid4 from ..messages import ( @@ -87,7 +87,7 @@ async def encode_stream(self, stream: AsyncIterator[EventT], accept: str | None async for event in stream: yield self.encode_event(event, accept) - async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterator[EventT]: + async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterator[EventT]: # noqa: C901 """Handle a stream of agent events. Args: @@ -99,14 +99,48 @@ async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterat async for e in self.before_stream(): yield e + turn: Literal['request', 'response'] | None = None try: async for event in stream: + # TODO (DouweM): Introduce, possibly, MessageStartEvent, MessageEndEvent with ModelRequest/Response? 
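+                # 'request' spans the model streaming its parts; 'response' spans that
+                # output being handled (e.g. tool calls running). The before_*/after_*
+                # hooks below fire on each transition between turns.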
+ next_turn = turn + if isinstance(event, PartStartEvent): + next_turn = 'request' + elif isinstance(event, FunctionToolCallEvent): + next_turn = 'response' + elif isinstance(event, AgentRunResultEvent): + next_turn = None + + if turn != next_turn: + if turn == 'request': + async for e in self.after_request(): + yield e + elif turn == 'response': + async for e in self.after_response(): + yield e + + turn = next_turn + + if turn == 'request': + async for e in self.before_request(): + yield e + elif turn == 'response': + async for e in self.before_response(): + yield e + async for e in self.handle_event(event): yield e except Exception as e: async for e in self.on_error(e): yield e else: + if turn == 'request': + async for e in self.after_request(): + yield e + elif turn == 'response': + async for e in self.after_response(): + yield e + async for e in self.after_stream(): yield e @@ -442,6 +476,26 @@ async def after_event(self, event: SourceEvent) -> AsyncIterator[EventT]: return yield # Make this an async generator + async def before_request(self) -> AsyncIterator[EventT]: + """Handle a request before it is processed.""" + return + yield # Make this an async generator + + async def after_request(self) -> AsyncIterator[EventT]: + """Handle a request after it is processed.""" + return + yield # Make this an async generator + + async def before_response(self) -> AsyncIterator[EventT]: + """Handle a response before it is processed.""" + return + yield # Make this an async generator + + async def after_response(self) -> AsyncIterator[EventT]: + """Handle a response after it is processed.""" + return + yield # Make this an async generator + async def before_stream(self) -> AsyncIterator[EventT]: """Yield events before agent streaming starts. diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py index 7fa57be7f0..317db9c27c 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py @@ -25,13 +25,16 @@ from ._request_types import RequestData from ._response_types import ( BaseChunk, + DoneChunk, ErrorChunk, FileChunk, FinishChunk, + FinishStepChunk, ReasoningDeltaChunk, ReasoningEndChunk, ReasoningStartChunk, StartChunk, + StartStepChunk, TextDeltaChunk, TextEndChunk, TextStartChunk, @@ -57,16 +60,35 @@ def __init__(self, request: RequestData) -> None: """Initialize Vercel AI event stream state.""" super().__init__(request) + self._step_started = False + def encode_event(self, event: BaseChunk, accept: str | None = None) -> str: + if isinstance(event, DoneChunk): + return 'data: [DONE]\n\n' return f'data: {event.model_dump_json(by_alias=True, exclude_none=True)}\n\n' async def before_stream(self) -> AsyncIterator[BaseChunk]: """Yield events before agent streaming starts.""" yield StartChunk() + async def before_request(self) -> AsyncIterator[BaseChunk]: + """Yield events before the request is processed.""" + self._step_started = True + yield StartStepChunk() + + async def after_response(self) -> AsyncIterator[BaseChunk]: + """Yield events after the response is processed.""" + if self._step_started: + yield FinishStepChunk() + self._step_started = False + async def after_stream(self) -> AsyncIterator[BaseChunk]: """Yield events after agent streaming completes.""" + if self._step_started: + yield FinishStepChunk() + yield FinishChunk() + yield DoneChunk() async def on_error(self, error: Exception) -> AsyncIterator[BaseChunk]: """Handle errors 
during streaming.""" diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_response_types.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_response_types.py index a1c1187f1f..1fb97836f2 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_response_types.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_response_types.py @@ -237,3 +237,9 @@ class MessageMetadataChunk(BaseChunk): type: Literal['message-metadata'] = 'message-metadata' message_metadata: Any + + +class DoneChunk(BaseChunk): + """Done chunk.""" + + type: Literal['done'] = 'done' diff --git a/tests/test_vercel_ai.py b/tests/test_vercel_ai.py index 286c42a55f..d6e83445b1 100644 --- a/tests/test_vercel_ai.py +++ b/tests/test_vercel_ai.py @@ -253,10 +253,14 @@ async def test_run(allow_model_requests: None, openai_api_key: str): ), ] ) - events = [json.loads(event.removeprefix('data: ')) async for event in adapter.encode_stream(adapter.run_stream())] + events = [ + '[DONE]' if '[DONE]' in event else json.loads(event.removeprefix('data: ')) + async for event in adapter.encode_stream(adapter.run_stream()) + ] assert events == snapshot( [ {'type': 'start'}, + {'type': 'start-step'}, {'type': 'reasoning-start', 'id': IsStr()}, {'type': 'reasoning-end', 'id': IsStr()}, {'type': 'tool-input-start', 'toolCallId': IsStr(), 'toolName': 'web_search', 'providerExecuted': True}, @@ -1010,6 +1014,8 @@ def client_response\ {'type': 'text-delta', 'delta': ' or accumulate chunked', 'id': IsStr()}, {'type': 'text-delta', 'delta': ' bodies safely?', 'id': IsStr()}, {'type': 'text-end', 'id': IsStr()}, + {'type': 'finish-step'}, {'type': 'finish'}, + '[DONE]', ] ) From 7ba0cc78cc7ae9aac05c99b8e149b5bc2a6146c6 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Tue, 14 Oct 2025 14:32:40 +0000 Subject: [PATCH 15/46] Update tests for new PartEndEvent --- pydantic_ai_slim/pydantic_ai/__init__.py | 2 + pydantic_ai_slim/pydantic_ai/ui/adapter.py | 4 - .../pydantic_ai/ui/ag_ui/_adapter.py | 5 - .../pydantic_ai/ui/event_stream.py | 1 + .../pydantic_ai/ui/vercel_ai/_adapter.py | 7 +- tests/models/test_anthropic.py | 509 ++++++- tests/models/test_bedrock.py | 72 +- tests/models/test_deepseek.py | 285 +++- tests/models/test_fallback.py | 20 +- tests/models/test_gemini.py | 5 +- tests/models/test_google.py | 165 ++- tests/models/test_groq.py | 231 ++- tests/models/test_huggingface.py | 1252 ++++++++++++++++- tests/models/test_instrumented.py | 2 + tests/models/test_mistral.py | 134 +- tests/models/test_openai.py | 5 + tests/models/test_openai_responses.py | 170 ++- tests/test_agent.py | 11 +- tests/test_streaming.py | 100 +- 19 files changed, 2858 insertions(+), 122 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/__init__.py b/pydantic_ai_slim/pydantic_ai/__init__.py index 8f6254f425..50959dbc70 100644 --- a/pydantic_ai_slim/pydantic_ai/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/__init__.py @@ -63,6 +63,7 @@ ModelResponseStreamEvent, MultiModalContent, PartDeltaEvent, + PartEndEvent, PartStartEvent, RetryPromptPart, SystemPromptPart, @@ -161,6 +162,7 @@ 'ModelResponseStreamEvent', 'MultiModalContent', 'PartDeltaEvent', + 'PartEndEvent', 'PartStartEvent', 'RetryPromptPart', 'SystemPromptPart', diff --git a/pydantic_ai_slim/pydantic_ai/ui/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/adapter.py index 1aeb71e21f..10426e12b9 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/adapter.py @@ -135,10 +135,6 @@ async def validate_request(cls, request: Request) -> RunRequestT: def load_messages(cls, messages: 
Sequence[MessageT]) -> list[ModelMessage]:
         """Load messages from the request and return the loaded messages."""
 
-    @abstractmethod
-    def dump_messages(self, messages: Sequence[ModelMessage]) -> list[MessageT]:
-        """Dump messages to the request and return the dumped messages."""
-
     @property
     @abstractmethod
     def event_stream(self) -> BaseEventStream[RunRequestT, EventT, AgentDepsT]:
diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py
index 67e3ecddbf..5c3eef5280 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py
@@ -100,11 +100,6 @@ async def validate_request(cls, request: Request) -> RunAgentInput:
         """Validate the request and return the validated request."""
         return RunAgentInput.model_validate(await request.json())
 
-    def dump_messages(self, messages: Sequence[ModelMessage]) -> list[Message]:
-        """Dump messages to the request and return the dumped messages."""
-        # TODO (DouweM): bring in from https://github.com/pydantic/pydantic-ai/pull/3068
-        raise NotImplementedError
-
     @property
     def event_stream(self) -> BaseEventStream[RunAgentInput, BaseEvent, AgentDepsT]:
         """Create an event stream for the adapter."""
diff --git a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py
index 6ae6d079f9..4652c3dfa1 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py
@@ -103,9 +103,10 @@ async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterat
         try:
             async for event in stream:
                 # TODO (DouweM): Introduce, possibly, MessageStartEvent, MessageEndEvent with ModelRequest/Response?
+                # People have requested these before; we could store the ModelRequest/ModelResponse on them.
                 # 'request' spans the model streaming its parts; 'response' spans that
                 # output being handled (e.g. tool calls running). The before_*/after_*
                 # hooks below fire on each transition between turns.
                 next_turn = turn
                 if isinstance(event, PartStartEvent):
                     next_turn = 'request'
diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py
index 71c2933e40..6ce73075d0 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py
@@ -71,11 +71,6 @@ async def validate_request(cls, request: Request) -> RequestData:
         """Validate a Vercel AI request."""
         return request_data_ta.validate_json(await request.body())
 
-    def dump_messages(self, messages: Sequence[ModelMessage]) -> list[UIMessage]:
-        """Dump messages to the request and return the dumped messages."""
-        # TODO (DouweM): implement
-        raise NotImplementedError
-
     @property
     def event_stream(self) -> BaseEventStream[RequestData, BaseChunk, AgentDepsT]:
         return VercelAIEventStream(self.request)
@@ -151,7 +146,9 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]:  #
                 try:
                     file = BinaryContent.from_data_uri(part.url)
                 except ValueError as e:
-                    # TODO (DouweM): handle this better
+                    # We don't yet handle non-data-URI file URLs returned by assistants, as no Pydantic AI models do this.
                     raise ValueError(
                         'Vercel AI integration can currently only handle assistant file parts with data URIs.'
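+                        # (Hypothetical enhancement, not implemented here: remote file
+                        # URLs could be fetched and wrapped in BinaryContent instead.)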
) from e diff --git a/tests/models/test_anthropic.py b/tests/models/test_anthropic.py index 5fe850bf45..1e24feedfa 100644 --- a/tests/models/test_anthropic.py +++ b/tests/models/test_anthropic.py @@ -29,6 +29,7 @@ ModelResponse, ModelRetry, PartDeltaEvent, + PartEndEvent, PartStartEvent, RetryPromptPart, SystemPromptPart, @@ -695,12 +696,7 @@ async def my_tool(first: str, second: str) -> int: # The tool output doesn't echo any content to the stream, so we only get the final payload once when # the block starts and once when it ends. - assert chunks == snapshot( - [ - 'FINAL_PAYLOAD', - 'FINAL_PAYLOAD', - ] - ) + assert chunks == snapshot(['FINAL_PAYLOAD', 'FINAL_PAYLOAD', 'FINAL_PAYLOAD']) assert result.is_complete assert result.usage() == snapshot( RunUsage( @@ -1255,16 +1251,39 @@ async def test_anthropic_model_thinking_part_redacted_stream(allow_model_request provider_name='anthropic', ), ), + PartEndEvent( + index=0, + part=ThinkingPart( + content='', + id='redacted_thinking', + signature='EqkECkYIBxgCKkA8AZ4noDfV5VcOJe/p3JTRB6Xz5297mrWhl3MbHSXDKTMfuB/Z52U2teiWWTN0gg4eQ4bGS9TPilFX/xWTIq9HEgyOmstSPriNwyn1G7AaDC51r0hQ062qEd55IiIwYQj3Z3MSBBv0bSVdXi60LEHDvC7tzzmpQfw5Hb6R9rtyOz/6vC/xPw9/E1mUqfBqKpADO2HS2QlE/CnuzR901nZOn0TOw7kEXwH7kg30c85b9W7iKALgEejY9sELMBdPyIZNlTgKqNOKtY3R/aV5rGIRPTHh2Wh9Ijmqsf/TT7i//Z+InaYTo6f/fxF8R0vFXMRPOBME4XIscb05HcNhh4c9FDkpqQGYKaq31IR1NNwPWA0BsvdDz7SIo1nfx4H+X0qKKqqegKnQ3ynaXiD5ydT1C4U7fku4ftgF0LGwIk4PwXBE+4BP0DcKr1HV3cn7YSyNakBSDTvRJMKcXW6hl7X3w2a4//sxjC1Cjq0uzkIHkhzRWirN0OSXt+g3m6b1ex0wGmSyuO17Ak6kgVBpxwPugtrqsflG0oujFem44hecXJ9LQNssPf4RSlcydiG8EXp/XLGTe0YfHbe3kJagkowSH/Dm6ErXBiVs7249brncyY8WA+7MOoqIM82YIU095B9frCqDJDUWnN84VwOszRrcaywmpJXZO4aeQLMC1kXD5Wabu+O/00tD/X67EWkkWuR0AhDIXXjpot45vnBd4ewJ/hgB', + provider_name='anthropic', + ), + next_part_kind='thinking', + ), PartStartEvent( index=1, part=ThinkingPart( content='', id='redacted_thinking', - signature=IsStr(), + signature='EtgBCkYIBxgCKkDQfGkwzflEJP5asG3oQfJXcTwJLoRznn8CmuczWCsJ36dv93X9H0NCeaJRbi5BrCA2DyMgFnRKRuzZx8VTv5axEgwkFmcHJk8BSiZMZRQaDDYv2KZPfbFgRa2QjyIwm47f5YYsSK9CT/oh/WWpU1HJJVHr8lrC6HG1ItRdtMvYQYmEGy+KhyfcIACfbssVKkDGv/NKqNMOAcu0bd66gJ2+R1R0PX11Jxn2Nd1JtZqkxx7vMT/PXtHDhm9jkDZ2k/6RjRRFuab/DBV3yRYdZ1J0GAE=', + provider_name='anthropic', + ), + previous_part_kind='thinking', + ), + PartEndEvent( + index=1, + part=ThinkingPart( + content='', + id='redacted_thinking', + signature='EtgBCkYIBxgCKkDQfGkwzflEJP5asG3oQfJXcTwJLoRznn8CmuczWCsJ36dv93X9H0NCeaJRbi5BrCA2DyMgFnRKRuzZx8VTv5axEgwkFmcHJk8BSiZMZRQaDDYv2KZPfbFgRa2QjyIwm47f5YYsSK9CT/oh/WWpU1HJJVHr8lrC6HG1ItRdtMvYQYmEGy+KhyfcIACfbssVKkDGv/NKqNMOAcu0bd66gJ2+R1R0PX11Jxn2Nd1JtZqkxx7vMT/PXtHDhm9jkDZ2k/6RjRRFuab/DBV3yRYdZ1J0GAE=', provider_name='anthropic', ), + next_part_kind='text', + ), + PartStartEvent( + index=2, part=TextPart(content="I notice that you've sent what"), previous_part_kind='thinking' ), - PartStartEvent(index=2, part=TextPart(content="I notice that you've sent what")), FinalResultEvent(tool_name=None, tool_call_id=None), PartDeltaEvent(index=2, delta=TextPartDelta(content_delta=' appears to be some')), PartDeltaEvent(index=2, delta=TextPartDelta(content_delta=' kind of test string')), @@ -1289,6 +1308,16 @@ async def test_anthropic_model_thinking_part_redacted_stream(allow_model_request PartDeltaEvent(index=2, delta=TextPartDelta(content_delta=', and honest. 
How')), PartDeltaEvent(index=2, delta=TextPartDelta(content_delta=' can I assist you today with')), PartDeltaEvent(index=2, delta=TextPartDelta(content_delta=' a legitimate task or question?')), + PartEndEvent( + index=2, + part=TextPart( + content="""\ +I notice that you've sent what appears to be some kind of test string or command. I don't have any special "magic string" triggers or backdoor commands that would expose internal systems or change my behavior. + +I'm Claude, an AI assistant created by Anthropic to be helpful, harmless, and honest. How can I assist you today with a legitimate task or question?\ +""" + ), + ), ] ) @@ -1593,7 +1622,32 @@ async def test_anthropic_model_thinking_part_stream(allow_model_requests: None, provider_name='anthropic', ), ), - PartStartEvent(index=1, part=IsInstance(TextPart)), + PartEndEvent( + index=0, + part=ThinkingPart( + content="""\ +The question is asking about how to safely cross a street, which is a basic but important safety skill. + +I should provide clear, step-by-step instructions for crossing a street safely: + +1. Find a designated crossing point if possible (crosswalk, pedestrian crossing, etc.) +2. Look both ways (left-right-left in countries where cars drive on the right; right-left-right where they drive on the left) +3. Wait for traffic to stop or for a clear gap in traffic +4. Make eye contact with drivers if possible +5. Cross at a steady pace without running +6. Continue watching for traffic while crossing +7. Use pedestrian signals where available + +I'll also mention some additional safety tips and considerations for different situations (busy streets, streets with traffic signals, etc.).\ +""", + signature='ErUBCkYIBhgCIkA/Y+JwNMtmQyHcoo4/v2dpY6ruQifcu3pAzHbzIwpIrjIyaWaYdJOp9/0vUmBPj+LmqgiDSTktRcn0U75AlpXOEgwzVmYdHgDaZfeyBGcaDFSIZCHzzrZQkolJKCIwhMETosYLx+Dw/vKa83hht943z9R3/ViOqokT25JmMfaGOntuo+33Zxqf5rqUbkQ3Kh34rIqqnKaFSVr7Nn85z8OFN3Cwzz+HmXl2FgCXOxgC', + provider_name='anthropic', + ), + next_part_kind='text', + ), + PartStartEvent( + index=1, part=TextPart(content='# How to Cross a Street Safely'), previous_part_kind='thinking' + ), FinalResultEvent(tool_name=None, tool_call_id=None), PartDeltaEvent( index=1, @@ -1659,6 +1713,38 @@ async def test_anthropic_model_thinking_part_stream(allow_model_requests: None, PartDeltaEvent(index=1, delta=IsInstance(TextPartDelta)), PartDeltaEvent(index=1, delta=IsInstance(TextPartDelta)), PartDeltaEvent(index=1, delta=IsInstance(TextPartDelta)), + PartEndEvent( + index=1, + part=TextPart( + content="""\ +# How to Cross a Street Safely + +Follow these steps to cross a street safely: + +1. **Find a proper crossing point** - Use a crosswalk, pedestrian crossing, or intersection whenever possible. + +2. **Stop at the curb** - Stand slightly back from the edge. + +3. **Look both ways** - Look left, right, then left again (reverse in countries where cars drive on the left). + +4. **Listen for traffic** - Remove headphones if you're wearing them. + +5. **Wait for a gap** or for vehicles to stop completely. + +6. **Make eye contact** with drivers to ensure they see you. + +7. **Cross with purpose** - Walk at a steady pace without stopping or running. + +8. **Continue watching** for traffic as you cross. + +9. **Use signals** - Follow pedestrian crossing signals where available. + +If there's a traffic light or pedestrian signal, only cross when indicated, and always check for turning vehicles even when you have the right of way. 
+ +Is there a specific situation or type of street crossing you're concerned about?\ +""" + ), + ), ] ) @@ -2524,11 +2610,25 @@ async def test_anthropic_model_web_search_tool_stream(allow_model_requests: None provider_name='anthropic', ), ), + PartEndEvent( + index=0, + part=ThinkingPart( + content="""\ +The user is asking about the weather in San Francisco today. This is clearly a request for current, real-time information that changes daily, so I should use web search to get up-to-date weather information. According to the guidelines, today's date is September 16, 2025. + +I should search for current weather in San Francisco. I'll include "today" in the search query to get the most current information.\ +""", + signature='Er8ECkYIBxgCKkDp29haxwUos3j9hg3HNQI8e4jcFtinIsLxpzaQR/MhPnIpHkUpSNPatD/C2EVyiEGg2LIO1lhkU/P8XLgiyejFEgzinYyrRtGe03DeFEIaDL63CVUOAo1v/57lpSIw+msm1NHv1h+xLzkbu2YqlXPwjza0tVjwAj7RLUFwB1HpPbdv6hlityaMFb/SwKZZKqYDwbYu36cdPpUcpirpZaKZ/DITzfWJkX93BXmRl5au50mxAiFe9B8XxreADaofra5cmevEaaLH0b5Ze/IC0ja/cJdo9NoVlyHlqdXmex22CAkg0Y/HnsZr8MbnE6GyG9bOqAEhwb6YgKHMaMLDVmElbNSsD7luWtsbw5BDvRaqSSROzTxH4s0dqjUqJsoOBeUXuUqWHSl2KwQi8akELKUnvlDz15ZwFI1yVTHA5nSMFIhjB0jECs1g8PjFkAYTHkHddYR5/SLruy1ENpKU0xjc/hd/O41xnI3PxHBGDKv/hdeSVBKjJ0SDYIwXW96QS5vzlKxYGCqtibj2VxPzUlDITvhn1oO+cjCXClo1lE+ul//+nk7jk7fRkvl1/+pscYCpBoGKprA7CU1kpiggO9pAVUrpZM9vC2jF5/VVVYEoY3CyC+hrNpDWXTUdGdCTofhp2wdWVZzCmO7/+L8SUnlu64YYe9PWsRDuHRe8Lvl0M9EyBrhWnGWQkkk9b+O5uNU5xgE0sjbuGzgYswhwSd7Powb8XbtbW6h7lTbo1M2IQ3Ok0kdt0RAYAQ==', + provider_name='anthropic', + ), + next_part_kind='builtin-tool-call', + ), PartStartEvent( index=1, part=BuiltinToolCallPart( tool_name='web_search', tool_call_id='srvtoolu_01FYcUbzEaqqQh1WBRj1QX3h', provider_name='anthropic' ), + previous_part_kind='thinking', ), PartDeltaEvent( index=1, delta=ToolCallPartDelta(args_delta='', tool_call_id='srvtoolu_01FYcUbzEaqqQh1WBRj1QX3h') @@ -2559,6 +2659,16 @@ async def test_anthropic_model_web_search_tool_stream(allow_model_requests: None PartDeltaEvent( index=1, delta=ToolCallPartDelta(args_delta='ay"}', tool_call_id='srvtoolu_01FYcUbzEaqqQh1WBRj1QX3h') ), + PartEndEvent( + index=1, + part=BuiltinToolCallPart( + tool_name='web_search', + args='{"query": "San Francisco weather today"}', + tool_call_id='srvtoolu_01FYcUbzEaqqQh1WBRj1QX3h', + provider_name='anthropic', + ), + next_part_kind='builtin-tool-return', + ), PartStartEvent( index=2, part=BuiltinToolReturnPart( @@ -2639,8 +2749,9 @@ async def test_anthropic_model_web_search_tool_stream(allow_model_requests: None timestamp=IsDatetime(), provider_name='anthropic', ), + previous_part_kind='builtin-tool-call', ), - PartStartEvent(index=3, part=TextPart(content='Base')), + PartStartEvent(index=3, part=TextPart(content='Base'), previous_part_kind='builtin-tool-return'), FinalResultEvent(tool_name=None, tool_call_id=None), PartDeltaEvent(index=3, delta=TextPartDelta(content_delta='d on the search results, I can see')), PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' that the information is a bit date')), @@ -2652,11 +2763,19 @@ async def test_anthropic_model_web_search_tool_stream(allow_model_requests: None ), PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' Let me search for more current')), PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' information.')), + PartEndEvent( + index=3, + part=TextPart( + content='Based on the search results, I can see that the information is a bit dated (most results are from about 6 days to a week ago), but I can provide you with the available 
weather information for San Francisco. Let me search for more current information.' + ), + next_part_kind='builtin-tool-call', + ), PartStartEvent( index=4, part=BuiltinToolCallPart( tool_name='web_search', tool_call_id='srvtoolu_01FDqc7ruGpVRoNuD5G6jkUx', provider_name='anthropic' ), + previous_part_kind='text', ), PartDeltaEvent( index=4, delta=ToolCallPartDelta(args_delta='', tool_call_id='srvtoolu_01FDqc7ruGpVRoNuD5G6jkUx') @@ -2696,6 +2815,16 @@ async def test_anthropic_model_web_search_tool_stream(allow_model_requests: None PartDeltaEvent( index=4, delta=ToolCallPartDelta(args_delta='025"}', tool_call_id='srvtoolu_01FDqc7ruGpVRoNuD5G6jkUx') ), + PartEndEvent( + index=4, + part=BuiltinToolCallPart( + tool_name='web_search', + args='{"query": "San Francisco weather September 16 2025"}', + tool_call_id='srvtoolu_01FDqc7ruGpVRoNuD5G6jkUx', + provider_name='anthropic', + ), + next_part_kind='builtin-tool-return', + ), PartStartEvent( index=5, part=BuiltinToolReturnPart( @@ -2776,8 +2905,9 @@ async def test_anthropic_model_web_search_tool_stream(allow_model_requests: None timestamp=IsDatetime(), provider_name='anthropic', ), + previous_part_kind='builtin-tool-call', ), - PartStartEvent(index=6, part=TextPart(content='Base')), + PartStartEvent(index=6, part=TextPart(content='Base'), previous_part_kind='builtin-tool-return'), PartDeltaEvent( index=6, delta=TextPartDelta( @@ -2795,7 +2925,27 @@ async def test_anthropic_model_web_search_tool_stream(allow_model_requests: None ), ), PartDeltaEvent(index=6, delta=TextPartDelta(content_delta=" to AccuWeather's forecast, ")), - PartStartEvent(index=7, part=TextPart(content='today (September 16) shows a high of 76°F and low of 59°F')), + PartEndEvent( + index=6, + part=TextPart( + content="""\ +Based on the search results, I can provide you with information about San Francisco's weather today (September 16, 2025): + +According to AccuWeather's forecast, \ +""" + ), + next_part_kind='text', + ), + PartStartEvent( + index=7, + part=TextPart(content='today (September 16) shows a high of 76°F and low of 59°F'), + previous_part_kind='text', + ), + PartEndEvent( + index=7, + part=TextPart(content='today (September 16) shows a high of 76°F and low of 59°F'), + next_part_kind='text', + ), PartStartEvent( index=8, part=TextPart( @@ -2805,12 +2955,33 @@ async def test_anthropic_model_web_search_tool_stream(allow_model_requests: None From the recent San\ """ ), + previous_part_kind='text', ), PartDeltaEvent(index=8, delta=TextPartDelta(content_delta=' Francisco Chronicle weather report, ')), + PartEndEvent( + index=8, + part=TextPart( + content="""\ + for San Francisco. 
+ +From the recent San Francisco Chronicle weather report, \ +""" + ), + next_part_kind='text', + ), PartStartEvent( - index=9, part=TextPart(content='average mid-September highs in San Francisco are around 70 degrees') + index=9, + part=TextPart(content='average mid-September highs in San Francisco are around 70 degrees'), + previous_part_kind='text', + ), + PartEndEvent( + index=9, + part=TextPart(content='average mid-September highs in San Francisco are around 70 degrees'), + next_part_kind='text', + ), + PartStartEvent( + index=10, part=TextPart(content=", so today's forecast of 76°F is"), previous_part_kind='text' ), - PartStartEvent(index=10, part=TextPart(content=", so today's forecast of 76°F is")), PartDeltaEvent( index=10, delta=TextPartDelta( @@ -2830,13 +3001,33 @@ async def test_anthropic_model_web_search_tool_stream(allow_model_requests: None """ ), ), + PartEndEvent( + index=10, + part=TextPart( + content="""\ +, so today's forecast of 76°F is slightly above the typical temperature for this time of year. + +The general weather pattern for San Francisco in September includes: +- \ +""" + ), + next_part_kind='text', + ), PartStartEvent( index=11, part=TextPart( content='Daytime temperatures usually reach 22°C (72°F) in San Francisco in September, falling to 13°C' ), + previous_part_kind='text', ), PartDeltaEvent(index=11, delta=TextPartDelta(content_delta=' (55°F) at night')), + PartEndEvent( + index=11, + part=TextPart( + content='Daytime temperatures usually reach 22°C (72°F) in San Francisco in September, falling to 13°C (55°F) at night' + ), + next_part_kind='text', + ), PartStartEvent( index=12, part=TextPart( @@ -2845,12 +3036,31 @@ async def test_anthropic_model_web_search_tool_stream(allow_model_requests: None - \ """ ), + previous_part_kind='text', + ), + PartEndEvent( + index=12, + part=TextPart( + content="""\ + +- \ +""" + ), + next_part_kind='text', ), PartStartEvent( index=13, part=TextPart(content='There are normally 9 hours of bright sunshine each day in San Francisco in'), + previous_part_kind='text', ), PartDeltaEvent(index=13, delta=TextPartDelta(content_delta=' September')), + PartEndEvent( + index=13, + part=TextPart( + content='There are normally 9 hours of bright sunshine each day in San Francisco in September' + ), + next_part_kind='text', + ), PartStartEvent( index=14, part=TextPart( @@ -2859,15 +3069,34 @@ async def test_anthropic_model_web_search_tool_stream(allow_model_requests: None - \ """ ), + previous_part_kind='text', + ), + PartEndEvent( + index=14, + part=TextPart( + content="""\ + +- \ +""" + ), + next_part_kind='text', ), PartStartEvent( index=15, part=TextPart( content='San Francisco experiences minimal rainfall in September, with an average precipitation of just 3mm.' ), + previous_part_kind='text', ), PartDeltaEvent(index=15, delta=TextPartDelta(content_delta=' Typically, there are no rainy days')), PartDeltaEvent(index=15, delta=TextPartDelta(content_delta=' during this month')), + PartEndEvent( + index=15, + part=TextPart( + content='San Francisco experiences minimal rainfall in September, with an average precipitation of just 3mm. 
Typically, there are no rainy days during this month' + ), + next_part_kind='text', + ), PartStartEvent( index=16, part=TextPart( @@ -2877,6 +3106,7 @@ async def test_anthropic_model_web_search_tool_stream(allow_model_requests: None So for today, you can expect partly sunny to sunny skies with a\ """ ), + previous_part_kind='text', ), PartDeltaEvent(index=16, delta=TextPartDelta(content_delta=' high around 76°F (24°C)')), PartDeltaEvent(index=16, delta=TextPartDelta(content_delta=' and a low around 59°F (15°C),')), @@ -2884,6 +3114,16 @@ async def test_anthropic_model_web_search_tool_stream(allow_model_requests: None PartDeltaEvent( index=16, delta=TextPartDelta(content_delta='aping up to be a pleasant day in San Francisco!') ), + PartEndEvent( + index=16, + part=TextPart( + content="""\ + + +So for today, you can expect partly sunny to sunny skies with a high around 76°F (24°C) and a low around 59°F (15°C), with very little chance of rain. It's shaping up to be a pleasant day in San Francisco!\ +""" + ), + ), BuiltinToolCallEvent( # pyright: ignore[reportDeprecated] part=BuiltinToolCallPart( tool_name='web_search', @@ -3420,13 +3660,43 @@ async def test_anthropic_code_execution_tool_stream(allow_model_requests: None, provider_name='anthropic', ), ), + PartEndEvent( + index=0, + part=ThinkingPart( + content="""\ +The user is asking me to calculate a mathematical expression: 65465-6544 * 65464-6+1.02255 + +This involves multiplication and subtraction operations, and I need to be careful about the order of operations (PEMDAS/BODMAS). Let me break this down: + +65465-6544 * 65464-6+1.02255 + +Following order of operations: +1. First, multiplication: 6544 * 65464 +2. Then left to right for addition and subtraction: 65465 - (result from step 1) - 6 + 1.02255 + +This is a computational task that requires precise calculations, so I should use the code_execution tool to get an accurate result.\ +""", + signature='EucFCkYIBxgCKkCfcR3zTiKFcMLhP1aMZu4l0cfgiw3ukkSHOSX2qV1DEKtpe3pu1HpRvDz1mEw32e/wvHoS/AfpVYk3AFb8oAscEgxips//IwdGKRINkQoaDDc122APa5lQXEtsuiIw7RQW/ow7z+MOXL6D8pAl4Iz5V6VSbn2A37DxwRbzOYHSicZuvVrhZHLmn2WWwTZjKs4EYn4HNPF6+Y+9dITwGBWUz6WXsOnv/S1sp+WJLYD8vGMDG9DzTIdjQ9pMN/Bg6VB3hPTveXqxopBk+V7u1WaQC0NmkEmREv6Pdq9iHHEnuIhN0t7UrrNDxPwt/cmbilfa7QL8ofeeSorIRwvibXtG0aqNDu42r6JkatwttDSRIBSqIgKLkel8yPP9ksmOf4SRbNAbgijmq63s+EIkNHt2yjuTHV48pR1j1czHWcsoqJOHj6faeXge0OyGKuPqbBCzoqAjecNq0dRfHQUgXMWmeaJp1R6iWhKxyJV5Y2EwhA5WGH9xzc9h0TobIgGFGAk2OvzDPBO5qr+O85LbjNeHF3WfZciaj2lMIVsveklN9S8598m+R+D4/O8Sscebc2xoVf8qBDazJP5gVtuMoAKBcJuNVWeTR5snv2vs5BEejv6Q2gcb6rPa4ZxEmilhK1NTy9+dwoYvgLUm5o11PBXbI7uRv18tLwwer55Ult5Aq3JgG8Uj8FgBA4exLCw9LKUhzd+1lN0i19f2mDDuBORw5dPUBj2unzIb6sro/2SYm3MF2nmKhh5mm1F/v37ksOzJlTUPhbcs6aYrUJo5cM1H9AB8vpcNln38uWb4tuFgD5Wqy/0WFu60nsRsnInI5SPMN39wA4cx2eyrCfne32iw0Ov+VAdn0+D8FFzyVEEh7lrCQlJFoqoznxvpKh6NRhUzLmLpfEPOhFN/bZBHsj+3YJLT4JgRaYGTf6fMkZGCyIk60hIbqofwcuMFNqFYOK0nffOV8dz9ElisN/6cSJsYAQ==', + provider_name='anthropic', + ), + next_part_kind='text', + ), PartStartEvent( - index=1, part=TextPart(content="I'll calculate this mathematical expression for you. Let me break") + index=1, + part=TextPart(content="I'll calculate this mathematical expression for you. Let me break"), + previous_part_kind='thinking', ), FinalResultEvent(tool_name=None, tool_call_id=None), PartDeltaEvent( index=1, delta=TextPartDelta(content_delta=' it down step by step following the order of operations.') ), + PartEndEvent( + index=1, + part=TextPart( + content="I'll calculate this mathematical expression for you. 
Let me break it down step by step following the order of operations." + ), + next_part_kind='builtin-tool-call', + ), PartStartEvent( index=2, part=BuiltinToolCallPart( @@ -3434,6 +3704,7 @@ async def test_anthropic_code_execution_tool_stream(allow_model_requests: None, tool_call_id='srvtoolu_01MKwyo39KHRDr9Ubff5vWtG', provider_name='anthropic', ), + previous_part_kind='text', ), PartDeltaEvent( index=2, delta=ToolCallPartDelta(args_delta='', tool_call_id='srvtoolu_01MKwyo39KHRDr9Ubff5vWtG') @@ -3625,6 +3896,16 @@ async def test_anthropic_code_execution_tool_stream(allow_model_requests: None, PartDeltaEvent( index=2, delta=ToolCallPartDelta(args_delta='"}', tool_call_id='srvtoolu_01MKwyo39KHRDr9Ubff5vWtG') ), + PartEndEvent( + index=2, + part=BuiltinToolCallPart( + tool_name='code_execution', + args='{"code": "# Calculate the expression: 65465-6544 * 65464-6+1.02255\\n# Following order of operations (PEMDAS/BODMAS)\\n\\nexpression = \\"65465-6544 * 65464-6+1.02255\\"\\nprint(f\\"Expression: {expression}\\")\\n\\n# Let\'s break it down step by step\\nstep1 = 6544 * 65464 # Multiplication first\\nprint(f\\"Step 1 - Multiplication: 6544 * 65464 = {step1}\\")\\n\\nstep2 = 65465 - step1 # First subtraction\\nprint(f\\"Step 2 - First subtraction: 65465 - {step1} = {step2}\\")\\n\\nstep3 = step2 - 6 # Second subtraction\\nprint(f\\"Step 3 - Second subtraction: {step2} - 6 = {step3}\\")\\n\\nfinal_result = step3 + 1.02255 # Final addition\\nprint(f\\"Step 4 - Final addition: {step3} + 1.02255 = {final_result}\\")\\n\\n# Let\'s also verify with direct calculation\\ndirect_result = 65465-6544 * 65464-6+1.02255\\nprint(f\\"\\\\nDirect calculation: {direct_result}\\")\\nprint(f\\"Results match: {final_result == direct_result}\\")"}', + tool_call_id='srvtoolu_01MKwyo39KHRDr9Ubff5vWtG', + provider_name='anthropic', + ), + next_part_kind='builtin-tool-return', + ), PartStartEvent( index=3, part=BuiltinToolReturnPart( @@ -3649,8 +3930,9 @@ async def test_anthropic_code_execution_tool_stream(allow_model_requests: None, timestamp=IsDatetime(), provider_name='anthropic', ), + previous_part_kind='builtin-tool-call', ), - PartStartEvent(index=4, part=TextPart(content='The answer to')), + PartStartEvent(index=4, part=TextPart(content='The answer to'), previous_part_kind='builtin-tool-return'), PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' **65465-6544 * ')), PartDeltaEvent(index=4, delta=TextPartDelta(content_delta='65464-6+1.02255** is **')), PartDeltaEvent(index=4, delta=TextPartDelta(content_delta='-428,330,955.97745**.')), @@ -3697,6 +3979,20 @@ async def test_anthropic_code_execution_tool_stream(allow_model_requests: None, ), ), PartDeltaEvent(index=4, delta=TextPartDelta(content_delta='1.02255 = -428,330,955.97745')), + PartEndEvent( + index=4, + part=TextPart( + content="""\ +The answer to **65465-6544 * 65464-6+1.02255** is **-428,330,955.97745**. + +Here's how it breaks down following the order of operations: +1. First, multiplication: 6,544 × 65,464 = 428,396,416 +2. Then left to right: 65,465 - 428,396,416 = -428,330,951 +3. Continue: -428,330,951 - 6 = -428,330,957 +4. 
Finally: -428,330,957 + 1.02255 = -428,330,955.97745\ +""" + ), + ), BuiltinToolCallEvent( # pyright: ignore[reportDeprecated] part=BuiltinToolCallPart( tool_name='code_execution', @@ -4398,6 +4694,16 @@ async def test_anthropic_web_search_tool_stream(allow_model_requests: None, anth index=0, delta=ToolCallPartDelta(args_delta='s today"}', tool_call_id='srvtoolu_01NcU4XNwyxWK6a9tcJZ8wGY'), ), + PartEndEvent( + index=0, + part=BuiltinToolCallPart( + tool_name='web_search', + args='{"query": "top world news today"}', + tool_call_id='srvtoolu_01NcU4XNwyxWK6a9tcJZ8wGY', + provider_name='anthropic', + ), + next_part_kind='builtin-tool-return', + ), PartStartEvent( index=1, part=BuiltinToolReturnPart( @@ -4478,15 +4784,28 @@ async def test_anthropic_web_search_tool_stream(allow_model_requests: None, anth timestamp=IsDatetime(), provider_name='anthropic', ), + previous_part_kind='builtin-tool-call', + ), + PartStartEvent( + index=2, + part=TextPart(content='Let me search for more specific breaking'), + previous_part_kind='builtin-tool-return', ), - PartStartEvent(index=2, part=TextPart(content='Let me search for more specific breaking')), FinalResultEvent(tool_name=None, tool_call_id=None), PartDeltaEvent(index=2, delta=TextPartDelta(content_delta=' news stories to get clearer headlines.')), + PartEndEvent( + index=2, + part=TextPart( + content='Let me search for more specific breaking news stories to get clearer headlines.' + ), + next_part_kind='builtin-tool-call', + ), PartStartEvent( index=3, part=BuiltinToolCallPart( tool_name='web_search', tool_call_id='srvtoolu_01WiP3ZfXZXSykVQEL78XJ4T', provider_name='anthropic' ), + previous_part_kind='text', ), PartDeltaEvent( index=3, delta=ToolCallPartDelta(args_delta='', tool_call_id='srvtoolu_01WiP3ZfXZXSykVQEL78XJ4T') @@ -4515,6 +4834,16 @@ async def test_anthropic_web_search_tool_stream(allow_model_requests: None, anth PartDeltaEvent( index=3, delta=ToolCallPartDelta(args_delta='"}', tool_call_id='srvtoolu_01WiP3ZfXZXSykVQEL78XJ4T') ), + PartEndEvent( + index=3, + part=BuiltinToolCallPart( + tool_name='web_search', + args='{"query": "breaking news headlines August 14 2025"}', + tool_call_id='srvtoolu_01WiP3ZfXZXSykVQEL78XJ4T', + provider_name='anthropic', + ), + next_part_kind='builtin-tool-return', + ), PartStartEvent( index=4, part=BuiltinToolReturnPart( @@ -4595,8 +4924,9 @@ async def test_anthropic_web_search_tool_stream(allow_model_requests: None, anth timestamp=IsDatetime(), provider_name='anthropic', ), + previous_part_kind='builtin-tool-call', ), - PartStartEvent(index=5, part=TextPart(content='Base')), + PartStartEvent(index=5, part=TextPart(content='Base'), previous_part_kind='builtin-tool-return'), PartDeltaEvent( index=5, delta=TextPartDelta(content_delta='d on the search results, I can identify the top') ), @@ -4623,27 +4953,70 @@ async def test_anthropic_web_search_tool_stream(allow_model_requests: None, anth ), PartDeltaEvent(index=5, delta=TextPartDelta(content_delta='1. Trump-Putin Summit and Ukraine Crisis')), PartDeltaEvent(index=5, delta=TextPartDelta(content_delta='**\n')), + PartEndEvent( + index=5, + part=TextPart( + content="""\ +Based on the search results, I can identify the top 3 major news stories from around the world today (August 14, 2025): + +## Top 3 World News Stories Today + +**1. 
Trump-Putin Summit and Ukraine Crisis** +""" + ), + next_part_kind='text', + ), PartStartEvent( index=6, part=TextPart( content='European leaders held a high-stakes meeting Wednesday with President Trump, Vice President Vance, Ukraine' ), + previous_part_kind='text', ), PartDeltaEvent(index=6, delta=TextPartDelta(content_delta="'s Volodymyr Zel")), PartDeltaEvent(index=6, delta=TextPartDelta(content_delta="enskyy and NATO's chief ahea")), PartDeltaEvent(index=6, delta=TextPartDelta(content_delta="d of Friday's U.S.-")), PartDeltaEvent(index=6, delta=TextPartDelta(content_delta='Russia summit')), - PartStartEvent(index=7, part=TextPart(content='. ')), - PartStartEvent(index=8, part=TextPart(content='The White House lowered its expectations surrounding')), + PartEndEvent( + index=6, + part=TextPart( + content="European leaders held a high-stakes meeting Wednesday with President Trump, Vice President Vance, Ukraine's Volodymyr Zelenskyy and NATO's chief ahead of Friday's U.S.-Russia summit" + ), + next_part_kind='text', + ), + PartStartEvent(index=7, part=TextPart(content='. '), previous_part_kind='text'), + PartEndEvent(index=7, part=TextPart(content='. '), next_part_kind='text'), + PartStartEvent( + index=8, + part=TextPart(content='The White House lowered its expectations surrounding'), + previous_part_kind='text', + ), PartDeltaEvent(index=8, delta=TextPartDelta(content_delta=' the Trump-Putin summit on Friday')), - PartStartEvent(index=9, part=TextPart(content='. ')), + PartEndEvent( + index=8, + part=TextPart( + content='The White House lowered its expectations surrounding the Trump-Putin summit on Friday' + ), + next_part_kind='text', + ), + PartStartEvent(index=9, part=TextPart(content='. '), previous_part_kind='text'), + PartEndEvent(index=9, part=TextPart(content='. '), next_part_kind='text'), PartStartEvent( - index=10, part=TextPart(content='In a surprise move just days before the Trump-Putin summit') + index=10, + part=TextPart(content='In a surprise move just days before the Trump-Putin summit'), + previous_part_kind='text', ), PartDeltaEvent(index=10, delta=TextPartDelta(content_delta=', the White House swapped out pro')), PartDeltaEvent(index=10, delta=TextPartDelta(content_delta="-EU PM Tusk for Poland's new president –")), PartDeltaEvent(index=10, delta=TextPartDelta(content_delta=" a political ally who once opposed Ukraine's")), PartDeltaEvent(index=10, delta=TextPartDelta(content_delta=' NATO and EU bids')), + PartEndEvent( + index=10, + part=TextPart( + content="In a surprise move just days before the Trump-Putin summit, the White House swapped out pro-EU PM Tusk for Poland's new president – a political ally who once opposed Ukraine's NATO and EU bids" + ), + next_part_kind='text', + ), PartStartEvent( index=11, part=TextPart( @@ -4653,33 +5026,72 @@ async def test_anthropic_web_search_tool_stream(allow_model_requests: None, anth **2. Trump's Federal Takeover of Washington D\ """ ), + previous_part_kind='text', ), PartDeltaEvent(index=11, delta=TextPartDelta(content_delta='.C.**')), PartDeltaEvent(index=11, delta=TextPartDelta(content_delta='\n')), + PartEndEvent( + index=11, + part=TextPart( + content="""\ +. + +**2. 
Trump's Federal Takeover of Washington D.C.** +""" + ), + next_part_kind='text', + ), PartStartEvent( index=12, part=TextPart( content="Federal law enforcement's presence in Washington, DC, continued to be felt Wednesday as President Donald Trump's tak" ), + previous_part_kind='text', ), PartDeltaEvent(index=12, delta=TextPartDelta(content_delta="eover of the city's police entered its thir")), PartDeltaEvent(index=12, delta=TextPartDelta(content_delta='d night')), - PartStartEvent(index=13, part=TextPart(content='. ')), + PartEndEvent( + index=12, + part=TextPart( + content="Federal law enforcement's presence in Washington, DC, continued to be felt Wednesday as President Donald Trump's takeover of the city's police entered its third night" + ), + next_part_kind='text', + ), + PartStartEvent(index=13, part=TextPart(content='. '), previous_part_kind='text'), + PartEndEvent(index=13, part=TextPart(content='. '), next_part_kind='text'), PartStartEvent( index=14, part=TextPart( content="National Guard troops arrived in Washington, D.C., following President Trump's deployment an" ), + previous_part_kind='text', ), PartDeltaEvent( index=14, delta=TextPartDelta(content_delta='d federalization of local police to crack down on crime') ), PartDeltaEvent(index=14, delta=TextPartDelta(content_delta=" in the nation's capital")), - PartStartEvent(index=15, part=TextPart(content='. ')), + PartEndEvent( + index=14, + part=TextPart( + content="National Guard troops arrived in Washington, D.C., following President Trump's deployment and federalization of local police to crack down on crime in the nation's capital" + ), + next_part_kind='text', + ), + PartStartEvent(index=15, part=TextPart(content='. '), previous_part_kind='text'), + PartEndEvent(index=15, part=TextPart(content='. '), next_part_kind='text'), PartStartEvent( - index=16, part=TextPart(content='Over 100 arrests made as National Guard rolls into DC under') + index=16, + part=TextPart(content='Over 100 arrests made as National Guard rolls into DC under'), + previous_part_kind='text', ), PartDeltaEvent(index=16, delta=TextPartDelta(content_delta=" Trump's federal takeover")), + PartEndEvent( + index=16, + part=TextPart( + content="Over 100 arrests made as National Guard rolls into DC under Trump's federal takeover" + ), + next_part_kind='text', + ), PartStartEvent( index=17, part=TextPart( @@ -4689,21 +5101,43 @@ async def test_anthropic_web_search_tool_stream(allow_model_requests: None, anth **3. Air\ """ ), + previous_part_kind='text', ), PartDeltaEvent(index=17, delta=TextPartDelta(content_delta=' Canada Flight Disruption')), PartDeltaEvent(index=17, delta=TextPartDelta(content_delta='**\n')), + PartEndEvent( + index=17, + part=TextPart( + content="""\ +. + +**3. Air Canada Flight Disruption** +""" + ), + next_part_kind='text', + ), PartStartEvent( index=18, part=TextPart( content='Air Canada plans to lock out its flight attendants and cancel all flights starting this weekend' ), + previous_part_kind='text', ), - PartStartEvent(index=19, part=TextPart(content='. ')), + PartEndEvent( + index=18, + part=TextPart( + content='Air Canada plans to lock out its flight attendants and cancel all flights starting this weekend' + ), + next_part_kind='text', + ), + PartStartEvent(index=19, part=TextPart(content='. '), previous_part_kind='text'), + PartEndEvent(index=19, part=TextPart(content='. 
'), next_part_kind='text'), PartStartEvent( index=20, part=TextPart( content='Air Canada says it will begin cancelling flights starting Thursday to allow an orderly shutdown of operations' ), + previous_part_kind='text', ), PartDeltaEvent( index=20, @@ -4715,6 +5149,13 @@ async def test_anthropic_web_search_tool_stream(allow_model_requests: None, anth index=20, delta=TextPartDelta(content_delta=' Saturday as it faces a potential work stoppage by') ), PartDeltaEvent(index=20, delta=TextPartDelta(content_delta=' its flight attendants')), + PartEndEvent( + index=20, + part=TextPart( + content="Air Canada says it will begin cancelling flights starting Thursday to allow an orderly shutdown of operations with a complete cessation of flights for the country's largest airline by Saturday as it faces a potential work stoppage by its flight attendants" + ), + next_part_kind='text', + ), PartStartEvent( index=21, part=TextPart( @@ -4724,9 +5165,20 @@ async def test_anthropic_web_search_tool_stream(allow_model_requests: None, anth These stories represent major international diplomatic developments, significant domestic policy\ """ ), + previous_part_kind='text', ), PartDeltaEvent(index=21, delta=TextPartDelta(content_delta=' changes in the US, and major transportation')), PartDeltaEvent(index=21, delta=TextPartDelta(content_delta=' disruptions affecting North America.')), + PartEndEvent( + index=21, + part=TextPart( + content="""\ +. + +These stories represent major international diplomatic developments, significant domestic policy changes in the US, and major transportation disruptions affecting North America.\ +""" + ), + ), BuiltinToolCallEvent( # pyright: ignore[reportDeprecated] part=BuiltinToolCallPart( tool_name='web_search', @@ -4940,16 +5392,21 @@ async def test_anthropic_text_parts_ahead_of_built_in_tool_call(allow_model_requ '', '', '', + '', + '', 'Here', "Here's one notable historical event that occurred on September", "Here's one notable historical event that occurred on September 18th: ", "Here's one notable historical event that occurred on September 18th: On September 18, 1793, President George Washington marke", + "Here's one notable historical event that occurred on September 18th: On September 18, 1793, President George Washington marke", "Here's one notable historical event that occurred on September 18th: On September 18, 1793, President George Washington marked the location for the Capitol Building", "Here's one notable historical event that occurred on September 18th: On September 18, 1793, President George Washington marked the location for the Capitol Building in Washington DC, and he", "Here's one notable historical event that occurred on September 18th: On September 18, 1793, President George Washington marked the location for the Capitol Building in Washington DC, and he would return periodically to oversee its", "Here's one notable historical event that occurred on September 18th: On September 18, 1793, President George Washington marked the location for the Capitol Building in Washington DC, and he would return periodically to oversee its construction personally", "Here's one notable historical event that occurred on September 18th: On September 18, 1793, President George Washington marked the location for the Capitol Building in Washington DC, and he would return periodically to oversee its construction personally.", "Here's one notable historical event that occurred on September 18th: On September 18, 1793, President George Washington marked the location for the 
Capitol Building in Washington DC, and he would return periodically to oversee its construction personally.", + "Here's one notable historical event that occurred on September 18th: On September 18, 1793, President George Washington marked the location for the Capitol Building in Washington DC, and he would return periodically to oversee its construction personally.", + "Here's one notable historical event that occurred on September 18th: On September 18, 1793, President George Washington marked the location for the Capitol Building in Washington DC, and he would return periodically to oversee its construction personally.", ] ) diff --git a/tests/models/test_bedrock.py b/tests/models/test_bedrock.py index 831a86c421..8f60786f03 100644 --- a/tests/models/test_bedrock.py +++ b/tests/models/test_bedrock.py @@ -17,6 +17,7 @@ ModelRequest, ModelResponse, PartDeltaEvent, + PartEndEvent, PartStartEvent, RetryPromptPart, SystemPromptPart, @@ -416,13 +417,28 @@ async def get_temperature(city: str) -> str: PartDeltaEvent(index=0, delta=TextPartDelta(content_delta=' in Paris.\n')), + PartEndEvent( + index=0, + part=TextPart( + content=' To find the temperature of the capital of France, I need to first determine the capital of France and then get the current temperature in that city. The capital of France is Paris. I will use the "get_temperature" tool to find the current temperature in Paris.\n' + ), + next_part_kind='tool-call', + ), PartStartEvent( - index=1, part=ToolCallPart(tool_name='get_temperature', tool_call_id='tooluse_lAG_zP8QRHmSYOwZzzaCqA') + index=1, + part=ToolCallPart(tool_name='get_temperature', tool_call_id='tooluse_lAG_zP8QRHmSYOwZzzaCqA'), + previous_part_kind='text', ), PartDeltaEvent( index=1, delta=ToolCallPartDelta(args_delta='{"city":"Paris"}', tool_call_id='tooluse_lAG_zP8QRHmSYOwZzzaCqA'), ), + PartEndEvent( + index=1, + part=ToolCallPart( + tool_name='get_temperature', args='{"city":"Paris"}', tool_call_id='tooluse_lAG_zP8QRHmSYOwZzzaCqA' + ), + ), IsInstance(FunctionToolCallEvent), FunctionToolResultEvent( result=ToolReturnPart( @@ -438,6 +454,9 @@ async def get_temperature(city: str) -> str: PartDeltaEvent(index=0, delta=TextPartDelta(content_delta=' capital of France,')), PartDeltaEvent(index=0, delta=TextPartDelta(content_delta=' is 30°C')), PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='.')), + PartEndEvent( + index=0, part=TextPart(content='The current temperature in Paris, the capital of France, is 30°C.') + ), ] ) @@ -881,16 +900,41 @@ async def test_bedrock_model_thinking_part_redacted_stream( provider_name='bedrock', ), ), + PartEndEvent( + index=0, + part=ThinkingPart( + content='', + id='redacted_content', + signature='EtkECkgIBxABGAIqQJTfqS/PYuAFZeOls6R8uGN014YNT7YDIFuhNyywoX1Cjf9oIYThX1ucUFJ1cfckdN55jozmXi1PEgMfufPmD44SDHBw8Yp6gJ8Ys/Gt3BoMYdLaNUOqr7k/MAeYIjBhPIc9z85HrJAbeS8Hz/69R+vKHpRanI0n/B69dnv2nebRe7LKZgHs2AlVPEtNyyoqvgP463qJ7/KvDrAPSnhHQqZ8TH8JBC4eYb4Qow5eX7dI3UXY/DrQ2IOWLADJqshcXBg7zbN78H4l6fTP97Ztzz0qw4fadTzTb36dRR7p8rs2zA/pHWhK+75xvUGh8IdLPvMikKccHssHKdceru4JLG1cMVtq1Ci7ZPAbHRU8/XsjFtLWPHeYLfKGJN33C1MpWX5nQU2BjYICs5Hn+8Z9Smxhp06rZXTjZARiExrd1dgLn5/5PbEzMLJv/Q3c6XJH7kx7iUO4NAonTT1Q3WY1cGa38UNGYuTUae3CNFEZWjS21tWRmjX4t5w8L0BtQ5DSaW/ZzGf0yzUKUaS/fkVjr2xztQBvysFFbb7UrX+/lNw26CHXKUIXFcZzV9l0HrA6z3oQrqSpnwem/pt/Cxdh5YQlXq6DSdzstqwJA53n9Hj3osjT/viH4Y6N5dWLLBTQBvhUEy24FhlytD3scYrvAqCdxW9aDSW+e+Foj5vsjVA9VFrXqZeNSO77Qp5dLw5XcA8CH6YFTE6EWeFTki5vfTfSIw+m4inZGVzIRi8Qk90IzW2EnrxGtx3wsEn5XImQr1vg1Npq2jN6uiOPOp8nsBgB', + provider_name='bedrock', + ), + 
next_part_kind='thinking', + ), PartStartEvent( index=1, part=ThinkingPart( content='', id='redacted_content', - signature=IsStr(), + signature='EqADCkgIBxABGAIqQB3h5GyHJD4hocRchUq2I40ChRLdpxjVl0xZkyVZrrk6JIJWeInuRQfJG5nJymmQBjH9VDeV53H/D3W9xjIJvPUSDLv7jRCF9b6Tx1Z5EBoMSv3CBw4zUjjSDaqlIjDBpH7V3YQB5twUmulAycDyZRvP3loupy6o2eqrfKAZZjq3rwkApWD9qOqJD3OEfd4qhQJZfOcHs9bt5zCqzYjoaIkxE3raXnhUHOlwq1Jq60bTQt2SQiHqoZTEht/DeDEEgpFy9Z32Zz3/Az0ORgTi3QE56K15OXo6GWMPYq/CTJ/xzPXfH0/yoQ4EP103VfVqvymEpXUru6RQGkou41LKRI92fRsqCK+jPOpxeED4kz7CFhQYMHttk7cOAF85SE3nCcpliARrLDvsApjgMFAYnineZQMLwawmnIm6EB61C20dB1Ft7vLG1TS6fn27EB8JZjr/jeC8O4ZysKv5iUxpMlDZib8jFszfzxCXdFX7NVKO9+dH8cW3RsJ80kzBp6xyoQhXSFx72jFllwDy8e+QlI3OIhweJ8IYAQ==', + provider_name='bedrock', + ), + previous_part_kind='thinking', + ), + PartEndEvent( + index=1, + part=ThinkingPart( + content='', + id='redacted_content', + signature='EqADCkgIBxABGAIqQB3h5GyHJD4hocRchUq2I40ChRLdpxjVl0xZkyVZrrk6JIJWeInuRQfJG5nJymmQBjH9VDeV53H/D3W9xjIJvPUSDLv7jRCF9b6Tx1Z5EBoMSv3CBw4zUjjSDaqlIjDBpH7V3YQB5twUmulAycDyZRvP3loupy6o2eqrfKAZZjq3rwkApWD9qOqJD3OEfd4qhQJZfOcHs9bt5zCqzYjoaIkxE3raXnhUHOlwq1Jq60bTQt2SQiHqoZTEht/DeDEEgpFy9Z32Zz3/Az0ORgTi3QE56K15OXo6GWMPYq/CTJ/xzPXfH0/yoQ4EP103VfVqvymEpXUru6RQGkou41LKRI92fRsqCK+jPOpxeED4kz7CFhQYMHttk7cOAF85SE3nCcpliARrLDvsApjgMFAYnineZQMLwawmnIm6EB61C20dB1Ft7vLG1TS6fn27EB8JZjr/jeC8O4ZysKv5iUxpMlDZib8jFszfzxCXdFX7NVKO9+dH8cW3RsJ80kzBp6xyoQhXSFx72jFllwDy8e+QlI3OIhweJ8IYAQ==', provider_name='bedrock', ), + next_part_kind='text', + ), + PartStartEvent( + index=2, + part=TextPart(content="I notice you've sent what appears to be some"), + previous_part_kind='thinking', ), - PartStartEvent(index=2, part=TextPart(content="I notice you've sent what appears to be some")), FinalResultEvent(tool_name=None, tool_call_id=None), PartDeltaEvent(index=2, delta=TextPartDelta(content_delta=' kind of command or trigger string, but I don')), PartDeltaEvent(index=2, delta=TextPartDelta(content_delta="'t respond to special codes or")), @@ -914,6 +958,16 @@ async def test_bedrock_model_thinking_part_redacted_stream( index=2, delta=TextPartDelta(content_delta=' a straightforward conversation. What would you like to') ), PartDeltaEvent(index=2, delta=TextPartDelta(content_delta=' talk about today?')), + PartEndEvent( + index=2, + part=TextPart( + content="""\ +I notice you've sent what appears to be some kind of command or trigger string, but I don't respond to special codes or triggers. That string doesn't have any special meaning to me. + +If you have a question you'd like to discuss or need assistance with something, I'd be happy to help in a straightforward conversation. What would you like to talk about today?\ +""" + ), + ), ] ) @@ -1121,12 +1175,22 @@ async def test_bedrock_model_thinking_part_stream(allow_model_requests: None, be PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' how I can help')), PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' them today.')), PartDeltaEvent(index=0, delta=ThinkingPartDelta(signature_delta=IsStr(), provider_name='bedrock')), - PartStartEvent(index=1, part=TextPart(content='Hello! It')), + PartEndEvent( + index=0, + part=ThinkingPart( + content='The user has greeted me with a simple "Hello". I should respond in a friendly and welcoming manner. 
This is a straightforward greeting, so I\'ll respond warmly and ask how I can help them today.', + signature='Eu0CCkgIBxABGAIqQJDccbDQkr81n7QjZ0Fi43umSvw0YvnGkMPEpaGAa2btYHyWw06KhwckvsnKzpKcxiRJT35meoG4/pdrTUiy2UISDPDaEWfOl3+HlRVsCxoMzfiqBp252RMvpmEyIjCbQ97Ac9Epkr5mgxeu1vGtJg+fDWIg0UnpMM8NYknhhvJmsXpYrfquwGL1ZnlBslUq0gHtbAAPwlWPmiQXU7gDQCDW9IdMVyw42b4f5MrAlpWkPWOJc9H+yYv0TpP/jY72SD1opqwkWnBgkzbi7A2jPmEFzIMQSO1KDXha5ADqQ3cLYMmVdNTSH9wlM7G7/JJ2/cqowqkwD6/q1AnYzcPte9iC67fY1LYN0NMCOSABFojP1rmkv9YBEulx5Y6eQpeVXBQiIqcGoCmWSumpGBskS1KxGerUmzUB0JmJnTENv4x3fSGSUSEPqMiz6Ebao8sVkb1wCWuZEXWJGtiQLMIm1o471iEYAQ==', + provider_name='bedrock', + ), + next_part_kind='text', + ), + PartStartEvent(index=1, part=TextPart(content='Hello! It'), previous_part_kind='thinking'), FinalResultEvent(tool_name=None, tool_call_id=None), PartDeltaEvent(index=1, delta=TextPartDelta(content_delta="'s nice")), PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' to meet you.')), PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' How can I help')), PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' you today?')), + PartEndEvent(index=1, part=TextPart(content="Hello! It's nice to meet you. How can I help you today?")), ] ) assert agent_run.result is not None diff --git a/tests/models/test_deepseek.py b/tests/models/test_deepseek.py index b38080ca23..0a1d19dab0 100644 --- a/tests/models/test_deepseek.py +++ b/tests/models/test_deepseek.py @@ -3,7 +3,6 @@ from typing import Any import pytest -from dirty_equals import IsListOrTuple from inline_snapshot import snapshot from pydantic_ai import ( @@ -12,6 +11,7 @@ ModelRequest, ModelResponse, PartDeltaEvent, + PartEndEvent, PartStartEvent, TextPart, TextPartDelta, @@ -79,19 +79,274 @@ async def test_deepseek_model_thinking_stream(allow_model_requests: None, deepse async for event in request_stream: event_parts.append(event) - assert event_parts == IsListOrTuple( - positions={ - 0: snapshot( - PartStartEvent( - index=0, part=ThinkingPart(content='H', id='reasoning_content', provider_name='deepseek') - ) + assert event_parts == snapshot( + [ + PartStartEvent(index=0, part=ThinkingPart(content='H', id='reasoning_content', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='mm', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' the', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' user', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' just', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' said', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' "', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='Hello', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='".', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' It', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta="'s", provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' a', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' simple', provider_name='deepseek')), + PartDeltaEvent(index=0, 
delta=ThinkingPartDelta(content_delta=' greeting', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' but', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' I', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' wonder', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' if', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' there', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta="'s", provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' more', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' to', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' it', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.', provider_name='deepseek')), + PartDeltaEvent( + index=0, + delta=ThinkingPartDelta( + content_delta="""\ + \n\ + +""", + provider_name='deepseek', + ), + ), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='The', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' message', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' is', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' very', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' brief', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' so', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' I', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' don', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta="'t", provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' have', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' much', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' context', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' to', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' work', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' with', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' Maybe', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' they', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta="'re", provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' just', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' testing', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' if', provider_name='deepseek')), + PartDeltaEvent(index=0, 
delta=ThinkingPartDelta(content_delta=' I', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta="'m", provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' responsive', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' or', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' perhaps', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' they', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta="'re", provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' new', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' to', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' chatting', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' with', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' AI', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.', provider_name='deepseek')), + PartDeltaEvent( + index=0, + delta=ThinkingPartDelta( + content_delta="""\ + \n\ + +""", + provider_name='deepseek', + ), ), - 1: snapshot(PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='mm', provider_name='deepseek'))), - 2: snapshot(PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',', provider_name='deepseek'))), - 198: snapshot(PartStartEvent(index=1, part=TextPart(content='Hello'))), - 199: snapshot(FinalResultEvent(tool_name=None, tool_call_id=None)), - 200: snapshot(PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' there'))), - 201: snapshot(PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='!'))), - }, - length=211, + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='I', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' should', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' keep', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' my', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' reply', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' warm', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' and', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' inviting', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' to', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' encourage', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' further', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' conversation', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' A', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' smile', 
provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='y', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' face', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' would', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' help', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' make', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' it', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' friendly', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' Since', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' they', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' didn', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta="'t", provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' specify', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' a', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' need', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' I', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta="'ll", provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' leave', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' it', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' open', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='-ended', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' by', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' asking', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' how', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' I', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' can', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' help', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' them', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' today', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.', provider_name='deepseek')), + PartDeltaEvent( + index=0, + delta=ThinkingPartDelta( + content_delta="""\ + \n\ + +""", + provider_name='deepseek', + ), + ), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='The', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' tone', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' should', 
provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' be', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' cheerful', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' but', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' professional', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' -', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' not', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' too', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' stiff', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' not', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' too', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' casual', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' "', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='Hello', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' there', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='!"', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' feels', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' right', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' for', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' a', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' start', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' Adding', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' "', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='What', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' can', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' I', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' do', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' for', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' you', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' today', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='?"', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' turns', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' it', provider_name='deepseek')), + 
PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' into', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' an', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' invitation', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' rather', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' than', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' just', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' mirror', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='ing', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' their', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' greeting', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.', provider_name='deepseek')), + PartDeltaEvent( + index=0, + delta=ThinkingPartDelta( + content_delta="""\ + \n\ + +""", + provider_name='deepseek', + ), + ), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='I', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta="'ll", provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' avoid', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' assumptions', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' about', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' their', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' gender', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' location', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' or', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' intent', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' since', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' there', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta="'s", provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' zero', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' information', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' If', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' they', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta="'re", provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' just', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' being', 
provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' polite', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' they', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' might', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' not', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' reply', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' further', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' -', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' and', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' that', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta="'s", provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' okay', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' too', provider_name='deepseek')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.', provider_name='deepseek')), + PartEndEvent( + index=0, + part=ThinkingPart( + content="""\ +Hmm, the user just said "Hello". It's a simple greeting but I wonder if there's more to it. \n\ + +The message is very brief, so I don't have much context to work with. Maybe they're just testing if I'm responsive, or perhaps they're new to chatting with AI. \n\ + +I should keep my reply warm and inviting to encourage further conversation. A smiley face would help make it friendly. Since they didn't specify a need, I'll leave it open-ended by asking how I can help them today. \n\ + +The tone should be cheerful but professional - not too stiff, not too casual. "Hello there!" feels right for a start. Adding "What can I do for you today?" turns it into an invitation rather than just mirroring their greeting. \n\ + +I'll avoid assumptions about their gender, location, or intent since there's zero information. If they're just being polite, they might not reply further - and that's okay too.\ +""", + id='reasoning_content', + provider_name='deepseek', + ), + next_part_kind='text', + ), + PartStartEvent(index=1, part=TextPart(content='Hello'), previous_part_kind='thinking'), + FinalResultEvent(tool_name=None, tool_call_id=None), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' there')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='!')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' 😊')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' How')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' can')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' I')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' help')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' you')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' today')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='?')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='')), + PartEndEvent(index=1, part=TextPart(content='Hello there! 
😊 How can I help you today?')), + ] ) diff --git a/tests/models/test_fallback.py b/tests/models/test_fallback.py index 30370fda86..7b10943b96 100644 --- a/tests/models/test_fallback.py +++ b/tests/models/test_fallback.py @@ -17,7 +17,7 @@ from pydantic_ai.settings import ModelSettings from pydantic_ai.usage import RequestUsage -from ..conftest import IsNow, try_import +from ..conftest import IsDatetime, IsNow, try_import if sys.version_info < (3, 11): from exceptiongroup import ExceptionGroup as ExceptionGroup # pragma: lax no cover @@ -218,6 +218,12 @@ async def test_first_failed_instrumented_stream(capfire: CaptureLogfire) -> None model_name='function::success_response_stream', timestamp=IsNow(tz=timezone.utc), ), + ModelResponse( + parts=[TextPart(content='hello world')], + usage=RequestUsage(input_tokens=50, output_tokens=2), + model_name='function::success_response_stream', + timestamp=IsDatetime(), + ), ] ) assert result.is_complete @@ -449,6 +455,12 @@ async def test_first_success_streaming() -> None: model_name='function::success_response_stream', timestamp=IsNow(tz=timezone.utc), ), + ModelResponse( + parts=[TextPart(content='hello world')], + usage=RequestUsage(input_tokens=50, output_tokens=2), + model_name='function::success_response_stream', + timestamp=IsDatetime(), + ), ] ) assert result.is_complete @@ -478,6 +490,12 @@ async def test_first_failed_streaming() -> None: model_name='function::success_response_stream', timestamp=IsNow(tz=timezone.utc), ), + ModelResponse( + parts=[TextPart(content='hello world')], + usage=RequestUsage(input_tokens=50, output_tokens=2), + model_name='function::success_response_stream', + timestamp=IsDatetime(), + ), ] ) assert result.is_complete diff --git a/tests/models/test_gemini.py b/tests/models/test_gemini.py index b4512ff960..1a7308cac1 100644 --- a/tests/models/test_gemini.py +++ b/tests/models/test_gemini.py @@ -825,6 +825,7 @@ async def test_stream_text(get_gemini_client: GetGeminiClient): # This last value is repeated due to the debounce_by=None combined with the need to emit # a final empty chunk to signal the end of the stream 'Hello world', + 'Hello world', ] ) assert result.usage() == snapshot(RunUsage(requests=1, input_tokens=1, output_tokens=2)) @@ -863,7 +864,7 @@ async def test_stream_invalid_unicode_text(get_gemini_client: GetGeminiClient): async with agent.run_stream('Hello') as result: chunks = [chunk async for chunk in result.stream_output(debounce_by=None)] - assert chunks == snapshot(['abc', 'abc€def', 'abc€def']) + assert chunks == snapshot(['abc', 'abc€def', 'abc€def', 'abc€def']) assert result.usage() == snapshot(RunUsage(requests=1, input_tokens=1, output_tokens=2)) @@ -893,7 +894,7 @@ async def test_stream_structured(get_gemini_client: GetGeminiClient): async with agent.run_stream('Hello') as result: chunks = [chunk async for chunk in result.stream_output(debounce_by=None)] - assert chunks == snapshot([(1, 2), (1, 2)]) + assert chunks == snapshot([(1, 2), (1, 2), (1, 2)]) assert result.usage() == snapshot(RunUsage(requests=1, input_tokens=1, output_tokens=2)) diff --git a/tests/models/test_google.py b/tests/models/test_google.py index ce461e2810..6025598578 100644 --- a/tests/models/test_google.py +++ b/tests/models/test_google.py @@ -26,6 +26,7 @@ ModelRequest, ModelResponse, PartDeltaEvent, + PartEndEvent, PartStartEvent, RetryPromptPart, SystemPromptPart, @@ -380,6 +381,23 @@ async def test_google_model_builtin_code_execution_stream( provider_name='google-gla', ), ), + PartEndEvent( + index=0, + 
part=BuiltinToolCallPart( + tool_name='code_execution', + args={ + 'code': """\ + result = 65465 - 6544 * 65464 - 6 + 1.02255 + print(result) + \ +""", + 'language': 'PYTHON', + }, + tool_call_id=IsStr(), + provider_name='google-gla', + ), + next_part_kind='builtin-tool-return', + ), PartStartEvent( index=1, part=BuiltinToolReturnPart( @@ -389,6 +407,7 @@ async def test_google_model_builtin_code_execution_stream( timestamp=IsDatetime(), provider_name='google-gla', ), + previous_part_kind='builtin-tool-call', ), PartStartEvent( index=2, @@ -405,6 +424,24 @@ async def test_google_model_builtin_code_execution_stream( tool_call_id=IsStr(), provider_name='google-gla', ), + previous_part_kind='builtin-tool-return', + ), + PartEndEvent( + index=2, + part=BuiltinToolCallPart( + tool_name='code_execution', + args={ + 'code': """\ +# Calculate the expression 65465-6544 * 65464-6+1.02255 +result = 65465 - 6544 * 65464 - 6 + 1.02255 +print(result)\ +""", + 'language': 'PYTHON', + }, + tool_call_id=IsStr(), + provider_name='google-gla', + ), + next_part_kind='builtin-tool-return', ), PartStartEvent( index=3, @@ -415,11 +452,13 @@ async def test_google_model_builtin_code_execution_stream( timestamp=IsDatetime(), provider_name='google-gla', ), + previous_part_kind='builtin-tool-call', ), - PartStartEvent(index=4, part=TextPart(content='The result is')), + PartStartEvent(index=4, part=TextPart(content='The result is'), previous_part_kind='builtin-tool-return'), FinalResultEvent(tool_name=None, tool_call_id=None), PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' -428,330,955.977')), PartDeltaEvent(index=4, delta=TextPartDelta(content_delta='45.')), + PartEndEvent(index=4, part=TextPart(content='The result is -428,330,955.97745.')), BuiltinToolCallEvent( # pyright: ignore[reportDeprecated] part=BuiltinToolCallPart( tool_name='code_execution', @@ -625,6 +664,14 @@ async def get_temperature(city: str) -> str: index=0, part=ToolCallPart(tool_name='get_capital', args={'country': 'France'}, tool_call_id=IsStr()), ), + PartEndEvent( + index=0, + part=ToolCallPart( + tool_name='get_capital', + args={'country': 'France'}, + tool_call_id=IsStr(), + ), + ), IsInstance(FunctionToolCallEvent), FunctionToolResultEvent( result=ToolReturnPart( @@ -635,6 +682,14 @@ async def get_temperature(city: str) -> str: index=0, part=ToolCallPart(tool_name='get_temperature', args={'city': 'Paris'}, tool_call_id=IsStr()), ), + PartEndEvent( + index=0, + part=ToolCallPart( + tool_name='get_temperature', + args={'city': 'Paris'}, + tool_call_id=IsStr(), + ), + ), IsInstance(FunctionToolCallEvent), FunctionToolResultEvent( result=ToolReturnPart( @@ -644,6 +699,7 @@ async def get_temperature(city: str) -> str: PartStartEvent(index=0, part=TextPart(content='The temperature in Paris')), FinalResultEvent(tool_name=None, tool_call_id=None), PartDeltaEvent(index=0, delta=TextPartDelta(content_delta=' is 30°C.\n')), + PartEndEvent(index=0, part=TextPart(content='The temperature in Paris is 30°C.\n')), ] ) @@ -1156,6 +1212,22 @@ async def test_google_model_web_search_tool_stream(allow_model_requests: None, g index=0, delta=TextPartDelta(content_delta=' the evening. The chance of rain remains low throughout the day.'), ), + PartEndEvent( + index=0, + part=TextPart( + content="""\ +### Weather in San Francisco is Mild and Partly Cloudy Today + +**San Francisco, CA** - Today's weather in San Francisco is partly cloudy with temperatures ranging from the high 50s to the low 80s, according to various weather reports. 
+ +As of Tuesday afternoon, the temperature is around 69°F (21°C), with a real feel of about 76°F (24°C) and humidity at approximately 68%. Another report indicates a temperature of 68°F with passing clouds. There is a very low chance of rain throughout the day. + +The forecast for the remainder of the day predicts sunny skies with highs ranging from the mid-60s to the lower 80s. Some sources suggest the high could reach up to 85°F. Tonight, the weather is expected to be partly cloudy with lows in the upper 50s. + +Hourly forecasts show temperatures remaining in the low 70s during the afternoon before gradually cooling down in the evening. The chance of rain remains low throughout the day.\ +""" + ), + ), ] ) @@ -1767,7 +1839,43 @@ def dummy() -> None: ... # pragma: no cover PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=IsStr())), PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=IsStr())), PartDeltaEvent(index=0, delta=ThinkingPartDelta(signature_delta=IsStr(), provider_name='google-gla')), - PartStartEvent(index=1, part=TextPart(content=IsStr())), + PartEndEvent( + index=0, + part=ThinkingPart( + content="""\ +**Clarifying User Goals** + +I'm currently focused on defining the user's ultimate goal: ensuring their safety while crossing the street. I've pinpointed that this is a real-world scenario with significant safety considerations. However, I'm also mindful of my limitations as an AI and my inability to physically assist or visually assess the situation. + + +**Developing a Safety Protocol** + +I'm now formulating a comprehensive safety procedure. I've pinpointed the essential first step: finding a safe crossing location, such as marked crosswalks or intersections. Stopping at the curb, and looking and listening for traffic are vital too. The rationale behind "look left, right, then left again" now needs further exploration. I'm focusing on crafting universally applicable and secure steps. + + +**Prioritizing Safe Crossing** + +I've revised the procedure's initial step, emphasizing safe crossing zones (crosswalks, intersections). Next, I'm integrating the "look left, right, then left" sequence, considering why it's repeated. I'm focusing on crafting universal, safety-focused instructions that suit diverse situations and address my inherent limitations. + + +**Crafting Safe Instructions** + +I've identified the core user intent: to learn safe street-crossing. Now, I'm focusing on crafting universally applicable steps. Finding safe crossing locations and looking-listening for traffic remain paramount. I'm prioritizing direct, clear language, addressing my limitations as an AI. I'm crafting advice that works generally, regardless of specific circumstances or locations. 
+ + +""", + signature='CiIB0e2Kb6Syj1a961EfbWv4W5C8RgAA/hGleV9VYJtnJFh4CmkB0e2Kb2qMva5NvDLuUvN8VpUjtONdaccbsRQ79+XvVh1AFoHjMdZAETCTSMzbyNktSx0w0C4lJFdld7kI+5ebYSU7ohQP0bDh4gC2w/yL8P7jC2EsgTI4V81lh0geK/9ktUxg6zkbP+oKfQHR7Ypv9395FWZW4+/829hMAush43zw0QshgLy6gCngMYKmJrtYtvjZ2FP5xIvfU/PPHfldzCim2+UQKze4+cLUk/bFJc5W3G5s5bIq/ERKUf5W1Wj62ZLqlu8AI1K+XRQh80EHvayt1im86y2goz+a/+5OsUTwkGpS/6UPCpkBAdHtim8jtCeEvH7amxWDHJTFu6fBt+wX03WIl/Dsn1uTOL9MHR4x5L1AOm+45iJlxdoGIlXtR5bijCGoRpOQVc7WNT9Dt9q0FYEycA85mum+GxJBN9/yug6ULAxmQ55TFNaAwqveUoB2WOj0l4aYPFxZKnBRXoWkiUDmkYBqWg0/JpJVLG/Lh4oQx9DGXpSA7sHsFXO/0J7TCqkBAdHtim/2mGLbQSFLeexCigBRypMkOioMaTMH/brwjRBwzqu1oOqiFjoC1hX1KEehhWRUvL5ytBF3hmtadCs5yRUAcaClTylOT7ac9o9X2Zew5PdlV3uJhQJyclrZq7v3/T7FpzNxtXnW04nyyN1xTOhhnQreeQktmeOG7/eTpZfQbkauZ4ktcTWVQrN1cqUMmcLRhATxDv1JmVKZMzFt/TZ1TOiQ0P+MrgqTAQHR7Ypvect1e/TIFJ1Iv4IHEAK/oNS9iboCWraGGK9LaS7Jve67/GnTGqXB8lnyUdI6VKol/B8mhK2j8GkGrz3i8jyZzUmaVG+1cQKgSR5S9Ydc2XIZA+RD03o5WwgCCUoCnCX1ibQBvDfhnfH2hoQgBqHfIhlsJbnlnE5/daAK+0in+4riONRWNwYrfSd9cPtKfwqOAQHR7YpvF/32Xtcd64G3KIWgzlOuJyrDJtlDRiDr7L/HXp27AkJ9tQLihyGDLNXPumfulkyXMj6fJ6/yVJA7iChdXSrBLN5cstCy5fTmKToBNB1Jy6DMeVq3EEiLwvWRFmmyaLPVhPdsv4caiFk+zIkyZyqNl+b+I5aO7C3zgCQLBz03BJi4e5iY6UYBitwKjgEB0e2Kb40Mzzj3wRl+zYIxmxKeBboY10T0xjUxKQuI5R0m5QckA/YouNyLyOHOgoYdSm3FxcqmzOuLfKGuopjxi3b8VpMcwyRe68+JnnXRqYRlioDaDoTiFMkX+cw/jVzSuezZ9TSlw3XFN1tQgB5qMxaYA+/SDoKdbGq/vrCX4bVsXZ2MDLDkHML2AhwLCp0BAdHtim9Oubf02UU50cfreIZlHR6hxe3tS8AiI+KuzVs3bPD6vuv8igK21QZHbOD17Sql+NCepOUELMizth1neQwTjtomXfHHBODfKVUJ4F20F0CNjhhlKt1aVS2+O6tvrS7aMVmvk3KRt0drrm5VR7pRNXA1oPJdhX1q3MhJDuqan7orvWh3YZ5WGFyEK9YuB5z0pgvYtDercaQ69wqRAQHR7YpvoATYq0iXLopzpIaEXcnZPLxyzHqpVnNqSn2fJRPmLQdspJUM62TsxpBeXAsR0F8yAv3wxuk25Lx84W2cnt20hFt+PbtGQVSM6KfE104XA0iHuidSwb9h5bcicQOQyzkIlrwosgo5FJyYQJFspMwDcHPt1H1xiW/yPaF8ZtL/ZXAomLouhq/bErZ84WEKigEB0e2Kbxs7yDL/L2OSWSIPGHnybOO+2mo9+7iQMARzd6a9AxjNvdTYKwn0iINhZ6Rx1TeVCW0w4UbYQF/ujzgmNtGHdPsEZ+M+5wMDu/U/8kpuWRJZVuJ48f3V19YQxU8Isq75n4AzaqXjK/KUFYeQJbGSfBS5EHrSwlQijhNIv8HQ+NVMj/Svf24KoQEB0e2Kb8FthNnzJDZ+f2+Lshah8D6O/QjfJGrnKvMMrUoUqX5ZqAxYg4R4UBirA6zvaFuKI0V6odeGwXWmPArIp5RC2NiEBaxCtwirXSe0amvaL0hk8CLkKy+brTrZiC7UCdiW6sLz5f9wrU50CdUH1P0jh5VDSuNXFkGBSiz8Yf8WL5DmOdnzs7/HSw8i99XzUdVdKCbzNrT8rXE1RveglgqAAQHR7YpvGr3dgHVESEDYAfaFXQI8ZCZUe3Cv2DmR3wBev2kMmRlixDyjRqXCgCw0EdXsJM8okkHj55sp6EZE0THrCxPCxaqUnALaaFSfh5AJiaC8bRZm/KUgL3I3phMtqSbIlKptGo03BLq9rz8bXgPc6Byiaic+wnfNUJQo8vO5CpQBAdHtim8C13z5v4gDZaJo9xgMLa+CPHKD2fTsBfVEIEJ7RI8G3C/6r6i7sJzvCAqsAC9pX+KgF2iGYM6kLxBRV+cuaV23OSVWqrd4uqBpIIrKjmN8423MHivDsEe6390BTRmSuev8N5SB6Bhdh7q6wzyblOaQ7VO+QpMt+HEfdlXCxtdwyQlQ0RdlHioAOem+VmtvhQqTAQHR7Ypv/TSKdwJl31A4G5XnObS4STu3FwdEdIECw6loDG2t5oTRnVJ69a93v2zNeNztp/LqUb7ptIN2UgileFq6Hiv5mNGpCNyThLSyGiN8JlHHAAEAzlnu2q+d97FxTv1zFMjIVfsWIKNrrr2PpJPv0sgYyYbsxuiOem4azhnFy2Q7ZVuI4xtQbQ/Mis6jNWiaWwqTAQHR7YpvsBQsV+yPEVR6uNrS0i1ToyFW1xp18q8Xzp151kDQbM3CTLxrJtrpi/Y1A/W38plOMMYTH/xZWf7o+PbvAIeXpEVRyZ2ST73gqacgZCRJYqgNybhATFzMMka4YF/ZQIKeYoT6L9mGpaSxeLzIVtiMxCdg5+FCLU4/rEWYoeaO0SXFGZOkcXC5IwmxP766MwqaAQHR7YpvCey4HxoWg4wh/pl5RL1x+GYt4okG+LPCIspPFmOE5ZL4L8CC+CnTmuppL25hGPBxjbTE0/Cld04d2cu+S4ajupggMXN6gt8N7BiAeRW0JWuWRM8kwD7XQ7Ngy8XG2kAIqjwEwX5e8qm6Bc4MrwziwLcjnwjK0M5zmBO7fU7qpMwcdONw06r9fJV1rHp8eicOJDRE48EKdQHR7YpvBby7jsEEGC9v0Ku5pIoeRcn84d7mWEHQnNWeFvX4AD3kp3/7PmxRCBvxHfa6k62zz5MsMwVGHHpU/PGsN/+mObu4tZcIlcPYXprM28wFDNkFgzo/9jprbR0lTAOhyQdkwYC1l+XjNQZgDSiSWHg5zgqKAQHR7YpvLo+F+bUs/EgO1F8w+oGBbMIur2LFu/ptvMzAjN4adrogDjtZvuIMxT9i4kOcGrhGkey5E4jtlzR4q2O46INZk7ubFInL2/TnknmR8uj0LEn08NQb6Qm8T6ftiApfpv5gKgGvGwJz6jttExkNq04DGpnKOF/iYJfk8a/604BVCogeAvSf
rAqCAQHR7YpvRPJBA1auMRSVnz0MjIEkMP0Nfi30IUbhb4RLOaQZ5F6TdxociF2tLU92nDbHydkDgZhEQEEotia6xUl5tOrBABk1zASKkTTnLeNhi6JHBct3JuX3T5OxS4oKQzFlRySBZgvjQWk/H1MDQFoCQq7SofII6h/41DfCi0y+LJgKlwEB0e2KbzGjX2We98l4sdEf3aaVDmY2oka/8sUcEKXbPN12ip4hvdt8apDfdx+T3al50oabnNhl8Hd1G1FIlOr+oJWBH7+TfSLQ1vt1SczRX3QJwhBV145FhcO9+yHhuLVOvxk1QAI8onelLnX/oTSrKcAb6dQjj+kZOsYIq67Hoe2FXn5edxN0Bppg76TWp/PzoyFkiHwSCoYBAdHtim8Cqd73rN2h3De6n/c/CjZmfNYzx/NNgA2XrZzXeuB+DbPINOKNzHkzZQ1kYh1EjlTreIdpVhx58wI3zw1ec7x1u5G7oHDf26IhS85AjDcIXWn7Xp6k2fxJV4K7DzA0gclKmCJFqnzZUNZ7F0NL4vRObmBy/GIILvVP/sBzF2L4KdsKmgEB0e2Kb3zjJMWKJLl/uUxDaoveBXGzzz9mHV6aI65Ur8oIEAYUytuL/1p7YlWylkiBk7UPJ98FmC9TCd9An6f3N7oebwwiFnf7aMtjoKPfhgKPZaHNjRQOJi4egyLkdk/YfPYWDWyJMvDOUMuJtpFhf5oYzTsoYzTrwsw5zeh/n/YL/RISa7KgZwESq9dbXP396n5gEr8J/NwDCo4BAdHtim9xqhsvPCOmY7nmz2ijFtMSFQNd3buUFQRNM18N+knI1AXX/A01rlh29qcdxZIeQ+kN4YKOZoHfRxqlvhTyl/0AT6Q/jI/oWwGHdDdZwZCDE4n3ju1ZN2up2S4lsbXTqSTUKhD5qaV8dGktZAZ88mY8wuiJF2iOsE8uyCM247Z/Sz8fGsgP9Ets6QqOAQHR7Ypvl8gvPbQbGnn0iafjVSBDpHWhJU81msZg+qVjOyUJRmhF4lV97ds4lDpUtl52BwTyHNTlz4STXDMU8PdHpDZTMzMmJ3Qg3iJ/gYDXP5kpGqasQelo9yz0qvEIqeWsKV7tXGxY1njzrRYYEGl/4mmHo23XrS2U9hJPJBz8TMdFQDuw5wRarB9SJ7kKgAEB0e2Kb58mxC0KZgOB7u3f4m66IbHeDWR51Af08Ah+KH4EpcSRqt2iYXijH589mPTKEEJnSNcRkpm/rpRDo+NbYO83B7LB06R/J+JKq/hpzI9JSviv6YFkMMGgvhsWFkHvFN62OFG3y5w8id/IZfvl54z/0ApnTZO0DnVXo2b1vAp3AdHtim/ncRkntLVBoi+V6IJjKZ5Uwye9jnCLbQHyoWeQ0AzP7IWOnDMZLvT1VupfJysJgGuF9mzQVsFf86+abuNBOAUJXcjkTViqFoDEfWWTyZIlQ1dBa/s32qQvkCPQpPLb68rx9IcXpBh9KKaVE8wXn2FhZqgKkAEB0e2KbyzwhSOwreaWP7nfhxP76KGa8iSzUYupJ/IhYwIbi+hNPxOrGAmYoyYM4ywLFljv8IYoy5P4Ht4grxl6kQjUrGu4A0NlioV8UG7iKdZX+NwvIB2iwYKjRhLYz7uE1v4U0t4vGBL6a5W4ulic6Pw3MS2G2TJZRDnv47E6jTLUHlxLpwE7vgYjP/w0AZMKigEB0e2Kb2OtrSlqymij19/hNYnF5ZKclwE2c5hgwpgxqlt6KPIIwYlqyh1JlLrTrK7a/Kwm8RrBq9i90NX1TQbNDBf178fZ55MyyfT92yFzsjnpiqtUEmcLwWmVZpRzlNNugsHS7WG3gpjPKI2tXDy4oKkNCax2qu5zxsbAYjd0WmJhoHlixwu4kz8KlwEB0e2Kb+UXDHMn8p+kZ/6WmCYRbQ9wxkQlKYjbE+28G3g8HgTj/kyqu0ED0meRDCEfH4258605JMv88QSMW/xNXDWegZngBYCuz7izrHD5745Ps4PldgGptwqhs+3LxKqvAPQeYsU+Fllk60I/XuVtfcTAeZRQBy5v+OLzIjD7nSPL3njsDVKRmhyd4hmRLZRgV5Qi6WAHCqgBAdHtim+k265Inoii+qCLrUDth4v5RCK/+siGsm4QS3ACeGPY5UivNrimEsbCM8KuwFq8ykAUCplBUEI8HDI1+OXy7jUx7dNM3Dxvs/L7C7OxF3b3FCF2w7rEIO1MRyYfC/GwMlXdjrcvRBbIy2ZyOXj/C6bO5kO0LFGuxkhyDLyM8kcG9drbDpObNJAFOi7h5ZEXWESsP62fI6xfc2ykcc2Thd7grJ/fCpABAdHtim96cGLSuRmr5lCfmme0s/o7+9n2nSZ8ziW/BLgprp6fg5magVwqRa8L91eLzMHmHbwafd2sa0Ki+dgUWiqJRnItVfNPK1HaIO/r+EAw89KLXMtSgtaHDED2YL1WNsM2QBWnNlIET8ZjK/6BVDJk64eA96lp7m69m7WEbsQkd31f1q+pkEcVCdNg2/jgCk4B0e2Kb3o9fcDaQ0TzMW3qo00/kjGwr7xO/Mlmz30HuSaH48iO92G52Tdqn4Yy0e2GFCnk9JlNjRyjsqeWrw7oTiOIFZ1EgMKlqm/dH8k=', + provider_name='google-gla', + ), + next_part_kind='text', + ), + PartStartEvent( + index=1, + part=TextPart( + content='This is a great question! Safely crossing the street is all about being aware and predictable. Here is a step-by-step' + ), + previous_part_kind='thinking', + ), FinalResultEvent(tool_name=None, tool_call_id=None), PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=IsStr())), PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=IsStr())), @@ -1787,6 +1895,45 @@ def dummy() -> None: ... # pragma: no cover PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=IsStr())), PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=IsStr())), PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=IsStr())), + PartEndEvent( + index=1, + part=TextPart( + content="""\ +This is a great question! Safely crossing the street is all about being aware and predictable. 
Here is a step-by-step guide that is widely taught for safety: + +### 1. Find a Safe Place to Cross +The best place is always at a designated **crosswalk** or a **street corner/intersection**. These are places where drivers expect to see pedestrians. Avoid crossing in the middle of the block or from between parked cars. + +### 2. Stop at the Edge of the Curb +Stand on the sidewalk, a safe distance from the edge of the street. This gives you a clear view of the traffic without putting you in danger. + +### 3. Look and Listen for Traffic +Follow the "Left-Right-Left" rule: +* **Look left** for the traffic that will be closest to you first. +* **Look right** for oncoming traffic in the other lane. +* **Look left again** to make sure nothing has changed. +* **Listen** for the sound of approaching vehicles that you might not be able to see. + +### 4. Wait for a Safe Gap +Wait until there is a large enough gap in traffic for you to walk all the way across. Don't assume a driver will stop for you. If you can, try to **make eye contact** with drivers to ensure they have seen you. + +### 5. Walk, Don't Run +Once it's safe: +* Walk straight across the street. +* **Keep looking and listening** for traffic as you cross. The situation can change quickly. +* **Don't use your phone** or wear headphones that block out the sound of traffic. + +--- + +### Special Situations: + +* **At a Traffic Light:** Wait for the pedestrian signal to show the "Walk" sign (often a symbol of a person walking). Even when the sign says to walk, you should still look left and right before crossing. +* **At a Stop Sign:** Wait for the car to come to a complete stop. Make eye contact with the driver before you step into the street to be sure they see you. + +The most important rule is to **stay alert and be predictable**. 
Always assume a driver might not see you.\ +""" + ), + ), ] ) @@ -2600,6 +2747,7 @@ async def test_google_image_generation(allow_model_requests: None, google_provid BinaryImage( data=IsBytes(), media_type='image/png', + _identifier='8a7952', identifier='8a7952', ) ) @@ -2620,6 +2768,7 @@ async def test_google_image_generation(allow_model_requests: None, google_provid content=BinaryImage( data=IsBytes(), media_type='image/png', + _identifier='8a7952', identifier='8a7952', ) ), @@ -2644,6 +2793,7 @@ async def test_google_image_generation(allow_model_requests: None, google_provid BinaryImage( data=IsBytes(), media_type='image/png', + _identifier='7d173c', identifier='7d173c', ) ) @@ -2664,6 +2814,7 @@ async def test_google_image_generation(allow_model_requests: None, google_provid content=BinaryImage( data=IsBytes(), media_type='image/png', + _identifier='7d173c', identifier='7d173c', ) ), @@ -2693,6 +2844,7 @@ async def test_google_image_generation_stream(allow_model_requests: None, google BinaryImage( data=IsBytes(), media_type='image/png', + _identifier='9ff9cc', identifier='9ff9cc', ) ) @@ -2710,6 +2862,7 @@ async def test_google_image_generation_stream(allow_model_requests: None, google BinaryImage( data=IsBytes(), media_type='image/png', + _identifier='2af2a7', identifier='2af2a7', ) ) @@ -2730,6 +2883,7 @@ async def test_google_image_generation_stream(allow_model_requests: None, google content=BinaryImage( data=IsBytes(), media_type='image/png', + _identifier='2af2a7', identifier='2af2a7', ) ), @@ -2752,15 +2906,17 @@ async def test_google_image_generation_stream(allow_model_requests: None, google [ PartStartEvent(index=0, part=TextPart(content='Here you go!')), PartDeltaEvent(index=0, delta=TextPartDelta(content_delta=' ')), + PartEndEvent(index=0, part=TextPart(content='Here you go! 
'), next_part_kind='file'), PartStartEvent( index=1, part=FilePart( content=BinaryImage( data=IsBytes(), media_type='image/png', - identifier='2af2a7', + _identifier='2af2a7', ) ), + previous_part_kind='text', ), FinalResultEvent(tool_name=None, tool_call_id=None), ] @@ -2796,6 +2952,7 @@ async def test_google_image_generation_with_text(allow_model_requests: None, goo content=BinaryImage( data=IsBytes(), media_type='image/png', + _identifier='00f2af', identifier=IsStr(), ) ), @@ -2831,6 +2988,7 @@ async def test_google_image_or_text_output(allow_model_requests: None, google_pr BinaryImage( data=IsBytes(), media_type='image/png', + _identifier='f82faf', identifier='f82faf', ) ) @@ -2849,6 +3007,7 @@ async def test_google_image_and_text_output(allow_model_requests: None, google_p BinaryImage( data=IsBytes(), media_type='image/png', + _identifier='67b12f', identifier='67b12f', ) ] diff --git a/tests/models/test_groq.py b/tests/models/test_groq.py index a24089cf7c..401dc1b1f4 100644 --- a/tests/models/test_groq.py +++ b/tests/models/test_groq.py @@ -27,6 +27,7 @@ ModelResponse, ModelRetry, PartDeltaEvent, + PartEndEvent, PartStartEvent, RetryPromptPart, SystemPromptPart, @@ -409,7 +410,7 @@ async def test_stream_text(allow_model_requests: None): async with agent.run_stream('') as result: assert not result.is_complete assert [c async for c in result.stream_output(debounce_by=None)] == snapshot( - ['hello ', 'hello world', 'hello world'] + ['hello ', 'hello world', 'hello world', 'hello world'] ) assert result.is_complete @@ -423,7 +424,7 @@ async def test_stream_text_finish_reason(allow_model_requests: None): async with agent.run_stream('') as result: assert not result.is_complete assert [c async for c in result.stream_output(debounce_by=None)] == snapshot( - ['hello ', 'hello world', 'hello world.', 'hello world.'] + ['hello ', 'hello world', 'hello world.', 'hello world.', 'hello world.'] ) assert result.is_complete @@ -476,6 +477,7 @@ async def test_stream_structured(allow_model_requests: None): {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, + {'first': 'One', 'second': 'Two'}, ] ) assert result.is_complete @@ -532,6 +534,7 @@ async def test_stream_structured_finish_reason(allow_model_requests: None): {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, + {'first': 'One', 'second': 'Two'}, ] ) assert result.is_complete @@ -546,7 +549,7 @@ async def test_no_delta(allow_model_requests: None): async with agent.run_stream('') as result: assert not result.is_complete assert [c async for c in result.stream_output(debounce_by=None)] == snapshot( - ['hello ', 'hello world', 'hello world'] + ['hello ', 'hello world', 'hello world', 'hello world'] ) assert result.is_complete @@ -1293,6 +1296,19 @@ async def test_groq_model_web_search_tool_stream(allow_model_requests: None, gro PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' Francisco')), PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' today')), PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='?)\n')), + PartEndEvent( + index=0, + part=ThinkingPart( + content="""\ + +To find the current weather in San Francisco, I will use the search tool to look up this information. + + +search(What is the weather in San Francisco today?) 
+""" + ), + next_part_kind='builtin-tool-call', + ), PartStartEvent( index=1, part=BuiltinToolCallPart( @@ -1301,6 +1317,17 @@ async def test_groq_model_web_search_tool_stream(allow_model_requests: None, gro tool_call_id=IsStr(), provider_name='groq', ), + previous_part_kind='thinking', + ), + PartEndEvent( + index=1, + part=BuiltinToolCallPart( + tool_name='web_search', + args={'query': 'What is the weather in San Francisco today?'}, + tool_call_id=IsStr(), + provider_name='groq', + ), + next_part_kind='builtin-tool-return', ), PartStartEvent( index=2, @@ -1411,6 +1438,7 @@ async def test_groq_model_web_search_tool_stream(allow_model_requests: None, gro timestamp=IsDatetime(), provider_name='groq', ), + previous_part_kind='builtin-tool-call', ), PartDeltaEvent( index=0, @@ -1676,7 +1704,7 @@ async def test_groq_model_web_search_tool_stream(allow_model_requests: None, gro PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' high')), PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' humidity')), PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.')), - PartStartEvent(index=3, part=TextPart(content='The')), + PartStartEvent(index=3, part=TextPart(content='The'), previous_part_kind='builtin-tool-return'), FinalResultEvent(tool_name=None, tool_call_id=None), PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' weather')), PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' in')), @@ -1726,6 +1754,12 @@ async def test_groq_model_web_search_tool_stream(allow_model_requests: None, gro PartDeltaEvent(index=3, delta=TextPartDelta(content_delta='-')), PartDeltaEvent(index=3, delta=TextPartDelta(content_delta='94')), PartDeltaEvent(index=3, delta=TextPartDelta(content_delta='%.')), + PartEndEvent( + index=3, + part=TextPart( + content='The weather in San Francisco today is partly cloudy with a temperature of 61°F (17°C) and high humidity. The current conditions include a wind speed of around 7-22 km/h and a humidity level of 90-94%.' + ), + ), BuiltinToolCallEvent( # pyright: ignore[reportDeprecated] part=BuiltinToolCallPart( tool_name='web_search', @@ -2553,7 +2587,33 @@ async def test_groq_model_thinking_part_iter(allow_model_requests: None, groq_ap PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' next')), PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' time')), PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.\n')), - PartStartEvent(index=1, part=TextPart(content='To')), + PartEndEvent( + index=0, + part=ThinkingPart( + content="""\ + +Okay, so I want to make Uruguayan alfajores. I've heard they're a type of South American cookie sandwich with dulce de leche. I'm not entirely sure about the exact steps, but I can try to figure it out based on what I know. + +First, I think alfajores are cookies, so I'll need to make the cookie part. From what I remember, the dough is probably made with flour, sugar, butter, eggs, vanilla, and maybe some baking powder or baking soda. I should look up a typical cookie dough recipe and adjust it for alfajores. + +Once the dough is ready, I'll need to roll it out and cut into circles. I've seen people use a cookie cutter or even the rim of a glass. The thickness should be medium, not too thin to break easily. + +Baking them in the oven, I suppose at around 350°F for about 10-15 minutes until they're lightly golden. I should keep an eye on them to make sure they don't burn. + +After the cookies are baked and cooled, the next step is the dulce de leche filling. 
I can either make it from scratch or use store-bought. If I make it, I'll need to heat condensed milk until it thickens and turns golden. That might take some time, so I need to be patient and stir frequently to avoid burning. + +Then, I'll sandwich two cookies together with the dulce de leche in the middle. I think pressing them gently is important so they stick together without breaking. + +Finally, I've seen alfajores coated in powdered sugar. So, after assembling, I'll roll each sandwich in powdered sugar to coat them evenly. That should give them the classic look and extra sweetness. + +Wait, I should make sure the cookies are completely cool before filling, otherwise the dulce de leche might melt or the cookies could become soggy. Also, maybe I can add a pinch of salt to balance the sweetness. Oh, and the vanilla extract is important for flavor. + +I might have missed something, but this seems like a good start. I'll follow the steps, and if something doesn't turn out right, I can adjust next time. +""" + ), + next_part_kind='text', + ), + PartStartEvent(index=1, part=TextPart(content='To'), previous_part_kind='thinking'), FinalResultEvent(tool_name=None, tool_call_id=None), PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' make')), PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Ur')), @@ -3173,6 +3233,63 @@ async def test_groq_model_thinking_part_iter(allow_model_requests: None, groq_ap PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='aj')), PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='ores')), PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='!')), + PartEndEvent( + index=1, + part=TextPart( + content="""\ +To make Uruguayan alfajores, follow these organized steps for a delightful cookie sandwich with dulce de leche: + +### Ingredients: +- **For the Cookies:** + - 2 cups all-purpose flour + - 1 cup powdered sugar + - 1/2 tsp baking powder + - 1/4 tsp baking soda + - 1/4 tsp salt + - 1/2 cup unsalted butter, softened + - 1 large egg + - 1 egg yolk + - 1 tsp vanilla extract + +- **For the Filling:** + - 1 can (14 oz) sweetened condensed milk (for dulce de leche) + - Powdered sugar (for coating) + +### Instructions: + +1. **Prepare the Cookie Dough:** + - In a large bowl, whisk together flour, powdered sugar, baking powder, baking soda, and salt. + - Add softened butter and mix until the mixture resembles coarse crumbs. + - In a separate bowl, whisk together egg, egg yolk, and vanilla extract. Pour into the dry mixture and mix until a dough forms. + - Wrap dough in plastic wrap and refrigerate for 30 minutes. + +2. **Roll and Cut Cookies:** + - Roll out dough on a floured surface to about 1/4 inch thickness. + - Cut into circles using a cookie cutter or glass rim. + - Place cookies on a parchment-lined baking sheet, leaving space between each. + +3. **Bake the Cookies:** + - Preheat oven to 350°F (180°C). + - Bake for 10-15 minutes until lightly golden. Allow to cool on the baking sheet for 5 minutes, then transfer to a wire rack to cool completely. + +4. **Make Dulce de Leche:** + - Pour sweetened condensed milk into a saucepan and heat over medium heat, stirring frequently, until thickened and golden (about 10-15 minutes). + +5. **Assemble Alfajores:** + - Spread a layer of dulce de leche on the flat side of one cookie. Sandwich with another cookie, pressing gently. + - Roll each sandwich in powdered sugar to coat evenly. + +6. **Serve:** + - Enjoy your alfajores with a dusting of powdered sugar. Store in an airtight container. 
+ +### Tips: +- Ensure cookies are completely cool before filling to prevent sogginess. +- For an extra touch, add a pinch of salt to the dough for flavor balance. + +Enjoy your homemade Uruguayan alfajores!\ +""" + ), + ), ] ) @@ -4190,7 +4307,40 @@ async def test_groq_model_thinking_part_iter(allow_model_requests: None, groq_ap PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' alf')), PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='ajor')), PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.\n')), - PartStartEvent(index=1, part=TextPart(content='To')), + PartEndEvent( + index=0, + part=ThinkingPart( + content="""\ +Alright, so I'm trying to figure out how to make Argentinian alfajores. I know that Uruguayan alfajores are these delicious cookie sandwiches filled with dulce de leche and coated in powdered sugar. But I heard that Argentinian alfajores are a bit different. I'm not exactly sure what makes them unique, so I need to look into that. + +First, I think about what I know about Argentinian desserts. They have a rich tradition of sweet treats, and alfajores are definitely one of them. Maybe the difference lies in the type of cookies used or the filling. I recall that in some South American countries, alfajores can be more like a biscuit or even a cake-like cookie, whereas in others, they might be crisper. + +I also remember that sometimes alfajores are coated in chocolate instead of just powdered sugar. That could be an Argentinian twist. I need to confirm that. Also, the filling might not just be dulce de leche; perhaps they use other ingredients like jam or chocolate ganache. + +Another thing to consider is the texture of the cookies. Uruguayan alfajores have a softer, more delicate cookie, while Argentinian ones might be crunchier. Or maybe they use a different type of flour or baking technique. I should check recipes from both countries to see the differences in ingredients and preparation methods. + +I also wonder about the history of alfajores in Argentina. They might have been influenced by European immigrants, especially from Spain or Italy, which could explain variations in the recipe. This cultural influence might contribute to differences in how the cookies are made and filled. + +Additionally, I think about the assembly of the alfajores. In Uruguay, it's typically two cookies sandwiching the dulce de leche and then coated in powdered sugar. Maybe in Argentina, they add more layers or use a different coating, like cinnamon or cocoa powder mixed with sugar. + +I also need to consider the availability of ingredients. Dulce de leche is a staple in many South American countries, but maybe in Argentina, they have a slightly different version of it or use it in combination with other fillings. Perhaps they also use nuts or other ingredients in the dough for added texture and flavor. + +Another aspect is the baking process. The Uruguayan cookies might be baked until just set, while Argentinian ones could be baked longer for a crisper texture. Or perhaps they use a different leavening agent to achieve a lighter or denser cookie. + +I also think about the size of the cookies. Are Argentinian alfajores larger or smaller than the Uruguayan ones? This could affect baking time and the overall appearance of the final product. + +Furthermore, I recall that in some regions, alfajores are dipped in chocolate after being filled. This could be a distinguishing feature of the Argentinian version. 
The chocolate coating might be milk, dark, or even white chocolate, adding another layer of flavor to the cookies. + +I also wonder about the storage and serving of Argentinian alfajores. Maybe they are best served fresh, or perhaps they can be stored for a few days like the Uruguayan ones. Understanding this can help in planning the baking and assembly process. + +Lastly, I think about potential variations within Argentina itself. Different regions might have their own take on alfajores, so there could be multiple authentic Argentinian recipes. It would be helpful to find a classic or widely recognized version to ensure authenticity. + +Overall, to cook Argentinian alfajores, I need to focus on the specific characteristics that distinguish them from their Uruguayan counterparts, whether it's the type of cookie, the filling, the coating, or the baking method. By identifying these differences, I can adapt the recipe accordingly to achieve an authentic Argentinian alfajor. +""" + ), + next_part_kind='text', + ), + PartStartEvent(index=1, part=TextPart(content='To'), previous_part_kind='thinking'), FinalResultEvent(tool_name=None, tool_call_id=None), PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' cook')), PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Arg')), @@ -5017,6 +5167,75 @@ async def test_groq_model_thinking_part_iter(allow_model_requests: None, groq_ap PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' chocolate')), PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' coating')), PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.')), + PartEndEvent( + index=1, + part=TextPart( + content="""\ +To cook Argentinian alfajores, follow these steps, which highlight the unique characteristics that distinguish them from their Uruguayan counterparts: + +### Ingredients: +- **For the Cookies:** + - 2 cups all-purpose flour + - 1 cup powdered sugar + - 1/2 teaspoon baking powder + - 1/4 teaspoon baking soda + - 1/4 teaspoon salt + - 1/2 cup unsalted butter, softened + - 1 large egg + - 1 egg yolk + - 1 teaspoon vanilla extract + +- **For the Filling:** + - 1 can (14 oz) sweetened condensed milk (for dulce de leche) + - Optional: jam or chocolate ganache + +- **For the Coating:** + - Powdered sugar + - Optional: cinnamon or cocoa powder mixed with sugar + - Optional: melted chocolate (milk, dark, or white) + +### Instructions: + +1. **Prepare the Cookie Dough:** + - In a large bowl, whisk together flour, powdered sugar, baking powder, baking soda, and salt. + - Add softened butter and mix until the mixture resembles coarse crumbs. + - In a separate bowl, whisk together egg, egg yolk, and vanilla extract. Pour into the dry mixture and mix until a dough forms. + - Wrap dough in plastic wrap and refrigerate for 30 minutes. + +2. **Roll and Cut Cookies:** + - Roll out dough on a floured surface to about 1/4 inch thickness. + - Cut into circles using a cookie cutter or glass rim. + - Place cookies on a parchment-lined baking sheet, leaving space between each. + +3. **Bake the Cookies:** + - Preheat oven to 350°F (180°C). + - Bake for 15-20 minutes until golden. Argentinian cookies might be baked longer for a crisper texture. + - Allow to cool on the baking sheet for 5 minutes, then transfer to a wire rack to cool completely. + +4. **Make Dulce de Leche:** + - Pour sweetened condensed milk into a saucepan and heat over medium heat, stirring frequently, until thickened and golden (about 10-15 minutes). + +5. 
**Assemble Alfajores:** + - Spread a layer of dulce de leche on the flat side of one cookie. For added flavor, a thin layer of jam or chocolate ganache can also be used. + - Sandwich with another cookie, pressing gently. + +6. **Coat the Alfajores:** + - Roll each sandwich in powdered sugar to coat evenly. + - For an Argentinian twist, dip the filled alfajores in melted chocolate (milk, dark, or white) for a chocolate coating. + - Optionally, mix cinnamon or cocoa powder with powdered sugar for a different coating flavor. + +7. **Serve:** + - Enjoy your Argentinian alfajores with a dusting of powdered sugar or chocolate coating. Store in an airtight container for up to 5 days. + +### Tips: +- Ensure cookies are completely cool before filling to prevent sogginess. +- For an extra touch, add a pinch of salt to the dough for flavor balance. +- Experiment with different fillings and coatings to explore various regional variations within Argentina. + +By following these steps, you can create authentic Argentinian alfajores that showcase their unique characteristics, such as a crisper texture and optional chocolate coating.\ +""" + ), + ), ] ) diff --git a/tests/models/test_huggingface.py b/tests/models/test_huggingface.py index 7c9493fed7..c6729545b7 100644 --- a/tests/models/test_huggingface.py +++ b/tests/models/test_huggingface.py @@ -9,7 +9,6 @@ from unittest.mock import Mock import pytest -from dirty_equals import IsListOrTuple from inline_snapshot import snapshot from typing_extensions import TypedDict @@ -24,6 +23,7 @@ ModelResponse, ModelRetry, PartDeltaEvent, + PartEndEvent, PartStartEvent, RetryPromptPart, SystemPromptPart, @@ -568,6 +568,7 @@ async def test_stream_structured(allow_model_requests: None): {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, + {'first': 'One', 'second': 'Two'}, ] ) assert result.is_complete @@ -597,6 +598,7 @@ async def test_stream_structured_finish_reason(allow_model_requests: None): {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, + {'first': 'One', 'second': 'Two'}, ] ) assert result.is_complete @@ -987,26 +989,1230 @@ async def test_hf_model_thinking_part_iter(allow_model_requests: None, huggingfa event_parts.append(event) assert event_parts == snapshot( - IsListOrTuple( - positions={ - 0: PartStartEvent(index=0, part=ThinkingPart(content='')), - 1: PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='\n')), - 2: PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='Okay')), - 3: PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), - 4: PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' the')), - 5: PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' user')), - 6: PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' is')), - 7: PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' asking')), - 413: PartStartEvent(index=1, part=TextPart(content='Cross')), - 414: FinalResultEvent(tool_name=None, tool_call_id=None), - 415: PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='ing')), - 416: PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' the')), - 417: PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' street')), - 418: PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' safely')), - 419: PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' requires')), - 420: PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' 
attent')), - 421: PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='iveness')), - }, - length=1062, - ) + [ + PartStartEvent(index=0, part=ThinkingPart(content='')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='\n')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='Okay')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' the')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' user')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' is')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' asking')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' how')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' to')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' cross')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' the')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' street')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' Let')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' me')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' think')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' about')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' the')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' basic')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' steps')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' First')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' they')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' should')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' check')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' for')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' traffic')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' I')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' need')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' to')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' emphasize')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' looking')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' both')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' ways')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' Wait')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' but')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' in')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' some')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' places')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' people')), + PartDeltaEvent(index=0, 
delta=ThinkingPartDelta(content_delta=' drive')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' on')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' the')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' right')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' or')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' left')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' side')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' so')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' maybe')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' mention')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' looking')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' left')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' then')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' right')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' or')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' right')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' then')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' left')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' depending')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' on')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' the')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' country')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' Then')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' they')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' should')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' wait')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' for')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' a')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' safe')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' gap')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' But')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' what')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' about')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' intersections')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' with')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' traffic')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' lights')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='?')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' They')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' need')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' to')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' use')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' the')), + PartDeltaEvent(index=0, 
delta=ThinkingPartDelta(content_delta=' signals')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' Maybe')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' mention')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' pedestrian')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' crossings')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' and')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' cross')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='walk')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='s')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' Also')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' distractions')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' like')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' phones')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' can')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' be')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' dangerous')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' Should')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' include')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' something')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' about')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' not')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' using')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' phones')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' while')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' crossing')), + PartDeltaEvent( + index=0, + delta=ThinkingPartDelta( + content_delta="""\ +. 
+ +""" + ), + ), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='Oh')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' and')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' for')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' accessibility')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' some')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' people')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' might')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' use')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' wheel')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='ch')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='airs')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' or')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' walkers')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' so')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' cross')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='walk')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='s')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' should')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' be')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' accessible')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' Also')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' if')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' there')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta="'s")), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' a')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' curb')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' check')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' for')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' ramps')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' Oh')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' right')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' traffic')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' signals')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' for')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' pedestrians')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='—')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='like')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' walk')), + PartDeltaEvent(index=0, 
delta=ThinkingPartDelta(content_delta='/d')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='on')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta="'t")), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' walk')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' signs')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' Maybe')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' tell')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' them')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' to')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' start')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' crossing')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' only')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' when')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' the')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' walk')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' signal')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' is')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' on')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' And')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' halfway')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' check')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' again')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='?')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' Also')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' if')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' there')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta="'s")), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' public')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' transport')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' like')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' buses')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' wait')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' for')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' them')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' to')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' stop')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' before')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' crossing')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' in')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' front')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' What')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' about')), + 
PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' bicycles')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='?')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' In')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' some')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' places')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' bike')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' lanes')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' are')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' present')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' so')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' watch')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' for')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' cyclists')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' too')), + PartDeltaEvent( + index=0, + delta=ThinkingPartDelta( + content_delta="""\ +. + +""" + ), + ), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='Wait')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' is')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' the')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' user')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' a')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' child')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='?')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' Maybe')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' the')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' advice')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' needs')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' to')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' be')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' simple')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' But')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' since')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' I')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' don')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta="'t")), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' know')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' the')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' age')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' better')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' cover')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' all')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' ages')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.')), + PartDeltaEvent(index=0, 
delta=ThinkingPartDelta(content_delta=' Also')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' in')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' some')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' areas')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' drivers')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' might')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' not')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' always')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' obey')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' the')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' rules')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' so')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' stay')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' vigilant')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' Alcohol')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' or')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' drugs')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' impair')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' judgment')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' so')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' avoid')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' crossing')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' under')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' their')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' influence')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' Maybe')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' add')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' tips')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' about')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' visibility')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' like')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' at')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' night')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' wear')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' bright')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' clothes')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' Okay')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, 
delta=ThinkingPartDelta(content_delta=' organize')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' these')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' thoughts')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' step')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' by')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' step')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' starting')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' with')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' preparation')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' choosing')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' the')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' right')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' spot')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' checking')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' traffic')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' timing')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' with')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' signals')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' safe')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' crossing')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' steps')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' and')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' additional')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' tips')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' Make')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' sure')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' it')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta="'s")), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' clear')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' and')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' covers')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' all')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' scenarios')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' Check')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' for')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' any')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' missing')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' points')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' like')), + PartDeltaEvent(index=0, 
delta=ThinkingPartDelta(content_delta=' emergency')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' situations')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' or')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' what')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' to')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' do')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' if')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' stuck')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' mid')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='-cross')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='ing')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' H')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='mmm')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' maybe')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' mention')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' if')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' halfway')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' and')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' a')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' car')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' comes')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' keep')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' going')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' or')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' adjust')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' steps')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' Also')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' if')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' unsure')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' don')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta="'t")), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' hesitate')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' to')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' ask')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' for')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' help')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' Okay')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' structure')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' the')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' answer')), + 
PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' with')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' these')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' points')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' in')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' mind')), + PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.\n')), + PartEndEvent( + index=0, + part=ThinkingPart( + content="""\ + +Okay, the user is asking how to cross the street. Let me think about the basic steps. First, they should check for traffic. I need to emphasize looking both ways. Wait, but in some places, people drive on the right or left side, so maybe mention looking left then right, or right then left depending on the country. Then they should wait for a safe gap. But what about intersections with traffic lights? They need to use the signals. Maybe mention pedestrian crossings and crosswalks. Also, distractions like phones can be dangerous. Should include something about not using phones while crossing. + +Oh, and for accessibility, some people might use wheelchairs or walkers, so crosswalks should be accessible. Also, if there's a curb, check for ramps. Oh, right, traffic signals for pedestrians—like walk/don't walk signs. Maybe tell them to start crossing only when the walk signal is on. And halfway, check again? Also, if there's public transport like buses, wait for them to stop before crossing in front. What about bicycles? In some places, bike lanes are present, so watch for cyclists too. + +Wait, is the user a child? Maybe the advice needs to be simple. But since I don't know the age, better cover all ages. Also, in some areas, drivers might not always obey the rules, so stay vigilant. Alcohol or drugs impair judgment, so avoid crossing under their influence. Maybe add tips about visibility, like at night, wear bright clothes. Okay, organize these thoughts step by step, starting with preparation, choosing the right spot, checking traffic, timing with signals, safe crossing steps, and additional tips. Make sure it's clear and covers all scenarios. Check for any missing points like emergency situations or what to do if stuck mid-crossing. Hmmm, maybe mention if halfway and a car comes, keep going or adjust steps. Also, if unsure, don't hesitate to ask for help. Okay, structure the answer with these points in mind. 
+""" + ), + next_part_kind='text', + ), + PartStartEvent(index=1, part=TextPart(content='Cross'), previous_part_kind='thinking'), + FinalResultEvent(tool_name=None, tool_call_id=None), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='ing')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' the')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' street')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' safely')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' requires')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' attent')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='iveness')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' and')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' awareness')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' of')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' your')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' surroundings')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Here')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta="'s")), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' a')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' step')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='-by')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='-step')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' guide')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' to')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' help')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' ensure')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' your')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' safety')), + PartDeltaEvent( + index=1, + delta=TextPartDelta( + content_delta="""\ +: + +""" + ), + ), + PartDeltaEvent( + index=1, + delta=TextPartDelta( + content_delta="""\ +--- + +""" + ), + ), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='###')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='1')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Choose')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' the')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Right')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Spot')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='**\n')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' ')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' -')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='Use')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' a')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' cross')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='walk')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' or')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' intersection')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='**:')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' 
Cross')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' at')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' marked')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' cross')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='walk')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='s')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=',')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' intersections')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' with')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' traffic')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' signals')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=',')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' or')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' where')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' pedestrians')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' commonly')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' cross')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.\n')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' ')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' -')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='Avoid')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' blind')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' spots')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='**:')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Do')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' not')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' cross')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' near')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' curves')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=',')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' hills')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=',')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' or')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' obstacles')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' (')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='e')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.g')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.,')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' parked')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' cars')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=',')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' bushes')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=')')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' that')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' block')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' drivers')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=IsStr())), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' views')), + PartDeltaEvent( + index=1, + delta=TextPartDelta( + content_delta="""\ +. 
+ +""" + ), + ), + PartDeltaEvent( + index=1, + delta=TextPartDelta( + content_delta="""\ +--- + +""" + ), + ), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='###')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='2')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Check')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' for')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Traffic')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='**\n')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' ')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' -')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='Stop')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' at')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' the')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' curb')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='**:')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Face')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' the')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' street')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' and')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' scan')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' left')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=',')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' center')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=',')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' and')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' right')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' for')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' approaching')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' vehicles')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.\n')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' ')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' -')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='Look')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' and')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' listen')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='**:')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Watch')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' for')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' cars')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=',')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' cyclists')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=',')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' or')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' motor')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='cycl')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='ists')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.')), + 
PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Make')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' eye')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' contact')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' with')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' drivers')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' to')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' confirm')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' they')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' see')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' you')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.\n')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' ')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' -')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='Right')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='-hand')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' vs')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' left')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='-hand')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' traffic')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='**')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=':\n')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' ')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' -')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' In')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' countries')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' where')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' vehicles')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' drive')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' on')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' the')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='right')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='**')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' (')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='e')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.g')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.,')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' U')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.S')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.,')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' China')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='):')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Look')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='left')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='**,')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' then')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' right')), + PartDeltaEvent(index=1, 
delta=TextPartDelta(content_delta=',')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' then')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' left')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' again')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.\n')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' ')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' -')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' In')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' countries')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' where')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' vehicles')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' drive')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' on')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' the')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='left')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='**')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' (')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='e')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.g')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.,')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' U')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.K')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.,')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Japan')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='):')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Flip')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' this')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' process')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='—')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='look')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='right')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='**,')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' then')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' left')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=',')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' then')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' right')), + PartDeltaEvent( + index=1, + delta=TextPartDelta( + content_delta="""\ +. 
+ +""" + ), + ), + PartDeltaEvent( + index=1, + delta=TextPartDelta( + content_delta="""\ +--- + +""" + ), + ), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='###')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='3')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Use')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Traffic')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Signals')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='**\n')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' ')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' -')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='Wait')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' for')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' the')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' pedestrian')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' signal')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='**:')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' If')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' available')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=',')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' wait')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' until')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' the')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' "')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='Walk')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='"')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' signal')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' (')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='or')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' green')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' hand')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=')')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' appears')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.\n')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' ')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' -')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='Start')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' crossing')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' promptly')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='**:')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Begin')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' when')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' the')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' signal')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' changes')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' If')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' 
it')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=IsStr())), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' still')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' flashing')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=',')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' finish')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' crossing')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' but')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' move')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' quickly')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.\n')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' ')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' -')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='No')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' signal')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='?')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='**')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Wait')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' for')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' a')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' safe')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' gap')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=':')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' All')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' lanes')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' should')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' be')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' clear')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' of')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' approaching')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' vehicles')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' before')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' stepping')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' off')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' the')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' curb')), + PartDeltaEvent( + index=1, + delta=TextPartDelta( + content_delta="""\ +. 
+ +""" + ), + ), + PartDeltaEvent( + index=1, + delta=TextPartDelta( + content_delta="""\ +--- + +""" + ), + ), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='###')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='4')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Cross')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Cal')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='m')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='ly')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' and')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Predict')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='ably')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='**\n')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' ')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' -')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='Walk')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' (')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='don')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=IsStr())), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' run')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=')**')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=':')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Maintain')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' a')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' steady')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' pace')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Never')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' dart')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' into')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' the')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' street')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.\n')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' ')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' -')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='Keep')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' looking')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='**:')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Gl')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='ance')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' both')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' ways')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' as')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' you')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' cross')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Stay')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' alert')), + 
PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' for')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' drivers')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' who')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' may')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' turn')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' or')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' ignore')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' the')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' light')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.\n')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' ')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' -')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='Avoid')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' distractions')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='**:')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Put')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' your')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' phone')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' down')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='—')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='don')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=IsStr())), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' text')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=',')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' wear')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' headphones')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=',')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' or')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' browse')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' while')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' crossing')), + PartDeltaEvent( + index=1, + delta=TextPartDelta( + content_delta="""\ +. 
+ +""" + ), + ), + PartDeltaEvent( + index=1, + delta=TextPartDelta( + content_delta="""\ +--- + +""" + ), + ), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='###')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='5')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Stay')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Visible')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='**\n')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' ')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' -')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='W')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='ear')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' bright')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='/')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='reflect')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='ive')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' clothing')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='**,')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' especially')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' at')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' night')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' or')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' in')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' low')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' visibility')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' (')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='rain')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=',')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' fog')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=').\n')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' ')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' -')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='Use')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' sidewalks')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' or')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' shoulders')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='**')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' if')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' no')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' cross')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='walk')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='s')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' exist')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' in')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' your')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' area')), + PartDeltaEvent( + index=1, + delta=TextPartDelta( + content_delta="""\ +. 
+ +""" + ), + ), + PartDeltaEvent( + index=1, + delta=TextPartDelta( + content_delta="""\ +--- + +""" + ), + ), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='###')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='6')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Additional')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Tips')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='**\n')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' ')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' -')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='If')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' stuck')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' mid')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='-way')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='**:')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Stop')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' and')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' wait')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' for')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' the')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' signal')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' to')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' change')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' again')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Most')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' signals')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' allow')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' time')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' to')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' pause')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.\n')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' ')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' -')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='B')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='uses')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='/tr')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='ains')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='**:')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Wait')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' until')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' the')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' vehicle')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' stops')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' entirely')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' before')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' crossing')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' 
in')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' front')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Use')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' cross')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='walk')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='s')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' near')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' bus')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' stops')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.\n')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' ')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' -')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='C')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='ycl')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='ists')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='**:')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Watch')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' for')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' bike')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' lanes')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Some')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' drivers')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' may')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' not')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' notice')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' cyclists')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=',')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' so')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' be')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' extra')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' cautious')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.\n')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' ')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' -')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='Accessibility')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='**:')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Use')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' ramps')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' or')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' accessible')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' pedestrian')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' signals')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' (')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='APS')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=')')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' if')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' needed')), + PartDeltaEvent( + 
index=1, + delta=TextPartDelta( + content_delta="""\ +. + +""" + ), + ), + PartDeltaEvent( + index=1, + delta=TextPartDelta( + content_delta="""\ +--- + +""" + ), + ), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='###')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='7')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Stay')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Vig')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='il')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='ant')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='**\n')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' ')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' -')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Never')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' assume')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' a')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' driver')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' will')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' stop')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Always')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' confirm')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' their')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' intent')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' to')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' yield')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.\n')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' ')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' -')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' Avoid')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' crossing')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' under')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' the')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' influence')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' of')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' drugs')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='/al')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='cohol')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=',')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' which')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' slow')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' reaction')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' times')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='.\n')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' ')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' -')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' If')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' unsure')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=',')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' ask')), + 
PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' a')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' local')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' or')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' wait')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' for')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' a')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' group')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' to')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' cross')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' together')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' for')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' added')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' safety')), + PartDeltaEvent( + index=1, + delta=TextPartDelta( + content_delta="""\ +. + +""" + ), + ), + PartDeltaEvent( + index=1, + delta=TextPartDelta( + content_delta="""\ +--- + +""" + ), + ), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='By')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' following')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' these')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' steps')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=',')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' you')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=IsStr())), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' minimize')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' risks')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' and')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' ensure')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' a')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' safer')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' crossing')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='!')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' 🚶')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' **')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='Stay')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' alert')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=',')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' stay')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' alive')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='!')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='**')), + PartDeltaEvent(index=1, delta=TextPartDelta(content_delta='')), + PartEndEvent( + index=1, + part=TextPart(content=IsStr()), + ), + ] ) diff --git a/tests/models/test_instrumented.py b/tests/models/test_instrumented.py index d1c03cc08b..882794d34e 100644 --- a/tests/models/test_instrumented.py +++ b/tests/models/test_instrumented.py @@ -26,6 +26,7 @@ ModelResponse, ModelResponseStreamEvent, PartDeltaEvent, + PartEndEvent, PartStartEvent, RetryPromptPart, SystemPromptPart, @@ -387,6 +388,7 @@ async def test_instrumented_model_stream(capfire: CaptureLogfire): PartStartEvent(index=0, part=TextPart(content='text1')), 
FinalResultEvent(tool_name=None, tool_call_id=None), PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='text2')), + PartEndEvent(index=0, part=TextPart(content='text1text2')), ] ) diff --git a/tests/models/test_mistral.py b/tests/models/test_mistral.py index d2ffe7f5c3..9d5f422046 100644 --- a/tests/models/test_mistral.py +++ b/tests/models/test_mistral.py @@ -612,14 +612,58 @@ class MyTypedDict(TypedDict, total=False): [ {'first': 'One'}, {'first': 'One', 'second': 2}, + {'first': 'One', 'second': 2}, + {'first': 'One', 'second': 2, 'bool_value': True}, {'first': 'One', 'second': 2, 'bool_value': True}, {'first': 'One', 'second': 2, 'bool_value': True, 'nullable_value': None}, + {'first': 'One', 'second': 2, 'bool_value': True, 'nullable_value': None}, + { + 'first': 'One', + 'second': 2, + 'bool_value': True, + 'nullable_value': None, + 'array_value': ['A', 'B', 'C'], + }, + { + 'first': 'One', + 'second': 2, + 'bool_value': True, + 'nullable_value': None, + 'array_value': ['A', 'B', 'C'], + }, + { + 'first': 'One', + 'second': 2, + 'bool_value': True, + 'nullable_value': None, + 'array_value': ['A', 'B', 'C'], + 'dict_value': {'A': 'A', 'B': 'B'}, + }, + { + 'first': 'One', + 'second': 2, + 'bool_value': True, + 'nullable_value': None, + 'array_value': ['A', 'B', 'C'], + 'dict_value': {'A': 'A', 'B': 'B'}, + }, + { + 'first': 'One', + 'second': 2, + 'bool_value': True, + 'nullable_value': None, + 'array_value': ['A', 'B', 'C'], + 'dict_value': {'A': 'A', 'B': 'B'}, + 'dict_int_value': {'A': 1, 'B': 2}, + }, { 'first': 'One', 'second': 2, 'bool_value': True, 'nullable_value': None, 'array_value': ['A', 'B', 'C'], + 'dict_value': {'A': 'A', 'B': 'B'}, + 'dict_int_value': {'A': 1, 'B': 2}, }, { 'first': 'One', @@ -628,6 +672,7 @@ class MyTypedDict(TypedDict, total=False): 'nullable_value': None, 'array_value': ['A', 'B', 'C'], 'dict_value': {'A': 'A', 'B': 'B'}, + 'dict_int_value': {'A': 1, 'B': 2}, }, { 'first': 'One', @@ -719,8 +764,19 @@ class MyTypedDict(TypedDict, total=False): v = [c async for c in result.stream_output(debounce_by=None)] assert v == snapshot( [ + {'first': 'O'}, {'first': 'O'}, {'first': 'On'}, + {'first': 'On'}, + {'first': 'One'}, + {'first': 'One'}, + {'first': 'One'}, + {'first': 'One'}, + {'first': 'One'}, + {'first': 'One'}, + {'first': 'One'}, + {'first': 'One'}, + {'first': 'One'}, {'first': 'One'}, {'first': 'One'}, {'first': 'One'}, @@ -735,9 +791,21 @@ class MyTypedDict(TypedDict, total=False): {'first': 'One'}, {'first': 'One'}, {'first': 'One'}, + {'first': 'One'}, + {'first': 'One'}, + {'first': 'One'}, + {'first': 'One'}, + {'first': 'One'}, + {'first': 'One', 'second': ''}, {'first': 'One', 'second': ''}, {'first': 'One', 'second': 'T'}, + {'first': 'One', 'second': 'T'}, {'first': 'One', 'second': 'Tw'}, + {'first': 'One', 'second': 'Tw'}, + {'first': 'One', 'second': 'Two'}, + {'first': 'One', 'second': 'Two'}, + {'first': 'One', 'second': 'Two'}, + {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, @@ -772,7 +840,7 @@ async def test_stream_result_type_primitif_int(allow_model_requests: None): async with agent.run_stream('User prompt value') as result: assert not result.is_complete v = [c async for c in result.stream_output(debounce_by=None)] - assert v == snapshot([1, 1, 1]) + assert v == snapshot([1, 1, 1, 1, 1]) assert result.is_complete assert result.usage().input_tokens == 6 assert result.usage().output_tokens == 6 @@ -833,31 +901,62 @@ async def 
test_stream_result_type_primitif_array(allow_model_requests: None): assert v == snapshot( [ [''], + [''], + ['f'], ['f'], ['fi'], + ['fi'], + ['fir'], ['fir'], ['firs'], + ['firs'], + ['first'], ['first'], ['first'], ['first'], + ['first'], + ['first'], + ['first', ''], ['first', ''], ['first', 'O'], + ['first', 'O'], + ['first', 'On'], ['first', 'On'], ['first', 'One'], ['first', 'One'], ['first', 'One'], + ['first', 'One'], + ['first', 'One'], + ['first', 'One'], + ['first', 'One', ''], ['first', 'One', ''], ['first', 'One', 's'], + ['first', 'One', 's'], ['first', 'One', 'se'], + ['first', 'One', 'se'], + ['first', 'One', 'sec'], ['first', 'One', 'sec'], ['first', 'One', 'seco'], + ['first', 'One', 'seco'], + ['first', 'One', 'secon'], ['first', 'One', 'secon'], ['first', 'One', 'second'], ['first', 'One', 'second'], ['first', 'One', 'second'], + ['first', 'One', 'second'], + ['first', 'One', 'second'], + ['first', 'One', 'second'], ['first', 'One', 'second', ''], + ['first', 'One', 'second', ''], + ['first', 'One', 'second', 'T'], ['first', 'One', 'second', 'T'], ['first', 'One', 'second', 'Tw'], + ['first', 'One', 'second', 'Tw'], + ['first', 'One', 'second', 'Two'], + ['first', 'One', 'second', 'Two'], + ['first', 'One', 'second', 'Two'], + ['first', 'One', 'second', 'Two'], + ['first', 'One', 'second', 'Two'], ['first', 'One', 'second', 'Two'], ['first', 'One', 'second', 'Two'], ['first', 'One', 'second', 'Two'], @@ -925,7 +1024,24 @@ class MyTypedBaseModel(BaseModel): assert v == snapshot( [ MyTypedBaseModel(first='O', second=''), + MyTypedBaseModel(first='O'), + MyTypedBaseModel(first='On'), MyTypedBaseModel(first='On', second=''), + MyTypedBaseModel(first='One'), + MyTypedBaseModel(first='One'), + MyTypedBaseModel(first='One'), + MyTypedBaseModel(first='One'), + MyTypedBaseModel(first='One'), + MyTypedBaseModel(first='One'), + MyTypedBaseModel(first='One'), + MyTypedBaseModel(first='One'), + MyTypedBaseModel(first='One'), + MyTypedBaseModel(first='One'), + MyTypedBaseModel(first='One'), + MyTypedBaseModel(first='One'), + MyTypedBaseModel(first='One'), + MyTypedBaseModel(first='One'), + MyTypedBaseModel(first='One'), MyTypedBaseModel(first='One', second=''), MyTypedBaseModel(first='One', second=''), MyTypedBaseModel(first='One', second=''), @@ -942,7 +1058,13 @@ class MyTypedBaseModel(BaseModel): MyTypedBaseModel(first='One', second=''), MyTypedBaseModel(first='One', second=''), MyTypedBaseModel(first='One', second='T'), + MyTypedBaseModel(first='One', second='T'), MyTypedBaseModel(first='One', second='Tw'), + MyTypedBaseModel(first='One', second='Tw'), + MyTypedBaseModel(first='One', second='Two'), + MyTypedBaseModel(first='One', second='Two'), + MyTypedBaseModel(first='One', second='Two'), + MyTypedBaseModel(first='One', second='Two'), MyTypedBaseModel(first='One', second='Two'), MyTypedBaseModel(first='One', second='Two'), MyTypedBaseModel(first='One', second='Two'), @@ -1010,11 +1132,17 @@ class MyTypedBaseModel(BaseModel): [ MyTypedBaseModel(first='One', second=''), MyTypedBaseModel(first='One', second='T'), + MyTypedBaseModel(first='One', second='T'), + MyTypedBaseModel(first='One', second='Tw'), MyTypedBaseModel(first='One', second='Tw'), MyTypedBaseModel(first='One', second='Two'), MyTypedBaseModel(first='One', second='Two'), MyTypedBaseModel(first='One', second='Two'), MyTypedBaseModel(first='One', second='Two'), + MyTypedBaseModel(first='One', second='Two'), + MyTypedBaseModel(first='One', second='Two'), + MyTypedBaseModel(first='One', second='Two'), + 
MyTypedBaseModel(first='One', second='Two'), ] ) assert result.is_complete @@ -1383,7 +1511,7 @@ async def get_location(loc_name: str) -> str: async with agent.run_stream('User prompt value') as result: assert not result.is_complete v = [c async for c in result.stream_output(debounce_by=None)] - assert v == snapshot([{'won': True}, {'won': True}]) + assert v == snapshot([{'won': True}, {'won': True}, {'won': True}]) assert result.is_complete assert result.timestamp() == datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc) assert result.usage().input_tokens == 4 @@ -1492,7 +1620,7 @@ async def get_location(loc_name: str) -> str: async with agent.run_stream('User prompt value') as result: assert not result.is_complete v = [c async for c in result.stream_output(debounce_by=None)] - assert v == snapshot(['final ', 'final response', 'final response']) + assert v == snapshot(['final ', 'final response', 'final response', 'final response']) assert result.is_complete assert result.timestamp() == datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc) assert result.usage().input_tokens == 6 diff --git a/tests/models/test_openai.py b/tests/models/test_openai.py index 98dd811051..4886d00c47 100644 --- a/tests/models/test_openai.py +++ b/tests/models/test_openai.py @@ -504,6 +504,7 @@ async def test_stream_structured(allow_model_requests: None): {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, + {'first': 'One', 'second': 'Two'}, ] ) assert result.is_complete @@ -533,6 +534,7 @@ async def test_stream_structured_finish_reason(allow_model_requests: None): {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, + {'first': 'One', 'second': 'Two'}, ] ) assert result.is_complete @@ -558,6 +560,7 @@ async def test_stream_native_output(allow_model_requests: None): {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, + {'first': 'One', 'second': 'Two'}, ] ) assert result.is_complete @@ -594,6 +597,7 @@ async def test_stream_tool_call_with_empty_text(allow_model_requests: None): {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, + {'first': 'One', 'second': 'Two'}, ] ) assert await result.get_output() == snapshot({'first': 'One', 'second': 'Two'}) @@ -626,6 +630,7 @@ async def test_stream_text_empty_think_tag_and_text_before_tool_call(allow_model {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, + {'first': 'One', 'second': 'Two'}, ] ) assert await result.get_output() == snapshot({'first': 'One', 'second': 'Two'}) diff --git a/tests/models/test_openai_responses.py b/tests/models/test_openai_responses.py index 4d6f9f4f2d..13e6d24cb3 100644 --- a/tests/models/test_openai_responses.py +++ b/tests/models/test_openai_responses.py @@ -21,6 +21,7 @@ ModelRequest, ModelResponse, PartDeltaEvent, + PartEndEvent, PartStartEvent, RetryPromptPart, TextPart, @@ -1040,6 +1041,16 @@ async def test_openai_responses_model_web_search_tool_stream(allow_model_request provider_name='openai', ), ), + PartEndEvent( + index=0, + part=ThinkingPart( + content='', + id='rs_00a60507bf41223d0068c9d2fc927081a088e0b920cdfe3866', + 
signature='gAAAAABoydMADQ6HaJB8mYQXlwd-4MrCfzmKqMHXUnSAXWV3huK1UrU1h3Do3pbK4bcD4BAvNiHTH-Pn27MGZDP_53IhKj_vB0egVf6Z_Y2uFPtzmyasYtTzrTkGSfAMR0xfI4wJk99aatk3UyLPNE7EO_vWYzN6CSX5ifJNNcmY3ArW1A7XnmsnMSBys05PsWqLMHZOUFuBvM2W37QUW6QOfBXZy0TamoO5UknNUfZb_TwvSnMEDpa-lXyDn4VuzfxreEGVHGdSyz5oLN0nBr3KwHIfxMRZIf9gi9-hKCnxX7i-ZktNIfTgd_WEmNKlaPO-qjKHPlO_XPKbEfpBdMv5b2P9BIC20ZG3m6qnEc4OqafWZa1iC2szi4eKOEa6neh2ltVLsLS3MlurF4sO-EHQT4O9t-zJ-738mZsOgjsI9rTrLm_aTAJrntSSWRLcP6PI6_ILHyeAl_aN4svtnwQJZhv4_Qf62q70SZQ5fSfqoqfO1YHLcXq6Op99iH3CfAhOjH-NcgThFLpT4-VLYABl8wiWBTsWzdndZoPmvMLEOaEGJOcM6_922FC0Q-fUio3psm_pLcElaG-XIkyn4oNuk6OJQonFE-Bm6WS_1I9sMF0ncSD4gH1Ey-5y2Ayxi3Kb3XWjFvs1RKW17KFXj8sthF3vY5WHUeRKA14WtN-cHsi4lXBFYJmn2FiD3CmV-_4ErzXH8sIMJrDDsqfCoiSbHwih25INTTIj7KAPL2QtIpU6A8zbzQIK-GOKqb0n4wGeOIyf7J4C2-5jhmlF2a6HUApFXZsRcD8e3X1WqSjdTdnRu_0GzDuHhPghRQJ3DHfGwDvoZy6UK55zb2MaxpNyMHT149sMwUWkCVg0BruxnOUfziuURWhT-VJWzv5mr3Z765TFB1PfHJhznKPFiZN0MTStVtqKQlOe8nkwLevCgZY4oT1Mysg7YJhcWtkquKILXe-y6luJBHzUy_aFAgFliUbcrOhkoBk5olAbSz8Y4sSz5vWugYA1kwlIofnRm4sPcvoIXgUD_SGGI3QNsQyRWQEhf7G5mNRrxmLhZZLXAcBAzkw10nEjRfew2Fri7bdvyzJ1OS_af9fHmeqCZG5ievKIX6keUkIYQo_qm4FQFkXZSl9lMHsUSF-di4F6ws31vM0zVLMmH52u12Z3SZhvAFzIV5Vtyt_IfrMV3ANMqVF4SmS4k2qUlv1KuPQVgqGCVHvfeE1oSyYgYF6oFX8ThXNB79wxvi4Oo8fWEZLzZMFH9QEr2c7sOWHYWk-wUMP1auXTQNExEVz22pBxueZGZhRyLdpcA12v8o6vJkVuBj-2eR8GRI7P6InJdQAO9TIBhM7NtJU2NUpeP_84js3RTBVktqBT74nWPaHIddGMSfW2aGmFJovvshhxGMLtN_6XMh4wRKW0IE_-Rfbhk8_-xHKI5McYI048N_TMYOS8KqPPAmGVklRGqPZ5xXMNvQEVweThDTYTo3NoAsS0fN2yMmSwrjRYBHsgYMtil4pd6ddp8dvF_XSJUkW0nF8t6ciI_k47sug3gyw4usqspWxY9Hwbzb4OFzzrgtO_7Ll6lFFFUx2oHy8AO9sJ97Y3Fg6luuew7ZRDzA_4XMrT7mNW6YuT-o2DunaZw-jvQezNHjPN2WhaTS7fkisyhFSFTMBYE-H4psfj_sizutv-LjwbumTcX2mnYE9SZhVr8dL0c7sgwHP1831RxTSSl3ql_obE3ICDooyuM8PYE56Jx0HOOGbEeJd3w91SzNHPG_3SQfXszrZlw4BGWrEUHBbtVY2ZEnsyGNAx6vKO8lz9D-6yZ618foDJSH-Ilk56a5rhr0beWjSd9mYMsr3zpVz6HcpTLYGEgHfPxpT2eaYaC1H_znw7y1eMKamwudYmtz_azX5LrOtwc0p-pXH-kdoNe248pSz9qsmHcXA41fuj2weKQNrmBcghwtfM95B060tnmebJ_B_KkLXL4cNF-hZqi0wAHrHYrZ_WM0Dy90AFH-b7iiWuWz5M1EhZXo179iEdybM-1PgccFJ0zvOqODl7FNxSgWVyNS1k9R42aZx2PzFAfAbBtJ-KVMhUayAvGLNmi35EAT0G6FK65VBEe7A6zPFqzrrAiG8dy3Z0I0253WzIblHPNMpmxI_ca5tIx3u8Za6Nu9rx8mi0CY2jsRSKnqb7RZvLuB78Uj32lb_9jbq5_gL9_y7Bt7U7i7FospyqMFzEYQLvdyrtfNrfY0rB4zr4Mo0tDn_4YOD_d_nP5axUh9_ruqXZ_d3eVdNmlITjQZj8ALe1EfidP8a-Dl62t6STVv8d2y8v9-jy3J7wReLJbJ6gDDnygJllY7NrIVXSjR45FXiCDnpaRonu--I_0b_LRJFOoJUJX0S9YMaXAkKyHSEj-UWjiuk8cIBNcXxwlxnqqNMezvvV113MAOEbfHygDnphzjzZQxteAVbSy0ucGDR2FPi30d6z51NxGnXNS_sM7wnjBMNp4Li0hhttOp6PgvDKPSMAcgUtKLFKE8iWQAvERoUVxw5Et20hNTNXf_0sXOyh0bF0URPGDxSYz9uZI6-nlwVlo1aobdEnn7STSq2_tuTDIrQyfBGZzhv8OB0H3cj9mBs=', + provider_name='openai', + ), + next_part_kind='builtin-tool-call', + ), PartStartEvent( index=1, part=BuiltinToolCallPart( @@ -1047,6 +1058,7 @@ async def test_openai_responses_model_web_search_tool_stream(allow_model_request tool_call_id='ws_00a60507bf41223d0068c9d30021d081a0962d80d50c12e317', provider_name='openai', ), + previous_part_kind='thinking', ), PartDeltaEvent( index=1, @@ -1055,15 +1067,26 @@ async def test_openai_responses_model_web_search_tool_stream(allow_model_request tool_call_id='ws_00a60507bf41223d0068c9d30021d081a0962d80d50c12e317', ), ), + PartEndEvent( + index=1, + part=BuiltinToolCallPart( + tool_name='web_search', + args={'query': 'weather: San Francisco, CA', 'type': 'search'}, + tool_call_id='ws_00a60507bf41223d0068c9d30021d081a0962d80d50c12e317', + provider_name='openai', + ), + next_part_kind='builtin-tool-return', + ), PartStartEvent( index=2, part=BuiltinToolReturnPart( 
tool_name='web_search', - content={'sources': [{'type': 'api', 'url': None, 'name': 'oai-weather'}], 'status': 'completed'}, + content={'status': 'completed', 'sources': [{'type': 'api', 'url': None, 'name': 'oai-weather'}]}, tool_call_id='ws_00a60507bf41223d0068c9d30021d081a0962d80d50c12e317', timestamp=IsDatetime(), provider_name='openai', ), + previous_part_kind='builtin-tool-call', ), PartStartEvent( index=3, @@ -1073,10 +1096,22 @@ async def test_openai_responses_model_web_search_tool_stream(allow_model_request signature='gAAAAABoydMLww_4DcIPiCy5zW1t-Dtx57JodsdP9YkyDCvY9r0nBfoD-bwcBl8FfcFxRuq5nK5ndf90J6Xdxmvl9kGVbCUYSFaOd-kWeE4tgwXM8BwwE3jVs6ZMG3BdiWyXS3alnUO5jcE6kuXeun1LufAdZ6rWtJl3zSmqPycnTh9zoQ4cBBxLDq_qcVS1fgU4WjsWgjCZw6ZWRPWwZk8hywmU7ykCLH7SXItM4oH1m_GCEounJAS8YR4ajUh5KAdN6a1wngfGnXhYdzov98yiNLP6Nrkgr--K4vMTTqWXLTcR6fbWgkijanIeKmfSErCjMT6k5TrAbkFx0rgblHbdQii7zj8seV1BWZse92_k4sltxfc5Ocpyho1YSHhgxyGL7g442xMUEibjPCv6kwPMcW9yGu9wPMWsfPYCXpBbG6kQibQPNFJ_bEubwBRaqdSDq93Aqr1YkTYBja7Tewn8UfzZ8YYaGe5y_K4ZD47lfvDp019dOdXmOuZGC1ECRrMqKzSFYVG1CFY1VhjGdPmzobDoMcpZcLn25s1pg6lnNqNQwOk_IA4MvUcCU5HHD5YjmFkEy5-i_iRoDVu5coK0zyEMvPJ_h10y_ByszcfzS9e0ht5CSilckkFdxTBkZ5epp0YIg1e-PrZ790P-I35Ucquam9OXyULV1Y5bn9ohZa93Tv0JZRxUeTDG72_28xRj8tkJaBAZjoCC7VICw39KVmz-ZkuVN6IIX1WdNzyC4d808-2Tz4UZaU42-wxEWDnSDMD7iZu1Bi9fKKwAYBJt_OcEsJwpW63ZaUSG2PVFfm7a3wRcSMxMTUTTJB7L1Keu1hmNepif5tavn3P35nSq28D_IJyAqAgX7ZyROk2bJqjzSE4A0MddqAoBFFqKBi68n49KH09vDtDXIoh8jVWuIgowgVGr8pN3kuhLI9cir4Pr_WES0tPD7yWHPTzrD7OIJCfQbr_4Y4dEza4ixNi0RTADWzMUZBfr7bvwIsgvg6ZNuQlx_d71Go5VDsT2KI8H8AldiRvNWoLyYTFGyK9Kot97YsS5sEmSYgNAH48NU7pgnM0jNDQU1G39nTNFEjL_ziDwjDT5g3jm4S_gbQfwx-XFT3Pv-JYR-E71AqR--Lg71OsASq49rrlULfl5OENfiT-NB6x8MqnfUI6NpcCsOWLp8XfRbgqmZFutLIi43pcnxEe3cXHLWGF77qJXP6dFb-G5Ide7n9tAOoEgfsVu7hCDPEQ_xrIYRdc2DzDPUMCtXBai24E0AnQF8kxsEtlDW_YmAgGNTl9Gx0tFSGdDuUCsNx__c7v-_LOMWycXUKmH3iEr_su83oGIMapNp2PnLccN4iOxspdZQq0C6WBaR6SrdnGzK-0KwRPRoyKDLNWS8zfluR5bIgKlqd3Sbv_7eL-WO4LQXMvdKP3KS-DBt1HbA-gmyFW03iX2smPQbtVmRLWi1vG329R_07-tHMJSO9OQy6_6aiyO8Rgpbl_CHa1Q9BEkI2csonayDJRPvEXBPuk9-NPUP4VLNPB7npWBLlAqes5ZmhagnC7srTL0fFiLGLJiAxWo1f0BBiIlXjwqHdlgBjTw0KryCnEU8Ic8ATzrqEXXhs-FTBCcWInf3Bt5bzUhy20g7cTtYP-VCbsku-lXQ6wceWrfQVFtjKKICD8I4g9QusAIAvgCUm7J2rR3TLkzwOKngdTFPGQrQ1TYzlkA7q_Ew1uZpaPRckMaEioZYC6Sv_B0rgW0nyBJ0GLrB3AUN60hDrOFntyFHp0FM-Zh1SY-GKGBwZwVetOzM0ZAJ-NreFg1XVgyLTYDNjUrYJjRhr_JARsZ5t0pU4_yI6dPqM5jKO5_k4UpZspfQon6d2-NlWX0EDmz6G4CMTx0TScehYHrQZtPzpVnivc8h_pmXV3jO5GLzNeLWoB70SDPTETo1Of4txiEUaC2komu5B7MN9aR4c7VBOTv1NIjoiZcrd1HFACzZ7r1qAE-G38j1f1YhfZ0_TiMmtfR1cqjAKcFkyRM7rZMyMvvnsH7NFq59gFgWZt0dy0aAdw03XWXFNT67lrw58OYC3NcVozH4SKlmleu7TfjHNWSnJVjJ66riLn9DZWVxPeTk4zuISZn0yyaoXcdW8OMn_mJ9vP-8L1wElMyxKbtBRz-0cW7MshmJ3YXmHWDKbnqETSbDMtqcN_QyRJovopwlptJ8VzL7biuURRFw-l63Kc9vKP72Z-QWOUIPLB4q4nX4yb-IV0mkWFxIUlfv5Cze2anf7zDFyGzeU9xG0onfhJE4HFKcoUT8MzfrHZ0dDZtnEYeL5Xem3GuHpwEVGCxRE_J1joTmJfeWxSVnr2Vey9gaPmXCyRrdKS75v9xSXJFfHvcOO8Qp35Dzk-yFqL3dSOJfOEwDZbEf6QnV7VU1EhJvW4XmRS-wsRLMLCYcLrOx96NHEwb2h2l6gNfbCVJoQrMhMg68qBPnoSYLhML2ho7hWkSNZFy61yX5I-oEJV5XdtjFcBkyurmUD6uYTkJSqXyxLexQiPbT-uv49Yp9cAfFBG23sC9lUQ=', provider_name='openai', ), + previous_part_kind='builtin-tool-return', + ), + PartEndEvent( + index=3, + part=ThinkingPart( + content='', + id='rs_00a60507bf41223d0068c9d300b23481a0b77a03d911213220', + 
signature='gAAAAABoydMLww_4DcIPiCy5zW1t-Dtx57JodsdP9YkyDCvY9r0nBfoD-bwcBl8FfcFxRuq5nK5ndf90J6Xdxmvl9kGVbCUYSFaOd-kWeE4tgwXM8BwwE3jVs6ZMG3BdiWyXS3alnUO5jcE6kuXeun1LufAdZ6rWtJl3zSmqPycnTh9zoQ4cBBxLDq_qcVS1fgU4WjsWgjCZw6ZWRPWwZk8hywmU7ykCLH7SXItM4oH1m_GCEounJAS8YR4ajUh5KAdN6a1wngfGnXhYdzov98yiNLP6Nrkgr--K4vMTTqWXLTcR6fbWgkijanIeKmfSErCjMT6k5TrAbkFx0rgblHbdQii7zj8seV1BWZse92_k4sltxfc5Ocpyho1YSHhgxyGL7g442xMUEibjPCv6kwPMcW9yGu9wPMWsfPYCXpBbG6kQibQPNFJ_bEubwBRaqdSDq93Aqr1YkTYBja7Tewn8UfzZ8YYaGe5y_K4ZD47lfvDp019dOdXmOuZGC1ECRrMqKzSFYVG1CFY1VhjGdPmzobDoMcpZcLn25s1pg6lnNqNQwOk_IA4MvUcCU5HHD5YjmFkEy5-i_iRoDVu5coK0zyEMvPJ_h10y_ByszcfzS9e0ht5CSilckkFdxTBkZ5epp0YIg1e-PrZ790P-I35Ucquam9OXyULV1Y5bn9ohZa93Tv0JZRxUeTDG72_28xRj8tkJaBAZjoCC7VICw39KVmz-ZkuVN6IIX1WdNzyC4d808-2Tz4UZaU42-wxEWDnSDMD7iZu1Bi9fKKwAYBJt_OcEsJwpW63ZaUSG2PVFfm7a3wRcSMxMTUTTJB7L1Keu1hmNepif5tavn3P35nSq28D_IJyAqAgX7ZyROk2bJqjzSE4A0MddqAoBFFqKBi68n49KH09vDtDXIoh8jVWuIgowgVGr8pN3kuhLI9cir4Pr_WES0tPD7yWHPTzrD7OIJCfQbr_4Y4dEza4ixNi0RTADWzMUZBfr7bvwIsgvg6ZNuQlx_d71Go5VDsT2KI8H8AldiRvNWoLyYTFGyK9Kot97YsS5sEmSYgNAH48NU7pgnM0jNDQU1G39nTNFEjL_ziDwjDT5g3jm4S_gbQfwx-XFT3Pv-JYR-E71AqR--Lg71OsASq49rrlULfl5OENfiT-NB6x8MqnfUI6NpcCsOWLp8XfRbgqmZFutLIi43pcnxEe3cXHLWGF77qJXP6dFb-G5Ide7n9tAOoEgfsVu7hCDPEQ_xrIYRdc2DzDPUMCtXBai24E0AnQF8kxsEtlDW_YmAgGNTl9Gx0tFSGdDuUCsNx__c7v-_LOMWycXUKmH3iEr_su83oGIMapNp2PnLccN4iOxspdZQq0C6WBaR6SrdnGzK-0KwRPRoyKDLNWS8zfluR5bIgKlqd3Sbv_7eL-WO4LQXMvdKP3KS-DBt1HbA-gmyFW03iX2smPQbtVmRLWi1vG329R_07-tHMJSO9OQy6_6aiyO8Rgpbl_CHa1Q9BEkI2csonayDJRPvEXBPuk9-NPUP4VLNPB7npWBLlAqes5ZmhagnC7srTL0fFiLGLJiAxWo1f0BBiIlXjwqHdlgBjTw0KryCnEU8Ic8ATzrqEXXhs-FTBCcWInf3Bt5bzUhy20g7cTtYP-VCbsku-lXQ6wceWrfQVFtjKKICD8I4g9QusAIAvgCUm7J2rR3TLkzwOKngdTFPGQrQ1TYzlkA7q_Ew1uZpaPRckMaEioZYC6Sv_B0rgW0nyBJ0GLrB3AUN60hDrOFntyFHp0FM-Zh1SY-GKGBwZwVetOzM0ZAJ-NreFg1XVgyLTYDNjUrYJjRhr_JARsZ5t0pU4_yI6dPqM5jKO5_k4UpZspfQon6d2-NlWX0EDmz6G4CMTx0TScehYHrQZtPzpVnivc8h_pmXV3jO5GLzNeLWoB70SDPTETo1Of4txiEUaC2komu5B7MN9aR4c7VBOTv1NIjoiZcrd1HFACzZ7r1qAE-G38j1f1YhfZ0_TiMmtfR1cqjAKcFkyRM7rZMyMvvnsH7NFq59gFgWZt0dy0aAdw03XWXFNT67lrw58OYC3NcVozH4SKlmleu7TfjHNWSnJVjJ66riLn9DZWVxPeTk4zuISZn0yyaoXcdW8OMn_mJ9vP-8L1wElMyxKbtBRz-0cW7MshmJ3YXmHWDKbnqETSbDMtqcN_QyRJovopwlptJ8VzL7biuURRFw-l63Kc9vKP72Z-QWOUIPLB4q4nX4yb-IV0mkWFxIUlfv5Cze2anf7zDFyGzeU9xG0onfhJE4HFKcoUT8MzfrHZ0dDZtnEYeL5Xem3GuHpwEVGCxRE_J1joTmJfeWxSVnr2Vey9gaPmXCyRrdKS75v9xSXJFfHvcOO8Qp35Dzk-yFqL3dSOJfOEwDZbEf6QnV7VU1EhJvW4XmRS-wsRLMLCYcLrOx96NHEwb2h2l6gNfbCVJoQrMhMg68qBPnoSYLhML2ho7hWkSNZFy61yX5I-oEJV5XdtjFcBkyurmUD6uYTkJSqXyxLexQiPbT-uv49Yp9cAfFBG23sC9lUQ=', + provider_name='openai', + ), + next_part_kind='text', ), PartStartEvent( index=4, part=TextPart(content='San Francisco', id='msg_00a60507bf41223d0068c9d30b055481a0b0ee28a021919c94'), + previous_part_kind='thinking', ), FinalResultEvent(tool_name=None, tool_call_id=None), PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' weather')), @@ -1122,6 +1157,13 @@ async def test_openai_responses_model_web_search_tool_stream(allow_model_request PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' for the')), PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' cooler evening')), PartDeltaEvent(index=4, delta=TextPartDelta(content_delta='. ')), + PartEndEvent( + index=4, + part=TextPart( + content='San Francisco weather today (Tuesday, September 16, 2025): Mostly sunny and pleasant. Current conditions around 71°F; expected high near 73°F and low around 58°F. A light jacket is useful for the cooler evening. 
', + id='msg_00a60507bf41223d0068c9d30b055481a0b0ee28a021919c94', + ), + ), BuiltinToolCallEvent( # pyright: ignore[reportDeprecated] part=BuiltinToolCallPart( tool_name='web_search', @@ -2840,6 +2882,20 @@ async def test_openai_responses_thinking_with_code_execution_tool_stream( provider_name='openai', ), ), + PartEndEvent( + index=0, + part=ThinkingPart( + content="""\ +**Calculating a large integer** + +I need to compute 123456 raised to the power of 123. That's an enormous integer, and the user probably wants the exact value. I can use Python's ability to handle big integers, but the output will likely be extremely long — potentially hundreds of digits. I should consider that and prepare to return the result as plain text, even if it ends up being around 627 digits. So, let's go ahead and compute that!\ +""", + id='rs_68c3509b2ee0819eba32735182d275ad0f2d670b80edc507', + signature='gAAAAABow1CfwMTF6GjgPzWVr8oKbF3qM2qnldMGM_sXMoJ2SSXHrcL4lsIK69rnKn43STNM_YZ3f5AcwxF4oThzCOPl1g9-u4GGFd5sISVWJYruCukTVDPaEEzdmJqCU1JMSIZvlvqo7b5PsUGyQU5ldX4KXDq8zs4NmRyLIJe-34SCmDG3BYVWR_O-CtcjH0tF9e3XnJ5T9TvxioDEGbASqXMKx5XB9P_b1ser8P9WIQk6hxZ8YX-FAmWSt-sad-zScdeTmyPcakDb7Z4NVcXmL_I-hoQYH_lu-HPFVwcXU8R7yeXU-7YF3vZBE84cmFuv25lftyojbdGq2A7uxGJZBPMCoUBDGBNG2_7mVvKyGz_ZZ6vXIO0GVDhHdW4Y012pkoDfLp6B-B9CGvANOH3ORlcbhB8aT9qN5bY773wW44JIxRU3umkmNzwF7lkbmuMCbGybHYSzqtkOrMIRgqxaXOx3bGbsreM4kGwgD3EXWqQ1PVye_K7gRkToVQpfpID5iuH4jJZDkvNjjJI09JR2yqlR6QkQayVg2x1y8VHXoMYjNdQdZeP62AguqYbgrlBRcjaUnw78KcWscQHaNsg0MfxL_5Q-pZR1OPVsFppHRTzrVK8458d05yEhDmun345oI9ScBrtXFRdHXPy0dQaayfjxM9H0grPrIogMw_zz4jAcFqWxE_C7GPMnNIJ_uEAhkPOetpNb-izd-iY4pGYKs8pmCB5czrAlKC1MXTnowrlWcwf5_kuD5SzWlzlWOoKWCeBDOZuKTDVJKXh_QCtQfftomQazDFCiCSgaQMuP7GaPcDuS1jdQoMQBcFfKuWoq-3eQBOCiEOAERH81zR4hz1x02T_910jGreSpfgxSqt4Td0pDDSmlEV6CwaUDQvrPc67d8_Wtx8YKv4eBH544_p1k9T8tHo3Q7xvgE37ZCdd_AVhC2ed1b5oUI95tM570HAVugFilcHJICa1RbFzIlRkNgI4k2JvsVWtD5_h3x6ZaEFTomwIXlochYgsegh8RJIRRCNKO9ebsvTrkdl8n1mb3hLrz7puwCkRFyUkxYBGT9zUjuKrjp_IjTvvov29v6pwYHg2Xd0nAfLP4WWWPBLNx3oV1-yOfXStRGHMZTB6iN9d0Bxi2QS7dk-rPPXml5HxrSo1TG06EdBXQ1VgrkWIxG1TF97-gK9oWWT9S5aaYKZAOdaqDvi7qO8I-4VwExtIq4Do3BHnWrgKNHfyuAobQK4H_CFMElYibJHwA9t-UGujMic07AxS-2XjXaCtjf7LnW_aXE2rQDqzHiTiLmTqT6jYHP0WHGSqFTOFkNmzqy6uVfU-TbdT91zDBeesc8XpzCXWBVKqxEzuQGdJrYk6ieZaxL76Kjs4jyo838LMJCXzhcF8enukz_llnoxAV59hTDAn0MUQvstGlDX0ToI7C8Oc0NZfZU5Pi4gs8u0He_Nw5UsoV7sA-jk4M45sFt6g3u00kJFP3gIcdvOzHcRK5z3Sfb9JF0bnvIYSbUFUidEJxSOAcRlxofOJPnkPtWCYiiv3zSVxZXX77-wtc8yrOYFzH1k_8P6CDpcfzOW7Yl1Tajgcm20nygmPlFtXF3RNFPztW1V5GwQHc99FvT4ZAex3fQ_UBDKyXnyGoySgpZbHQIvhzUhDEGm77EiYw5FoF6JgnHGGUCbfXr2EudtpbGW8MRHop2ytonb8Hq7w10yQSginBbH_w3bwtd7cwgDKcp6wIPotjpEC-N1YDsRqhPuqxVA==', + provider_name='openai', + ), + next_part_kind='builtin-tool-call', + ), PartStartEvent( index=1, part=BuiltinToolCallPart( @@ -2847,6 +2903,7 @@ async def test_openai_responses_thinking_with_code_execution_tool_stream( tool_call_id='ci_68c3509faff0819e96f6d45e6faf78490f2d670b80edc507', provider_name='openai', ), + previous_part_kind='thinking', ), PartDeltaEvent( index=1, @@ -2945,6 +3002,16 @@ async def test_openai_responses_thinking_with_code_execution_tool_stream( args_delta='"}', tool_call_id='ci_68c3509faff0819e96f6d45e6faf78490f2d670b80edc507' ), ), + PartEndEvent( + index=1, + part=BuiltinToolCallPart( + tool_name='code_execution', + args='{"container_id":"cntr_68c3509aa0348191ad0bfefe24878dbb0deaa35a4e39052e","code":"n = pow(123456, 123)\\nlen(str(n))"}', + tool_call_id='ci_68c3509faff0819e96f6d45e6faf78490f2d670b80edc507', + provider_name='openai', + ), + 
next_part_kind='builtin-tool-return', + ), PartStartEvent( index=2, part=BuiltinToolReturnPart( @@ -2954,6 +3021,7 @@ async def test_openai_responses_thinking_with_code_execution_tool_stream( timestamp=IsDatetime(), provider_name='openai', ), + previous_part_kind='builtin-tool-call', ), PartStartEvent( index=3, @@ -2962,6 +3030,7 @@ async def test_openai_responses_thinking_with_code_execution_tool_stream( tool_call_id='ci_68c350a41d2c819ebb23bdfb9ff322770f2d670b80edc507', provider_name='openai', ), + previous_part_kind='builtin-tool-return', ), PartDeltaEvent( index=3, @@ -3048,6 +3117,16 @@ async def test_openai_responses_thinking_with_code_execution_tool_stream( args_delta='"}', tool_call_id='ci_68c350a41d2c819ebb23bdfb9ff322770f2d670b80edc507' ), ), + PartEndEvent( + index=3, + part=BuiltinToolCallPart( + tool_name='code_execution', + args='{"container_id":"cntr_68c3509aa0348191ad0bfefe24878dbb0deaa35a4e39052e","code":"str(n)[:100], str(n)[-100:]"}', + tool_call_id='ci_68c350a41d2c819ebb23bdfb9ff322770f2d670b80edc507', + provider_name='openai', + ), + next_part_kind='builtin-tool-return', + ), PartStartEvent( index=4, part=BuiltinToolReturnPart( @@ -3057,6 +3136,7 @@ async def test_openai_responses_thinking_with_code_execution_tool_stream( timestamp=IsDatetime(), provider_name='openai', ), + previous_part_kind='builtin-tool-call', ), PartStartEvent( index=5, @@ -3065,6 +3145,7 @@ async def test_openai_responses_thinking_with_code_execution_tool_stream( tool_call_id='ci_68c350a5e1f8819eb082eccb870199ec0f2d670b80edc507', provider_name='openai', ), + previous_part_kind='builtin-tool-return', ), PartDeltaEvent( index=5, @@ -3085,6 +3166,16 @@ async def test_openai_responses_thinking_with_code_execution_tool_stream( args_delta='"}', tool_call_id='ci_68c350a5e1f8819eb082eccb870199ec0f2d670b80edc507' ), ), + PartEndEvent( + index=5, + part=BuiltinToolCallPart( + tool_name='code_execution', + args='{"container_id":"cntr_68c3509aa0348191ad0bfefe24878dbb0deaa35a4e39052e","code":"n"}', + tool_call_id='ci_68c350a5e1f8819eb082eccb870199ec0f2d670b80edc507', + provider_name='openai', + ), + next_part_kind='builtin-tool-return', + ), PartStartEvent( index=6, part=BuiltinToolReturnPart( @@ -3094,9 +3185,12 @@ async def test_openai_responses_thinking_with_code_execution_tool_stream( timestamp=IsDatetime(), provider_name='openai', ), + previous_part_kind='builtin-tool-call', ), PartStartEvent( - index=7, part=TextPart(content='123', id='msg_68c350a75ddc819ea5406470460be7850f2d670b80edc507') + index=7, + part=TextPart(content='123', id='msg_68c350a75ddc819ea5406470460be7850f2d670b80edc507'), + previous_part_kind='builtin-tool-return', ), FinalResultEvent(tool_name=None, tool_call_id=None), PartDeltaEvent(index=7, delta=TextPartDelta(content_delta='456')), @@ -3313,6 +3407,16 @@ async def test_openai_responses_thinking_with_code_execution_tool_stream( PartDeltaEvent(index=7, delta=TextPartDelta(content_delta='854')), PartDeltaEvent(index=7, delta=TextPartDelta(content_delta='332')), PartDeltaEvent(index=7, delta=TextPartDelta(content_delta='416')), + PartEndEvent( + index=7, + part=TextPart( + content="""\ +123456^123 equals: 
+180302106304044807508140927865938572807342688638559680488440159857958502360813732502197826969863225730871630436419794758932074350380367697649814626542926602664707275874269201777743912313197516323690221274713845895457748735309484337191373255527928271785206382967998984330482105350942229970677054940838210936952303939401656756127607778599667243702814072746219431942293005416411635076021296045493305133645615566590735965652587934290425473827719935012870093575987789431818047013404691795773170405764614646054949298846184678296813625595333311611385251735244505448443050050547161779229749134489643622579100908331839817426366854332416\ +""", + id='msg_68c350a75ddc819ea5406470460be7850f2d670b80edc507', + ), + ), BuiltinToolCallEvent( # pyright: ignore[reportDeprecated] part=BuiltinToolCallPart( tool_name='code_execution', @@ -3841,6 +3945,16 @@ async def test_openai_responses_code_execution_return_image_stream(allow_model_r provider_name='openai', ), ), + PartEndEvent( + index=0, + part=ThinkingPart( + content='', + id='rs_06c1a26fd89d07f20068dd936ae09c8197b90141e9bf8c36b1', + signature='gAAAAABo3ZN28TIB2hESP9n7FpWJJ4vj1KEPIVHYTNh64J3S9rOSRfmmTK_uSNB79wwlv3ur6X9Yl9sPe6moHK4nud8jgeScuOeCDq70JGXZ6xH_NBdiDWzeMis1WIDsyJrADdADGQRhjb8sXi6lz3nNvjeqXD-oZJkxTJ9FeJsCNNPBHX-ZYRIYZ7vGKLPfmi5qNS7V6VVGvwEWOBwW75ptObu5E8g2TqhPlUzsVoZsIZiczRXq6zQpDtMPAtv6Mz8puaq-o65P5-vZMywmEjyi0Dd2M9ozUfhWfhpEhCsAiItesA802-TSBQCKeP62riRAMJvfD3PEGLYL9d_7mUvJYSsiOADU0K6wfI6y8bRL-UaWUvn60KfPvqfBFm9-hwP1NS77OKoZABIuGz5sc3BuAh6ebKrJkfNHq7W0BA09S2gt3wLPzflpVl-wJ74L9UGnaKpmG3XRFogff_SNgDhO0_Cb4-1PYJi2NpqnCwTG2c8EFxXiP4trdynbpgRD5hKDj65FU46cBjR0g00bCShqwsseAzw_lAxbawcjF0zmAyz68Km2jCRKHRGgeMpbT-YQWs04IizKYsWfF-8pXX2vwSqk3Kb51OysuPN0K3gihF9v2tPnK2qFzkvNics__CDabCmafEKQlLp6TDRc5RY4ZcSHNwUM_dybJStzoH6qed1GQNt05wBhDZg39N7pJ8_dG7wXCSGHY5CRORZm19UGTd9DoZMzr8JmtxmRgJoKCHW_gavpt4__zifPVxqLUWj6GBaQRT8pR_Tym27HcsC0GbHLR1nel9hC6RzydTU5y7LWY_NoGUE4WZX5rHe5t73lFNSMwd9-6i9Qlj60_rBZ5z9oTAl_Ksywgo68AG7dFdSeI3VLnOyzhqeePn0ywaMp3HqO-FIXW3fjqtM2XMMMMn2Cje5rZhJ9JNmMqnxpltITkVdHMo7Yr1WFTkwLByEOb3M4LCq5B3dM1s1pVmqWAc9YNjpB7Fbi6fG90EAYFNEM4ubOE7y2d5E4hco0MbEKg-Fh0ubh1I2Y1kthZFEmPQLm6fFaljJKPtYojEZZ2cZ7sN3UaVg8Zpf3A7WS9kM2--lL5LuBnVDebf8Xrzv9dTmJvOtwWzJsY4RxWdnzfl_ZokHmg_HDNbeZpHsVI0gqHGr7YTlFJ0NUXW9mzZMx9e_VTrrf34XwRue3xVCqzsspRMjMIlAoDp0Rp0L2tJWAbKs_btqVpqjz8p-64CzSRq65BmSP6i86G0cJ9WLSD3gL3wR-Zt2HyvUvecHVmgKhXgY3F-RchYRO7TarJgyZY5bP2EEpHUwSWx4uWjYfzXMGYn8gNwgwl89qog-inK88qSG0DbqJQPwYNuRjS7Mu01O6eV39Zu7Njsn2io-kPc5HLRrbbhN7qCSki8yPWE_7yPtbIKlwWKOlEYx8_SGgE7waBFRem7ElsE9wvCX5KknilmN5_d9L4Sos0oT5NHAhApvVVDcygz9VGYBAmWfMOynDnOiTIpsAdjHmuZG7GJNAtUEYx7U7pNqbD2FJMIeN0L-3uqhxisRzeX64JZkVHWYL8HjeC1zHiUMZXKW1KXIvIU2_BCtqay22FtBskeMXZAReKhv3eX2oQlWL2Ps9VOk2imzjqBbFLzJgDq0iFoaHdOXGqo54GYZIxfWi10uo65s-3gOGmqPPE02FHEMjK7VHFjMh91FPhh8TmpWjOfa9QEcpEHSZJ6ipUMTVfRHHHshB6Sb74x-Jfr6Ioq2RnWd3E32GpE3kd1poqOssBi5jCqsA86tIMt0m8p_CDu_ANvMNKTiGTQdejm2rUhccpdbp8uLBPnqWxyGOCTlREglHPeh2EzjEMbtIaFp2NhHE6UlJ_nw40CDa5PA7C4lgUkn-4KtPy6rSaMu0mWM4vPO-5ksdtB3E5PkCdIB8j7htbhZH_MTv9RL7loDNkRVlJRSBiAC_qCGgVPyP4l1w4imdey-_HuVCKBD2vaXUz2l2efn-jLSlhty5vBOR-kr0EsU02_NYZtOKgBR1zIslAlnhM8lTxJWH4osSXHa4fIx9O9tyALjvxhooYww_Die_8iCH4u5cF53z3mvoK3Knzeada3jglwQyL3_uUQegcFKpvZwVAcguVMvrsbNgdR9VeKmYq8U7yBvziP-_vpj1UZcf3QxlNK_oOgDg9lxP3vsSKzxliW422svFDiyPkWPh1DWmry1xBD4Pldemf8OEvgSHSDAlegWoBnfOHljDcPf6kT0PaC-jHrKn8t1cQgWk1-1oxiW4zKIlKGoRvmo4lCcUfqGXb5EPuZM1qRFWxv4roAVoxdLV0Pz53L_Q-grQWvbKH_Rl6Dw1BysU55Klt8vn_XBL5Zw_UlbT9FrszDRjJ56F7zElzqVYunI5uJaPWTwQyO-4dvM94CqiUU59iFkfZqaSulYktZrgZeXe0lw59ecQnL_pR2xwkialTgDoqtPksIjTu
WVzkiW9hIL5t9sHyCdJ9nqmwZRZU-JuTPXswmrJEJ23GhvtH9kWsswLd0qvmY5mV3cwr7hlFNWEf8_5e3LoCa9uHQgIa0uquekJ3St9dLOXpkcRv74nCpxkcjems_2ZC71DRU63NILFjKC5ffsUPOZ4NfevDMUDbYHdeyVV6E2f-_1yMYCWI_sws69fWQkWUIv33hk7Gm55NaNgLD4RYCUBTO7v1FtEZiVYAU5ab7NvvnTJ3FaEHo9G9eTzN1I_MmPzqlYX539YF_DDedh0ThnSoJl7PYD-7LhRRG1215KmsTWbqDGmtTsHePAVRSh464XHgiZ6cNPNogtMl4ym6r6nsMbzFP2krBR1f-u0tHfQFxAeLyBWij01Z1WBz4GBh3bpdLrB85AlvFeY7R46PPydAHxwwanYVyxpS0UmS7Y2S37EVRdFzai1izvoy3-wA05YKcnRiUKR-oMcLf-BmB3HHZnY77YOuqQBUZNI7OR8B6lvTARQuoJbK26ONmXEsH-VoBJR7C-hNiXMVh1jHfhuaBAj6Dg9g1Vs2kGxfoJUXB5dlFmR42mnyGcT96N8ZAIdIoQSrBzai6bQbuvOb3OAcG2lEhOZHZiwFRCzpHMfu5dctZ_wcTUhYZwgOcBNIo4WELyjv0Yx22AHSHcrUzFezOwibs-heUF_ciKWkGv9OaabaAGTaTVncfCnS7rOcD3Xum89EAVegpYiQzK0DZ_VKooPoddgHs6diYOEn4iJyvE54vaVi72NAy0Tf9poRlidKaM009FImefEtZqwD1MmaeVbjcClv5Xwyh-KCQ2hCZmrnJ2P_e0bWIsE0MAJOK8iU6Q3zxbntbZAQAKZHqqauT8kkRYxk6oBicV5BS-whqDN_GoNZrnRLTNkjk1a8mnqg_kucvC1mCQRbvP367DYqZGuAd2EQWVLSBQibHoVIUcYAFbsfRHfsQ-uiZVZsjZ-xGM-ZcTzCJ6p-hFi9IQXKqOioM_xzRl4TSY-AEbGja_RY0puxi8BeZXvSxx8eYsJ0TRtIIQwloZzKpbx1OwyK-Ibfj01PU5NIurJL10PKXcnc7ImXN-b_p8wfzEVN12lSbQ8m-Rs0tx32jfvviXyHtWYfHuNqP0eL3Xjuka6FGnuDOeOAIzy4xj1vqhXd8UN2tiFOObl4Rza5pKzF-0IcEsKX36v4iN8oYxOoCxCxLwvFw3znYiAKe6CVky4e46LxZOI3bGM6MSrypwblPMA2gC_ogfMiYViJe8gsgld9UvgQaFfj0EEgfc0BWfxVw2i6Yv3OcH3T1jaHnCVgvcDpTXI4-ZeeWKl6fhH9ukYAG4-Y2mGiJhxJ7cjSg8CwU0KDmNRwoXGB2FT0bKWovkcFYM5ueMbXFTZ4FFcgfWcOzXFZka82HFB_iqD1XvOYMFQNiz3jdtuOr8o66rtCVAjJnuoTQDmbSrWPU0-utUMJx-4QAlZM8hdtXGfNBp0JRxctMZdxR4BAzF7JH_ETYi3itZkgDLEs9JBdty6gUiM0NdR6F_7mxsHCik3rpb5bauJKP89gV03mnBQuSUQTauNxdzXqw55SPDAHMBWg8QwyffzWwmyTAjl_R1QiFsTOv31U-HditYAeYMhLAP0mIs97T0inLsTUri1s2b1s7j6-I-NLXuT4VKiBO8lqVicTbQdQwiXehHQsi18e0H6T9XM0xBQK2t1dd4Jz2oLUGroSB3XuNbcaaxsffqRQgk43KIMEw9VsUA3FOTEpdM_xYIYEFM_-ApjDQJ15JyMRspfmu7HDdd-ybcXZ-C8WASJUPV8tFEfP4xgUcZeu-mExkryebbdMExq78yj7GlwWaeqBYfEXsvG6FIOqL9iFVcc3iIelrly0oM_xJmLOB_CCkGylDmHLxZZydf5v0RDh0KOXd7J-QYepcALXYoXmToj2JPrJPkaznH-2tI5xwp_M-mktoYNOhWrOepFjceXDSF5G5ILomGd9mHLnkq514ayZJCeE437I2geH4s6upgSAaqc07IVvdU3WjorhBw9fvefI5NnYwMiUSk_LC-JiQZDJ0bMLttvwKDx0TmOnMDJqxDr06_MWXn3i0zLQlAjItS2foksr6EMeK2InZznVZtgjcbD0exqZuzjCAqKz4PLQl62xyuJx8trJe0uHbQk-NweJthN5xcj41kJTcDuXbA1bA9HerCBWMX0RW3RXAKTvltGaqyMyUsJ_uOb40D0m56SqOmxnyA-mauiV2R11KC5Hh7YSS587NxkWUx2t7G9uio6WgWyx-HvhXYVi8wejyZw51z70YEa-aUDS2G_N0e6BV2B6dMGyd3lzTkMY6Ncs127IwQmXkV4VGL0stfchFf7rhXc1CZmFm7NZOMQPgb3_Heb39gZfMa4EYUVLuvfSpuM8wHZcQa57_uj6wmGp7NBBVpcgTee9ADvJXxjlmAj6gm9TiCl_GYbBLCdoTRAgsgsy1r4WijYr2sA_zch6EbDpTjQy6ER5GINZ4zi0VDy9avZcxhGmOEHYvKzcLB5PANOAW-8FLFHGgDWvf0cEMCD0UpSLAJVIX6rMjMJC3N_cgWmmv_zbllaW-vDVNFPyZOW32zU-l7r46_5IuF9Vc5choUlWOGLADSnXReau9WC4rfGF05CAvLe5Q0dex4K14SHJTEJuBWhGTaaXzONQSGtU9LJexoI1ijcnz9X59VvXxFX0oHmLvgTAim6nN96X5kllHFvrdDjMOiZKQTXtodUI-3ZcjfA5booJk7tnFeni0H2L1sqvpGy8JDlfl0fds8hST0vtXscfD5jDC-i6btLnRgpOpRDQMebCkqRlisZScBXb0nxoHK7CHtnQy4aCQq4oCBgMXdbwHOnbBygBSAg-HCpK53YoT-R5NUdESGmGCX5uJ0qlmGaXSshFbNW_NpQItJIrD7NW3VmqfWvSB1VL-nyVLOmc_wPmUhY7dSGArYKYQFKL4cBOSfHHHuftrRXy356_mTcDeFsHzqH3RXPaXhiad_lmQ9Bcw0OD_BotHvYfvVCaETpweH3eHl3RPBiUHlc5Da4nprHbXrvQL675qwVLiwLwOvPULU4VdGU-jIfSMkRUbJhSt349C1poj4aM-aD3s5iJy-3YDRYzmqMmFFr9CoKMah6hmn6n0oKSwg0YpLOc9JRDhBfp87_NNsWdRkpNw_DC7OaIF6VNxc6o2t9jExqmAiAbyRSkW2x-UiZl6kbB3uqffgAYWNylgJDZ-UPQNki30zURQFl1anKa8xhIGOgH7piVerG2LO8X7pFxa3DlYxFm37HC6irFtBwsFbvNGicua6MfUD3dV2MhE9x-sOlG9O08DKObUwBTpTzfAe-P_jGWHnyOsLXbaiV_cwxgWkEw9rKuFpI1SPuPrdO8_iSYdH36TqIREPLVbRcSJvHrsWP2Bf-Bb04SIonHV4Olu9KEYWVCOltRx7JFjp3eVQZLAGwjtxG_vDlublMpybM6TZdg1UYaCU4ZqLKss3iWO3wBNwC2usITNSjaiiLSH96fOHpAyXMhhodFDS9X-frLB46hilqE3PwoIyiR5R1dAdM7oiW
a5qD6KH_dISw5H-uO6ZrUFo6i14E4RcCtRBBKALvVnApLxA_lcpnFR9_TZkstK-6klIEiSttNhxhHhv36XJw_J6jUTHnxRBr4JyXLL3-NmDZy8mplsbS4OXl7gg0vuIOBBHarKFvCEdvZv8ikxbDeftTz2je9mrCNCAHKTeNQWKf7Q7HFfPcza_BwhSqrd64DndvGVkfLlYBrbVSZp5nxPF13qBWIw9bbXTU5z8Wna72Lh4HqL-cUDsKbKBpst1VuBgaA7Va', + provider_name='openai', + ), + next_part_kind='builtin-tool-call', + ), PartStartEvent( index=1, part=BuiltinToolCallPart( @@ -3848,6 +3962,7 @@ async def test_openai_responses_code_execution_return_image_stream(allow_model_r tool_call_id='ci_06c1a26fd89d07f20068dd937636948197b6c45865da36d8f7', provider_name='openai', ), + previous_part_kind='thinking', ), PartDeltaEvent( index=1, @@ -5134,16 +5249,26 @@ async def test_openai_responses_code_execution_return_image_stream(allow_model_r args_delta='"}', tool_call_id='ci_06c1a26fd89d07f20068dd937636948197b6c45865da36d8f7' ), ), + PartEndEvent( + index=1, + part=BuiltinToolCallPart( + tool_name='code_execution', + args="{\"container_id\":\"cntr_68dd936a4cfc81908bdd4f2a2f542b5c0a0e691ad2bfd833\",\"code\":\"import numpy as np\\r\\nimport matplotlib.pyplot as plt\\r\\n\\r\\n# Data\\r\\nx = np.linspace(-5, 5, 1001)\\r\\ny = x**2\\r\\n\\r\\n# Plot\\r\\nfig, ax = plt.subplots(figsize=(6, 4))\\r\\nax.plot(x, y, label='y = x^2', color='#1f77b4')\\r\\nxi = np.arange(-5, 6)\\r\\nyi = xi**2\\r\\nax.scatter(xi, yi, color='#d62728', s=30, zorder=3, label='integer points')\\r\\n\\r\\nax.set_xlabel('x')\\r\\nax.set_ylabel('y')\\r\\nax.set_title('Parabola y = x^2 for x in [-5, 5]')\\r\\nax.grid(True, alpha=0.3)\\r\\nax.set_xlim(-5, 5)\\r\\nax.set_ylim(0, 26)\\r\\nax.legend()\\r\\n\\r\\nplt.tight_layout()\\r\\n\\r\\n# Save image\\r\\nout_path = '/mnt/data/y_eq_x_squared_plot.png'\\r\\nfig.savefig(out_path, dpi=200)\\r\\n\\r\\nout_path\"}", + tool_call_id='ci_06c1a26fd89d07f20068dd937636948197b6c45865da36d8f7', + provider_name='openai', + ), + next_part_kind='file', + ), PartStartEvent( index=2, part=FilePart( content=BinaryImage( data=IsBytes(), media_type='image/png', - identifier='df0d78', ), id='ci_06c1a26fd89d07f20068dd937636948197b6c45865da36d8f7', ), + previous_part_kind='builtin-tool-call', ), FinalResultEvent(tool_name=None, tool_call_id=None), PartStartEvent( @@ -5155,9 +5280,12 @@ async def test_openai_responses_code_execution_return_image_stream(allow_model_r timestamp=IsDatetime(), provider_name='openai', ), + previous_part_kind='file', ), PartStartEvent( - index=4, part=TextPart(content='Here', id='msg_06c1a26fd89d07f20068dd937ecbd48197bd91dc501bd4a4d4') + index=4, + part=TextPart(content='Here', id='msg_06c1a26fd89d07f20068dd937ecbd48197bd91dc501bd4a4d4'), + previous_part_kind='builtin-tool-return', ), PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=IsStr())), PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' the')), @@ -5198,6 +5326,16 @@ async def test_openai_responses_code_execution_return_image_stream(allow_model_r PartDeltaEvent(index=4, delta=TextPartDelta(content_delta='_plot')), PartDeltaEvent(index=4, delta=TextPartDelta(content_delta='.png')), PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=')')), + PartEndEvent( + index=4, + part=TextPart( + content="""\ +Here's the chart of y = x^2 for x from -5 to 5. 
\n\ +Download the image: [Download the chart](sandbox:/mnt/data/y_eq_x_squared_plot.png)\ +""", + id='msg_06c1a26fd89d07f20068dd937ecbd48197bd91dc501bd4a4d4', + ), + ), BuiltinToolCallEvent( # pyright: ignore[reportDeprecated] part=BuiltinToolCallPart( tool_name='code_execution', @@ -5465,6 +5603,16 @@ async def test_openai_responses_image_generation_stream(allow_model_requests: No provider_name='openai', ), ), + PartEndEvent( + index=0, + part=ThinkingPart( + content='', + id='rs_00d13c4dbac420df0068dd91a321d8819faab4a11031f79355', + signature='gAAAAABo3ZGveBi351h31WQM2aG_dbN1N74J4X3Lf1SbUrUhElKaT5odbh4N1liwG5Hjip3Ci1illQSsd4n035fOOIV3sZzAMvV3ypncux4WDBpQ9NbeuFMNSyNOPTxJLg4j66UbW2ptw3u1VP3j0vCHvV5MoDhErheYZsWKhYVtkUNkKSVLWkS_yK0pOltSwHfRy3tbrkxnqD99BuVbCjV1nWSzTAmJLicBtjDaH0NjjD_vMyFiUe83-eZRs-Q_6njWasZNCmTcOq4zlpFoJ_AGeaTbaLIC1OwDV3sNT7pXvo7YI7jmsYEhHAKa8BjZmMjzBPLDRu9TMWtXMnO6nyVYqMxsyQPdNmP-BDNfr_8Rmo_uI5egfE0qRKgAc5MrOGd1fSgtUqeKah3kbLMyCD0_-jWmVInb2Y4LfPcX0iOeTGum2IRKwy6G1tdY8C_gEOnIGAUKOT2sEF98Ythy9auV27BCbcjfCBJlH0rOir_OiQjUIXZqqY0My1kVENBbXj2-VIFIqG-CcxCldFG2Mq0NGo86h1igQIFmLItXLPTS_QnaWADSD9La8JWpg8CuWg-yB3UqYaG5_f2Cl5jRDQdIYavTBvD-lp54y8aEnGA6HksQaCtB7jHX0ZM0pqYu7LvLjeHxAJWsnF4NN0HPz3d307muS0TtrXUxqeZFdTNoqdBOxfuJ2-Ym_LmeubnEzh5wHAguJKZ6S_jcEFM3Jdb1R8Gk9dv2y7BUz1hKSFF7peXc9Ear00JjPHAlR1x0ECqONTSD9Kda80pQlDSh05ITKQ2viOy1jmCsWeSsll6EJPcGEMfAcZ1UMgHYX3sBa5Oz3DS28Ur8yk-I62nUWbcj8n7IsZmZL0CWc2qgCtj2TzFZaVEx8iumUKpU0hmML_kF3JPH2Ie8nB1ko18HZV7A_-n9XGDZzwsfPD9pu4P-fb68KNqU_qQBfe8msYvuFuljC-0kyGrQIQH2X6stwEkyme1TuJfxIZ2t2q2l02gEUVN8LN8qX98hp7DBxXepgdKvqWVOM7icvtW0mPACf1b4izSDqEgqhqx4tNsjixoHcM9M8awzss_y2_jZ3V7gY3pbPgwWKHyyTUzA1ogPfMkjxxUrVLNyHRPmnklUeQdV-vytip3BzNOq4yTUz7jVFrudSDcr_KM6Ie806OkgKF81l-W-40qzx6bGg2DAcZf5hfbTzk-ho51sRBwDp7RJrx2SXSBGXA3ArYzgq-2iat368uDLiQhhbunzKm3_6CFWggpbUO8Kp3FP7-k4Z4CRbHkg8WVT0HhH6w0ysoi-P6_ZH-IKI7XG-GT1kq4yje3qlfRUT0-0_LPsr8LyM6AbOYj4NiWHP3XJ2qa978VVOLJQtY-qG3VX9kMq13C-uU8PDOsOEidYZl2gqFtXhxkXivwACbLMnvzJayXJRev1QkoNxIg1Stl9II4D_ndHfNYeAvMvOnSNafoCOmMzCBp1klovMP_31YvR2B3af1TYanbbHoJt2UR1GRR_Aqr7G6RukNkXAl63LPlDQSYm5BB6zD9iNX9hJ8MSZ1IFIcbM0L32tAWsyKKAEWyr9MGckicDa_hES9adeXuunqqKhUctd94J1dsXLiWCGIet57YIUj5WoF_FQ6D6FY9rB00KhCDlHr1Ot5NCMmn6y-u6TYJUhpl7elEErYGXaGPhUtKUSbAIzOXzBIAKb_MiMVvo6a2VYwsxwZV14X8TYkKw_Y7w5Wt6JA_wTOoen7Cc0eFyc7FZA4NjIMkIUOXymtjzOSkFJz1eMBqp9diET9VYKGsn6GxviD8jWM6-RCWcFurewcn4d6TeTclAt7G_LZrJ9bZtMVlieSJT-3vWr9qVt8OGBUJEJRVOzpr5FBnEceqK8s7D_s8EZwTaGwyAuuaZThy2PNWJhpE4c0UeKgh0ec36Q0ZRN8DF8Khne8Epe1rehOrsfeyFFRuQ5CDGdHimhtOAIbDyg_5PPCp8fgiU4R9xqtizCVTR4ej1VPIClmebUErOl3TN-IyoSc8rv--Vi0ATn69Q8tSPweI07KVEzRJpDtxbnGcbbilPN5_liJcQrLMf5ikaWBoq42s6FXDjr-ASD0h7IlNGHxnN8q__iO6jA9-2PTywI2bbBJsie2L7OaGGehO5zv_rWv_6rbk4HLVcQafi2nC5w1GNeDaXWSz0RjiTfXxjBh98302CQxiM-e1Lvt1Pe6Mqv-pAgXlFrSHDrqw8s4NSS2YpLDTUIOcOx8UutAJOgVpyZm2sQcvtOsGsSUBIyNI_4huseO9EuXF4TUQ-yzQRsimtXaDa6VId0y6qG7dWxTP30SWZkft2iW2_Nz_56MiioY7xACIjzo4s2aGLM352ufd4nEeU-K3UQd5hvhdIWUZn6KTyCUnqgChyIlB0Sto24VwIIj74DYisSiu-d8EYsVr5gZaQ_NaW4T7M_ZB0TJ0ptlU0X_h8uLu0ro2Vc_s7D8nkIKSzhGuuHO4lOjvZ-qLsPxG-pBa6jGvv1hOyng_x99icZ0oM7G7FmDl7SjP1pdLiZAA1hMPPU9b8Uk0j8hb4AFtfoXSfwZBQ8sYlT0_QmcSBgGxfZKXv4RcFSnAEGDNUn1V-P2uNoj06MOwzroZVjTuzVy284Hqe-08Gtt_bvZDmfsHonbEw5DrthsP9SzoC62hc6pcVs_ApQE5LwHgODxT-oejDppixNCr--hJ1IYVj4rRsHsmBv33H5kJP0rwmkdJ-I8rLj66jLf_Qu_OEh02dJqf3XSYsG7io3XCVjA-d-jUhLJSqcPS_3y5thCtWUcG_ucT64ADWdtOH0EkmzN0o7HmOJ48pkGhttNScjXlQUmOdkeBV55dTdXAzAyjKZsxP5ZK1F9m_1TMWDJX6nT4rRFrzv3PQByEyc3Rje7ZUdGa3Qky1-T5uhu1dk4ty_I92CbMDCM-jGZorhg5MX10B_zZ03DFrYTrdcDILS5i_BSOlGT8Du4aSMvwvUC5FLOYQFQdM_ZNIRIGhOSWsvOb
mVYh0j70YKqitDudSIm1V_Yw6qsW3ZPpLDgBju176FVDJJBn1Wx-DeQ6FrYtOjFHctqJN-2mjWQi_7lAzKbTLsB-9c4iZ4_efWXsHncmAeqvt0gvglQHDhY6cM4yZurpHkrE-lb5-vDLYamv-Du7Cs0pAaynEcbT-f3_F_WOgoXFy2WYOTt2KkSQZnW6ZPHzl3gfVOsHfAkWalMJ6vXa8FuoYfMmgZJpqtee5J6AxJaUea8xQ0VlVwuXmcK8EOPcwF1pWg8w5_SweA9jZ0fh5PaFW-BNlzGDmhRR-8Up0TCUTsdnZN7bABJBlxeQ5GEcwOjgT0UBF0_zXZo5fbk34TSDoEgfdQydVLlOGda8McmvsnNzDSq77a-Vj_8BeVacM1PPG9rp9F_-PQgpM7_7YsNoWMXha4b4_H58q7vPOvMK1zxRzNrq-sm9QhQ1LkzPgt158Gf2IPq8D3rh9YCmJvg1Ju7roShfnVdV_UO73MLnDhoqaUZEdq10723KFpescGNTRpsuWDE8qBiu58rbOzjmpy7nJfuOtfrv_qSjaFRTkShLV5PW2neHjNLlvQlWy-q3yjJXq-2zM-iRehbFIxI3ATcCq-SgThDeQ1qnTg9G0Jtsx3qBNZtCIi8x1oVsyavVJcqvo36UC-IXaXA1vpjuwER1dcZ999sP9MUnXcMTO9ba-GM3dslKvDtuZ5b8x_u7eCJfawzUPItU8iwISYKDWW8wTNOS8Iukujq3-IDOEFqmCOAlkdv6-AWNc7ZVOmyvvgDCpSN5nSkjpJhWI5kP13FJtJNHkNtP4RQkhRhwRh2ei308TvNgT8YSaa4E_BJ-QWQ_9PMNBsfAYSGIl1VaQinZF0qdvNhRIlonuZMV58aEEzsLk6hS7CGlbFwBMwAzZ5Q6PANavXDFiPGeIadxTE4r-iZLQ3CdvJWUiUv3AL3lzYraXX8BGDpEVAAIqoRYZEpR2QgIUui5b3gkCSlG-YdKqJ4HZ_6VCFqpywKsgPCX_c8pVD_6eJhgt9o5Vc0ARsfc1IG_XC-nFWOV4caiARMobX0y4qXDFulrAZInqBZ9Pq5MmbbhBmLLdT-y5fdPpB5UxsIHGqb3pip4ZaKS80IqAt8t7HPXSNza7zb1TwrjNlYcO_KhbLQBB0hMmKULnEJPWLDPKf_9NeAsN3U9AWyj1WpAKjSfpjjbXn37qpTMdgd-Js9-_FDaXDFH_aOYXI0GY1AMpvSSQzx_f6Erq4qyS5TAuAtXbvUm-iVJcHaZTIy7buGJqOUBb7BC1L33KpeQEZuCg6QyAdzn4bZUKvwjXuxNykpZA9LZWaFVdx2QfwCV_yqN2TTvLFmSj5SjldGwbBndjmtHs5kkDcV2mDlm3huEfbEJqf9sdxXaYhIfmUIkFDtYTpE1C0qSol-A6Yagtx_aNfWTL7F2lFI0OusuBwnDfkNow5mPsKqGMIqx5eJA2InLcpV7GTyCxT3BjVsggtSb1-4Zz2TYzBz7iYe8NPe-rxF6XWyHf1N0nyyCY8Y0_CqJS9OPFpsd53a6qY7xlhh1kwBOM8nJWb3OEJjMVspTUfwF90O8D9fDNS293vnG8SArU6d-1L4u0LalQbKXDRzcze8W8R3KWv1N0LXrWwfArPrO1WnpdEkJnbFfc1eUHqThJ39c7RAInK66VtNe5xtUVzuNZDfPKsIfD4Ms5xqMKEOWQt8RIciRapDo9aoWv5l-YCkuTrWp4pWP4b7eu9fizM5ZuzmRCj3Ecc7ZT2uvxe9sP045dqTH6lSeBNW1eW-pmb3oQ-g_mYL6SU60NmDp_mMa5HFuTdGSAAI9jP11k8KQUX6oGGGhx24w9seLaY98N_0v-cWsiNMQSnwR_SsGs6tPYqltHguz_azu0qsQuuXTQK9B06oEDR8tyb6CTqfX8pcumXIXC_DMFYfQ3pBK5R37G_oXTtX9srpw9vSulg4z52GhuvfT09ukMmdNGoIAS0551PjpZRz7-sI_nNTJQKpGgbhiH_zvA3U5hxue7fpAnQYXd6DYxR_y7QXSleoqQhZ2iVQW90Lwqp5MIDJaAx14bn27WBmQSLcuMpgnwpothMYFMmmNMdWYnGcQ0MIjhlOoykau7DRBFsLOKZ88y_9Pke7k9ISeTmArge2IdC1Ma7-GiJ90YVwwXDBSs9ssae8F1kWgyYV9rFxNbpF4uiWdQkVvASmW-QUNWzsHAtfuvrt-TR1SQ1Z-mMP_zF8mVjC14pAP5Z4pYkolLBinwy9V7DjcN0kymIM6fwpLt2h0LgfC1eLK3sutJcJJP9fFd8tTLIskEvUly-TeEct-syQebPxjxpxae7UPmqeDrOtvPi8-JWiHeIoJrUQnnw3ik2ULXvX1VFSnzDcKBAs_xZzdtjRlCGZWD-hgPPRTmG-YWeyovXZDp5Wv06AEL-hJlk4z1ZEt3yA0H6Ni7zE8jQ0_c6zJCWk6YtPhFk0ARZfjjdYSOFwJvx6rIrteH39b5W1yE8X0bm_cdeA1Q6TluBBkwv-9liCSOGT4ctzwaK3-cb0b4ko_apEEtpYkevu2ulqZoFi1S9g1joFZ5ooBLpxYGntuXXbALvq-zZniOJOtTdbpgsFQPS6Ae9kWXddWChNeyv_CEdkwXCkM__ua4GiH_Ce9WlqCzDCEoCYFpr7PyJP3gNg9Q_vkiLQa9V9bc3VtA5z4cjWB3rU5X9fLDZ0xzwO9krtGmsK9r2gkENMMu5Yy5BxGo94n6wRef0eMY6_GTzi7QsRuQSqNQLa98UdN4QGDa2c_-uDpENkMya7_hgM1z_RyUGtqVgpCHrld-jfSIGLPUI6kKWUDZ3USldXuep47KNuO3-BEOP2QEAKgHVlS9g2viG5r6wdeMl7Njs6iMsjs1KnHaqHlfZww8egAuOxAJjFxUYPy5djKn8n5lgPdk9ISeMZfxW5LcP80kPQekLohUbHcJ_JC2rTI76ckZvwuEGDUQTwGHR0B7YonoiTVzrhOWeqndwk3EBp0cr2mIc8vsWANK1WechMxunFVn7RuwV926PZhqFrnoep4ytDP8h4nJ4Z5zr9cXQCDv624H3JdPUYBBoxJ_7-QDM0fpuFXuRArtuezy6PV0a21CHLFtNq3DCWp56o4xgGm8x_8r2NtKTXxSwUY4_5cBHWd80aXF84Z42ldtGkAXayyFsv5er4VvWTzjwfEc39qkUGDQ5feVJb3YhfsT2qFyUnhb167hdJIPkI8rud4vLu3e9eu6xNLcw-LEjHptgEtfxOqiAPrBZLfWgkhfpU-encYtxg9cing8f_bAkf4-sP1tEaczGkdkMD-0orT-aN46m-8Dyn82fQgQdvov6n7KIuQipYomIQ3mJh5mSl2BAGMFlvLY297s3dCkBD3pGbRb6AAqu-5l8yCCVtg7FvzUWoQ3gL8FcP2cK_fYoJf7Z2YbgTNI_5SHiaAb-qxWuIP8ICEsxCHJJWIOfL6UnBXXctp8B_TiSbOFfGFrQPTJDUvKPyN9_mzO4mzXlOXLXu2VRG9J4NMSYTJT6-Q269vzse4SGqnULEUnpm2zQz9b9W
97ahoMFYfV8xaVeFZK5ZU8LpyaN6v4mOJuuHm_vZuVircckh7UVVEK67jvRMi5JcKv-hDQhy1EmSRNCiZ4WHmGi7wcLEcJaUVFBRi84nU50Gjjs2kTslgVAnR9MyGqL2N4xvTAjoi4o-SCvDIvgWDnRCHXSD6ghfQagEUVGldGzk3EKEQF7VO5KTdheZ9FDiSXaaJJKit9NnohzmxM651VFC-AW0Ghklj52C5yvHScmJrIpMv4IjFAKj7erMRDjvYJ0v0PZDE0guTvoUFHrZd6umnB68QFINJogoy5GeT1hUs87OjZVQzPrxZqO6rzJK9m3meI2dFvdbgyAdbUx7fJRAu4yf2LC4dh0QaS5z24wuND3y-jHEsOvjUyIklRGeoH8EdGTBI8ZJIYKXJ8Ow797VYFI3FBzKNiPxJH-VFjpw0aqTLXVrAvCxwVK3awVAoWpwWMHN5yT57TOn3kpAbnBdAXG80kwTuOAAagePIVGrzENRGWVPGhvBFi55TDrQFXyymCP6c5q01KY04VU0udmOSe2Bwd-jMk2pjT3CLHb95G4PUVgy-l-occtk0mNRX4k3P9ETjeyOuA05c2rzMDthoHcFUnMqofePnvVK3eliJjh1uoNOrbx1rJuGsDZFEGxUfkjc5z5BW9zVw5YS7mlXjACPSDgMgreTTygsKTL0xhvSPsmu18K-cGz19v8ho7ix5B1WmPDsL75qXEqKsiO0ry1Ka23z8c4omngareIMqyM6OANeslUhQ7M_4o-OSaHUKQ3kAmJ3c_iPpedZUCo8GALcrgifqgd_ckfBRBpYssZhFQkxPNKJZhncuoRkdjxeAzANinaBUCxZ-Bg5DRQI6GCHgzUiUFMIWEqi21FF5UEiq0G2PM7PTE-RRO7wu8qg==', + provider_name='openai', + ), + next_part_kind='builtin-tool-call', + ), PartStartEvent( index=1, part=BuiltinToolCallPart( @@ -5472,6 +5620,16 @@ async def test_openai_responses_image_generation_stream(allow_model_requests: No tool_call_id='ig_00d13c4dbac420df0068dd91af3070819f86da82a11b9239c2', provider_name='openai', ), + previous_part_kind='thinking', + ), + PartEndEvent( + index=1, + part=BuiltinToolCallPart( + tool_name='image_generation', + tool_call_id='ig_00d13c4dbac420df0068dd91af3070819f86da82a11b9239c2', + provider_name='openai', + ), + next_part_kind='file', ), PartStartEvent( index=2, @@ -5479,10 +5637,10 @@ async def test_openai_responses_image_generation_stream(allow_model_requests: No content=BinaryImage( data=IsBytes(), media_type='image/png', - identifier='69eaa4', ), id='ig_00d13c4dbac420df0068dd91af3070819f86da82a11b9239c2', ), + previous_part_kind='builtin-tool-call', ), FinalResultEvent(tool_name=None, tool_call_id=None), PartStartEvent( @@ -5495,6 +5653,7 @@ async def test_openai_responses_image_generation_stream(allow_model_requests: No ), id='ig_00d13c4dbac420df0068dd91af3070819f86da82a11b9239c2', ), + previous_part_kind='file', ), PartStartEvent( index=3, @@ -5511,6 +5670,7 @@ async def test_openai_responses_image_generation_stream(allow_model_requests: No timestamp=IsDatetime(), provider_name='openai', ), + previous_part_kind='file', ), BuiltinToolCallEvent( # pyright: ignore[reportDeprecated] part=BuiltinToolCallPart( diff --git a/tests/test_agent.py b/tests/test_agent.py index b8ef102025..84ad79929e 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -3616,6 +3616,7 @@ def get_image() -> BinaryContent: BinaryContent( data=b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x02\x00\x00\x00\x90wS\xde\x00\x00\x00\x0cIDATx\x9cc```\x00\x00\x00\x04\x00\x01\xf6\x178\x00\x00\x00\x00IEND\xaeB`\x82', media_type='image/png', + _identifier='image_id_1', identifier='image_id_1', ), ], @@ -3660,13 +3661,15 @@ def get_files(): UserPromptPart( content=[ 'This is file img_001:', - ImageUrl(url='https://example.com/image.jpg', identifier='img_001'), + ImageUrl(url='https://example.com/image.jpg', _identifier='img_001', identifier='img_001'), 'This is file vid_002:', - VideoUrl(url='https://example.com/video.mp4', identifier='vid_002'), + VideoUrl(url='https://example.com/video.mp4', _identifier='vid_002', identifier='vid_002'), 'This is file aud_003:', - AudioUrl(url='https://example.com/audio.mp3', identifier='aud_003'), + AudioUrl(url='https://example.com/audio.mp3', _identifier='aud_003', identifier='aud_003'), 'This is file doc_004:', - 
DocumentUrl(url='https://example.com/document.pdf', identifier='doc_004'), + DocumentUrl( + url='https://example.com/document.pdf', _identifier='doc_004', identifier='doc_004' + ), ], timestamp=IsNow(tz=timezone.utc), ), diff --git a/tests/test_streaming.py b/tests/test_streaming.py index 537b76e03d..f2e97c3a96 100644 --- a/tests/test_streaming.py +++ b/tests/test_streaming.py @@ -27,6 +27,7 @@ ModelRequest, ModelResponse, PartDeltaEvent, + PartEndEvent, PartStartEvent, RetryPromptPart, RunContext, @@ -180,7 +181,7 @@ async def text_stream(_messages: list[ModelMessage], agent_info: AgentInfo) -> A response_data = await result.validate_response_output(structured_response, allow_partial=not last) chunks.append(response_data) - assert chunks == snapshot([[1], [1, 2, 3, 4], [1, 2, 3, 4]]) + assert chunks == snapshot([[1], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) async with agent.run_stream('Hello') as result: with pytest.raises(UserError, match=r'stream_text\(\) can only be used with text responses'): @@ -243,6 +244,7 @@ def upcase(text: str) -> str: 'THE CAT SAT ON THE ', 'THE CAT SAT ON THE MAT.', 'THE CAT SAT ON THE MAT.', + 'THE CAT SAT ON THE MAT.', ] ) @@ -298,6 +300,13 @@ def upcase(text: str) -> str: timestamp=IsNow(tz=timezone.utc), provider_name='test', ), + ModelResponse( + parts=[TextPart(content='The cat sat on the mat.')], + usage=RequestUsage(input_tokens=51, output_tokens=7), + model_name='test', + timestamp=IsDatetime(), + provider_name='test', + ), ] ) @@ -1126,16 +1135,19 @@ def output_validator_simple(data: str) -> str: assert run.next_node == End(data=FinalResult(output='The bat sat on the mat.', tool_name=None, tool_call_id=None)) assert run.usage() == stream_usage == RunUsage(requests=1, input_tokens=51, output_tokens=7) - assert messages == [ - '', - 'The ', - 'The cat ', - 'The bat sat ', - 'The bat sat on ', - 'The bat sat on the ', - 'The bat sat on the mat.', - 'The bat sat on the mat.', - ] + assert messages == snapshot( + [ + '', + 'The ', + 'The cat ', + 'The bat sat ', + 'The bat sat on ', + 'The bat sat on the ', + 'The bat sat on the mat.', + 'The bat sat on the mat.', + 'The bat sat on the mat.', + ] + ) async def test_iter_stream_responses(): @@ -1176,6 +1188,7 @@ def output_validator_simple(data: str) -> str: 'The cat sat on ', 'The cat sat on the ', 'The cat sat on the mat.', + 'The cat sat on the mat.', ] ] @@ -1202,7 +1215,9 @@ def output_validator(data: OutputType | NotOutputType) -> OutputType | NotOutput async with node.stream(run.ctx) as stream: async for output in stream.stream_output(debounce_by=None): outputs.append(output) - assert outputs == [OutputType(value='a (validated)'), OutputType(value='a (validated)')] + assert outputs == snapshot( + [OutputType(value='a (validated)'), OutputType(value='a (validated)'), OutputType(value='a (validated)')] + ) async def test_unknown_tool_call_events(): @@ -1341,6 +1356,7 @@ class CityLocation(BaseModel): CityLocation(city='Mexico City'), CityLocation(city='Mexico City', country='Mexico'), CityLocation(city='Mexico City', country='Mexico'), + CityLocation(city='Mexico City', country='Mexico'), ] ) assert result.is_complete @@ -1366,6 +1382,7 @@ class CityLocation(BaseModel): CityLocation(city='Mexico City'), CityLocation(city='Mexico City', country='Mexico'), CityLocation(city='Mexico City', country='Mexico'), + CityLocation(city='Mexico City', country='Mexico'), ] ) @@ -1402,6 +1419,7 @@ async def text_stream(_messages: list[ModelMessage], agent_info: AgentInfo) -> A CityLocation(city='Mexico City'), 
CityLocation(city='Mexico City', country='Mexico'), CityLocation(city='Mexico City', country='Mexico'), + CityLocation(city='Mexico City', country='Mexico'), ] ) @@ -1575,9 +1593,23 @@ def my_other_tool(x: int) -> int: part=ToolCallPart(tool_name='my_tool', args={'x': 0}, tool_call_id=IsStr()), ), FinalResultEvent(tool_name=None, tool_call_id=None), + PartEndEvent( + index=0, + part=ToolCallPart(tool_name='my_tool', args={'x': 0}, tool_call_id='pyd_ai_tool_call_id__my_tool'), + next_part_kind='tool-call', + ), PartStartEvent( index=1, - part=ToolCallPart(tool_name='my_other_tool', args={'x': 0}, tool_call_id=IsStr()), + part=ToolCallPart( + tool_name='my_other_tool', args={'x': 0}, tool_call_id='pyd_ai_tool_call_id__my_other_tool' + ), + previous_part_kind='tool-call', + ), + PartEndEvent( + index=1, + part=ToolCallPart( + tool_name='my_other_tool', args={'x': 0}, tool_call_id='pyd_ai_tool_call_id__my_other_tool' + ), ), FunctionToolCallEvent(part=ToolCallPart(tool_name='my_tool', args={'x': 0}, tool_call_id=IsStr())), FunctionToolCallEvent(part=ToolCallPart(tool_name='my_other_tool', args={'x': 0}, tool_call_id=IsStr())), @@ -1615,9 +1647,23 @@ def my_other_tool(x: int) -> int: index=0, part=ToolCallPart(tool_name='my_tool', args={'x': 0}, tool_call_id=IsStr()), ), + PartEndEvent( + index=0, + part=ToolCallPart(tool_name='my_tool', args={'x': 0}, tool_call_id='pyd_ai_tool_call_id__my_tool'), + next_part_kind='tool-call', + ), PartStartEvent( index=1, - part=ToolCallPart(tool_name='my_other_tool', args={'x': 0}, tool_call_id=IsStr()), + part=ToolCallPart( + tool_name='my_other_tool', args={'x': 0}, tool_call_id='pyd_ai_tool_call_id__my_other_tool' + ), + previous_part_kind='tool-call', + ), + PartEndEvent( + index=1, + part=ToolCallPart( + tool_name='my_other_tool', args={'x': 0}, tool_call_id='pyd_ai_tool_call_id__my_other_tool' + ), ), FunctionToolCallEvent(part=ToolCallPart(tool_name='my_tool', args={'x': 0}, tool_call_id=IsStr())), FunctionToolCallEvent(part=ToolCallPart(tool_name='my_other_tool', args={'x': 0}, tool_call_id=IsStr())), @@ -1657,6 +1703,10 @@ async def event_stream_handler(ctx: RunContext[None], stream: AsyncIterable[Agen index=0, part=ToolCallPart(tool_name='ret_a', args={'x': 'a'}, tool_call_id=IsStr()), ), + PartEndEvent( + index=0, + part=ToolCallPart(tool_name='ret_a', args={'x': 'a'}, tool_call_id='pyd_ai_tool_call_id__ret_a'), + ), FunctionToolCallEvent(part=ToolCallPart(tool_name='ret_a', args={'x': 'a'}, tool_call_id=IsStr())), FunctionToolResultEvent( result=ToolReturnPart( @@ -1670,6 +1720,7 @@ async def event_stream_handler(ctx: RunContext[None], stream: AsyncIterable[Agen FinalResultEvent(tool_name=None, tool_call_id=None), PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='{"ret_a":')), PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='"a-apple"}')), + PartEndEvent(index=0, part=TextPart(content='{"ret_a":"a-apple"}')), ] ) @@ -1698,6 +1749,10 @@ async def event_stream_handler(ctx: RunContext[None], stream: AsyncIterable[Agen index=0, part=ToolCallPart(tool_name='ret_a', args={'x': 'a'}, tool_call_id=IsStr()), ), + PartEndEvent( + index=0, + part=ToolCallPart(tool_name='ret_a', args={'x': 'a'}, tool_call_id='pyd_ai_tool_call_id__ret_a'), + ), FunctionToolCallEvent(part=ToolCallPart(tool_name='ret_a', args={'x': 'a'}, tool_call_id=IsStr())), FunctionToolResultEvent( result=ToolReturnPart( @@ -1711,6 +1766,7 @@ async def event_stream_handler(ctx: RunContext[None], stream: AsyncIterable[Agen FinalResultEvent(tool_name=None, 
tool_call_id=None), PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='{"ret_a":')), PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='"a-apple"}')), + PartEndEvent(index=0, part=TextPart(content='{"ret_a":"a-apple"}')), ] ) @@ -1733,7 +1789,7 @@ async def event_stream_handler(ctx: RunContext[None], stream: AsyncIterable[Agen async with test_agent.run_stream('Hello', event_stream_handler=event_stream_handler) as result: assert [c async for c in result.stream_output(debounce_by=None)] == snapshot( - ['{"ret_a":', '{"ret_a":"a-apple"}', '{"ret_a":"a-apple"}'] + ['{"ret_a":', '{"ret_a":"a-apple"}', '{"ret_a":"a-apple"}', '{"ret_a":"a-apple"}'] ) assert events == snapshot( @@ -1742,6 +1798,10 @@ async def event_stream_handler(ctx: RunContext[None], stream: AsyncIterable[Agen index=0, part=ToolCallPart(tool_name='ret_a', args={'x': 'a'}, tool_call_id=IsStr()), ), + PartEndEvent( + index=0, + part=ToolCallPart(tool_name='ret_a', args={'x': 'a'}, tool_call_id='pyd_ai_tool_call_id__ret_a'), + ), FunctionToolCallEvent(part=ToolCallPart(tool_name='ret_a', args={'x': 'a'}, tool_call_id=IsStr())), FunctionToolResultEvent( result=ToolReturnPart( @@ -1781,6 +1841,10 @@ async def event_stream_handler(ctx: RunContext[None], stream: AsyncIterable[Agen index=0, part=ToolCallPart(tool_name='get_image', args={}, tool_call_id=IsStr()), ), + PartEndEvent( + index=0, + part=ToolCallPart(tool_name='get_image', args={}, tool_call_id='pyd_ai_tool_call_id__get_image'), + ), FunctionToolCallEvent(part=ToolCallPart(tool_name='get_image', args={}, tool_call_id=IsStr())), FunctionToolResultEvent( result=ToolReturnPart( @@ -1802,6 +1866,7 @@ async def event_stream_handler(ctx: RunContext[None], stream: AsyncIterable[Agen PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='{"get_image":"See ')), PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='file ')), PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='bd38f5"}')), + PartEndEvent(index=0, part=TextPart(content='{"get_image":"See file bd38f5"}')), ] ) @@ -1824,6 +1889,10 @@ async def ret_a(x: str) -> str: index=0, part=ToolCallPart(tool_name='ret_a', args={'x': 'a'}, tool_call_id=IsStr()), ), + PartEndEvent( + index=0, + part=ToolCallPart(tool_name='ret_a', args={'x': 'a'}, tool_call_id='pyd_ai_tool_call_id__ret_a'), + ), FunctionToolCallEvent(part=ToolCallPart(tool_name='ret_a', args={'x': 'a'}, tool_call_id=IsStr())), FunctionToolResultEvent( result=ToolReturnPart( @@ -1837,6 +1906,7 @@ async def ret_a(x: str) -> str: FinalResultEvent(tool_name=None, tool_call_id=None), PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='{"ret_a":')), PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='"a-apple"}')), + PartEndEvent(index=0, part=TextPart(content='{"ret_a":"a-apple"}')), AgentRunResultEvent(result=AgentRunResult(output='{"ret_a":"a-apple"}')), ] ) From f97d6c8557b00379862eb733451de93202c991f0 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Wed, 15 Oct 2025 13:08:14 +0000 Subject: [PATCH 16/46] update snapshots --- docs/agents.md | 3 ++ docs/output.md | 2 + .../pydantic_ai/agent/abstract.py | 3 ++ pydantic_ai_slim/pydantic_ai/direct.py | 12 +++++ .../pydantic_ai/durable_exec/dbos/_agent.py | 3 ++ .../durable_exec/prefect/_agent.py | 3 ++ .../durable_exec/temporal/_agent.py | 3 ++ tests/models/test_openai_responses.py | 11 +---- tests/test_dbos.py | 47 ++++++++++++++++-- tests/test_direct.py | 3 ++ tests/test_prefect.py | 20 ++++++-- tests/test_temporal.py | 48 +++++++++++++++++-- 12 files changed, 137 insertions(+), 
21 deletions(-) diff --git a/docs/agents.md b/docs/agents.md index cd917feafc..1a3ae11274 100644 --- a/docs/agents.md +++ b/docs/agents.md @@ -103,6 +103,9 @@ async def main(): FinalResultEvent(tool_name=None, tool_call_id=None), PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='Mexico is Mexico ')), PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='City.')), + PartEndEvent( + index=0, part=TextPart(content='The capital of Mexico is Mexico City.') + ), AgentRunResultEvent( result=AgentRunResult(output='The capital of Mexico is Mexico City.') ), diff --git a/docs/output.md b/docs/output.md index c8dc1741c3..bf31effac2 100644 --- a/docs/output.md +++ b/docs/output.md @@ -615,6 +615,7 @@ async def main(): #> {'name': 'Ben', 'dob': date(1990, 1, 28), 'bio': 'Likes the chain the dog and the pyr'} #> {'name': 'Ben', 'dob': date(1990, 1, 28), 'bio': 'Likes the chain the dog and the pyramid'} #> {'name': 'Ben', 'dob': date(1990, 1, 28), 'bio': 'Likes the chain the dog and the pyramid'} + #> {'name': 'Ben', 'dob': date(1990, 1, 28), 'bio': 'Likes the chain the dog and the pyramid'} ``` _(This example is complete, it can be run "as is" — you'll need to add `asyncio.run(main())` to run `main`)_ @@ -662,6 +663,7 @@ async def main(): #> {'name': 'Ben', 'dob': date(1990, 1, 28), 'bio': 'Likes the chain the dog and the pyr'} #> {'name': 'Ben', 'dob': date(1990, 1, 28), 'bio': 'Likes the chain the dog and the pyramid'} #> {'name': 'Ben', 'dob': date(1990, 1, 28), 'bio': 'Likes the chain the dog and the pyramid'} + #> {'name': 'Ben', 'dob': date(1990, 1, 28), 'bio': 'Likes the chain the dog and the pyramid'} ``` 1. [`stream_responses`][pydantic_ai.result.StreamedRunResult.stream_responses] streams the data as [`ModelResponse`][pydantic_ai.messages.ModelResponse] objects, thus iteration can't fail with a `ValidationError`. diff --git a/pydantic_ai_slim/pydantic_ai/agent/abstract.py b/pydantic_ai_slim/pydantic_ai/agent/abstract.py index 72be9a4f7a..454d7580ed 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/abstract.py +++ b/pydantic_ai_slim/pydantic_ai/agent/abstract.py @@ -642,6 +642,9 @@ async def main(): PartStartEvent(index=0, part=TextPart(content='The capital of ')), FinalResultEvent(tool_name=None, tool_call_id=None), PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='France is Paris. ')), + PartEndEvent( + index=0, part=TextPart(content='The capital of France is Paris. ') + ), AgentRunResultEvent( result=AgentRunResult(output='The capital of France is Paris. ') ), diff --git a/pydantic_ai_slim/pydantic_ai/direct.py b/pydantic_ai_slim/pydantic_ai/direct.py index 137f39fa3a..b66a0ba1ae 100644 --- a/pydantic_ai_slim/pydantic_ai/direct.py +++ b/pydantic_ai_slim/pydantic_ai/direct.py @@ -172,6 +172,12 @@ async def main(): index=0, delta=TextPartDelta(content_delta='a German-born theoretical ') ), PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='physicist.')), + PartEndEvent( + index=0, + part=TextPart( + content='Albert Einstein was a German-born theoretical physicist.' + ), + ), ] ''' ``` @@ -229,6 +235,12 @@ def model_request_stream_sync( index=0, delta=TextPartDelta(content_delta='a German-born theoretical ') ), PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='physicist.')), + PartEndEvent( + index=0, + part=TextPart( + content='Albert Einstein was a German-born theoretical physicist.' 
+ ), + ), ] ''' ``` diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py index 2b9b0c6b62..2f4a19752c 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py @@ -640,6 +640,9 @@ async def main(): PartStartEvent(index=0, part=TextPart(content='The capital of ')), FinalResultEvent(tool_name=None, tool_call_id=None), PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='France is Paris. ')), + PartEndEvent( + index=0, part=TextPart(content='The capital of France is Paris. ') + ), AgentRunResultEvent( result=AgentRunResult(output='The capital of France is Paris. ') ), diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py index 9b570292ed..6d622e56f4 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py @@ -598,6 +598,9 @@ async def main(): PartStartEvent(index=0, part=TextPart(content='The capital of ')), FinalResultEvent(tool_name=None, tool_call_id=None), PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='France is Paris. ')), + PartEndEvent( + index=0, part=TextPart(content='The capital of France is Paris. ') + ), AgentRunResultEvent( result=AgentRunResult(output='The capital of France is Paris. ') ), diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py index 88536b92aa..958a43d1bb 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py @@ -669,6 +669,9 @@ async def main(): PartStartEvent(index=0, part=TextPart(content='The capital of ')), FinalResultEvent(tool_name=None, tool_call_id=None), PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='France is Paris. ')), + PartEndEvent( + index=0, part=TextPart(content='The capital of France is Paris. ') + ), AgentRunResultEvent( result=AgentRunResult(output='The capital of France is Paris. ') ), diff --git a/tests/models/test_openai_responses.py b/tests/models/test_openai_responses.py index 13e6d24cb3..b353da4ba8 100644 --- a/tests/models/test_openai_responses.py +++ b/tests/models/test_openai_responses.py @@ -2885,11 +2885,7 @@ async def test_openai_responses_thinking_with_code_execution_tool_stream( PartEndEvent( index=0, part=ThinkingPart( - content="""\ -**Calculating a large integer** - -I need to compute 123456 raised to the power of 123. That's an enormous integer, and the user probably wants the exact value. I can use Python's ability to handle big integers, but the output will likely be extremely long — potentially hundreds of digits. I should consider that and prepare to return the result as plain text, even if it ends up being around 627 digits. 
So, let's go ahead and compute that!\ -""", + content=IsStr(), id='rs_68c3509b2ee0819eba32735182d275ad0f2d670b80edc507', signature='gAAAAABow1CfwMTF6GjgPzWVr8oKbF3qM2qnldMGM_sXMoJ2SSXHrcL4lsIK69rnKn43STNM_YZ3f5AcwxF4oThzCOPl1g9-u4GGFd5sISVWJYruCukTVDPaEEzdmJqCU1JMSIZvlvqo7b5PsUGyQU5ldX4KXDq8zs4NmRyLIJe-34SCmDG3BYVWR_O-CtcjH0tF9e3XnJ5T9TvxioDEGbASqXMKx5XB9P_b1ser8P9WIQk6hxZ8YX-FAmWSt-sad-zScdeTmyPcakDb7Z4NVcXmL_I-hoQYH_lu-HPFVwcXU8R7yeXU-7YF3vZBE84cmFuv25lftyojbdGq2A7uxGJZBPMCoUBDGBNG2_7mVvKyGz_ZZ6vXIO0GVDhHdW4Y012pkoDfLp6B-B9CGvANOH3ORlcbhB8aT9qN5bY773wW44JIxRU3umkmNzwF7lkbmuMCbGybHYSzqtkOrMIRgqxaXOx3bGbsreM4kGwgD3EXWqQ1PVye_K7gRkToVQpfpID5iuH4jJZDkvNjjJI09JR2yqlR6QkQayVg2x1y8VHXoMYjNdQdZeP62AguqYbgrlBRcjaUnw78KcWscQHaNsg0MfxL_5Q-pZR1OPVsFppHRTzrVK8458d05yEhDmun345oI9ScBrtXFRdHXPy0dQaayfjxM9H0grPrIogMw_zz4jAcFqWxE_C7GPMnNIJ_uEAhkPOetpNb-izd-iY4pGYKs8pmCB5czrAlKC1MXTnowrlWcwf5_kuD5SzWlzlWOoKWCeBDOZuKTDVJKXh_QCtQfftomQazDFCiCSgaQMuP7GaPcDuS1jdQoMQBcFfKuWoq-3eQBOCiEOAERH81zR4hz1x02T_910jGreSpfgxSqt4Td0pDDSmlEV6CwaUDQvrPc67d8_Wtx8YKv4eBH544_p1k9T8tHo3Q7xvgE37ZCdd_AVhC2ed1b5oUI95tM570HAVugFilcHJICa1RbFzIlRkNgI4k2JvsVWtD5_h3x6ZaEFTomwIXlochYgsegh8RJIRRCNKO9ebsvTrkdl8n1mb3hLrz7puwCkRFyUkxYBGT9zUjuKrjp_IjTvvov29v6pwYHg2Xd0nAfLP4WWWPBLNx3oV1-yOfXStRGHMZTB6iN9d0Bxi2QS7dk-rPPXml5HxrSo1TG06EdBXQ1VgrkWIxG1TF97-gK9oWWT9S5aaYKZAOdaqDvi7qO8I-4VwExtIq4Do3BHnWrgKNHfyuAobQK4H_CFMElYibJHwA9t-UGujMic07AxS-2XjXaCtjf7LnW_aXE2rQDqzHiTiLmTqT6jYHP0WHGSqFTOFkNmzqy6uVfU-TbdT91zDBeesc8XpzCXWBVKqxEzuQGdJrYk6ieZaxL76Kjs4jyo838LMJCXzhcF8enukz_llnoxAV59hTDAn0MUQvstGlDX0ToI7C8Oc0NZfZU5Pi4gs8u0He_Nw5UsoV7sA-jk4M45sFt6g3u00kJFP3gIcdvOzHcRK5z3Sfb9JF0bnvIYSbUFUidEJxSOAcRlxofOJPnkPtWCYiiv3zSVxZXX77-wtc8yrOYFzH1k_8P6CDpcfzOW7Yl1Tajgcm20nygmPlFtXF3RNFPztW1V5GwQHc99FvT4ZAex3fQ_UBDKyXnyGoySgpZbHQIvhzUhDEGm77EiYw5FoF6JgnHGGUCbfXr2EudtpbGW8MRHop2ytonb8Hq7w10yQSginBbH_w3bwtd7cwgDKcp6wIPotjpEC-N1YDsRqhPuqxVA==', provider_name='openai', @@ -5329,10 +5325,7 @@ async def test_openai_responses_code_execution_return_image_stream(allow_model_r PartEndEvent( index=4, part=TextPart( - content="""\ -Here's the chart of y = x^2 for x from -5 to 5. 
\n\ -Download the image: [Download the chart](sandbox:/mnt/data/y_eq_x_squared_plot.png)\ -""", + content=IsStr(), id='msg_06c1a26fd89d07f20068dd937ecbd48197bd91dc501bd4a4d4', ), ), diff --git a/tests/test_dbos.py b/tests/test_dbos.py index 37f4adcc05..256aba83fb 100644 --- a/tests/test_dbos.py +++ b/tests/test_dbos.py @@ -26,6 +26,7 @@ ModelResponse, ModelSettings, PartDeltaEvent, + PartEndEvent, PartStartEvent, RetryPromptPart, RunContext, @@ -317,17 +318,23 @@ async def test_complex_agent_run_in_workflow(allow_model_requests: None, dbos: D children=[ BasicSpan(content='ctx.run_step=1'), BasicSpan( - content='{"index":0,"part":{"tool_name":"get_country","args":"","tool_call_id":"call_3rqTYrA6H21AYUaRGP4F66oq","id":null,"part_kind":"tool-call"},"event_kind":"part_start"}' + content='{"index":0,"part":{"tool_name":"get_country","args":"","tool_call_id":"call_3rqTYrA6H21AYUaRGP4F66oq","id":null,"part_kind":"tool-call"},"previous_part_kind":null,"event_kind":"part_start"}' ), BasicSpan( content='{"index":0,"delta":{"tool_name_delta":null,"args_delta":"{}","tool_call_id":"call_3rqTYrA6H21AYUaRGP4F66oq","part_delta_kind":"tool_call"},"event_kind":"part_delta"}' ), BasicSpan( - content='{"index":1,"part":{"tool_name":"get_product_name","args":"","tool_call_id":"call_Xw9XMKBJU48kAAd78WgIswDx","id":null,"part_kind":"tool-call"},"event_kind":"part_start"}' + content='{"index":0,"part":{"tool_name":"get_country","args":"{}","tool_call_id":"call_3rqTYrA6H21AYUaRGP4F66oq","id":null,"part_kind":"tool-call"},"next_part_kind":"tool-call","event_kind":"part_end"}' + ), + BasicSpan( + content='{"index":1,"part":{"tool_name":"get_product_name","args":"","tool_call_id":"call_Xw9XMKBJU48kAAd78WgIswDx","id":null,"part_kind":"tool-call"},"previous_part_kind":"tool-call","event_kind":"part_start"}' ), BasicSpan( content='{"index":1,"delta":{"tool_name_delta":null,"args_delta":"{}","tool_call_id":"call_Xw9XMKBJU48kAAd78WgIswDx","part_delta_kind":"tool_call"},"event_kind":"part_delta"}' ), + BasicSpan( + content='{"index":1,"part":{"tool_name":"get_product_name","args":"{}","tool_call_id":"call_Xw9XMKBJU48kAAd78WgIswDx","id":null,"part_kind":"tool-call"},"next_part_kind":null,"event_kind":"part_end"}' + ), ], ) ], @@ -391,7 +398,7 @@ async def test_complex_agent_run_in_workflow(allow_model_requests: None, dbos: D children=[ BasicSpan(content='ctx.run_step=2'), BasicSpan( - content='{"index":0,"part":{"tool_name":"get_weather","args":"","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","id":null,"part_kind":"tool-call"},"event_kind":"part_start"}' + content='{"index":0,"part":{"tool_name":"get_weather","args":"","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","id":null,"part_kind":"tool-call"},"previous_part_kind":null,"event_kind":"part_start"}' ), BasicSpan( content='{"index":0,"delta":{"tool_name_delta":null,"args_delta":"{\\"","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","part_delta_kind":"tool_call"},"event_kind":"part_delta"}' @@ -411,6 +418,9 @@ async def test_complex_agent_run_in_workflow(allow_model_requests: None, dbos: D BasicSpan( content='{"index":0,"delta":{"tool_name_delta":null,"args_delta":"\\"}","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","part_delta_kind":"tool_call"},"event_kind":"part_delta"}' ), + BasicSpan( + content='{"index":0,"part":{"tool_name":"get_weather","args":"{\\"city\\":\\"Mexico City\\"}","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","id":null,"part_kind":"tool-call"},"next_part_kind":null,"event_kind":"part_end"}' + ), ], ) ], @@ -452,7 +462,7 @@ async def 
test_complex_agent_run_in_workflow(allow_model_requests: None, dbos: D children=[ BasicSpan(content='ctx.run_step=3'), BasicSpan( - content='{"index":0,"part":{"tool_name":"final_result","args":"","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","id":null,"part_kind":"tool-call"},"event_kind":"part_start"}' + content='{"index":0,"part":{"tool_name":"final_result","args":"","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","id":null,"part_kind":"tool-call"},"previous_part_kind":null,"event_kind":"part_start"}' ), BasicSpan( content='{"tool_name":"final_result","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","event_kind":"final_result"}' @@ -577,6 +587,9 @@ async def test_complex_agent_run_in_workflow(allow_model_requests: None, dbos: D BasicSpan( content='{"index":0,"delta":{"tool_name_delta":null,"args_delta":"]}","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}' ), + BasicSpan( + content='{"index":0,"part":{"tool_name":"final_result","args":"{\\"answers\\":[{\\"label\\":\\"Capital of the country\\",\\"answer\\":\\"Mexico City\\"},{\\"label\\":\\"Weather in the capital\\",\\"answer\\":\\"Sunny\\"},{\\"label\\":\\"Product Name\\",\\"answer\\":\\"Pydantic AI\\"}]}","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","id":null,"part_kind":"tool-call"},"next_part_kind":null,"event_kind":"part_end"}' + ), ], ) ], @@ -624,13 +637,25 @@ async def event_stream_handler( PartDeltaEvent( index=0, delta=ToolCallPartDelta(args_delta='{}', tool_call_id='call_q2UyBRP7eXNTzAoR8lEhjc9Z') ), + PartEndEvent( + index=0, + part=ToolCallPart(tool_name='get_country', args='{}', tool_call_id='call_q2UyBRP7eXNTzAoR8lEhjc9Z'), + next_part_kind='tool-call', + ), PartStartEvent( index=1, part=ToolCallPart(tool_name='get_product_name', args='', tool_call_id='call_b51ijcpFkDiTQG1bQzsrmtW5'), + previous_part_kind='tool-call', ), PartDeltaEvent( index=1, delta=ToolCallPartDelta(args_delta='{}', tool_call_id='call_b51ijcpFkDiTQG1bQzsrmtW5') ), + PartEndEvent( + index=1, + part=ToolCallPart( + tool_name='get_product_name', args='{}', tool_call_id='call_b51ijcpFkDiTQG1bQzsrmtW5' + ), + ), FunctionToolCallEvent( part=ToolCallPart(tool_name='get_country', args='{}', tool_call_id='call_q2UyBRP7eXNTzAoR8lEhjc9Z') ), @@ -675,6 +700,12 @@ async def event_stream_handler( PartDeltaEvent( index=0, delta=ToolCallPartDelta(args_delta='"}', tool_call_id='call_LwxJUB9KppVyogRRLQsamRJv') ), + PartEndEvent( + index=0, + part=ToolCallPart( + tool_name='get_weather', args='{"city":"Mexico City"}', tool_call_id='call_LwxJUB9KppVyogRRLQsamRJv' + ), + ), FunctionToolCallEvent( part=ToolCallPart( tool_name='get_weather', args='{"city":"Mexico City"}', tool_call_id='call_LwxJUB9KppVyogRRLQsamRJv' @@ -852,6 +883,14 @@ async def event_stream_handler( PartDeltaEvent( index=0, delta=ToolCallPartDelta(args_delta=']}', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') ), + PartEndEvent( + index=0, + part=ToolCallPart( + tool_name='final_result', + args='{"answers":[{"label":"Capital","answer":"The capital of Mexico is Mexico City."},{"label":"Weather","answer":"The weather in Mexico City is currently sunny."},{"label":"Product Name","answer":"The product name is Pydantic AI."}]}', + tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn', + ), + ), ] ) diff --git a/tests/test_direct.py b/tests/test_direct.py index 883a73ea30..0bb0603dc0 100644 --- a/tests/test_direct.py +++ b/tests/test_direct.py @@ -22,6 +22,7 @@ ModelRequest, ModelResponse, PartDeltaEvent, + PartEndEvent, PartStartEvent, TextPart, TextPartDelta, @@ 
-92,6 +93,7 @@ def test_model_request_stream_sync(): PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='(no ')), PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='tool ')), PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='calls)')), + PartEndEvent(index=0, part=TextPart(content='success (no tool calls)')), ] ) assert stream.response == snapshot( @@ -120,6 +122,7 @@ async def test_model_request_stream(): PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='(no ')), PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='tool ')), PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='calls)')), + PartEndEvent(index=0, part=TextPart(content='success (no tool calls)')), ] ) diff --git a/tests/test_prefect.py b/tests/test_prefect.py index 832ff8e619..ab0a2b2bf5 100644 --- a/tests/test_prefect.py +++ b/tests/test_prefect.py @@ -269,11 +269,14 @@ async def run_complex_agent() -> Response: children=[ BasicSpan(content='ctx.run_step=1'), BasicSpan( - content='{"index":0,"part":{"tool_name":"get_country","args":"","tool_call_id":"call_rI3WKPYvVwlOgCGRjsPP2hEx","id":null,"part_kind":"tool-call"},"event_kind":"part_start"}' + content='{"index":0,"part":{"tool_name":"get_country","args":"","tool_call_id":"call_rI3WKPYvVwlOgCGRjsPP2hEx","id":null,"part_kind":"tool-call"},"previous_part_kind":null,"event_kind":"part_start"}' ), BasicSpan( content='{"index":0,"delta":{"tool_name_delta":null,"args_delta":"{}","tool_call_id":"call_rI3WKPYvVwlOgCGRjsPP2hEx","part_delta_kind":"tool_call"},"event_kind":"part_delta"}' ), + BasicSpan( + content='{"index":0,"part":{"tool_name":"get_country","args":"{}","tool_call_id":"call_rI3WKPYvVwlOgCGRjsPP2hEx","id":null,"part_kind":"tool-call"},"next_part_kind":null,"event_kind":"part_end"}' + ), ], ) ], @@ -315,7 +318,7 @@ async def run_complex_agent() -> Response: children=[ BasicSpan(content='ctx.run_step=2'), BasicSpan( - content='{"index":0,"part":{"tool_name":"get_weather","args":"","tool_call_id":"call_NS4iQj14cDFwc0BnrKqDHavt","id":null,"part_kind":"tool-call"},"event_kind":"part_start"}' + content='{"index":0,"part":{"tool_name":"get_weather","args":"","tool_call_id":"call_NS4iQj14cDFwc0BnrKqDHavt","id":null,"part_kind":"tool-call"},"previous_part_kind":null,"event_kind":"part_start"}' ), BasicSpan( content='{"index":0,"delta":{"tool_name_delta":null,"args_delta":"{\\"ci","tool_call_id":"call_NS4iQj14cDFwc0BnrKqDHavt","part_delta_kind":"tool_call"},"event_kind":"part_delta"}' @@ -333,11 +336,17 @@ async def run_complex_agent() -> Response: content='{"index":0,"delta":{"tool_name_delta":null,"args_delta":"ty\\"}","tool_call_id":"call_NS4iQj14cDFwc0BnrKqDHavt","part_delta_kind":"tool_call"},"event_kind":"part_delta"}' ), BasicSpan( - content='{"index":1,"part":{"tool_name":"get_product_name","args":"","tool_call_id":"call_SkGkkGDvHQEEk0CGbnAh2AQw","id":null,"part_kind":"tool-call"},"event_kind":"part_start"}' + content='{"index":0,"part":{"tool_name":"get_weather","args":"{\\"city\\": \\"Mexico City\\"}","tool_call_id":"call_NS4iQj14cDFwc0BnrKqDHavt","id":null,"part_kind":"tool-call"},"next_part_kind":"tool-call","event_kind":"part_end"}' + ), + BasicSpan( + content='{"index":1,"part":{"tool_name":"get_product_name","args":"","tool_call_id":"call_SkGkkGDvHQEEk0CGbnAh2AQw","id":null,"part_kind":"tool-call"},"previous_part_kind":"tool-call","event_kind":"part_start"}' ), BasicSpan( 
content='{"index":1,"delta":{"tool_name_delta":null,"args_delta":"{}","tool_call_id":"call_SkGkkGDvHQEEk0CGbnAh2AQw","part_delta_kind":"tool_call"},"event_kind":"part_delta"}' ), + BasicSpan( + content='{"index":1,"part":{"tool_name":"get_product_name","args":"{}","tool_call_id":"call_SkGkkGDvHQEEk0CGbnAh2AQw","id":null,"part_kind":"tool-call"},"next_part_kind":null,"event_kind":"part_end"}' + ), ], ) ], @@ -410,7 +419,7 @@ async def run_complex_agent() -> Response: children=[ BasicSpan(content='ctx.run_step=3'), BasicSpan( - content='{"index":0,"part":{"tool_name":"final_result","args":"","tool_call_id":"call_QcKhHXwXzqOXJUUHJb1TB2V5","id":null,"part_kind":"tool-call"},"event_kind":"part_start"}' + content='{"index":0,"part":{"tool_name":"final_result","args":"","tool_call_id":"call_QcKhHXwXzqOXJUUHJb1TB2V5","id":null,"part_kind":"tool-call"},"previous_part_kind":null,"event_kind":"part_start"}' ), BasicSpan( content='{"tool_name":"final_result","tool_call_id":"call_QcKhHXwXzqOXJUUHJb1TB2V5","event_kind":"final_result"}' @@ -535,6 +544,9 @@ async def run_complex_agent() -> Response: BasicSpan( content='{"index":0,"delta":{"tool_name_delta":null,"args_delta":"]}","tool_call_id":"call_QcKhHXwXzqOXJUUHJb1TB2V5","part_delta_kind":"tool_call"},"event_kind":"part_delta"}' ), + BasicSpan( + content='{"index":0,"part":{"tool_name":"final_result","args":"{\\"answers\\":[{\\"label\\":\\"Capital of the country\\",\\"answer\\":\\"Mexico City\\"},{\\"label\\":\\"Weather in the capital\\",\\"answer\\":\\"Sunny\\"},{\\"label\\":\\"Product name\\",\\"answer\\":\\"Pydantic AI\\"}]}","tool_call_id":"call_QcKhHXwXzqOXJUUHJb1TB2V5","id":null,"part_kind":"tool-call"},"next_part_kind":null,"event_kind":"part_end"}' + ), ], ) ], diff --git a/tests/test_temporal.py b/tests/test_temporal.py index a3dc312ee7..233b112a5d 100644 --- a/tests/test_temporal.py +++ b/tests/test_temporal.py @@ -27,6 +27,7 @@ ModelResponse, ModelSettings, PartDeltaEvent, + PartEndEvent, PartStartEvent, RetryPromptPart, RunContext, @@ -368,17 +369,23 @@ async def test_complex_agent_run_in_workflow( children=[ BasicSpan(content='ctx.run_step=1'), BasicSpan( - content='{"index":0,"part":{"tool_name":"get_country","args":"","tool_call_id":"call_3rqTYrA6H21AYUaRGP4F66oq","id":null,"part_kind":"tool-call"},"event_kind":"part_start"}' + content='{"index":0,"part":{"tool_name":"get_country","args":"","tool_call_id":"call_3rqTYrA6H21AYUaRGP4F66oq","id":null,"part_kind":"tool-call"},"previous_part_kind":null,"event_kind":"part_start"}' ), BasicSpan( content='{"index":0,"delta":{"tool_name_delta":null,"args_delta":"{}","tool_call_id":"call_3rqTYrA6H21AYUaRGP4F66oq","part_delta_kind":"tool_call"},"event_kind":"part_delta"}' ), BasicSpan( - content='{"index":1,"part":{"tool_name":"get_product_name","args":"","tool_call_id":"call_Xw9XMKBJU48kAAd78WgIswDx","id":null,"part_kind":"tool-call"},"event_kind":"part_start"}' + content='{"index":0,"part":{"tool_name":"get_country","args":"{}","tool_call_id":"call_3rqTYrA6H21AYUaRGP4F66oq","id":null,"part_kind":"tool-call"},"next_part_kind":"tool-call","event_kind":"part_end"}' + ), + BasicSpan( + content='{"index":1,"part":{"tool_name":"get_product_name","args":"","tool_call_id":"call_Xw9XMKBJU48kAAd78WgIswDx","id":null,"part_kind":"tool-call"},"previous_part_kind":"tool-call","event_kind":"part_start"}' ), BasicSpan( content='{"index":1,"delta":{"tool_name_delta":null,"args_delta":"{}","tool_call_id":"call_Xw9XMKBJU48kAAd78WgIswDx","part_delta_kind":"tool_call"},"event_kind":"part_delta"}' ), + BasicSpan( + 
content='{"index":1,"part":{"tool_name":"get_product_name","args":"{}","tool_call_id":"call_Xw9XMKBJU48kAAd78WgIswDx","id":null,"part_kind":"tool-call"},"next_part_kind":null,"event_kind":"part_end"}' + ), ], ) ], @@ -481,7 +488,7 @@ async def test_complex_agent_run_in_workflow( children=[ BasicSpan(content='ctx.run_step=2'), BasicSpan( - content='{"index":0,"part":{"tool_name":"get_weather","args":"","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","id":null,"part_kind":"tool-call"},"event_kind":"part_start"}' + content='{"index":0,"part":{"tool_name":"get_weather","args":"","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","id":null,"part_kind":"tool-call"},"previous_part_kind":null,"event_kind":"part_start"}' ), BasicSpan( content='{"index":0,"delta":{"tool_name_delta":null,"args_delta":"{\\"","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","part_delta_kind":"tool_call"},"event_kind":"part_delta"}' @@ -501,6 +508,9 @@ async def test_complex_agent_run_in_workflow( BasicSpan( content='{"index":0,"delta":{"tool_name_delta":null,"args_delta":"\\"}","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","part_delta_kind":"tool_call"},"event_kind":"part_delta"}' ), + BasicSpan( + content='{"index":0,"part":{"tool_name":"get_weather","args":"{\\"city\\":\\"Mexico City\\"}","tool_call_id":"call_Vz0Sie91Ap56nH0ThKGrZXT7","id":null,"part_kind":"tool-call"},"next_part_kind":null,"event_kind":"part_end"}' + ), ], ) ], @@ -572,7 +582,7 @@ async def test_complex_agent_run_in_workflow( children=[ BasicSpan(content='ctx.run_step=3'), BasicSpan( - content='{"index":0,"part":{"tool_name":"final_result","args":"","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","id":null,"part_kind":"tool-call"},"event_kind":"part_start"}' + content='{"index":0,"part":{"tool_name":"final_result","args":"","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","id":null,"part_kind":"tool-call"},"previous_part_kind":null,"event_kind":"part_start"}' ), BasicSpan( content='{"tool_name":"final_result","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","event_kind":"final_result"}' @@ -697,6 +707,9 @@ async def test_complex_agent_run_in_workflow( BasicSpan( content='{"index":0,"delta":{"tool_name_delta":null,"args_delta":"]}","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","part_delta_kind":"tool_call"},"event_kind":"part_delta"}' ), + BasicSpan( + content='{"index":0,"part":{"tool_name":"final_result","args":"{\\"answers\\":[{\\"label\\":\\"Capital of the country\\",\\"answer\\":\\"Mexico City\\"},{\\"label\\":\\"Weather in the capital\\",\\"answer\\":\\"Sunny\\"},{\\"label\\":\\"Product Name\\",\\"answer\\":\\"Pydantic AI\\"}]}","tool_call_id":"call_4kc6691zCzjPnOuEtbEGUvz2","id":null,"part_kind":"tool-call"},"next_part_kind":null,"event_kind":"part_end"}' + ), ], ) ], @@ -745,13 +758,25 @@ async def event_stream_handler( PartDeltaEvent( index=0, delta=ToolCallPartDelta(args_delta='{}', tool_call_id='call_q2UyBRP7eXNTzAoR8lEhjc9Z') ), + PartEndEvent( + index=0, + part=ToolCallPart(tool_name='get_country', args='{}', tool_call_id='call_q2UyBRP7eXNTzAoR8lEhjc9Z'), + next_part_kind='tool-call', + ), PartStartEvent( index=1, part=ToolCallPart(tool_name='get_product_name', args='', tool_call_id='call_b51ijcpFkDiTQG1bQzsrmtW5'), + previous_part_kind='tool-call', ), PartDeltaEvent( index=1, delta=ToolCallPartDelta(args_delta='{}', tool_call_id='call_b51ijcpFkDiTQG1bQzsrmtW5') ), + PartEndEvent( + index=1, + part=ToolCallPart( + tool_name='get_product_name', args='{}', tool_call_id='call_b51ijcpFkDiTQG1bQzsrmtW5' + ), + ), FunctionToolCallEvent( 
part=ToolCallPart(tool_name='get_country', args='{}', tool_call_id='call_q2UyBRP7eXNTzAoR8lEhjc9Z') ), @@ -796,6 +821,12 @@ async def event_stream_handler( PartDeltaEvent( index=0, delta=ToolCallPartDelta(args_delta='"}', tool_call_id='call_LwxJUB9KppVyogRRLQsamRJv') ), + PartEndEvent( + index=0, + part=ToolCallPart( + tool_name='get_weather', args='{"city":"Mexico City"}', tool_call_id='call_LwxJUB9KppVyogRRLQsamRJv' + ), + ), FunctionToolCallEvent( part=ToolCallPart( tool_name='get_weather', args='{"city":"Mexico City"}', tool_call_id='call_LwxJUB9KppVyogRRLQsamRJv' @@ -973,6 +1004,14 @@ async def event_stream_handler( PartDeltaEvent( index=0, delta=ToolCallPartDelta(args_delta=']}', tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn') ), + PartEndEvent( + index=0, + part=ToolCallPart( + tool_name='final_result', + args='{"answers":[{"label":"Capital","answer":"The capital of Mexico is Mexico City."},{"label":"Weather","answer":"The weather in Mexico City is currently sunny."},{"label":"Product Name","answer":"The product name is Pydantic AI."}]}', + tool_call_id='call_CCGIWaMeYWmxOQ91orkmTvzn', + ), + ), ] ) @@ -1140,6 +1179,7 @@ async def test_temporal_agent_run_stream_events(allow_model_requests: None): PartDeltaEvent(index=0, delta=TextPartDelta(content_delta=' Mexico')), PartDeltaEvent(index=0, delta=TextPartDelta(content_delta=' City')), PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='.')), + PartEndEvent(index=0, part=TextPart(content='The capital of Mexico is Mexico City.')), AgentRunResultEvent(result=AgentRunResult(output='The capital of Mexico is Mexico City.')), ] ) From 6caa37e1995cc65c784fabf0cf0e3fe804f32cce Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Wed, 15 Oct 2025 14:19:03 +0000 Subject: [PATCH 17/46] Remove extra item in stream_output iteration --- pydantic_ai_slim/pydantic_ai/result.py | 5 +++++ tests/test_streaming.py | 12 ++---------- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/result.py b/pydantic_ai_slim/pydantic_ai/result.py index f5b542953e..055abb5472 100644 --- a/pydantic_ai_slim/pydantic_ai/result.py +++ b/pydantic_ai_slim/pydantic_ai/result.py @@ -60,7 +60,12 @@ def __post_init__(self): async def stream_output(self, *, debounce_by: float | None = 0.1) -> AsyncIterator[OutputDataT]: """Asynchronously stream the (validated) agent outputs.""" + last_response: _messages.ModelResponse | None = None async for response in self.stream_responses(debounce_by=debounce_by): + if response == last_response: + continue + last_response = response + if self._raw_stream_response.final_result_event is not None: try: yield await self.validate_response_output(response, allow_partial=True) diff --git a/tests/test_streaming.py b/tests/test_streaming.py index f2e97c3a96..7b3123f7c4 100644 --- a/tests/test_streaming.py +++ b/tests/test_streaming.py @@ -244,7 +244,6 @@ def upcase(text: str) -> str: 'THE CAT SAT ON THE ', 'THE CAT SAT ON THE MAT.', 'THE CAT SAT ON THE MAT.', - 'THE CAT SAT ON THE MAT.', ] ) @@ -1137,7 +1136,6 @@ def output_validator_simple(data: str) -> str: assert messages == snapshot( [ - '', 'The ', 'The cat ', 'The bat sat ', @@ -1145,7 +1143,6 @@ def output_validator_simple(data: str) -> str: 'The bat sat on the ', 'The bat sat on the mat.', 'The bat sat on the mat.', - 'The bat sat on the mat.', ] ) @@ -1215,9 +1212,7 @@ def output_validator(data: OutputType | NotOutputType) -> OutputType | NotOutput async with node.stream(run.ctx) as stream: async for output in stream.stream_output(debounce_by=None): 
outputs.append(output) - assert outputs == snapshot( - [OutputType(value='a (validated)'), OutputType(value='a (validated)'), OutputType(value='a (validated)')] - ) + assert outputs == snapshot([OutputType(value='a (validated)')]) async def test_unknown_tool_call_events(): @@ -1356,7 +1351,6 @@ class CityLocation(BaseModel): CityLocation(city='Mexico City'), CityLocation(city='Mexico City', country='Mexico'), CityLocation(city='Mexico City', country='Mexico'), - CityLocation(city='Mexico City', country='Mexico'), ] ) assert result.is_complete @@ -1382,7 +1376,6 @@ class CityLocation(BaseModel): CityLocation(city='Mexico City'), CityLocation(city='Mexico City', country='Mexico'), CityLocation(city='Mexico City', country='Mexico'), - CityLocation(city='Mexico City', country='Mexico'), ] ) @@ -1419,7 +1412,6 @@ async def text_stream(_messages: list[ModelMessage], agent_info: AgentInfo) -> A CityLocation(city='Mexico City'), CityLocation(city='Mexico City', country='Mexico'), CityLocation(city='Mexico City', country='Mexico'), - CityLocation(city='Mexico City', country='Mexico'), ] ) @@ -1789,7 +1781,7 @@ async def event_stream_handler(ctx: RunContext[None], stream: AsyncIterable[Agen async with test_agent.run_stream('Hello', event_stream_handler=event_stream_handler) as result: assert [c async for c in result.stream_output(debounce_by=None)] == snapshot( - ['{"ret_a":', '{"ret_a":"a-apple"}', '{"ret_a":"a-apple"}', '{"ret_a":"a-apple"}'] + ['{"ret_a":', '{"ret_a":"a-apple"}', '{"ret_a":"a-apple"}'] ) assert events == snapshot( From a09f6cee00cb73b86d62cd91ca14ce03cef0dac8 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Wed, 15 Oct 2025 14:33:03 +0000 Subject: [PATCH 18/46] resolve some todos --- pydantic_ai_slim/pydantic_ai/ui/adapter.py | 2 +- .../pydantic_ai/ui/ag_ui/_event_stream.py | 4 ++-- .../pydantic_ai/ui/vercel_ai/_adapter.py | 1 - .../pydantic_ai/ui/vercel_ai/_event_stream.py | 2 +- tests/test_ag_ui.py | 15 +-------------- 5 files changed, 5 insertions(+), 19 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/adapter.py index 10426e12b9..12b354811c 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/adapter.py @@ -122,7 +122,7 @@ class BaseAdapter(ABC, Generic[RunRequestT, MessageT, EventT, AgentDepsT]): agent: AbstractAgent[AgentDepsT] """The Pydantic AI agent to run.""" - request: RunRequestT # TODO (DouweM): rename + request: RunRequestT """The protocol-specific request object.""" @classmethod diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index c576b5be6a..21f8d05516 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -231,6 +231,8 @@ async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> A content=output, ) + # ToolCallResultEvent.content may hold user parts (e.g. text, images) that AG-UI does not currently have events for + if isinstance(result, ToolReturnPart): # Check for AG-UI events returned by tool calls. possible_event = result.metadata or result.content @@ -243,5 +245,3 @@ async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> A for item in possible_event: # type: ignore[reportUnknownMemberType] if isinstance(item, BaseEvent): # pragma: no branch yield item - - # TODO (DouweM): Stream ToolCallResultEvent.content as user parts? 
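The snapshot churn throughout these patches all falls out of the `stream_output` change in PATCH 17 above: consecutive duplicate responses are now skipped instead of re-yielded (refined later in this series to compare `response.parts`). A rough standalone sketch of that pattern follows; it is not the library's actual API — `dedupe_consecutive`, `demo`, and `chunks` are illustrative names, and the real method additionally gates on `final_result_event` and re-validates partial output.

```python
import asyncio
from collections.abc import AsyncIterator
from typing import TypeVar

T = TypeVar('T')


async def dedupe_consecutive(stream: AsyncIterator[T]) -> AsyncIterator[T]:
    """Yield items from `stream`, skipping any item equal to the one just yielded."""
    # Track only the previous item; a `first` flag keeps a leading falsy/None item from being dropped.
    last: T | None = None
    first = True
    async for item in stream:
        if not first and item == last:
            continue
        first = False
        last = item
        yield item


async def demo() -> None:
    async def chunks() -> AsyncIterator[str]:
        # Mimics a debounced text stream that repeats its final state at end of stream.
        for text in ['hello ', 'hello world', 'hello world']:
            yield text

    print([text async for text in dedupe_consecutive(chunks())])
    #> ['hello ', 'hello world']


asyncio.run(demo())
```

Comparing against only the immediately preceding item, rather than a set of everything seen so far, is deliberate: a stream may legitimately revisit an earlier value, and only the back-to-back repeats produced by debouncing and the end-of-stream flush are noise. That is why the test updates above and below simply drop the trailing repeated snapshot entries.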
diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py index 6ce73075d0..0d5d66462d 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py @@ -202,7 +202,6 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: # ) if result_part: - # TODO (DouweM): Combine consecutive tool calls and results request_parts = [result_part] result.append(ModelRequest(parts=request_parts)) response_parts = None diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py index 317db9c27c..3c57e69e2c 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py @@ -203,4 +203,4 @@ async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> A else: yield ToolOutputAvailableChunk(tool_call_id=result.tool_call_id, output=result.content) - # TODO (DouweM): Stream ToolCallResultEvent.content as user parts? + # ToolCallResultEvent.content may hold user parts (e.g. text, images) that Vercel AI does not currently have events for diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index 14eac2e3f4..e024944125 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -1440,20 +1440,7 @@ async def test_messages() -> None: ), ] - adapter = AGUIAdapter( - Agent(TestModel()), - # TODO (DouweM): Have a convenience method so the useless args aren't necessary - request=RunAgentInput( - messages=messages, - thread_id='test_thread', - run_id='test_run', - state={}, - tools=[], - context=[], - forwarded_props={}, - ), - ) - assert adapter.messages == snapshot( + assert AGUIAdapter.load_messages(messages) == snapshot( [ ModelRequest( parts=[ From b9019e0b6a7ac093354bddd681f32e0e9fcd5ae3 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Wed, 15 Oct 2025 14:39:31 +0000 Subject: [PATCH 19/46] resolve some todos, fix snapshots --- docs/output.md | 1 - tests/models/test_anthropic.py | 4 -- tests/models/test_gemini.py | 5 +- tests/models/test_groq.py | 8 +-- tests/models/test_huggingface.py | 1 - tests/models/test_mistral.py | 119 +------------------------------ 6 files changed, 7 insertions(+), 131 deletions(-) diff --git a/docs/output.md b/docs/output.md index bf31effac2..dd281525dc 100644 --- a/docs/output.md +++ b/docs/output.md @@ -615,7 +615,6 @@ async def main(): #> {'name': 'Ben', 'dob': date(1990, 1, 28), 'bio': 'Likes the chain the dog and the pyr'} #> {'name': 'Ben', 'dob': date(1990, 1, 28), 'bio': 'Likes the chain the dog and the pyramid'} #> {'name': 'Ben', 'dob': date(1990, 1, 28), 'bio': 'Likes the chain the dog and the pyramid'} - #> {'name': 'Ben', 'dob': date(1990, 1, 28), 'bio': 'Likes the chain the dog and the pyramid'} ``` _(This example is complete, it can be run "as is" — you'll need to add `asyncio.run(main())` to run `main`)_ diff --git a/tests/models/test_anthropic.py b/tests/models/test_anthropic.py index 1e24feedfa..0b86599596 100644 --- a/tests/models/test_anthropic.py +++ b/tests/models/test_anthropic.py @@ -5392,13 +5392,10 @@ async def test_anthropic_text_parts_ahead_of_built_in_tool_call(allow_model_requ '', '', '', - '', - '', 'Here', "Here's one notable historical event that occurred on September", "Here's one notable historical event that occurred on September 18th: ", "Here's one notable historical event that occurred on September 18th: On September 18, 1793, President 
George Washington marke", - "Here's one notable historical event that occurred on September 18th: On September 18, 1793, President George Washington marke", "Here's one notable historical event that occurred on September 18th: On September 18, 1793, President George Washington marked the location for the Capitol Building", "Here's one notable historical event that occurred on September 18th: On September 18, 1793, President George Washington marked the location for the Capitol Building in Washington DC, and he", "Here's one notable historical event that occurred on September 18th: On September 18, 1793, President George Washington marked the location for the Capitol Building in Washington DC, and he would return periodically to oversee its", @@ -5406,7 +5403,6 @@ async def test_anthropic_text_parts_ahead_of_built_in_tool_call(allow_model_requ "Here's one notable historical event that occurred on September 18th: On September 18, 1793, President George Washington marked the location for the Capitol Building in Washington DC, and he would return periodically to oversee its construction personally.", "Here's one notable historical event that occurred on September 18th: On September 18, 1793, President George Washington marked the location for the Capitol Building in Washington DC, and he would return periodically to oversee its construction personally.", "Here's one notable historical event that occurred on September 18th: On September 18, 1793, President George Washington marked the location for the Capitol Building in Washington DC, and he would return periodically to oversee its construction personally.", - "Here's one notable historical event that occurred on September 18th: On September 18, 1793, President George Washington marked the location for the Capitol Building in Washington DC, and he would return periodically to oversee its construction personally.", ] ) diff --git a/tests/models/test_gemini.py b/tests/models/test_gemini.py index 1a7308cac1..b4512ff960 100644 --- a/tests/models/test_gemini.py +++ b/tests/models/test_gemini.py @@ -825,7 +825,6 @@ async def test_stream_text(get_gemini_client: GetGeminiClient): # This last value is repeated due to the debounce_by=None combined with the need to emit # a final empty chunk to signal the end of the stream 'Hello world', - 'Hello world', ] ) assert result.usage() == snapshot(RunUsage(requests=1, input_tokens=1, output_tokens=2)) @@ -864,7 +863,7 @@ async def test_stream_invalid_unicode_text(get_gemini_client: GetGeminiClient): async with agent.run_stream('Hello') as result: chunks = [chunk async for chunk in result.stream_output(debounce_by=None)] - assert chunks == snapshot(['abc', 'abc€def', 'abc€def', 'abc€def']) + assert chunks == snapshot(['abc', 'abc€def', 'abc€def']) assert result.usage() == snapshot(RunUsage(requests=1, input_tokens=1, output_tokens=2)) @@ -894,7 +893,7 @@ async def test_stream_structured(get_gemini_client: GetGeminiClient): async with agent.run_stream('Hello') as result: chunks = [chunk async for chunk in result.stream_output(debounce_by=None)] - assert chunks == snapshot([(1, 2), (1, 2), (1, 2)]) + assert chunks == snapshot([(1, 2), (1, 2)]) assert result.usage() == snapshot(RunUsage(requests=1, input_tokens=1, output_tokens=2)) diff --git a/tests/models/test_groq.py b/tests/models/test_groq.py index 401dc1b1f4..6551831e6c 100644 --- a/tests/models/test_groq.py +++ b/tests/models/test_groq.py @@ -410,7 +410,7 @@ async def test_stream_text(allow_model_requests: None): async with agent.run_stream('') as result: 
assert not result.is_complete assert [c async for c in result.stream_output(debounce_by=None)] == snapshot( - ['hello ', 'hello world', 'hello world', 'hello world'] + ['hello ', 'hello world', 'hello world'] ) assert result.is_complete @@ -424,7 +424,7 @@ async def test_stream_text_finish_reason(allow_model_requests: None): async with agent.run_stream('') as result: assert not result.is_complete assert [c async for c in result.stream_output(debounce_by=None)] == snapshot( - ['hello ', 'hello world', 'hello world.', 'hello world.', 'hello world.'] + ['hello ', 'hello world', 'hello world.', 'hello world.'] ) assert result.is_complete @@ -477,7 +477,6 @@ async def test_stream_structured(allow_model_requests: None): {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, ] ) assert result.is_complete @@ -534,7 +533,6 @@ async def test_stream_structured_finish_reason(allow_model_requests: None): {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, ] ) assert result.is_complete @@ -549,7 +547,7 @@ async def test_no_delta(allow_model_requests: None): async with agent.run_stream('') as result: assert not result.is_complete assert [c async for c in result.stream_output(debounce_by=None)] == snapshot( - ['hello ', 'hello world', 'hello world', 'hello world'] + ['hello ', 'hello world', 'hello world'] ) assert result.is_complete diff --git a/tests/models/test_huggingface.py b/tests/models/test_huggingface.py index c6729545b7..329c102fc4 100644 --- a/tests/models/test_huggingface.py +++ b/tests/models/test_huggingface.py @@ -598,7 +598,6 @@ async def test_stream_structured_finish_reason(allow_model_requests: None): {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, ] ) assert result.is_complete diff --git a/tests/models/test_mistral.py b/tests/models/test_mistral.py index 9d5f422046..ddad98a623 100644 --- a/tests/models/test_mistral.py +++ b/tests/models/test_mistral.py @@ -612,18 +612,8 @@ class MyTypedDict(TypedDict, total=False): [ {'first': 'One'}, {'first': 'One', 'second': 2}, - {'first': 'One', 'second': 2}, - {'first': 'One', 'second': 2, 'bool_value': True}, {'first': 'One', 'second': 2, 'bool_value': True}, {'first': 'One', 'second': 2, 'bool_value': True, 'nullable_value': None}, - {'first': 'One', 'second': 2, 'bool_value': True, 'nullable_value': None}, - { - 'first': 'One', - 'second': 2, - 'bool_value': True, - 'nullable_value': None, - 'array_value': ['A', 'B', 'C'], - }, { 'first': 'One', 'second': 2, @@ -639,32 +629,6 @@ class MyTypedDict(TypedDict, total=False): 'array_value': ['A', 'B', 'C'], 'dict_value': {'A': 'A', 'B': 'B'}, }, - { - 'first': 'One', - 'second': 2, - 'bool_value': True, - 'nullable_value': None, - 'array_value': ['A', 'B', 'C'], - 'dict_value': {'A': 'A', 'B': 'B'}, - }, - { - 'first': 'One', - 'second': 2, - 'bool_value': True, - 'nullable_value': None, - 'array_value': ['A', 'B', 'C'], - 'dict_value': {'A': 'A', 'B': 'B'}, - 'dict_int_value': {'A': 1, 'B': 2}, - }, - { - 'first': 'One', - 'second': 2, - 'bool_value': True, - 'nullable_value': None, - 'array_value': ['A', 'B', 'C'], - 'dict_value': {'A': 'A', 'B': 'B'}, - 'dict_int_value': {'A': 1, 'B': 2}, - }, { 'first': 'One', 'second': 2, @@ -764,21 +728,8 @@ class MyTypedDict(TypedDict, total=False): v = [c async for c in 
result.stream_output(debounce_by=None)] assert v == snapshot( [ - {'first': 'O'}, {'first': 'O'}, {'first': 'On'}, - {'first': 'On'}, - {'first': 'One'}, - {'first': 'One'}, - {'first': 'One'}, - {'first': 'One'}, - {'first': 'One'}, - {'first': 'One'}, - {'first': 'One'}, - {'first': 'One'}, - {'first': 'One'}, - {'first': 'One'}, - {'first': 'One'}, {'first': 'One'}, {'first': 'One'}, {'first': 'One'}, @@ -793,23 +744,14 @@ class MyTypedDict(TypedDict, total=False): {'first': 'One'}, {'first': 'One'}, {'first': 'One'}, - {'first': 'One'}, - {'first': 'One'}, - {'first': 'One'}, - {'first': 'One', 'second': ''}, {'first': 'One', 'second': ''}, {'first': 'One', 'second': 'T'}, - {'first': 'One', 'second': 'T'}, - {'first': 'One', 'second': 'Tw'}, {'first': 'One', 'second': 'Tw'}, {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, ] ) assert result.is_complete @@ -840,7 +782,7 @@ async def test_stream_result_type_primitif_int(allow_model_requests: None): async with agent.run_stream('User prompt value') as result: assert not result.is_complete v = [c async for c in result.stream_output(debounce_by=None)] - assert v == snapshot([1, 1, 1, 1, 1]) + assert v == snapshot([1, 1, 1, 1]) assert result.is_complete assert result.usage().input_tokens == 6 assert result.usage().output_tokens == 6 @@ -900,62 +842,32 @@ async def test_stream_result_type_primitif_array(allow_model_requests: None): v = [c async for c in result.stream_output(debounce_by=None)] assert v == snapshot( [ - [''], [''], ['f'], - ['f'], - ['fi'], ['fi'], ['fir'], - ['fir'], ['firs'], - ['firs'], - ['first'], - ['first'], ['first'], ['first'], ['first'], - ['first'], - ['first', ''], ['first', ''], ['first', 'O'], - ['first', 'O'], ['first', 'On'], - ['first', 'On'], - ['first', 'One'], - ['first', 'One'], ['first', 'One'], ['first', 'One'], ['first', 'One'], - ['first', 'One'], - ['first', 'One', ''], ['first', 'One', ''], ['first', 'One', 's'], - ['first', 'One', 's'], ['first', 'One', 'se'], - ['first', 'One', 'se'], - ['first', 'One', 'sec'], ['first', 'One', 'sec'], ['first', 'One', 'seco'], - ['first', 'One', 'seco'], ['first', 'One', 'secon'], - ['first', 'One', 'secon'], - ['first', 'One', 'second'], - ['first', 'One', 'second'], ['first', 'One', 'second'], ['first', 'One', 'second'], ['first', 'One', 'second'], - ['first', 'One', 'second'], - ['first', 'One', 'second', ''], ['first', 'One', 'second', ''], ['first', 'One', 'second', 'T'], - ['first', 'One', 'second', 'T'], ['first', 'One', 'second', 'Tw'], - ['first', 'One', 'second', 'Tw'], - ['first', 'One', 'second', 'Two'], - ['first', 'One', 'second', 'Two'], - ['first', 'One', 'second', 'Two'], - ['first', 'One', 'second', 'Two'], ['first', 'One', 'second', 'Two'], ['first', 'One', 'second', 'Two'], ['first', 'One', 'second', 'Two'], @@ -1024,24 +936,7 @@ class MyTypedBaseModel(BaseModel): assert v == snapshot( [ MyTypedBaseModel(first='O', second=''), - MyTypedBaseModel(first='O'), - MyTypedBaseModel(first='On'), MyTypedBaseModel(first='On', second=''), - MyTypedBaseModel(first='One'), - MyTypedBaseModel(first='One'), - MyTypedBaseModel(first='One'), - MyTypedBaseModel(first='One'), - MyTypedBaseModel(first='One'), - MyTypedBaseModel(first='One'), - MyTypedBaseModel(first='One'), - MyTypedBaseModel(first='One'), - 
MyTypedBaseModel(first='One'), - MyTypedBaseModel(first='One'), - MyTypedBaseModel(first='One'), - MyTypedBaseModel(first='One'), - MyTypedBaseModel(first='One'), - MyTypedBaseModel(first='One'), - MyTypedBaseModel(first='One'), MyTypedBaseModel(first='One', second=''), MyTypedBaseModel(first='One', second=''), MyTypedBaseModel(first='One', second=''), @@ -1058,12 +953,7 @@ class MyTypedBaseModel(BaseModel): MyTypedBaseModel(first='One', second=''), MyTypedBaseModel(first='One', second=''), MyTypedBaseModel(first='One', second='T'), - MyTypedBaseModel(first='One', second='T'), MyTypedBaseModel(first='One', second='Tw'), - MyTypedBaseModel(first='One', second='Tw'), - MyTypedBaseModel(first='One', second='Two'), - MyTypedBaseModel(first='One', second='Two'), - MyTypedBaseModel(first='One', second='Two'), MyTypedBaseModel(first='One', second='Two'), MyTypedBaseModel(first='One', second='Two'), MyTypedBaseModel(first='One', second='Two'), @@ -1132,17 +1022,12 @@ class MyTypedBaseModel(BaseModel): [ MyTypedBaseModel(first='One', second=''), MyTypedBaseModel(first='One', second='T'), - MyTypedBaseModel(first='One', second='T'), - MyTypedBaseModel(first='One', second='Tw'), MyTypedBaseModel(first='One', second='Tw'), MyTypedBaseModel(first='One', second='Two'), MyTypedBaseModel(first='One', second='Two'), MyTypedBaseModel(first='One', second='Two'), MyTypedBaseModel(first='One', second='Two'), MyTypedBaseModel(first='One', second='Two'), - MyTypedBaseModel(first='One', second='Two'), - MyTypedBaseModel(first='One', second='Two'), - MyTypedBaseModel(first='One', second='Two'), ] ) assert result.is_complete @@ -1511,7 +1396,7 @@ async def get_location(loc_name: str) -> str: async with agent.run_stream('User prompt value') as result: assert not result.is_complete v = [c async for c in result.stream_output(debounce_by=None)] - assert v == snapshot([{'won': True}, {'won': True}, {'won': True}]) + assert v == snapshot([{'won': True}, {'won': True}]) assert result.is_complete assert result.timestamp() == datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc) assert result.usage().input_tokens == 4 From a5c205b33cc6430f181d1fbc2a2bb4fd113bb009 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Wed, 15 Oct 2025 15:14:47 +0000 Subject: [PATCH 20/46] Deduplicate more stream_output messages --- docs/output.md | 1 - pydantic_ai_slim/pydantic_ai/result.py | 23 +++++++++++------- tests/models/test_anthropic.py | 4 +--- tests/models/test_gemini.py | 14 +++-------- tests/models/test_groq.py | 26 ++++----------------- tests/models/test_huggingface.py | 17 ++------------ tests/models/test_mistral.py | 32 +++----------------------- tests/models/test_openai.py | 9 +------- tests/test_streaming.py | 19 ++++----------- 9 files changed, 34 insertions(+), 111 deletions(-) diff --git a/docs/output.md b/docs/output.md index dd281525dc..f6744b49d6 100644 --- a/docs/output.md +++ b/docs/output.md @@ -614,7 +614,6 @@ async def main(): #> {'name': 'Ben', 'dob': date(1990, 1, 28), 'bio': 'Likes the chain the '} #> {'name': 'Ben', 'dob': date(1990, 1, 28), 'bio': 'Likes the chain the dog and the pyr'} #> {'name': 'Ben', 'dob': date(1990, 1, 28), 'bio': 'Likes the chain the dog and the pyramid'} - #> {'name': 'Ben', 'dob': date(1990, 1, 28), 'bio': 'Likes the chain the dog and the pyramid'} ``` _(This example is complete, it can be run "as is" — you'll need to add `asyncio.run(main())` to run `main`)_ diff --git a/pydantic_ai_slim/pydantic_ai/result.py b/pydantic_ai_slim/pydantic_ai/result.py index 055abb5472..549333c716 100644 --- 
a/pydantic_ai_slim/pydantic_ai/result.py +++ b/pydantic_ai_slim/pydantic_ai/result.py @@ -62,17 +62,24 @@ async def stream_output(self, *, debounce_by: float | None = 0.1) -> AsyncIterat """Asynchronously stream the (validated) agent outputs.""" last_response: _messages.ModelResponse | None = None async for response in self.stream_responses(debounce_by=debounce_by): - if response == last_response: + if self._raw_stream_response.final_result_event is None or ( + last_response and response.parts == last_response.parts + ): continue last_response = response - if self._raw_stream_response.final_result_event is not None: - try: - yield await self.validate_response_output(response, allow_partial=True) - except ValidationError: - pass - if self._raw_stream_response.final_result_event is not None: # pragma: no branch - yield await self.validate_response_output(self.response) + try: + yield await self.validate_response_output(response, allow_partial=True) + except ValidationError: + pass + + response = self.response + if self._raw_stream_response.final_result_event is None or ( + last_response and response.parts == last_response.parts + ): + return + + yield await self.validate_response_output(response) async def stream_responses(self, *, debounce_by: float | None = 0.1) -> AsyncIterator[_messages.ModelResponse]: """Asynchronously stream the (unvalidated) model responses for the agent.""" diff --git a/tests/models/test_anthropic.py b/tests/models/test_anthropic.py index 0b86599596..3c14fd6130 100644 --- a/tests/models/test_anthropic.py +++ b/tests/models/test_anthropic.py @@ -696,7 +696,7 @@ async def my_tool(first: str, second: str) -> int: # The tool output doesn't echo any content to the stream, so we only get the final payload once when # the block starts and once when it ends. 
- assert chunks == snapshot(['FINAL_PAYLOAD', 'FINAL_PAYLOAD', 'FINAL_PAYLOAD']) + assert chunks == snapshot(['FINAL_PAYLOAD']) assert result.is_complete assert result.usage() == snapshot( RunUsage( @@ -5401,8 +5401,6 @@ async def test_anthropic_text_parts_ahead_of_built_in_tool_call(allow_model_requ "Here's one notable historical event that occurred on September 18th: On September 18, 1793, President George Washington marked the location for the Capitol Building in Washington DC, and he would return periodically to oversee its", "Here's one notable historical event that occurred on September 18th: On September 18, 1793, President George Washington marked the location for the Capitol Building in Washington DC, and he would return periodically to oversee its construction personally", "Here's one notable historical event that occurred on September 18th: On September 18, 1793, President George Washington marked the location for the Capitol Building in Washington DC, and he would return periodically to oversee its construction personally.", - "Here's one notable historical event that occurred on September 18th: On September 18, 1793, President George Washington marked the location for the Capitol Building in Washington DC, and he would return periodically to oversee its construction personally.", - "Here's one notable historical event that occurred on September 18th: On September 18, 1793, President George Washington marked the location for the Capitol Building in Washington DC, and he would return periodically to oversee its construction personally.", ] ) diff --git a/tests/models/test_gemini.py b/tests/models/test_gemini.py index b4512ff960..37e171ccad 100644 --- a/tests/models/test_gemini.py +++ b/tests/models/test_gemini.py @@ -818,15 +818,7 @@ async def test_stream_text(get_gemini_client: GetGeminiClient): async with agent.run_stream('Hello') as result: chunks = [chunk async for chunk in result.stream_output(debounce_by=None)] - assert chunks == snapshot( - [ - 'Hello ', - 'Hello world', - # This last value is repeated due to the debounce_by=None combined with the need to emit - # a final empty chunk to signal the end of the stream - 'Hello world', - ] - ) + assert chunks == snapshot(['Hello ', 'Hello world']) assert result.usage() == snapshot(RunUsage(requests=1, input_tokens=1, output_tokens=2)) async with agent.run_stream('Hello') as result: @@ -863,7 +855,7 @@ async def test_stream_invalid_unicode_text(get_gemini_client: GetGeminiClient): async with agent.run_stream('Hello') as result: chunks = [chunk async for chunk in result.stream_output(debounce_by=None)] - assert chunks == snapshot(['abc', 'abc€def', 'abc€def']) + assert chunks == snapshot(['abc', 'abc€def']) assert result.usage() == snapshot(RunUsage(requests=1, input_tokens=1, output_tokens=2)) @@ -893,7 +885,7 @@ async def test_stream_structured(get_gemini_client: GetGeminiClient): async with agent.run_stream('Hello') as result: chunks = [chunk async for chunk in result.stream_output(debounce_by=None)] - assert chunks == snapshot([(1, 2), (1, 2)]) + assert chunks == snapshot([(1, 2)]) assert result.usage() == snapshot(RunUsage(requests=1, input_tokens=1, output_tokens=2)) diff --git a/tests/models/test_groq.py b/tests/models/test_groq.py index 6551831e6c..a493a35dbe 100644 --- a/tests/models/test_groq.py +++ b/tests/models/test_groq.py @@ -409,9 +409,7 @@ async def test_stream_text(allow_model_requests: None): async with agent.run_stream('') as result: assert not result.is_complete - assert [c async for c in 
result.stream_output(debounce_by=None)] == snapshot( - ['hello ', 'hello world', 'hello world'] - ) + assert [c async for c in result.stream_output(debounce_by=None)] == snapshot(['hello ', 'hello world']) assert result.is_complete @@ -424,7 +422,7 @@ async def test_stream_text_finish_reason(allow_model_requests: None): async with agent.run_stream('') as result: assert not result.is_complete assert [c async for c in result.stream_output(debounce_by=None)] == snapshot( - ['hello ', 'hello world', 'hello world.', 'hello world.'] + ['hello ', 'hello world', 'hello world.'] ) assert result.is_complete @@ -471,13 +469,7 @@ async def test_stream_structured(allow_model_requests: None): async with agent.run_stream('') as result: assert not result.is_complete assert [dict(c) async for c in result.stream_output(debounce_by=None)] == snapshot( - [ - {}, - {'first': 'One'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - ] + [{}, {'first': 'One'}, {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}] ) assert result.is_complete @@ -527,13 +519,7 @@ async def test_stream_structured_finish_reason(allow_model_requests: None): async with agent.run_stream('') as result: assert not result.is_complete assert [dict(c) async for c in result.stream_output(debounce_by=None)] == snapshot( - [ - {'first': 'One'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - ] + [{'first': 'One'}, {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}] ) assert result.is_complete @@ -546,9 +532,7 @@ async def test_no_delta(allow_model_requests: None): async with agent.run_stream('') as result: assert not result.is_complete - assert [c async for c in result.stream_output(debounce_by=None)] == snapshot( - ['hello ', 'hello world', 'hello world'] - ) + assert [c async for c in result.stream_output(debounce_by=None)] == snapshot(['hello ', 'hello world']) assert result.is_complete diff --git a/tests/models/test_huggingface.py b/tests/models/test_huggingface.py index 329c102fc4..1235fcfcbf 100644 --- a/tests/models/test_huggingface.py +++ b/tests/models/test_huggingface.py @@ -562,14 +562,7 @@ async def test_stream_structured(allow_model_requests: None): async with agent.run_stream('') as result: assert not result.is_complete assert [dict(c) async for c in result.stream_output(debounce_by=None)] == snapshot( - [ - {}, - {'first': 'One'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - ] + [{}, {'first': 'One'}, {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}] ) assert result.is_complete assert result.usage() == snapshot(RunUsage(requests=1, input_tokens=20, output_tokens=10)) @@ -592,13 +585,7 @@ async def test_stream_structured_finish_reason(allow_model_requests: None): async with agent.run_stream('') as result: assert not result.is_complete assert [dict(c) async for c in result.stream_output(debounce_by=None)] == snapshot( - [ - {'first': 'One'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - ] + [{'first': 'One'}, {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}] ) assert result.is_complete diff --git a/tests/models/test_mistral.py b/tests/models/test_mistral.py index ddad98a623..76ae344c5b 
100644 --- a/tests/models/test_mistral.py +++ b/tests/models/test_mistral.py @@ -647,24 +647,6 @@ class MyTypedDict(TypedDict, total=False): 'dict_value': {'A': 'A', 'B': 'B'}, 'dict_int_value': {'A': 1, 'B': 2}, }, - { - 'first': 'One', - 'second': 2, - 'bool_value': True, - 'nullable_value': None, - 'array_value': ['A', 'B', 'C'], - 'dict_value': {'A': 'A', 'B': 'B'}, - 'dict_int_value': {'A': 1, 'B': 2}, - }, - { - 'first': 'One', - 'second': 2, - 'bool_value': True, - 'nullable_value': None, - 'array_value': ['A', 'B', 'C'], - 'dict_value': {'A': 'A', 'B': 'B'}, - 'dict_int_value': {'A': 1, 'B': 2}, - }, ] ) assert result.is_complete @@ -750,8 +732,6 @@ class MyTypedDict(TypedDict, total=False): {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, ] ) assert result.is_complete @@ -782,7 +762,7 @@ async def test_stream_result_type_primitif_int(allow_model_requests: None): async with agent.run_stream('User prompt value') as result: assert not result.is_complete v = [c async for c in result.stream_output(debounce_by=None)] - assert v == snapshot([1, 1, 1, 1]) + assert v == snapshot([1, 1]) assert result.is_complete assert result.usage().input_tokens == 6 assert result.usage().output_tokens == 6 @@ -872,8 +852,6 @@ async def test_stream_result_type_primitif_array(allow_model_requests: None): ['first', 'One', 'second', 'Two'], ['first', 'One', 'second', 'Two'], ['first', 'One', 'second', 'Two'], - ['first', 'One', 'second', 'Two'], - ['first', 'One', 'second', 'Two'], ] ) assert result.is_complete @@ -957,8 +935,6 @@ class MyTypedBaseModel(BaseModel): MyTypedBaseModel(first='One', second='Two'), MyTypedBaseModel(first='One', second='Two'), MyTypedBaseModel(first='One', second='Two'), - MyTypedBaseModel(first='One', second='Two'), - MyTypedBaseModel(first='One', second='Two'), ] ) assert result.is_complete @@ -1026,8 +1002,6 @@ class MyTypedBaseModel(BaseModel): MyTypedBaseModel(first='One', second='Two'), MyTypedBaseModel(first='One', second='Two'), MyTypedBaseModel(first='One', second='Two'), - MyTypedBaseModel(first='One', second='Two'), - MyTypedBaseModel(first='One', second='Two'), ] ) assert result.is_complete @@ -1396,7 +1370,7 @@ async def get_location(loc_name: str) -> str: async with agent.run_stream('User prompt value') as result: assert not result.is_complete v = [c async for c in result.stream_output(debounce_by=None)] - assert v == snapshot([{'won': True}, {'won': True}]) + assert v == snapshot([{'won': True}]) assert result.is_complete assert result.timestamp() == datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc) assert result.usage().input_tokens == 4 @@ -1505,7 +1479,7 @@ async def get_location(loc_name: str) -> str: async with agent.run_stream('User prompt value') as result: assert not result.is_complete v = [c async for c in result.stream_output(debounce_by=None)] - assert v == snapshot(['final ', 'final response', 'final response', 'final response']) + assert v == snapshot(['final ', 'final response']) assert result.is_complete assert result.timestamp() == datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc) assert result.usage().input_tokens == 6 diff --git a/tests/models/test_openai.py b/tests/models/test_openai.py index 4886d00c47..c0c7b05669 100644 --- a/tests/models/test_openai.py +++ b/tests/models/test_openai.py @@ -528,14 +528,7 @@ async def test_stream_structured_finish_reason(allow_model_requests: None): async with agent.run_stream('') as 
result: assert not result.is_complete assert [dict(c) async for c in result.stream_output(debounce_by=None)] == snapshot( - [ - {'first': 'One'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - ] + [{'first': 'One'}, {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}] ) assert result.is_complete diff --git a/tests/test_streaming.py b/tests/test_streaming.py index 7b3123f7c4..0860319f27 100644 --- a/tests/test_streaming.py +++ b/tests/test_streaming.py @@ -205,7 +205,7 @@ async def test_streamed_text_stream(): # typehint to test (via static typing) that the stream type is correctly inferred chunks: list[str] = [c async for c in result.stream_output()] # two chunks with `stream()` due to not-final vs. final - assert chunks == snapshot(['The cat sat on the mat.', 'The cat sat on the mat.']) + assert chunks == snapshot(['The cat sat on the mat.']) assert result.is_complete async with agent.run_stream('Hello') as result: @@ -236,15 +236,7 @@ def upcase(text: str) -> str: async with agent.run_stream('Hello', output_type=TextOutput(upcase)) as result: assert [c async for c in result.stream_output(debounce_by=None)] == snapshot( - [ - 'THE ', - 'THE CAT ', - 'THE CAT SAT ', - 'THE CAT SAT ON ', - 'THE CAT SAT ON THE ', - 'THE CAT SAT ON THE MAT.', - 'THE CAT SAT ON THE MAT.', - ] + ['THE ', 'THE CAT ', 'THE CAT SAT ', 'THE CAT SAT ON ', 'THE CAT SAT ON THE ', 'THE CAT SAT ON THE MAT.'] ) async with agent.run_stream('Hello') as result: @@ -1136,13 +1128,13 @@ def output_validator_simple(data: str) -> str: assert messages == snapshot( [ + '', 'The ', 'The cat ', 'The bat sat ', 'The bat sat on ', 'The bat sat on the ', 'The bat sat on the mat.', - 'The bat sat on the mat.', ] ) @@ -1350,7 +1342,6 @@ class CityLocation(BaseModel): CityLocation(city='Mexico City'), CityLocation(city='Mexico City'), CityLocation(city='Mexico City', country='Mexico'), - CityLocation(city='Mexico City', country='Mexico'), ] ) assert result.is_complete @@ -1375,7 +1366,6 @@ class CityLocation(BaseModel): CityLocation(city='Mexico City'), CityLocation(city='Mexico City'), CityLocation(city='Mexico City', country='Mexico'), - CityLocation(city='Mexico City', country='Mexico'), ] ) @@ -1411,7 +1401,6 @@ async def text_stream(_messages: list[ModelMessage], agent_info: AgentInfo) -> A CityLocation(city='Mexico City'), CityLocation(city='Mexico City'), CityLocation(city='Mexico City', country='Mexico'), - CityLocation(city='Mexico City', country='Mexico'), ] ) @@ -1781,7 +1770,7 @@ async def event_stream_handler(ctx: RunContext[None], stream: AsyncIterable[Agen async with test_agent.run_stream('Hello', event_stream_handler=event_stream_handler) as result: assert [c async for c in result.stream_output(debounce_by=None)] == snapshot( - ['{"ret_a":', '{"ret_a":"a-apple"}', '{"ret_a":"a-apple"}'] + ['{"ret_a":', '{"ret_a":"a-apple"}'] ) assert events == snapshot( From ab246e847d8c78d17094a72658239c9aaa188175 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Wed, 15 Oct 2025 15:30:53 +0000 Subject: [PATCH 21/46] update snapshots --- tests/models/test_openai.py | 34 ++++------------------------------ 1 file changed, 4 insertions(+), 30 deletions(-) diff --git a/tests/models/test_openai.py b/tests/models/test_openai.py index c0c7b05669..9699cbf27c 100644 --- a/tests/models/test_openai.py +++ b/tests/models/test_openai.py @@ -498,14 +498,7 @@ async def 
test_stream_structured(allow_model_requests: None): async with agent.run_stream('') as result: assert not result.is_complete assert [dict(c) async for c in result.stream_output(debounce_by=None)] == snapshot( - [ - {}, - {'first': 'One'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - ] + [{}, {'first': 'One'}, {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}] ) assert result.is_complete assert result.usage() == snapshot(RunUsage(requests=1, input_tokens=20, output_tokens=10)) @@ -548,13 +541,7 @@ async def test_stream_native_output(allow_model_requests: None): async with agent.run_stream('') as result: assert not result.is_complete assert [dict(c) async for c in result.stream_output(debounce_by=None)] == snapshot( - [ - {'first': 'One'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - ] + [{'first': 'One'}, {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}] ) assert result.is_complete @@ -585,13 +572,7 @@ async def test_stream_tool_call_with_empty_text(allow_model_requests: None): async with agent.run_stream('') as result: assert not result.is_complete assert [c async for c in result.stream_output(debounce_by=None)] == snapshot( - [ - {'first': 'One'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - ] + [{'first': 'One'}, {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}] ) assert await result.get_output() == snapshot({'first': 'One', 'second': 'Two'}) @@ -617,14 +598,7 @@ async def test_stream_text_empty_think_tag_and_text_before_tool_call(allow_model async with agent.run_stream('') as result: assert not result.is_complete assert [c async for c in result.stream_output(debounce_by=None)] == snapshot( - [ - {}, - {'first': 'One'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - {'first': 'One', 'second': 'Two'}, - ] + [{}, {'first': 'One'}, {'first': 'One', 'second': 'Two'}, {'first': 'One', 'second': 'Two'}] ) assert await result.get_output() == snapshot({'first': 'One', 'second': 'Two'}) From f02834caa075344993bd2d57449615283321b490 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Thu, 16 Oct 2025 09:22:07 +0000 Subject: [PATCH 22/46] add coverage todos --- pydantic_ai_slim/pydantic_ai/ui/adapter.py | 2 +- .../pydantic_ai/ui/ag_ui/_event_stream.py | 6 +- .../pydantic_ai/ui/event_stream.py | 62 ++++++++++--------- .../pydantic_ai/ui/vercel_ai/_adapter.py | 26 ++++---- .../pydantic_ai/ui/vercel_ai/_event_stream.py | 34 +++++----- 5 files changed, 69 insertions(+), 61 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/adapter.py index 12b354811c..3ad905621a 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/adapter.py @@ -198,7 +198,7 @@ async def process_stream( await on_complete(result) else: await _utils.run_in_executor(on_complete, result) - except Exception as e: + except Exception as e: # TODO (DouweM): coverage async for event in event_stream.on_error(e): yield event diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index 21f8d05516..a29706da2f 100644 --- 
a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -112,7 +112,7 @@ async def on_error(self, error: Exception) -> AsyncIterator[BaseEvent]: async def handle_text_start(self, part: TextPart, follows_text: bool = False) -> AsyncIterator[BaseEvent]: """Handle a TextPart at start.""" if follows_text: - message_id = self.message_id + message_id = self.message_id # TODO (DouweM): coverage else: message_id = self.new_message_id() yield TextMessageStartEvent(message_id=message_id) @@ -127,7 +127,7 @@ async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[BaseEve async def handle_text_end(self, part: TextPart, followed_by_text: bool = False) -> AsyncIterator[BaseEvent]: """Handle a TextPart at end.""" - if not followed_by_text: + if not followed_by_text: # TODO (DouweM): coverage branch yield TextMessageEndEvent(message_id=self.message_id) async def handle_thinking_start( @@ -145,7 +145,7 @@ async def handle_thinking_start( async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[BaseEvent]: """Handle a ThinkingPartDelta.""" if not delta.content_delta: - return + return # pragma: no cover if not self._thinking_text: yield ThinkingTextMessageStartEvent(type=EventType.THINKING_TEXT_MESSAGE_START) diff --git a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py index 4652c3dfa1..940ea305b3 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py @@ -115,10 +115,10 @@ async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterat if turn != next_turn: if turn == 'request': async for e in self.after_request(): - yield e + yield e # TODO (DouweM): coverage elif turn == 'response': async for e in self.after_response(): - yield e + yield e # TODO (DouweM): coverage turn = next_turn @@ -127,7 +127,7 @@ async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterat yield e elif turn == 'response': async for e in self.before_response(): - yield e + yield e # TODO (DouweM): coverage async for e in self.handle_event(event): yield e @@ -136,10 +136,12 @@ async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterat yield e else: if turn == 'request': - async for e in self.after_request(): + async for ( + e + ) in self.after_request(): # TODO (DouweM): coverage. does this make sense here? should it be finally? yield e elif turn == 'response': - async for e in self.after_response(): + async for e in self.after_response(): # TODO (DouweM): coverage yield e async for e in self.after_stream(): @@ -158,7 +160,7 @@ async def handle_event(self, event: SourceEvent) -> AsyncIterator[EventT]: # no Protocol-specific events. 
""" async for e in self.before_event(event): - yield e + yield e # TODO (DouweM): coverage match event: case PartStartEvent(): @@ -173,10 +175,10 @@ async def handle_event(self, event: SourceEvent) -> AsyncIterator[EventT]: # no case FinalResultEvent(): self._final_result_event = event async for e in self.handle_final_result(event): - yield e + yield e # TODO (DouweM): coverage case FunctionToolCallEvent(): async for e in self.handle_function_tool_call(event): - yield e + yield e # TODO (DouweM): coverage case FunctionToolResultEvent(): async for e in self.handle_function_tool_result(event): yield e @@ -185,7 +187,7 @@ async def handle_event(self, event: SourceEvent) -> AsyncIterator[EventT]: # no self._final_result_event and (tool_call_id := self._final_result_event.tool_call_id) and (tool_name := self._final_result_event.tool_name) - ): + ): # TODO (DouweM): coverage self._final_result_event = None output_tool_result_event = FunctionToolResultEvent( result=ToolReturnPart( @@ -199,12 +201,12 @@ async def handle_event(self, event: SourceEvent) -> AsyncIterator[EventT]: # no self.result = event.result async for e in self.handle_run_result(event): - yield e + yield e # TODO (DouweM): coverage case _: pass async for e in self.after_event(event): - yield e + yield e # TODO (DouweM): coverage async def handle_part_start(self, event: PartStartEvent) -> AsyncIterator[EventT]: """Handle a PartStartEvent. @@ -230,7 +232,7 @@ async def handle_part_start(self, event: PartStartEvent) -> AsyncIterator[EventT case BuiltinToolReturnPart(): async for e in self.handle_builtin_tool_return(part): yield e - case FilePart(): + case FilePart(): # TODO (DouweM): coverage async for e in self.handle_file(part): yield e @@ -248,7 +250,7 @@ async def handle_part_delta(self, event: PartDeltaEvent) -> AsyncIterator[EventT case ThinkingPartDelta(): async for e in self.handle_thinking_delta(delta): yield e - case ToolCallPartDelta(): + case ToolCallPartDelta(): # TODO (DouweM): coverage branch async for e in self.handle_tool_call_delta(delta): yield e @@ -273,7 +275,7 @@ async def handle_part_end(self, event: PartEndEvent) -> AsyncIterator[EventT]: case BuiltinToolCallPart(): async for e in self.handle_builtin_tool_call_end(part): yield e - case BuiltinToolReturnPart() | FilePart(): + case BuiltinToolReturnPart() | FilePart(): # pragma: no cover # These don't have deltas, so they don't need to be ended. pass @@ -287,7 +289,7 @@ async def handle_text_start(self, part: TextPart, follows_text: bool = False) -> Yields: Protocol-specific events. """ - return + return # pragma: no cover yield # Make this an async generator async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[EventT]: @@ -299,7 +301,7 @@ async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[EventT] Yields: Protocol-specific events. """ - return + return # pragma: no cover yield # Make this an async generator async def handle_thinking_start(self, part: ThinkingPart, follows_thinking: bool = False) -> AsyncIterator[EventT]: @@ -312,7 +314,7 @@ async def handle_thinking_start(self, part: ThinkingPart, follows_thinking: bool Yields: Protocol-specific events. """ - return + return # pragma: no cover yield # Make this an async generator async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[EventT]: @@ -336,7 +338,7 @@ async def handle_tool_call_start(self, part: ToolCallPart) -> AsyncIterator[Even Yields: Protocol-specific events. 
""" - return + return # pragma: no cover yield # Make this an async generator async def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIterator[EventT]: @@ -348,7 +350,7 @@ async def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> Asy Yields: Protocol-specific events. """ - return + return # pragma: no cover yield # Make this an async generator async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[EventT]: @@ -360,7 +362,7 @@ async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterato Yields: Protocol-specific events. """ - return + return # pragma: no cover yield # Make this an async generator async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[EventT]: @@ -372,7 +374,7 @@ async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> Async Yields: Protocol-specific events. """ - return + return # pragma: no cover yield # Make this an async generator async def handle_file(self, part: FilePart) -> AsyncIterator[EventT]: @@ -384,29 +386,29 @@ async def handle_file(self, part: FilePart) -> AsyncIterator[EventT]: Yields: Protocol-specific events. """ - return + return # pragma: no cover yield # Make this an async generator async def handle_text_end(self, part: TextPart, followed_by_text: bool = False) -> AsyncIterator[EventT]: """Handle the end of a TextPart.""" - return + return # pragma: no cover yield # Make this an async generator async def handle_thinking_end( self, part: ThinkingPart, followed_by_thinking: bool = False ) -> AsyncIterator[EventT]: """Handle the end of a ThinkingPart.""" - return + return # pragma: no cover yield # Make this an async generator async def handle_tool_call_end(self, part: ToolCallPart) -> AsyncIterator[EventT]: """Handle the end of a ToolCallPart.""" - return + return # pragma: no cover yield # Make this an async generator async def handle_builtin_tool_call_end(self, part: BuiltinToolCallPart) -> AsyncIterator[EventT]: """Handle the end of a BuiltinToolCallPart.""" - return + return # pragma: no cover yield # Make this an async generator async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[EventT]: @@ -442,7 +444,7 @@ async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> A Yields: Protocol-specific events. """ - return + return # pragma: no cover yield # Make this an async generator async def handle_run_result(self, event: AgentRunResultEvent) -> AsyncIterator[EventT]: @@ -506,7 +508,7 @@ async def before_stream(self) -> AsyncIterator[EventT]: Yields: Protocol-specific events to emit before streaming. """ - return + return # pragma: no cover yield # Make this an async generator async def after_stream(self) -> AsyncIterator[EventT]: @@ -518,7 +520,7 @@ async def after_stream(self) -> AsyncIterator[EventT]: Yields: Protocol-specific events to emit after streaming. """ - return + return # pragma: no cover yield # Make this an async generator async def on_error(self, error: Exception) -> AsyncIterator[EventT]: @@ -530,5 +532,5 @@ async def on_error(self, error: Exception) -> AsyncIterator[EventT]: Yields: Protocol-specific error events. 
""" - return + return # pragma: no cover yield # Make this an async generator diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py index 0d5d66462d..c654571aae 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py @@ -69,7 +69,7 @@ class VercelAIAdapter(BaseAdapter[RequestData, UIMessage, BaseChunk, AgentDepsT] @classmethod async def validate_request(cls, request: Request) -> RequestData: """Validate a Vercel AI request.""" - return request_data_ta.validate_json(await request.body()) + return request_data_ta.validate_json(await request.body()) # TODO (DouweM): coverage @property def event_stream(self) -> BaseEventStream[RequestData, BaseChunk, AgentDepsT]: @@ -78,7 +78,7 @@ def event_stream(self) -> BaseEventStream[RequestData, BaseChunk, AgentDepsT]: @property def response_headers(self) -> Mapping[str, str] | None: """Get the response headers for the adapter.""" - return VERCEL_AI_DSP_HEADERS + return VERCEL_AI_DSP_HEADERS # TODO (DouweM): coverage @cached_property def messages(self) -> list[ModelMessage]: @@ -104,7 +104,7 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: # for msg in messages: if msg.role in ('system', 'user'): - if request_parts is None: + if request_parts is None: # TODO (DouweM): coverage branch request_parts = [] result.append(ModelRequest(parts=request_parts)) response_parts = None @@ -112,10 +112,10 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: # for part in msg.parts: if isinstance(part, TextUIPart): if msg.role == 'system': - request_parts.append(SystemPromptPart(content=part.text)) + request_parts.append(SystemPromptPart(content=part.text)) # TODO (DouweM): coverage else: request_parts.append(UserPromptPart(content=part.text)) - elif isinstance(part, FileUIPart): + elif isinstance(part, FileUIPart): # TODO (DouweM): coverage try: file = BinaryContent.from_data_uri(part.url) except ValueError: @@ -131,7 +131,7 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: # file = DocumentUrl(url=part.url, media_type=part.media_type) request_parts.append(UserPromptPart(content=[file])) - elif msg.role == 'assistant': + elif msg.role == 'assistant': # TODO (DouweM): coverage branch for part in msg.parts: if response_parts is None: response_parts = [] @@ -141,8 +141,8 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: # if isinstance(part, TextUIPart): response_parts.append(TextPart(content=part.text)) elif isinstance(part, ReasoningUIPart): - response_parts.append(ThinkingPart(content=part.text)) - elif isinstance(part, FileUIPart): + response_parts.append(ThinkingPart(content=part.text)) # TODO (DouweM): coverage + elif isinstance(part, FileUIPart): # TODO (DouweM): coverage try: file = BinaryContent.from_data_uri(part.url) except ValueError as e: @@ -154,8 +154,8 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: # elif isinstance(part, DataUIPart): # Not currently supported pass - elif isinstance(part, ToolUIPart | DynamicToolUIPart): - if isinstance(part, DynamicToolUIPart): + elif isinstance(part, ToolUIPart | DynamicToolUIPart): # TODO (DouweM): coverage branch + if isinstance(part, DynamicToolUIPart): # TODO (DouweM): coverage tool_name = part.tool_name builtin_tool = False else: @@ -165,7 +165,7 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: # 
tool_call_id = part.tool_call_id args = part.input - if builtin_tool: + if builtin_tool: # TODO (DouweM): coverage call_part = BuiltinToolCallPart(tool_name=tool_name, tool_call_id=tool_call_id, args=args) response_parts.append(call_part) @@ -196,12 +196,12 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: # result_part = ToolReturnPart( tool_name=tool_name, tool_call_id=tool_call_id, content=part.output ) - elif part.state == 'output-error': + elif part.state == 'output-error': # TODO (DouweM): coverage result_part = RetryPromptPart( tool_name=tool_name, tool_call_id=tool_call_id, content=part.error_text ) - if result_part: + if result_part: # TODO (DouweM): coverage branch request_parts = [result_part] result.append(ModelRequest(parts=request_parts)) response_parts = None diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py index 3c57e69e2c..cbdff933f8 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py @@ -78,13 +78,13 @@ async def before_request(self) -> AsyncIterator[BaseChunk]: async def after_response(self) -> AsyncIterator[BaseChunk]: """Yield events after the response is processed.""" - if self._step_started: + if self._step_started: # TODO (DouweM): coverage yield FinishStepChunk() self._step_started = False async def after_stream(self) -> AsyncIterator[BaseChunk]: """Yield events after agent streaming completes.""" - if self._step_started: + if self._step_started: # TODO (DouweM): coverage branch yield FinishStepChunk() yield FinishChunk() @@ -92,27 +92,27 @@ async def after_stream(self) -> AsyncIterator[BaseChunk]: async def on_error(self, error: Exception) -> AsyncIterator[BaseChunk]: """Handle errors during streaming.""" - yield ErrorChunk(error_text=str(error)) + yield ErrorChunk(error_text=str(error)) # TODO (DouweM): coverage async def handle_text_start(self, part: TextPart, follows_text: bool = False) -> AsyncIterator[BaseChunk]: """Handle a TextPart at start.""" if follows_text: - message_id = self.message_id + message_id = self.message_id # TODO (DouweM): coverage else: message_id = self.new_message_id() yield TextStartChunk(id=message_id) - if part.content: + if part.content: # TODO (DouweM): coverage branch yield TextDeltaChunk(id=message_id, delta=part.content) async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[BaseChunk]: """Handle a TextPartDelta.""" - if delta.content_delta: + if delta.content_delta: # TODO (DouweM): coverage branch yield TextDeltaChunk(id=self.message_id, delta=delta.content_delta) async def handle_text_end(self, part: TextPart, followed_by_text: bool = False) -> AsyncIterator[BaseChunk]: """Handle a TextPart at end.""" - if not followed_by_text: + if not followed_by_text: # TODO (DouweM): coverage branch yield TextEndChunk(id=self.message_id) async def handle_thinking_start( @@ -122,11 +122,11 @@ async def handle_thinking_start( message_id = self.new_message_id() yield ReasoningStartChunk(id=message_id) if part.content: - yield ReasoningDeltaChunk(id=message_id, delta=part.content) + yield ReasoningDeltaChunk(id=message_id, delta=part.content) # TODO (DouweM): coverage async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[BaseChunk]: """Handle a ThinkingPartDelta.""" - if delta.content_delta: + if delta.content_delta: # TODO (DouweM): coverage yield ReasoningDeltaChunk(id=self.message_id, 
delta=delta.content_delta) async def handle_thinking_end( @@ -137,7 +137,7 @@ async def handle_thinking_end( def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseChunk]: """Handle a ToolCallPart or BuiltinToolCallPart at start.""" - return self._handle_tool_call_start(part) + return self._handle_tool_call_start(part) # TODO (DouweM): coverage def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseChunk]: """Handle a BuiltinToolCallEvent, emitting tool input events.""" @@ -157,7 +157,9 @@ async def _handle_tool_call_start( provider_executed=provider_executed, ) if part.args: - yield ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=part.args_as_json_str()) + yield ToolInputDeltaChunk( + tool_call_id=tool_call_id, input_text_delta=part.args_as_json_str() + ) # TODO (DouweM): coverage async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[BaseChunk]: """Handle a ToolCallPartDelta.""" @@ -170,7 +172,9 @@ async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterato async def handle_tool_call_end(self, part: ToolCallPart) -> AsyncIterator[BaseChunk]: """Handle a ToolCallPart at end.""" - yield ToolInputAvailableChunk(tool_call_id=part.tool_call_id, tool_name=part.tool_name, input=part.args) + yield ToolInputAvailableChunk( + tool_call_id=part.tool_call_id, tool_name=part.tool_name, input=part.args + ) # TODO (DouweM): coverage async def handle_builtin_tool_call_end(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseChunk]: """Handle a BuiltinToolCallPart at end.""" @@ -193,9 +197,11 @@ async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> Async async def handle_file(self, part: FilePart) -> AsyncIterator[BaseChunk]: """Handle a FilePart.""" file = part.content - yield FileChunk(url=file.data_uri, media_type=file.media_type) + yield FileChunk(url=file.data_uri, media_type=file.media_type) # TODO (DouweM): coverage - async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[BaseChunk]: + async def handle_function_tool_result( + self, event: FunctionToolResultEvent + ) -> AsyncIterator[BaseChunk]: # TODO (DouweM): coverage """Handle a FunctionToolResultEvent, emitting tool result events.""" result = event.result if isinstance(result, RetryPromptPart): From c9dec7157148ff5f5b425d0682f1cbac145e9d36 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Thu, 16 Oct 2025 09:26:10 +0000 Subject: [PATCH 23/46] Add warning for scenario in https://github.com/pydantic/pydantic-ai/issues/3108 --- pydantic_ai_slim/pydantic_ai/models/__init__.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/pydantic_ai_slim/pydantic_ai/models/__init__.py b/pydantic_ai_slim/pydantic_ai/models/__init__.py index 6d07c58b75..7a52747773 100644 --- a/pydantic_ai_slim/pydantic_ai/models/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/models/__init__.py @@ -38,6 +38,7 @@ ModelResponse, ModelResponsePart, ModelResponseStreamEvent, + PartDeltaEvent, PartEndEvent, PartStartEvent, TextPart, @@ -517,7 +518,7 @@ class StreamedResponse(ABC): _event_iterator: AsyncIterator[ModelResponseStreamEvent] | None = field(default=None, init=False) _usage: RequestUsage = field(default_factory=RequestUsage, init=False) - def __aiter__(self) -> AsyncIterator[ModelResponseStreamEvent]: + def __aiter__(self) -> AsyncIterator[ModelResponseStreamEvent]: # noqa: C901 """Stream the response as an async iterable of 
[`ModelResponseStreamEvent`][pydantic_ai.messages.ModelResponseStreamEvent]s. This proxies the `_event_iterator()` and emits all events, while also checking for matches @@ -574,6 +575,12 @@ def part_end_event(next_part: ModelResponsePart | None = None) -> PartEndEvent | event.previous_part_kind = last_start_event.part.part_kind last_start_event = event + if isinstance(event, PartDeltaEvent) and last_start_event and last_start_event.index != event.index: + warnings.warn( + f'Part index mismatch. Delta: {event!r}. Last start event: {last_start_event!r}', + UserWarning, + ) + yield event end_event = part_end_event() From 3ffd5ed8c6a5c5f06143fbdcf889b1cfe9a2aa4b Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Thu, 16 Oct 2025 10:42:41 +0000 Subject: [PATCH 24/46] Start test UI Adapter and EventStream --- docs/install.md | 1 + .../pydantic_ai/agent/abstract.py | 1 + pydantic_ai_slim/pydantic_ai/ui/adapter.py | 31 +- .../pydantic_ai/ui/event_stream.py | 86 ++--- pydantic_ai_slim/pyproject.toml | 2 + pyproject.toml | 2 +- tests/test_ag_ui.py | 1 - tests/test_ui.py | 315 ++++++++++++++++++ uv.lock | 10 +- 9 files changed, 384 insertions(+), 65 deletions(-) create mode 100644 tests/test_ui.py diff --git a/docs/install.md b/docs/install.md index 600f81bd68..8df6d49d17 100644 --- a/docs/install.md +++ b/docs/install.md @@ -56,6 +56,7 @@ pip/uv-add "pydantic-ai-slim[openai]" * `cli` - installs `rich` [PyPI ↗](https://pypi.org/project/rich){:target="_blank"}, `prompt-toolkit` [PyPI ↗](https://pypi.org/project/prompt-toolkit){:target="_blank"}, and `argcomplete` [PyPI ↗](https://pypi.org/project/argcomplete){:target="_blank"} * `mcp` - installs `mcp` [PyPI ↗](https://pypi.org/project/mcp){:target="_blank"} * `a2a` - installs `fasta2a` [PyPI ↗](https://pypi.org/project/fasta2a){:target="_blank"} +* `ui` - installs `starlette` [PyPI ↗](https://pypi.org/project/starlette){:target="_blank"} * `ag-ui` - installs `ag-ui-protocol` [PyPI ↗](https://pypi.org/project/ag-ui-protocol){:target="_blank"} and `starlette` [PyPI ↗](https://pypi.org/project/starlette){:target="_blank"} * `dbos` - installs [`dbos`](durable_execution/dbos.md) [PyPI ↗](https://pypi.org/project/dbos){:target="_blank"} * `prefect` - installs [`prefect`](durable_execution/prefect.md) [PyPI ↗](https://pypi.org/project/prefect){:target="_blank"} diff --git a/pydantic_ai_slim/pydantic_ai/agent/abstract.py b/pydantic_ai_slim/pydantic_ai/agent/abstract.py index 454d7580ed..057b4a23a1 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/abstract.py +++ b/pydantic_ai_slim/pydantic_ai/agent/abstract.py @@ -743,6 +743,7 @@ async def run_agent() -> AgentRunResult[Any]: yield message result = await task + # TODO (DouweM): Consider adding this to every event stream, if we're adding new events anyway yield AgentRunResultEvent(result) @overload diff --git a/pydantic_ai_slim/pydantic_ai/ui/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/adapter.py index 3ad905621a..0b0286a998 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/adapter.py @@ -12,6 +12,7 @@ from functools import cached_property from http import HTTPStatus from typing import ( + TYPE_CHECKING, Any, ClassVar, Generic, @@ -22,7 +23,6 @@ ) from pydantic import BaseModel, ValidationError -from starlette.responses import StreamingResponse from .. 
import DeferredToolRequests, DeferredToolResults, _utils from ..agent import AbstractAgent, AgentDepsT, AgentRunResult @@ -36,14 +36,9 @@ from ..usage import RunUsage, UsageLimits from .event_stream import BaseEventStream, SourceEvent -try: +if TYPE_CHECKING: from starlette.requests import Request from starlette.responses import Response -except ImportError as e: # pragma: no cover - raise ImportError( - 'Please install the `starlette` package to use `Agent.to_ag_ui()` method, ' - 'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`' - ) from e __all__ = [ @@ -126,19 +121,22 @@ class BaseAdapter(ABC, Generic[RunRequestT, MessageT, EventT, AgentDepsT]): """The protocol-specific request object.""" @classmethod + @abstractmethod async def validate_request(cls, request: Request) -> RunRequestT: """Validate the request and return the validated request.""" - raise NotImplementedError('validate_request is not implemented') + raise NotImplementedError @classmethod @abstractmethod def load_messages(cls, messages: Sequence[MessageT]) -> list[ModelMessage]: """Load messages from the request and return the loaded messages.""" + raise NotImplementedError @property @abstractmethod def event_stream(self) -> BaseEventStream[RunRequestT, EventT, AgentDepsT]: """Create an event stream for the adapter.""" + raise NotImplementedError @cached_property @abstractmethod @@ -151,6 +149,7 @@ def messages(self) -> list[ModelMessage]: Returns: List of Pydantic AI ModelMessage objects. """ + raise NotImplementedError @cached_property def toolset(self) -> AbstractToolset[AgentDepsT] | None: @@ -285,6 +284,14 @@ async def stream_response(self, stream: AsyncIterator[EventT], accept: str | Non stream: The stream of events to encode. accept: The accept header value for encoding format. """ + try: + from starlette.responses import StreamingResponse + except ImportError as e: # pragma: no cover + raise ImportError( + 'Please install the `starlette` package to use `BaseAdapter.stream_response()` method, ' + 'you can use the `ui` optional group — `pip install "pydantic-ai-slim[ui]"`' + ) from e + return StreamingResponse( self.encode_stream( stream, @@ -335,6 +342,14 @@ async def dispatch_request( Returns: A streaming Starlette response with AG-UI protocol events. 
""" + try: + from starlette.responses import Response + except ImportError as e: # pragma: no cover + raise ImportError( + 'Please install the `starlette` package to use `BaseAdapter.dispatch_request()` method, ' + 'you can use the `ui` optional group — `pip install "pydantic-ai-slim[ui]"`' + ) from e + try: request_data = await cls.validate_request(request) except ValidationError as e: # pragma: no cover diff --git a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py index 940ea305b3..84a1cd6f8e 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py @@ -14,7 +14,9 @@ from ..messages import ( AgentStreamEvent, + BuiltinToolCallEvent, # pyright: ignore[reportDeprecated] BuiltinToolCallPart, + BuiltinToolResultEvent, # pyright: ignore[reportDeprecated] BuiltinToolReturnPart, FilePart, FinalResultEvent, @@ -129,6 +131,10 @@ async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterat async for e in self.before_response(): yield e # TODO (DouweM): coverage + if isinstance(event, BuiltinToolCallEvent | BuiltinToolResultEvent): # pyright: ignore[reportDeprecated] + # The events were deprecated before this feature was introduced + continue + async for e in self.handle_event(event): yield e except Exception as e: @@ -159,9 +165,6 @@ async def handle_event(self, event: SourceEvent) -> AsyncIterator[EventT]: # no Yields: Protocol-specific events. """ - async for e in self.before_event(event): - yield e # TODO (DouweM): coverage - match event: case PartStartEvent(): async for e in self.handle_part_start(event): @@ -205,9 +208,6 @@ async def handle_event(self, event: SourceEvent) -> AsyncIterator[EventT]: # no case _: pass - async for e in self.after_event(event): - yield e # TODO (DouweM): coverage - async def handle_part_start(self, event: PartStartEvent) -> AsyncIterator[EventT]: """Handle a PartStartEvent. @@ -304,6 +304,11 @@ async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[EventT] return # pragma: no cover yield # Make this an async generator + async def handle_text_end(self, part: TextPart, followed_by_text: bool = False) -> AsyncIterator[EventT]: + """Handle the end of a TextPart.""" + return # pragma: no cover + yield # Make this an async generator + async def handle_thinking_start(self, part: ThinkingPart, follows_thinking: bool = False) -> AsyncIterator[EventT]: """Handle a ThinkingPart at start. @@ -329,6 +334,13 @@ async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator return yield # Make this an async generator + async def handle_thinking_end( + self, part: ThinkingPart, followed_by_thinking: bool = False + ) -> AsyncIterator[EventT]: + """Handle the end of a ThinkingPart.""" + return # pragma: no cover + yield # Make this an async generator + async def handle_tool_call_start(self, part: ToolCallPart) -> AsyncIterator[EventT]: """Handle a ToolCallPart at start. @@ -341,11 +353,11 @@ async def handle_tool_call_start(self, part: ToolCallPart) -> AsyncIterator[Even return # pragma: no cover yield # Make this an async generator - async def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIterator[EventT]: - """Handle a BuiltinToolCallPart at start. + async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[EventT]: + """Handle a ToolCallPartDelta. Args: - part: The tool call part. + delta: The ToolCallPartDelta. Yields: Protocol-specific events. 
@@ -353,11 +365,16 @@ async def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> Asy return # pragma: no cover yield # Make this an async generator - async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[EventT]: - """Handle a ToolCallPartDelta. + async def handle_tool_call_end(self, part: ToolCallPart) -> AsyncIterator[EventT]: + """Handle the end of a ToolCallPart.""" + return # pragma: no cover + yield # Make this an async generator + + async def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIterator[EventT]: + """Handle a BuiltinToolCallPart at start. Args: - delta: The ToolCallPartDelta. + part: The tool call part. Yields: Protocol-specific events. @@ -365,6 +382,11 @@ async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterato return # pragma: no cover yield # Make this an async generator + async def handle_builtin_tool_call_end(self, part: BuiltinToolCallPart) -> AsyncIterator[EventT]: + """Handle the end of a BuiltinToolCallPart.""" + return # pragma: no cover + yield # Make this an async generator + async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[EventT]: """Handle a BuiltinToolReturnPart. @@ -389,28 +411,6 @@ async def handle_file(self, part: FilePart) -> AsyncIterator[EventT]: return # pragma: no cover yield # Make this an async generator - async def handle_text_end(self, part: TextPart, followed_by_text: bool = False) -> AsyncIterator[EventT]: - """Handle the end of a TextPart.""" - return # pragma: no cover - yield # Make this an async generator - - async def handle_thinking_end( - self, part: ThinkingPart, followed_by_thinking: bool = False - ) -> AsyncIterator[EventT]: - """Handle the end of a ThinkingPart.""" - return # pragma: no cover - yield # Make this an async generator - - async def handle_tool_call_end(self, part: ToolCallPart) -> AsyncIterator[EventT]: - """Handle the end of a ToolCallPart.""" - return # pragma: no cover - yield # Make this an async generator - - async def handle_builtin_tool_call_end(self, part: BuiltinToolCallPart) -> AsyncIterator[EventT]: - """Handle the end of a BuiltinToolCallPart.""" - return # pragma: no cover - yield # Make this an async generator - async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[EventT]: """Handle a FinalResultEvent. @@ -461,24 +461,6 @@ async def handle_run_result(self, event: AgentRunResultEvent) -> AsyncIterator[E # Lifecycle hooks (optional overrides) - async def before_event(self, event: SourceEvent) -> AsyncIterator[EventT]: - """Handle an event before it is processed. - - Args: - event: The event to handle. - """ - return - yield # Make this an async generator - - async def after_event(self, event: SourceEvent) -> AsyncIterator[EventT]: - """Handle an event after it is processed. - - Args: - event: The event to handle. 
- """ - return - yield # Make this an async generator - async def before_request(self) -> AsyncIterator[EventT]: """Handle a request before it is processed.""" return diff --git a/pydantic_ai_slim/pyproject.toml b/pydantic_ai_slim/pyproject.toml index b6c5bf5682..1724ca79c4 100644 --- a/pydantic_ai_slim/pyproject.toml +++ b/pydantic_ai_slim/pyproject.toml @@ -90,6 +90,8 @@ cli = [ mcp = ["mcp>=1.12.3"] # Evals evals = ["pydantic-evals=={{ version }}"] +# UI +ui = ["starlette>=0.45.3"] # A2A a2a = ["fasta2a>=0.4.1"] # AG-UI diff --git a/pyproject.toml b/pyproject.toml index b09a172045..c12da1d092 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,7 +46,7 @@ requires-python = ">=3.10" [tool.hatch.metadata.hooks.uv-dynamic-versioning] dependencies = [ - "pydantic-ai-slim[openai,vertexai,google,groq,anthropic,mistral,cohere,bedrock,huggingface,cli,mcp,evals,ag-ui,retries,temporal,logfire]=={{ version }}", + "pydantic-ai-slim[openai,vertexai,google,groq,anthropic,mistral,cohere,bedrock,huggingface,cli,mcp,evals,ag-ui,retries,temporal,logfire,ui]=={{ version }}", ] [tool.hatch.metadata.hooks.uv-dynamic-versioning.optional-dependencies] diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index e024944125..2f43a072d9 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -1525,7 +1525,6 @@ async def stream_function( } yield { 0: DeltaToolCall( - name=WebSearchTool.kind, json_args='"Hello world"}', tool_call_id='search_1', ) diff --git a/tests/test_ui.py b/tests/test_ui.py new file mode 100644 index 0000000000..10a59f6113 --- /dev/null +++ b/tests/test_ui.py @@ -0,0 +1,315 @@ +from __future__ import annotations + +from collections.abc import AsyncIterator, Sequence +from dataclasses import dataclass, field +from functools import cached_property +from typing import Any + +import pytest +from inline_snapshot import snapshot +from pydantic import BaseModel + +from pydantic_ai import Agent +from pydantic_ai.builtin_tools import WebSearchTool +from pydantic_ai.messages import ( + BuiltinToolCallPart, + BuiltinToolReturnPart, + FilePart, + FinalResultEvent, + FunctionToolCallEvent, + FunctionToolResultEvent, + ModelMessage, + ModelRequest, + PartDeltaEvent, + PartEndEvent, + PartStartEvent, + TextPart, + TextPartDelta, + ThinkingPart, + ThinkingPartDelta, + ToolCallPart, + ToolCallPartDelta, +) +from pydantic_ai.models.function import AgentInfo, BuiltinToolCallsReturns, DeltaToolCall, DeltaToolCalls, FunctionModel +from pydantic_ai.run import AgentRunResultEvent +from pydantic_ai.tools import ToolDefinition +from pydantic_ai.toolsets import AbstractToolset, ExternalToolset +from pydantic_ai.ui.adapter import BaseAdapter +from pydantic_ai.ui.event_stream import BaseEventStream, SourceEvent + +from .conftest import try_import + +with try_import() as starlette_import_successful: + from starlette.requests import Request + +pytestmark = [ + pytest.mark.anyio, + pytest.mark.vcr, + pytest.mark.filterwarnings( + 'ignore:`BuiltinToolCallEvent` is deprecated, look for `PartStartEvent` and `PartDeltaEvent` with `BuiltinToolCallPart` instead.:DeprecationWarning' + ), + pytest.mark.filterwarnings( + 'ignore:`BuiltinToolResultEvent` is deprecated, look for `PartStartEvent` and `PartDeltaEvent` with `BuiltinToolReturnPart` instead.:DeprecationWarning' + ), +] + + +class UIRequest(BaseModel): + messages: list[ModelMessage] = field(default_factory=list) + tool_defs: list[ToolDefinition] = field(default_factory=list) + state: dict[str, Any] = field(default_factory=dict) + + +class UIState(BaseModel): + 
country: str | None = None + + +@dataclass +class UIDeps: + state: UIState + + +class UIAdapter(BaseAdapter[UIRequest, ModelMessage, str, UIDeps]): + @classmethod + async def validate_request(cls, request: Request) -> UIRequest: + return UIRequest.model_validate(await request.json()) + + @classmethod + def load_messages(cls, messages: Sequence[ModelMessage]) -> list[ModelMessage]: + return list(messages) + + @property + def event_stream(self) -> UIEventStream: + return UIEventStream(self.request) + + @cached_property + def messages(self) -> list[ModelMessage]: + return self.request.messages + + @cached_property + def state(self) -> dict[str, Any] | None: + return self.request.state + + @cached_property + def toolset(self) -> AbstractToolset[UIDeps] | None: + return ExternalToolset(self.request.tool_defs) if self.request.tool_defs else None + + +@dataclass(kw_only=True) +class UIEventStream(BaseEventStream[UIRequest, str, UIDeps]): + def encode_event(self, event: str, accept: str | None = None) -> str: + return event + + async def handle_event(self, event: SourceEvent) -> AsyncIterator[str]: + # yield f'[{event.event_kind}]' + async for e in super().handle_event(event): + yield e + + async def handle_part_start(self, event: PartStartEvent) -> AsyncIterator[str]: + # yield f'[{event.part.part_kind}]' + async for e in super().handle_part_start(event): + yield e + + async def handle_part_delta(self, event: PartDeltaEvent) -> AsyncIterator[str]: + # yield f'[>{event.delta.part_delta_kind}]' + async for e in super().handle_part_delta(event): + yield e + + async def handle_part_end(self, event: PartEndEvent) -> AsyncIterator[str]: + # yield f'[/{event.part.part_kind}]' + async for e in super().handle_part_end(event): + yield e + + async def handle_text_start(self, part: TextPart, follows_text: bool = False) -> AsyncIterator[str]: + yield f'{part.content}' + async for e in super().handle_text_start(part, follows_text): + yield e + + async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[str]: + yield delta.content_delta + async for e in super().handle_text_delta(delta): + yield e + + async def handle_text_end(self, part: TextPart, followed_by_text: bool = False) -> AsyncIterator[str]: + yield '' + async for e in super().handle_text_end(part, followed_by_text): + yield e + + async def handle_thinking_start(self, part: ThinkingPart, follows_thinking: bool = False) -> AsyncIterator[str]: + yield f'{part.content}' + async for e in super().handle_thinking_start(part, follows_thinking): + yield e + + async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[str]: + yield str(delta.content_delta) + async for e in super().handle_thinking_delta(delta): + yield e + + async def handle_thinking_end(self, part: ThinkingPart, followed_by_thinking: bool = False) -> AsyncIterator[str]: + yield '' + async for e in super().handle_thinking_end(part, followed_by_thinking): + yield e + + async def handle_tool_call_start(self, part: ToolCallPart) -> AsyncIterator[str]: + yield f'{part.args}' + async for e in super().handle_tool_call_start(part): + yield e + + async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[str]: + yield str(delta.args_delta) + async for e in super().handle_tool_call_delta(delta): + yield e + + async def handle_tool_call_end(self, part: ToolCallPart) -> AsyncIterator[str]: + yield f'' + async for e in super().handle_tool_call_end(part): + yield e + + async def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> 
AsyncIterator[str]: + yield f'{part.args}' + async for e in super().handle_builtin_tool_call_start(part): + yield e + + async def handle_builtin_tool_call_end(self, part: BuiltinToolCallPart) -> AsyncIterator[str]: + yield f'' + async for e in super().handle_builtin_tool_call_end(part): + yield e + + async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[str]: + yield f'{part.content}' + async for e in super().handle_builtin_tool_return(part): + yield e + + async def handle_file(self, part: FilePart) -> AsyncIterator[str]: + yield f'' + async for e in super().handle_file(part): + yield e + + async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[str]: + yield '' + async for e in super().handle_final_result(event): + yield e + + async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[str]: + yield f'{event.part.args}' + async for e in super().handle_function_tool_call(event): + yield e + + async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[str]: + yield f'{event.result.content}' + async for e in super().handle_function_tool_result(event): + yield e + + async def handle_run_result(self, event: AgentRunResultEvent) -> AsyncIterator[str]: + yield f'{event.result.output}' + async for e in super().handle_run_result(event): + yield e + + async def before_request(self) -> AsyncIterator[str]: + yield '' + async for e in super().before_request(): + yield e + + async def after_request(self) -> AsyncIterator[str]: + yield '' + async for e in super().after_request(): + yield e + + async def before_response(self) -> AsyncIterator[str]: + yield '' + async for e in super().before_response(): + yield e + + async def after_response(self) -> AsyncIterator[str]: + yield '' + async for e in super().after_response(): + yield e + + async def before_stream(self) -> AsyncIterator[str]: + yield '' + async for e in super().before_stream(): + yield e + + async def after_stream(self) -> AsyncIterator[str]: + yield '' + async for e in super().after_stream(): + yield e + + async def on_error(self, error: Exception) -> AsyncIterator[str]: + yield f'on_error({error.__class__.__name__}({str(error)!r}))' + async for e in super().on_error(error): + yield e + + +async def test_event_stream_text(): + # text + # - back to back + # thinking + # - back to back + # tool call + # builtin tool call + # file + # error + # output tool + pass + + +async def test_event_stream_builtin_tool_call(): + async def stream_function( + messages: list[ModelMessage], agent_info: AgentInfo + ) -> AsyncIterator[BuiltinToolCallsReturns | DeltaToolCalls | str]: + yield { + 0: BuiltinToolCallPart( + tool_name=WebSearchTool.kind, + args='{"query":', + tool_call_id='search_1', + provider_name='function', + ) + } + yield { + 0: DeltaToolCall( + json_args='"Hello world"}', + tool_call_id='search_1', + ) + } + yield { + 1: BuiltinToolReturnPart( + tool_name=WebSearchTool.kind, + content={ + 'results': [ + { + 'title': '"Hello, World!" program', + 'url': 'https://en.wikipedia.org/wiki/%22Hello,_World!%22_program', + } + ] + }, + tool_call_id='search_1', + provider_name='function', + ) + } + yield 'A "Hello, World!" program is usually a simple computer program that emits (or displays) to the screen (often the console) a message similar to "Hello, World!". 
' + + agent = Agent(model=FunctionModel(stream_function=stream_function), deps_type=UIDeps) + + request = UIRequest(messages=[ModelRequest.user_text_prompt('Tell me about Hello World')]) + deps = UIDeps(state=UIState()) + + adapter = UIAdapter(agent, request) + events = [event async for event in adapter.run_stream(deps=deps)] + + assert events == snapshot( + [ + '', + '', + '{"query":', + '"Hello world"}', + "", + "{'results': [{'title': '\"Hello, World!\" program', 'url': 'https://en.wikipedia.org/wiki/%22Hello,_World!%22_program'}]}", + 'A "Hello, World!" program is usually a simple computer program that emits (or displays) to the screen (often the console) a message similar to "Hello, World!". ', + '', + '', + '', + 'A "Hello, World!" program is usually a simple computer program that emits (or displays) to the screen (often the console) a message similar to "Hello, World!". ', + '', + ] + ) diff --git a/uv.lock b/uv.lock index 249754eb89..26680d97ee 100644 --- a/uv.lock +++ b/uv.lock @@ -3651,7 +3651,7 @@ wheels = [ name = "pydantic-ai" source = { editable = "." } dependencies = [ - { name = "pydantic-ai-slim", extra = ["ag-ui", "anthropic", "bedrock", "cli", "cohere", "evals", "google", "groq", "huggingface", "logfire", "mcp", "mistral", "openai", "retries", "temporal", "vertexai"] }, + { name = "pydantic-ai-slim", extra = ["ag-ui", "anthropic", "bedrock", "cli", "cohere", "evals", "google", "groq", "huggingface", "logfire", "mcp", "mistral", "openai", "retries", "temporal", "ui", "vertexai"] }, ] [package.optional-dependencies] @@ -3715,7 +3715,7 @@ lint = [ requires-dist = [ { name = "fasta2a", marker = "extra == 'a2a'", specifier = ">=0.4.1" }, { name = "pydantic-ai-examples", marker = "extra == 'examples'", editable = "examples" }, - { name = "pydantic-ai-slim", extras = ["ag-ui", "anthropic", "bedrock", "cli", "cohere", "evals", "google", "groq", "huggingface", "logfire", "mcp", "mistral", "openai", "retries", "temporal", "vertexai"], editable = "pydantic_ai_slim" }, + { name = "pydantic-ai-slim", extras = ["ag-ui", "anthropic", "bedrock", "cli", "cohere", "evals", "google", "groq", "huggingface", "logfire", "mcp", "mistral", "openai", "retries", "temporal", "ui", "vertexai"], editable = "pydantic_ai_slim" }, { name = "pydantic-ai-slim", extras = ["dbos"], marker = "extra == 'dbos'", editable = "pydantic_ai_slim" }, { name = "pydantic-ai-slim", extras = ["prefect"], marker = "extra == 'prefect'", editable = "pydantic_ai_slim" }, ] @@ -3883,6 +3883,9 @@ tavily = [ temporal = [ { name = "temporalio" }, ] +ui = [ + { name = "starlette" }, +] vertexai = [ { name = "google-auth" }, { name = "requests" }, @@ -3920,12 +3923,13 @@ requires-dist = [ { name = "requests", marker = "extra == 'vertexai'", specifier = ">=2.32.2" }, { name = "rich", marker = "extra == 'cli'", specifier = ">=13" }, { name = "starlette", marker = "extra == 'ag-ui'", specifier = ">=0.45.3" }, + { name = "starlette", marker = "extra == 'ui'", specifier = ">=0.45.3" }, { name = "tavily-python", marker = "extra == 'tavily'", specifier = ">=0.5.0" }, { name = "temporalio", marker = "extra == 'temporal'", specifier = "==1.18.0" }, { name = "tenacity", marker = "extra == 'retries'", specifier = ">=8.2.3" }, { name = "typing-inspection", specifier = ">=0.4.0" }, ] -provides-extras = ["a2a", "ag-ui", "anthropic", "bedrock", "cli", "cohere", "dbos", "duckduckgo", "evals", "google", "groq", "huggingface", "logfire", "mcp", "mistral", "openai", "prefect", "retries", "tavily", "temporal", "vertexai"] +provides-extras = ["a2a", 
"ag-ui", "anthropic", "bedrock", "cli", "cohere", "dbos", "duckduckgo", "evals", "google", "groq", "huggingface", "logfire", "mcp", "mistral", "openai", "prefect", "retries", "tavily", "temporal", "ui", "vertexai"] [[package]] name = "pydantic-core" From 0bf0c973875d1424374c1b86e018a994d85967de Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Thu, 16 Oct 2025 10:52:38 +0000 Subject: [PATCH 25/46] Fix Groq thinking out of order --- .../pydantic_ai/models/__init__.py | 9 +- pydantic_ai_slim/pydantic_ai/models/groq.py | 10 +- tests/models/test_groq.py | 580 +++++++++--------- 3 files changed, 305 insertions(+), 294 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/models/__init__.py b/pydantic_ai_slim/pydantic_ai/models/__init__.py index 7a52747773..6d07c58b75 100644 --- a/pydantic_ai_slim/pydantic_ai/models/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/models/__init__.py @@ -38,7 +38,6 @@ ModelResponse, ModelResponsePart, ModelResponseStreamEvent, - PartDeltaEvent, PartEndEvent, PartStartEvent, TextPart, @@ -518,7 +517,7 @@ class StreamedResponse(ABC): _event_iterator: AsyncIterator[ModelResponseStreamEvent] | None = field(default=None, init=False) _usage: RequestUsage = field(default_factory=RequestUsage, init=False) - def __aiter__(self) -> AsyncIterator[ModelResponseStreamEvent]: # noqa: C901 + def __aiter__(self) -> AsyncIterator[ModelResponseStreamEvent]: """Stream the response as an async iterable of [`ModelResponseStreamEvent`][pydantic_ai.messages.ModelResponseStreamEvent]s. This proxies the `_event_iterator()` and emits all events, while also checking for matches @@ -575,12 +574,6 @@ def part_end_event(next_part: ModelResponsePart | None = None) -> PartEndEvent | event.previous_part_kind = last_start_event.part.part_kind last_start_event = event - if isinstance(event, PartDeltaEvent) and last_start_event and last_start_event.index != event.index: - warnings.warn( - f'Part index mismatch. Delta: {event!r}. Last start event: {last_start_event!r}', - UserWarning, - ) - yield event end_event = part_end_event() diff --git a/pydantic_ai_slim/pydantic_ai/models/groq.py b/pydantic_ai_slim/pydantic_ai/models/groq.py index be0b057fe2..4523ee3b1a 100644 --- a/pydantic_ai_slim/pydantic_ai/models/groq.py +++ b/pydantic_ai_slim/pydantic_ai/models/groq.py @@ -524,6 +524,8 @@ class GroqStreamedResponse(StreamedResponse): async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: # noqa: C901 try: executed_tool_call_id: str | None = None + reasoning_index = 0 + reasoning = False async for chunk in self._response: self._usage += _map_usage(chunk) @@ -540,10 +542,16 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: self.finish_reason = _FINISH_REASON_MAP.get(raw_finish_reason) if choice.delta.reasoning is not None: + if not reasoning: + reasoning_index += 1 + reasoning = True + # NOTE: The `reasoning` field is only present if `groq_reasoning_format` is set to `parsed`. 
yield self._parts_manager.handle_thinking_delta( - vendor_part_id='reasoning', content=choice.delta.reasoning + vendor_part_id=f'reasoning-{reasoning_index}', content=choice.delta.reasoning ) + else: + reasoning = False if choice.delta.executed_tools: for tool in choice.delta.executed_tools: diff --git a/tests/models/test_groq.py b/tests/models/test_groq.py index a493a35dbe..13e64fd079 100644 --- a/tests/models/test_groq.py +++ b/tests/models/test_groq.py @@ -1004,102 +1004,6 @@ async def test_groq_model_web_search_tool_stream(allow_model_requests: None, gro search(What is the weather in San Francisco today?) - -Title: Weather in San Francisco -URL: https://www.weatherapi.com/ -Content: {'location': {'name': 'San Francisco', 'region': 'California', 'country': 'United States of America', 'lat': 37.775, 'lon': -122.4183, 'tz_id': 'America/Los_Angeles', 'localtime_epoch': 1758144075, 'localtime': '2025-09-17 14:21'}, 'current': {'last_updated_epoch': 1758143700, 'last_updated': '2025-09-17 14:15', 'temp_c': 17.4, 'temp_f': 63.3, 'is_day': 1, 'condition': {'text': 'Partly cloudy', 'icon': '//cdn.weatherapi.com/weather/64x64/day/116.png', 'code': 1003}, 'wind_mph': 7.8, 'wind_kph': 12.6, 'wind_degree': 264, 'wind_dir': 'W', 'pressure_mb': 1014.0, 'pressure_in': 29.95, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 94, 'cloud': 75, 'feelslike_c': 17.4, 'feelslike_f': 63.3, 'windchill_c': 17.7, 'windchill_f': 63.9, 'heatindex_c': 17.7, 'heatindex_f': 63.9, 'dewpoint_c': 15.3, 'dewpoint_f': 59.6, 'vis_km': 13.0, 'vis_miles': 8.0, 'uv': 6.8, 'gust_mph': 14.4, 'gust_kph': 23.1}} -Score: 0.9655 - -Title: San Francisco, CA | Weather Forecasts Now, Live Radar Maps ... -URL: https://www.weatherbug.com/weather-forecast/now/san-francisco-ca-94103 -Content: Today's Weather - San Francisco, CA. September 17, 2025 10:00 AM. Exploratorium. 61°. Feels Like 61°. Hi 69°F Lo 56°F. Mostly Sunny. -Score: 0.9512 - -Title: San Francisco, CA Weather Conditions | Weather Underground -URL: https://www.wunderground.com/weather/us/ca/san-francisco -Content: access_time 10:56 AM PDT on September 17, 2025 (GMT -7) | Updated 10 seconds ago. 76° | 59°. 74 °F. like 75°. icon. Sunny. N. 0. Today's temperature is forecast -Score: 0.9272 - -Title: Weather for San Francisco, California, USA - Time and Date -URL: https://www.timeanddate.com/weather/usa/san-francisco -Content: Weather in San Francisco, California, USA ; Sep 17, 2025 at 8:56 am · 10 mi · 29.98 "Hg · 87% · 57 °F -Score: 0.9224 - -Title: San Francisco - 14-Day Forecast: Temperature, Wind & Radar -URL: https://www.ventusky.com/san-francisco -Content: ... Current time: 01:50 2025/09/17. Current Weather; Forecast; Sun and Moon. partly cloudy, 16 °C. Wind speed 22 km/h. Humidity, 90 %. Air pressure, 1014 hPa. -Score: 0.9118 - -Title: Bay Area forecast discussion - National Weather Service -URL: https://forecast.weather.gov/product.php?format=ci&glossary=1&issuedby=mtr&product=afd&site=mtr&version=1 -Content: 723 FXUS66 KMTR 171146 AFDMTR Area Forecast Discussion National Weather Service San Francisco ... Issued at 406 AM PDT Wed Sep 17 2025 (Today and tonight) -Score: 0.8015 - -Title: Weather in San Francisco in September 2025 -URL: https://world-weather.info/forecast/usa/san_francisco/september-2025/ -Content: Detailed ⚡ San Francisco Weather Forecast for September 2025 – day/night 🌡️ temperatures, precipitations – World-Weather.info. 
-Score: 0.7647 - -Title: San Francisco weather in September 2025 | Weather25.com -URL: https://www.weather25.com/north-america/usa/california/san-francisco?page=month&month=September -Content: Full weather forecast for San Francisco in September 2025. Check the temperatures, chance of rain and more in San Francisco during September. -Score: 0.7192 - -Title: San Francisco, CA Weather Forecast - AccuWeather -URL: https://www.accuweather.com/en/us/san-francisco/94103/weather-forecast/347629 -Content: 10-Day Weather Forecast ; Today. 9/17. 76° · Partly sunny ; Thu. 9/18. 68° · Rather cloudy ; Fri. 9/19. 73° · Partly sunny and pleasant ; Sat. 9/20. 71° · Mostly sunny -Score: 0.6832 - -Title: AccuWeather Forecast: 1 more day of hot temperatures away from ... -URL: https://abc7news.com/post/weather-bay-area-forecast-temperatures/39468/ -Content: We have one more day of hot weather away from the coast today. A dense fog ... 2025 ABC, Inc., KGO-TV San Francisco. All Rights Reserved. -Score: 0.6164 - -Title: San Francisco Bay Area weather and First Alert Weather forecasts -URL: https://www.cbsnews.com/sanfrancisco/weather/ -Content: Wednesday morning First Alert weather forecast with Jessica Burch - 9/17/25 ... National - Current Temperatures · National - First Alert Doppler. Latest -Score: 0.6011 - -Title: 10-Day Weather Forecast for San Francisco, CA -URL: https://weather.com/weather/tenday/l/USCA0987:1:US -Content: 10 Day Weather-San Francisco, CA. As of 2:31 pm PDT. Today. 67°/58°. 2%. Day. 67°. 2%. W 17 mph. Plentiful sunshine. High 67F. Winds W at 10 to 20 mph. -Score: 0.5229 - -Title: 10-Day Weather Forecast for San Francisco, CA -URL: https://weather.com/weather/tenday/l/94112:4:US -Content: 10 Day Weather-San Francisco, CA. As of 5:34 pm PDT. Tonight. --/58°. 18%. Night. 58°. 18%. W 15 mph. Partly cloudy early with increasing clouds overnight. -Score: 0.4822 - -Title: Past Weather in San Francisco, California, USA - Time and Date -URL: https://www.timeanddate.com/weather/usa/san-francisco/historic -Content: Night Sky · TodayHourly14 DaysPastClimate. Currently: 61 °F. Passing clouds. (Weather station: San Francisco International Airport, USA). See more current -Score: 0.4242 - -Title: Monthly Weather Forecast for San Francisco, CA -URL: https://weather.com/weather/monthly/l/69bedc6a5b6e977993fb3e5344e3c06d8bc36a1fb6754c3ddfb5310a3c6d6c87 -Content: Considerable cloudiness. Low 56F. Winds WSW at 10 to 15 mph. Record Low52°. -Score: 0.3279 - -Title: San Francisco, CA Hourly Weather Forecast - Weather Underground -URL: https://www.wunderground.com/hourly/us/ca/san-francisco -Content: San Francisco Weather Forecasts. Weather Underground provides local & long-range weather ... Hourly Forecast for Today, Wednesday 09/17Hourly for Today, Wed 09/17. -Score: 0.2700 - - - - -Based on the search results, I can see that the current weather in San Francisco is as follows: - -- The temperature is around 61°F to 63°F (17°C). -- It is partly cloudy to mostly sunny. -- The humidity is around 90-94%. -- The wind speed is around 7-22 km/h. - -So, the current weather in San Francisco is partly cloudy with a temperature of 61°F (17°C) and high humidity. \n\ - -Now, I will provide the final answer to the user. 
\n\ - -The weather in San Francisco today is partly cloudy with a temperature of 61°F (17°C) and high humidity.\ """ ), BuiltinToolCallPart( @@ -1215,6 +1119,7 @@ async def test_groq_model_web_search_tool_stream(allow_model_requests: None, gro timestamp=IsDatetime(), provider_name='groq', ), + ThinkingPart(content=IsStr()), TextPart( content='The weather in San Francisco today is partly cloudy with a temperature of 61°F (17°C) and high humidity. The current conditions include a wind speed of around 7-22 km/h and a humidity level of 90-94%.' ), @@ -1422,10 +1327,10 @@ async def test_groq_model_web_search_tool_stream(allow_model_requests: None, gro ), previous_part_kind='builtin-tool-call', ), - PartDeltaEvent( - index=0, - delta=ThinkingPartDelta( - content_delta="""\ + PartStartEvent( + index=3, + part=ThinkingPart( + content="""\ Title: Weather in San Francisco URL: https://www.weatherapi.com/ @@ -1510,11 +1415,12 @@ async def test_groq_model_web_search_tool_stream(allow_model_requests: None, gro """ ), + previous_part_kind='builtin-tool-return', ), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' @@ -1522,27 +1428,27 @@ async def test_groq_model_web_search_tool_stream(allow_model_requests: None, gro """ ), ), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='Based')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' on')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' the')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' search')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' results')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' I')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' can')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' see')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' that')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' the')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' current')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' weather')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' in')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' San')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' Francisco')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' is')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' as')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' follows')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='Based')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' on')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' the')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' search')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' results')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' I')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' can')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' see')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' that')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' the')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' 
current')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' weather')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' in')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' San')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' Francisco')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' is')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' as')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' follows')), PartDeltaEvent( - index=0, + index=3, delta=ThinkingPartDelta( content_delta="""\ : @@ -1550,55 +1456,55 @@ async def test_groq_model_web_search_tool_stream(allow_model_requests: None, gro """ ), ), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='-')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' The')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' temperature')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' is')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' around')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' ')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='61')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='°F')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' to')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' ')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='63')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='°F')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' (')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='17')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='°C')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=').\n')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='-')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' It')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' is')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' partly')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' cloudy')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' to')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' mostly')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' sunny')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.\n')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='-')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' The')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' humidity')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' is')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' around')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' ')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='90')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='-')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='94')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='%.\n')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='-')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' The')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' wind')), - 
PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' speed')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' is')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' around')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' ')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='7')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='-')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='22')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' km')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='/h')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='-')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' The')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' temperature')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' is')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' around')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' ')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='61')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='°F')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' to')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' ')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='63')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='°F')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' (')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='17')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='°C')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=').\n')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='-')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' It')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' is')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' partly')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' cloudy')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' to')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' mostly')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' sunny')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='.\n')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='-')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' The')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' humidity')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' is')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' around')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' ')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='90')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='-')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='94')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='%.\n')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='-')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' The')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' wind')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' speed')), + PartDeltaEvent(index=3, 
delta=ThinkingPartDelta(content_delta=' is')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' around')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' ')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='7')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='-')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='22')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' km')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='/h')), PartDeltaEvent( - index=0, + index=3, delta=ThinkingPartDelta( content_delta="""\ . @@ -1606,34 +1512,34 @@ async def test_groq_model_web_search_tool_stream(allow_model_requests: None, gro """ ), ), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='So')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' the')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' current')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' weather')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' in')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' San')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' Francisco')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' is')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' partly')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' cloudy')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' with')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' a')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' temperature')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' of')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' ')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='61')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='°F')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' (')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='17')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='°C')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=')')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' and')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' high')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' humidity')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='So')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' the')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' current')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' weather')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' in')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' San')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' Francisco')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' is')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' partly')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' cloudy')), + PartDeltaEvent(index=3, 
delta=ThinkingPartDelta(content_delta=' with')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' a')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' temperature')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' of')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' ')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='61')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='°F')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' (')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='17')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='°C')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=')')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' and')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' high')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' humidity')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='.')), PartDeltaEvent( - index=0, + index=3, delta=ThinkingPartDelta( content_delta="""\ \n\ @@ -1641,20 +1547,20 @@ async def test_groq_model_web_search_tool_stream(allow_model_requests: None, gro """ ), ), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='Now')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=',')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' I')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' will')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' provide')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' the')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' final')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' answer')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' to')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' the')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' user')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='Now')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=',')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' I')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' will')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' provide')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' the')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' final')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' answer')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' to')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' the')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' user')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='.')), PartDeltaEvent( - index=0, + index=3, delta=ThinkingPartDelta( content_delta="""\ \n\ @@ -1662,82 +1568,186 @@ async def test_groq_model_web_search_tool_stream(allow_model_requests: None, gro """ ), ), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='The')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' weather')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' in')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' 
San')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' Francisco')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' today')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' is')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' partly')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' cloudy')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' with')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' a')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' temperature')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' of')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' ')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='61')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='°F')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' (')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='17')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='°C')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=')')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' and')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' high')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta=' humidity')), - PartDeltaEvent(index=0, delta=ThinkingPartDelta(content_delta='.')), - PartStartEvent(index=3, part=TextPart(content='The'), previous_part_kind='builtin-tool-return'), - FinalResultEvent(tool_name=None, tool_call_id=None), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' weather')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' in')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' San')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' Francisco')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' today')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' is')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' partly')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' cloudy')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' with')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' a')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' temperature')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' of')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' ')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta='61')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta='°F')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' (')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta='17')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta='°C')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=')')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' and')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' high')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' humidity')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta='.')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' The')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' current')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' conditions')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' 
include')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' a')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' wind')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' speed')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' of')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' around')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' ')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta='7')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta='-')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta='22')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' km')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta='/h')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' and')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' a')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' humidity')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' level')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' of')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta=' ')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta='90')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta='-')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta='94')), - PartDeltaEvent(index=3, delta=TextPartDelta(content_delta='%.')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='The')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' weather')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' in')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' San')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' Francisco')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' today')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' is')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' partly')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' cloudy')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' with')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' a')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' temperature')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' of')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' ')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='61')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='°F')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' (')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='17')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='°C')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=')')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' and')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' high')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta=' humidity')), + PartDeltaEvent(index=3, delta=ThinkingPartDelta(content_delta='.')), PartEndEvent( index=3, + part=ThinkingPart( + content="""\ + +Title: Weather in San Francisco +URL: https://www.weatherapi.com/ +Content: {'location': {'name': 'San Francisco', 'region': 'California', 'country': 'United States of America', 'lat': 37.775, 'lon': -122.4183, 'tz_id': 'America/Los_Angeles', 
'localtime_epoch': 1758144075, 'localtime': '2025-09-17 14:21'}, 'current': {'last_updated_epoch': 1758143700, 'last_updated': '2025-09-17 14:15', 'temp_c': 17.4, 'temp_f': 63.3, 'is_day': 1, 'condition': {'text': 'Partly cloudy', 'icon': '//cdn.weatherapi.com/weather/64x64/day/116.png', 'code': 1003}, 'wind_mph': 7.8, 'wind_kph': 12.6, 'wind_degree': 264, 'wind_dir': 'W', 'pressure_mb': 1014.0, 'pressure_in': 29.95, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 94, 'cloud': 75, 'feelslike_c': 17.4, 'feelslike_f': 63.3, 'windchill_c': 17.7, 'windchill_f': 63.9, 'heatindex_c': 17.7, 'heatindex_f': 63.9, 'dewpoint_c': 15.3, 'dewpoint_f': 59.6, 'vis_km': 13.0, 'vis_miles': 8.0, 'uv': 6.8, 'gust_mph': 14.4, 'gust_kph': 23.1}} +Score: 0.9655 + +Title: San Francisco, CA | Weather Forecasts Now, Live Radar Maps ... +URL: https://www.weatherbug.com/weather-forecast/now/san-francisco-ca-94103 +Content: Today's Weather - San Francisco, CA. September 17, 2025 10:00 AM. Exploratorium. 61°. Feels Like 61°. Hi 69°F Lo 56°F. Mostly Sunny. +Score: 0.9512 + +Title: San Francisco, CA Weather Conditions | Weather Underground +URL: https://www.wunderground.com/weather/us/ca/san-francisco +Content: access_time 10:56 AM PDT on September 17, 2025 (GMT -7) | Updated 10 seconds ago. 76° | 59°. 74 °F. like 75°. icon. Sunny. N. 0. Today's temperature is forecast +Score: 0.9272 + +Title: Weather for San Francisco, California, USA - Time and Date +URL: https://www.timeanddate.com/weather/usa/san-francisco +Content: Weather in San Francisco, California, USA ; Sep 17, 2025 at 8:56 am · 10 mi · 29.98 "Hg · 87% · 57 °F +Score: 0.9224 + +Title: San Francisco - 14-Day Forecast: Temperature, Wind & Radar +URL: https://www.ventusky.com/san-francisco +Content: ... Current time: 01:50 2025/09/17. Current Weather; Forecast; Sun and Moon. partly cloudy, 16 °C. Wind speed 22 km/h. Humidity, 90 %. Air pressure, 1014 hPa. +Score: 0.9118 + +Title: Bay Area forecast discussion - National Weather Service +URL: https://forecast.weather.gov/product.php?format=ci&glossary=1&issuedby=mtr&product=afd&site=mtr&version=1 +Content: 723 FXUS66 KMTR 171146 AFDMTR Area Forecast Discussion National Weather Service San Francisco ... Issued at 406 AM PDT Wed Sep 17 2025 (Today and tonight) +Score: 0.8015 + +Title: Weather in San Francisco in September 2025 +URL: https://world-weather.info/forecast/usa/san_francisco/september-2025/ +Content: Detailed ⚡ San Francisco Weather Forecast for September 2025 – day/night 🌡️ temperatures, precipitations – World-Weather.info. +Score: 0.7647 + +Title: San Francisco weather in September 2025 | Weather25.com +URL: https://www.weather25.com/north-america/usa/california/san-francisco?page=month&month=September +Content: Full weather forecast for San Francisco in September 2025. Check the temperatures, chance of rain and more in San Francisco during September. +Score: 0.7192 + +Title: San Francisco, CA Weather Forecast - AccuWeather +URL: https://www.accuweather.com/en/us/san-francisco/94103/weather-forecast/347629 +Content: 10-Day Weather Forecast ; Today. 9/17. 76° · Partly sunny ; Thu. 9/18. 68° · Rather cloudy ; Fri. 9/19. 73° · Partly sunny and pleasant ; Sat. 9/20. 71° · Mostly sunny +Score: 0.6832 + +Title: AccuWeather Forecast: 1 more day of hot temperatures away from ... +URL: https://abc7news.com/post/weather-bay-area-forecast-temperatures/39468/ +Content: We have one more day of hot weather away from the coast today. A dense fog ... 2025 ABC, Inc., KGO-TV San Francisco. All Rights Reserved. 
+Score: 0.6164 + +Title: San Francisco Bay Area weather and First Alert Weather forecasts +URL: https://www.cbsnews.com/sanfrancisco/weather/ +Content: Wednesday morning First Alert weather forecast with Jessica Burch - 9/17/25 ... National - Current Temperatures · National - First Alert Doppler. Latest +Score: 0.6011 + +Title: 10-Day Weather Forecast for San Francisco, CA +URL: https://weather.com/weather/tenday/l/USCA0987:1:US +Content: 10 Day Weather-San Francisco, CA. As of 2:31 pm PDT. Today. 67°/58°. 2%. Day. 67°. 2%. W 17 mph. Plentiful sunshine. High 67F. Winds W at 10 to 20 mph. +Score: 0.5229 + +Title: 10-Day Weather Forecast for San Francisco, CA +URL: https://weather.com/weather/tenday/l/94112:4:US +Content: 10 Day Weather-San Francisco, CA. As of 5:34 pm PDT. Tonight. --/58°. 18%. Night. 58°. 18%. W 15 mph. Partly cloudy early with increasing clouds overnight. +Score: 0.4822 + +Title: Past Weather in San Francisco, California, USA - Time and Date +URL: https://www.timeanddate.com/weather/usa/san-francisco/historic +Content: Night Sky · TodayHourly14 DaysPastClimate. Currently: 61 °F. Passing clouds. (Weather station: San Francisco International Airport, USA). See more current +Score: 0.4242 + +Title: Monthly Weather Forecast for San Francisco, CA +URL: https://weather.com/weather/monthly/l/69bedc6a5b6e977993fb3e5344e3c06d8bc36a1fb6754c3ddfb5310a3c6d6c87 +Content: Considerable cloudiness. Low 56F. Winds WSW at 10 to 15 mph. Record Low52°. +Score: 0.3279 + +Title: San Francisco, CA Hourly Weather Forecast - Weather Underground +URL: https://www.wunderground.com/hourly/us/ca/san-francisco +Content: San Francisco Weather Forecasts. Weather Underground provides local & long-range weather ... Hourly Forecast for Today, Wednesday 09/17Hourly for Today, Wed 09/17. +Score: 0.2700 + + + + +Based on the search results, I can see that the current weather in San Francisco is as follows: + +- The temperature is around 61°F to 63°F (17°C). +- It is partly cloudy to mostly sunny. +- The humidity is around 90-94%. +- The wind speed is around 7-22 km/h. + +So, the current weather in San Francisco is partly cloudy with a temperature of 61°F (17°C) and high humidity. \n\ + +Now, I will provide the final answer to the user. 
\n\ + +The weather in San Francisco today is partly cloudy with a temperature of 61°F (17°C) and high humidity.\ +""" + ), + next_part_kind='text', + ), + PartStartEvent(index=4, part=TextPart(content='The'), previous_part_kind='thinking'), + FinalResultEvent(tool_name=None, tool_call_id=None), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' weather')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' in')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' San')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' Francisco')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' today')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' is')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' partly')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' cloudy')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' with')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' a')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' temperature')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' of')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' ')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta='61')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta='°F')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' (')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta='17')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta='°C')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=')')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' and')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' high')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' humidity')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta='.')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' The')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' current')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' conditions')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' include')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' a')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' wind')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' speed')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' of')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' around')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' ')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta='7')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta='-')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta='22')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' km')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta='/h')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' and')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' a')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' humidity')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' level')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' of')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta=' ')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta='90')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta='-')), + PartDeltaEvent(index=4, 
delta=TextPartDelta(content_delta='94')), + PartDeltaEvent(index=4, delta=TextPartDelta(content_delta='%.')), + PartEndEvent( + index=4, part=TextPart( content='The weather in San Francisco today is partly cloudy with a temperature of 61°F (17°C) and high humidity. The current conditions include a wind speed of around 7-22 km/h and a humidity level of 90-94%.' ), From 42e39e220c289d34d604247ca3e8cdd0693b65d4 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Thu, 16 Oct 2025 11:13:21 +0000 Subject: [PATCH 26/46] coverage --- pydantic_ai_slim/pydantic_ai/ui/adapter.py | 7 +- .../pydantic_ai/ui/event_stream.py | 10 +- tests/test_ui.py | 93 +++++-------------- 3 files changed, 29 insertions(+), 81 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/adapter.py index 0b0286a998..d46a23f478 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/adapter.py @@ -190,16 +190,11 @@ async def process_stream( async for event in event_stream.handle_stream(stream): yield event - try: - result = event_stream.result - if on_complete is not None and result is not None: + if (result := event_stream.result) and on_complete is not None: if _utils.is_async_callable(on_complete): await on_complete(result) else: await _utils.run_in_executor(on_complete, result) - except Exception as e: # TODO (DouweM): coverage - async for event in event_stream.on_error(e): - yield event async def run_stream( self, diff --git a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py index 84a1cd6f8e..87a2336c05 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py @@ -117,7 +117,7 @@ async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterat if turn != next_turn: if turn == 'request': async for e in self.after_request(): - yield e # TODO (DouweM): coverage + yield e elif turn == 'response': async for e in self.after_response(): yield e # TODO (DouweM): coverage @@ -178,7 +178,7 @@ async def handle_event(self, event: SourceEvent) -> AsyncIterator[EventT]: # no case FinalResultEvent(): self._final_result_event = event async for e in self.handle_final_result(event): - yield e # TODO (DouweM): coverage + yield e case FunctionToolCallEvent(): async for e in self.handle_function_tool_call(event): yield e # TODO (DouweM): coverage @@ -204,7 +204,7 @@ async def handle_event(self, event: SourceEvent) -> AsyncIterator[EventT]: # no self.result = event.result async for e in self.handle_run_result(event): - yield e # TODO (DouweM): coverage + yield e case _: pass @@ -250,7 +250,7 @@ async def handle_part_delta(self, event: PartDeltaEvent) -> AsyncIterator[EventT case ThinkingPartDelta(): async for e in self.handle_thinking_delta(delta): yield e - case ToolCallPartDelta(): # TODO (DouweM): coverage branch + case ToolCallPartDelta(): # pragma: no branch async for e in self.handle_tool_call_delta(delta): yield e @@ -331,7 +331,7 @@ async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator Yields: Protocol-specific events. 
""" - return + return # TODO (DouweM): coverage yield # Make this an async generator async def handle_thinking_end( diff --git a/tests/test_ui.py b/tests/test_ui.py index 10a59f6113..2ad7579bad 100644 --- a/tests/test_ui.py +++ b/tests/test_ui.py @@ -84,7 +84,7 @@ def event_stream(self) -> UIEventStream: @cached_property def messages(self) -> list[ModelMessage]: - return self.request.messages + return self.load_messages(self.request.messages) @cached_property def state(self) -> dict[str, Any] | None: @@ -122,136 +122,75 @@ async def handle_part_end(self, event: PartEndEvent) -> AsyncIterator[str]: async def handle_text_start(self, part: TextPart, follows_text: bool = False) -> AsyncIterator[str]: yield f'{part.content}' - async for e in super().handle_text_start(part, follows_text): - yield e async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[str]: yield delta.content_delta - async for e in super().handle_text_delta(delta): - yield e async def handle_text_end(self, part: TextPart, followed_by_text: bool = False) -> AsyncIterator[str]: yield '' - async for e in super().handle_text_end(part, followed_by_text): - yield e async def handle_thinking_start(self, part: ThinkingPart, follows_thinking: bool = False) -> AsyncIterator[str]: yield f'{part.content}' - async for e in super().handle_thinking_start(part, follows_thinking): - yield e async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[str]: yield str(delta.content_delta) - async for e in super().handle_thinking_delta(delta): - yield e async def handle_thinking_end(self, part: ThinkingPart, followed_by_thinking: bool = False) -> AsyncIterator[str]: yield '' - async for e in super().handle_thinking_end(part, followed_by_thinking): - yield e async def handle_tool_call_start(self, part: ToolCallPart) -> AsyncIterator[str]: yield f'{part.args}' - async for e in super().handle_tool_call_start(part): - yield e async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[str]: yield str(delta.args_delta) - async for e in super().handle_tool_call_delta(delta): - yield e async def handle_tool_call_end(self, part: ToolCallPart) -> AsyncIterator[str]: yield f'' - async for e in super().handle_tool_call_end(part): - yield e async def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIterator[str]: yield f'{part.args}' - async for e in super().handle_builtin_tool_call_start(part): - yield e async def handle_builtin_tool_call_end(self, part: BuiltinToolCallPart) -> AsyncIterator[str]: yield f'' - async for e in super().handle_builtin_tool_call_end(part): - yield e async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[str]: yield f'{part.content}' - async for e in super().handle_builtin_tool_return(part): - yield e async def handle_file(self, part: FilePart) -> AsyncIterator[str]: yield f'' - async for e in super().handle_file(part): - yield e async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[str]: yield '' - async for e in super().handle_final_result(event): - yield e async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[str]: yield f'{event.part.args}' - async for e in super().handle_function_tool_call(event): - yield e async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[str]: yield f'{event.result.content}' - async for e in super().handle_function_tool_result(event): - yield e async def handle_run_result(self, event: 
AgentRunResultEvent) -> AsyncIterator[str]: yield f'{event.result.output}' - async for e in super().handle_run_result(event): - yield e - - async def before_request(self) -> AsyncIterator[str]: - yield '' - async for e in super().before_request(): - yield e - async def after_request(self) -> AsyncIterator[str]: - yield '' - async for e in super().after_request(): - yield e + async def before_stream(self) -> AsyncIterator[str]: + yield '' async def before_response(self) -> AsyncIterator[str]: yield '' - async for e in super().before_response(): - yield e async def after_response(self) -> AsyncIterator[str]: yield '' - async for e in super().after_response(): - yield e - async def before_stream(self) -> AsyncIterator[str]: - yield '' - async for e in super().before_stream(): - yield e + async def before_request(self) -> AsyncIterator[str]: + yield '' + + async def after_request(self) -> AsyncIterator[str]: + yield '' async def after_stream(self) -> AsyncIterator[str]: yield '' - async for e in super().after_stream(): - yield e async def on_error(self, error: Exception) -> AsyncIterator[str]: - yield f'on_error({error.__class__.__name__}({str(error)!r}))' - async for e in super().on_error(error): - yield e - - -async def test_event_stream_text(): - # text - # - back to back - # thinking - # - back to back - # tool call - # builtin tool call - # file - # error - # output tool - pass + yield f'{str(error)}' async def test_event_stream_builtin_tool_call(): @@ -313,3 +252,17 @@ async def stream_function( '', ] ) + + +async def test_event_stream_text(): + # tool call + # error + # output tool + # - dispatch_request + # text + # - back to back + # thinking + # - back to back + # tool call + # file + pass From 0b1dea375e63e61768a0c9989755c52f581b3a48 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Thu, 16 Oct 2025 14:00:21 +0000 Subject: [PATCH 27/46] tests --- pydantic_ai_slim/pydantic_ai/ag_ui.py | 5 +- pydantic_ai_slim/pydantic_ai/ui/adapter.py | 37 +- .../pydantic_ai/ui/ag_ui/_adapter.py | 5 +- .../pydantic_ai/ui/ag_ui/_event_stream.py | 23 +- .../pydantic_ai/ui/event_stream.py | 138 ++++-- .../pydantic_ai/ui/vercel_ai/_adapter.py | 5 +- .../pydantic_ai/ui/vercel_ai/_event_stream.py | 15 +- tests/test_ag_ui.py | 3 +- tests/test_ui.py | 460 ++++++++++++++++-- 9 files changed, 560 insertions(+), 131 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ag_ui.py b/pydantic_ai_slim/pydantic_ai/ag_ui.py index df03646ae9..01968fce83 100644 --- a/pydantic_ai_slim/pydantic_ai/ag_ui.py +++ b/pydantic_ai_slim/pydantic_ai/ag_ui.py @@ -20,6 +20,7 @@ from .usage import RunUsage, UsageLimits try: + from ag_ui.core import BaseEvent from ag_ui.core.types import RunAgentInput from .ui import OnCompleteFunc, StateDeps, StateHandler @@ -171,7 +172,7 @@ async def handle_ag_ui_request( usage: RunUsage | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, - on_complete: OnCompleteFunc | None = None, + on_complete: OnCompleteFunc[BaseEvent] | None = None, ) -> Response: """Handle an AG-UI request by running the agent and returning a streaming response. @@ -226,7 +227,7 @@ async def run_ag_ui( usage: RunUsage | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, - on_complete: OnCompleteFunc | None = None, + on_complete: OnCompleteFunc[BaseEvent] | None = None, ) -> AsyncIterator[str]: """Run the agent with the AG-UI run input and stream AG-UI protocol events. 
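As the `OnCompleteFunc[BaseEvent]` annotations above suggest, `on_complete` may now be a plain
function, a coroutine function, or an async generator whose yielded events are streamed to the
frontend after the run result. A minimal sketch of the three shapes, assuming the signatures in
this patch (the route path and callback names are hypothetical):

    from collections.abc import AsyncIterator
    from typing import Any

    from ag_ui.core import BaseEvent, CustomEvent, EventType
    from fastapi import FastAPI, Request, Response

    from pydantic_ai import Agent
    from pydantic_ai.ag_ui import handle_ag_ui_request
    from pydantic_ai.run import AgentRunResult

    app = FastAPI()
    agent = Agent('openai:gpt-4.1')

    def log_result(result: AgentRunResult[Any]) -> None:
        # Sync callback: run in a thread executor so it doesn't block the event loop.
        print(result.output)

    async def store_result(result: AgentRunResult[Any]) -> None:
        # Coroutine callback: awaited directly once the run completes.
        print(result.output)

    async def emit_done(result: AgentRunResult[Any]) -> AsyncIterator[BaseEvent]:
        # Async generator callback: yielded events are encoded and streamed to the
        # client after the run result, before the final RunFinishedEvent.
        yield CustomEvent(type=EventType.CUSTOM, name='run_done', value=str(result.output))

    @app.post('/agui')
    async def agui_endpoint(request: Request) -> Response:
        return await handle_ag_ui_request(agent, request, on_complete=emit_done)

Passing `log_result` or `store_result` instead would exercise the sync and coroutine branches
of `handle_stream`'s `on_complete` dispatch.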
diff --git a/pydantic_ai_slim/pydantic_ai/ui/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/adapter.py index d46a23f478..e3a4bf9fe8 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/adapter.py @@ -7,7 +7,7 @@ from __future__ import annotations from abc import ABC, abstractmethod -from collections.abc import AsyncIterator, Awaitable, Callable, Mapping, Sequence +from collections.abc import AsyncIterator, Mapping, Sequence from dataclasses import Field, dataclass, replace from functools import cached_property from http import HTTPStatus @@ -17,24 +17,23 @@ ClassVar, Generic, Protocol, - TypeAlias, TypeVar, runtime_checkable, ) from pydantic import BaseModel, ValidationError -from .. import DeferredToolRequests, DeferredToolResults, _utils -from ..agent import AbstractAgent, AgentDepsT, AgentRunResult +from .. import DeferredToolRequests, DeferredToolResults +from ..agent import AbstractAgent, AgentDepsT from ..builtin_tools import AbstractBuiltinTool from ..exceptions import UserError from ..messages import ModelMessage from ..models import KnownModelName, Model -from ..output import OutputSpec +from ..output import OutputDataT, OutputSpec from ..settings import ModelSettings from ..toolsets import AbstractToolset from ..usage import RunUsage, UsageLimits -from .event_stream import BaseEventStream, SourceEvent +from .event_stream import BaseEventStream, OnCompleteFunc, SourceEvent if TYPE_CHECKING: from starlette.requests import Request @@ -55,9 +54,6 @@ EventT = TypeVar('EventT') """Type variable for protocol-specific event types.""" -OnCompleteFunc: TypeAlias = Callable[[AgentRunResult[Any]], None] | Callable[[AgentRunResult[Any]], Awaitable[None]] -"""Callback function type that receives the `AgentRunResult` of the completed run. Can be sync or async.""" - # State management types @@ -111,10 +107,10 @@ class StateDeps(Generic[StateT]): @dataclass -class BaseAdapter(ABC, Generic[RunRequestT, MessageT, EventT, AgentDepsT]): +class BaseAdapter(ABC, Generic[RunRequestT, MessageT, EventT, AgentDepsT, OutputDataT]): """TODO (DouwM): Docstring.""" - agent: AbstractAgent[AgentDepsT] + agent: AbstractAgent[AgentDepsT, OutputDataT] """The Pydantic AI agent to run.""" request: RunRequestT @@ -134,7 +130,7 @@ def load_messages(cls, messages: Sequence[MessageT]) -> list[ModelMessage]: @property @abstractmethod - def event_stream(self) -> BaseEventStream[RunRequestT, EventT, AgentDepsT]: + def event_stream(self) -> BaseEventStream[RunRequestT, EventT, AgentDepsT, OutputDataT]: """Create an event stream for the adapter.""" raise NotImplementedError @@ -178,7 +174,7 @@ def encode_stream(self, stream: AsyncIterator[EventT], accept: str | None = None async def process_stream( self, stream: AsyncIterator[SourceEvent], - on_complete: OnCompleteFunc | None = None, + on_complete: OnCompleteFunc[EventT] | None = None, ) -> AsyncIterator[EventT]: """Process a stream of events and return a stream of events. @@ -186,16 +182,9 @@ async def process_stream( stream: The stream of events to process. on_complete: Optional callback function called when the agent run completes successfully. 
""" - event_stream = self.event_stream - async for event in event_stream.handle_stream(stream): + async for event in self.event_stream.handle_stream(stream, on_complete=on_complete): yield event - if (result := event_stream.result) and on_complete is not None: - if _utils.is_async_callable(on_complete): - await on_complete(result) - else: - await _utils.run_in_executor(on_complete, result) - async def run_stream( self, *, @@ -210,7 +199,7 @@ async def run_stream( infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, - on_complete: OnCompleteFunc | None = None, + on_complete: OnCompleteFunc[EventT] | None = None, ) -> AsyncIterator[EventT]: """Run the agent with the AG-UI run input and stream AG-UI protocol events. @@ -298,7 +287,7 @@ async def stream_response(self, stream: AsyncIterator[EventT], accept: str | Non @classmethod async def dispatch_request( cls, - agent: AbstractAgent[AgentDepsT, Any], + agent: AbstractAgent[AgentDepsT, OutputDataT], request: Request, *, message_history: Sequence[ModelMessage] | None = None, @@ -312,7 +301,7 @@ async def dispatch_request( infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, - on_complete: OnCompleteFunc | None = None, + on_complete: OnCompleteFunc[EventT] | None = None, ) -> Response: """Handle an AG-UI request and return a streaming response. diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index 5c3eef5280..846bfc0729 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -25,6 +25,7 @@ ToolReturnPart, UserPromptPart, ) +from ...output import OutputDataT from ...toolsets import AbstractToolset try: @@ -92,7 +93,7 @@ def label(self) -> str: return 'the AG-UI frontend tools' # pragma: no cover -class AGUIAdapter(BaseAdapter[RunAgentInput, Message, BaseEvent, AgentDepsT]): +class AGUIAdapter(BaseAdapter[RunAgentInput, Message, BaseEvent, AgentDepsT, OutputDataT]): """TODO (DouwM): Docstring.""" @classmethod @@ -101,7 +102,7 @@ async def validate_request(cls, request: Request) -> RunAgentInput: return RunAgentInput.model_validate(await request.json()) @property - def event_stream(self) -> BaseEventStream[RunAgentInput, BaseEvent, AgentDepsT]: + def event_stream(self) -> BaseEventStream[RunAgentInput, BaseEvent, AgentDepsT, OutputDataT]: """Create an event stream for the adapter.""" return AGUIEventStream(self.request) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index a29706da2f..fba4650404 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -8,6 +8,7 @@ import json from collections.abc import AsyncIterator, Iterable +from dataclasses import dataclass, field from typing import Final from ...messages import ( @@ -23,6 +24,7 @@ ToolCallPartDelta, ToolReturnPart, ) +from ...output import OutputDataT from ...tools import AgentDepsT from .. 
import BaseEventStream @@ -69,14 +71,13 @@ BUILTIN_TOOL_CALL_ID_PREFIX: Final[str] = 'pyd_ai_builtin' -class AGUIEventStream(BaseEventStream[RunAgentInput, BaseEvent, AgentDepsT]): +@dataclass +class AGUIEventStream(BaseEventStream[RunAgentInput, BaseEvent, AgentDepsT, OutputDataT]): """TODO (DouwM): Docstring.""" - def __init__(self, request: RunAgentInput) -> None: - """Initialize AG-UI event stream state.""" - super().__init__(request) - self._thinking_text = False - self._builtin_tool_call_ids: dict[str, str] = {} + _thinking_text: bool = False + _builtin_tool_call_ids: dict[str, str] = field(default_factory=dict) + _error: bool = False def encode_event(self, event: BaseEvent, accept: str | None = None) -> str: """Encode an AG-UI event as SSE. @@ -100,13 +101,15 @@ async def before_stream(self) -> AsyncIterator[BaseEvent]: async def after_stream(self) -> AsyncIterator[BaseEvent]: """Handle an AgentRunResultEvent, cleaning up any pending state.""" - yield RunFinishedEvent( - thread_id=self.request.thread_id, - run_id=self.request.run_id, - ) + if not self._error: + yield RunFinishedEvent( + thread_id=self.request.thread_id, + run_id=self.request.run_id, + ) async def on_error(self, error: Exception) -> AsyncIterator[BaseEvent]: """Handle errors during streaming.""" + self._error = True yield RunErrorEvent(message=str(error)) async def handle_text_start(self, part: TextPart, follows_text: bool = False) -> AsyncIterator[BaseEvent]: diff --git a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py index 87a2336c05..e4fb0254af 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py @@ -6,12 +6,15 @@ from __future__ import annotations +import inspect from abc import ABC, abstractmethod -from collections.abc import AsyncIterator +from collections.abc import AsyncIterator, Awaitable, Callable from dataclasses import dataclass, field -from typing import Generic, Literal, TypeVar +from typing import Any, Generic, Literal, TypeAlias, TypeVar, cast from uuid import uuid4 +from pydantic_ai import _utils + from ..messages import ( AgentStreamEvent, BuiltinToolCallEvent, # pyright: ignore[reportDeprecated] @@ -33,6 +36,7 @@ ToolCallPartDelta, ToolReturnPart, ) +from ..output import OutputDataT from ..run import AgentRunResult, AgentRunResultEvent from ..tools import AgentDepsT @@ -46,18 +50,27 @@ RunRequestT = TypeVar('RunRequestT') """Type variable for request types.""" -SourceEvent = AgentStreamEvent | AgentRunResultEvent +SourceEvent = AgentStreamEvent | AgentRunResultEvent[Any] + +OnCompleteFunc: TypeAlias = ( + Callable[[AgentRunResult[Any]], None] + | Callable[[AgentRunResult[Any]], Awaitable[None]] + | Callable[[AgentRunResult[Any]], AsyncIterator[EventT]] +) +"""Callback function type that receives the `AgentRunResult` of the completed run. 
Can be sync or async and can yield events.""" @dataclass -class BaseEventStream(ABC, Generic[RunRequestT, EventT, AgentDepsT]): +class BaseEventStream(ABC, Generic[RunRequestT, EventT, AgentDepsT, OutputDataT]): """TODO (DouwM): Docstring.""" request: RunRequestT - result: AgentRunResult | None = None + result: AgentRunResult[OutputDataT] | None = None message_id: str = field(default_factory=lambda: str(uuid4())) + _turn: Literal['request', 'response'] | None = None + _final_result_event: FinalResultEvent | None = None def new_message_id(self) -> str: @@ -89,11 +102,14 @@ async def encode_stream(self, stream: AsyncIterator[EventT], accept: str | None async for event in stream: yield self.encode_event(event, accept) - async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterator[EventT]: # noqa: C901 + async def handle_stream( # noqa: C901 + self, stream: AsyncIterator[SourceEvent], on_complete: OnCompleteFunc[EventT] | None = None + ) -> AsyncIterator[EventT]: """Handle a stream of agent events. Args: stream: The stream of agent events to handle. + on_complete: Optional callback function called when the agent run completes successfully. Yields: Protocol-specific events. @@ -101,35 +117,51 @@ async def handle_stream(self, stream: AsyncIterator[SourceEvent]) -> AsyncIterat async for e in self.before_stream(): yield e - turn: Literal['request', 'response'] | None = None try: async for event in stream: # TODO (DouweM): Introduce, possibly, MessageStartEvent, MessageEndEvent with ModelRequest/Response? # People have requested these before. We can store Request and Response - next_turn = turn if isinstance(event, PartStartEvent): - next_turn = 'request' + async for e in self._turn_to('response'): + yield e elif isinstance(event, FunctionToolCallEvent): - next_turn = 'response' + async for e in self._turn_to('request'): + yield e elif isinstance(event, AgentRunResultEvent): - next_turn = None + if ( + self._final_result_event + and (tool_call_id := self._final_result_event.tool_call_id) + and (tool_name := self._final_result_event.tool_name) + ): + async for e in self._turn_to('request'): + yield e - if turn != next_turn: - if turn == 'request': - async for e in self.after_request(): + self._final_result_event = None + output_tool_result_event = FunctionToolResultEvent( + result=ToolReturnPart( + tool_call_id=tool_call_id, + tool_name=tool_name, + content='Final result processed.', + ) + ) + async for e in self.handle_function_tool_result(output_tool_result_event): yield e - elif turn == 'response': - async for e in self.after_response(): - yield e # TODO (DouweM): coverage - turn = next_turn + self.result = cast(AgentRunResult[OutputDataT], event.result) - if turn == 'request': - async for e in self.before_request(): - yield e - elif turn == 'response': - async for e in self.before_response(): - yield e # TODO (DouweM): coverage + async for e in self._turn_to(None): + yield e + + if on_complete is not None: + if inspect.isasyncgenfunction(on_complete): + async for e in on_complete(self.result): + yield e + elif _utils.is_async_callable(on_complete): + await on_complete(self.result) + else: + await _utils.run_in_executor(on_complete, self.result) + elif isinstance(event, FinalResultEvent): + self._final_result_event = event if isinstance(event, BuiltinToolCallEvent | BuiltinToolResultEvent): # pyright: ignore[reportDeprecated] # The events were deprecated before this feature was introduced @@ -140,20 +172,40 @@ async def handle_stream(self, stream: AsyncIterator[SourceEvent]) 
-> AsyncIterat except Exception as e: async for e in self.on_error(e): yield e - else: - if turn == 'request': - async for ( - e - ) in self.after_request(): # TODO (DouweM): coverage. does this make sense here? should it be finally? - yield e - elif turn == 'response': - async for e in self.after_response(): # TODO (DouweM): coverage - yield e + finally: + async for e in self._turn_to(None): + yield e async for e in self.after_stream(): yield e - async def handle_event(self, event: SourceEvent) -> AsyncIterator[EventT]: # noqa: C901 + async def _turn_to(self, to_turn: Literal['request', 'response'] | None) -> AsyncIterator[EventT]: + """Handle a turn. + + Args: + from_turn: The turn to start from. + to_turn: The turn to end at. + """ + if to_turn == self._turn: + return + + if self._turn == 'request': + async for e in self.after_request(): + yield e + elif self._turn == 'response': + async for e in self.after_response(): + yield e + + self._turn = to_turn + + if to_turn == 'request': + async for e in self.before_request(): + yield e + elif to_turn == 'response': + async for e in self.before_response(): + yield e + + async def handle_event(self, event: SourceEvent) -> AsyncIterator[EventT]: """Transform a Pydantic AI agent event into protocol-specific events. This method dispatches to specific `handle_*` methods based on event and part type. @@ -176,7 +228,6 @@ async def handle_event(self, event: SourceEvent) -> AsyncIterator[EventT]: # no async for e in self.handle_part_end(event): yield e case FinalResultEvent(): - self._final_result_event = event async for e in self.handle_final_result(event): yield e case FunctionToolCallEvent(): @@ -186,23 +237,6 @@ async def handle_event(self, event: SourceEvent) -> AsyncIterator[EventT]: # no async for e in self.handle_function_tool_result(event): yield e case AgentRunResultEvent(): - if ( - self._final_result_event - and (tool_call_id := self._final_result_event.tool_call_id) - and (tool_name := self._final_result_event.tool_name) - ): # TODO (DouweM): coverage - self._final_result_event = None - output_tool_result_event = FunctionToolResultEvent( - result=ToolReturnPart( - tool_call_id=tool_call_id, - tool_name=tool_name, - content='Final result processed.', - ) - ) - async for e in self.handle_function_tool_result(output_tool_result_event): - yield e - - self.result = event.result async for e in self.handle_run_result(event): yield e case _: diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py index c654571aae..58ce3685de 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py @@ -29,6 +29,7 @@ UserPromptPart, VideoUrl, ) +from ...output import OutputDataT from ..adapter import BaseAdapter from ..event_stream import BaseEventStream from ._event_stream import VercelAIEventStream @@ -63,7 +64,7 @@ @dataclass -class VercelAIAdapter(BaseAdapter[RequestData, UIMessage, BaseChunk, AgentDepsT]): +class VercelAIAdapter(BaseAdapter[RequestData, UIMessage, BaseChunk, AgentDepsT, OutputDataT]): """TODO (DouwM): Docstring.""" @classmethod @@ -72,7 +73,7 @@ async def validate_request(cls, request: Request) -> RequestData: return request_data_ta.validate_json(await request.body()) # TODO (DouweM): coverage @property - def event_stream(self) -> BaseEventStream[RequestData, BaseChunk, AgentDepsT]: + def event_stream(self) -> BaseEventStream[RequestData, BaseChunk, AgentDepsT, OutputDataT]: return 
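# --- Illustrative sketch (not part of the patch) -----------------------------
# _turn_to above is a small state machine: a transition first closes the
# current turn (after_* events), then opens the new one (before_* events), and
# a transition to the same turn is a no-op. A hypothetical minimal form:
from collections.abc import AsyncIterator
from typing import Literal

class TurnTracker:
    def __init__(self) -> None:
        self.turn: Literal['request', 'response'] | None = None

    async def turn_to(self, to_turn: Literal['request', 'response'] | None) -> AsyncIterator[str]:
        if to_turn == self.turn:
            return  # no transition, nothing to emit
        if self.turn is not None:
            yield f'after_{self.turn}'  # close the previous turn
        self.turn = to_turn
        if to_turn is not None:
            yield f'before_{to_turn}'  # open the new turn
# ------------------------------------------------------------------------------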
VercelAIEventStream(self.request) @property diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py index cbdff933f8..09d74044f5 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py @@ -3,6 +3,7 @@ from __future__ import annotations from collections.abc import AsyncIterator +from dataclasses import dataclass from typing import Any from pydantic_core import to_json @@ -20,6 +21,7 @@ ToolCallPart, ToolCallPartDelta, ) +from ...output import OutputDataT from ...tools import AgentDepsT from .. import BaseEventStream from ._request_types import RequestData @@ -53,14 +55,11 @@ def _json_dumps(obj: Any) -> str: return to_json(obj).decode('utf-8') -class VercelAIEventStream(BaseEventStream[RequestData, BaseChunk, AgentDepsT]): +@dataclass +class VercelAIEventStream(BaseEventStream[RequestData, BaseChunk, AgentDepsT, OutputDataT]): """TODO (DouwM): Docstring.""" - def __init__(self, request: RequestData) -> None: - """Initialize Vercel AI event stream state.""" - super().__init__(request) - - self._step_started = False + _step_started: bool = False def encode_event(self, event: BaseChunk, accept: str | None = None) -> str: if isinstance(event, DoneChunk): @@ -71,12 +70,12 @@ async def before_stream(self) -> AsyncIterator[BaseChunk]: """Yield events before agent streaming starts.""" yield StartChunk() - async def before_request(self) -> AsyncIterator[BaseChunk]: + async def before_response(self) -> AsyncIterator[BaseChunk]: """Yield events before the request is processed.""" self._step_started = True yield StartStepChunk() - async def after_response(self) -> AsyncIterator[BaseChunk]: + async def after_request(self) -> AsyncIterator[BaseChunk]: """Yield events after the response is processed.""" if self._step_started: # TODO (DouweM): coverage yield FinishStepChunk() diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index 2f43a072d9..0844b7372e 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -51,6 +51,7 @@ with try_import() as imports_successful: from ag_ui.core import ( AssistantMessage, + BaseEvent, CustomEvent, DeveloperMessage, EventType, @@ -116,7 +117,7 @@ async def run_and_collect_events( agent: Agent[AgentDepsT, OutputDataT], *run_inputs: RunAgentInput, deps: AgentDepsT = None, - on_complete: OnCompleteFunc | None = None, + on_complete: OnCompleteFunc[BaseEvent] | None = None, ) -> list[dict[str, Any]]: events = list[dict[str, Any]]() for run_input in run_inputs: diff --git a/tests/test_ui.py b/tests/test_ui.py index 2ad7579bad..ce6cc05157 100644 --- a/tests/test_ui.py +++ b/tests/test_ui.py @@ -1,6 +1,6 @@ from __future__ import annotations -from collections.abc import AsyncIterator, Sequence +from collections.abc import AsyncIterator, MutableMapping, Sequence from dataclasses import dataclass, field from functools import cached_property from typing import Any @@ -10,8 +10,10 @@ from pydantic import BaseModel from pydantic_ai import Agent +from pydantic_ai._run_context import AgentDepsT from pydantic_ai.builtin_tools import WebSearchTool from pydantic_ai.messages import ( + BinaryImage, BuiltinToolCallPart, BuiltinToolReturnPart, FilePart, @@ -30,8 +32,18 @@ ToolCallPart, ToolCallPartDelta, ) -from pydantic_ai.models.function import AgentInfo, BuiltinToolCallsReturns, DeltaToolCall, DeltaToolCalls, FunctionModel -from pydantic_ai.run import AgentRunResultEvent +from pydantic_ai.models.function import ( + AgentInfo, 
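# --- Illustrative sketch (not part of the patch) -----------------------------
# encode_event above frames every Vercel AI chunk as an SSE `data:` line and
# marks the end of the stream with a literal `data: [DONE]` frame. A minimal
# standalone encoder over plain dicts (the real code serializes pydantic
# models with model_dump_json):
import json
from typing import Any

DONE = object()  # stand-in for the real DoneChunk sentinel

def encode_chunk(chunk: Any) -> str:
    if chunk is DONE:
        return 'data: [DONE]\n\n'
    return f'data: {json.dumps(chunk)}\n\n'

# encode_chunk({'type': 'text-delta', 'delta': 'hi'})
# -> 'data: {"type": "text-delta", "delta": "hi"}\n\n'
# ------------------------------------------------------------------------------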
+ BuiltinToolCallsReturns, + DeltaThinkingCalls, + DeltaThinkingPart, + DeltaToolCall, + DeltaToolCalls, + FunctionModel, +) +from pydantic_ai.models.test import TestModel +from pydantic_ai.output import OutputDataT +from pydantic_ai.run import AgentRunResult, AgentRunResultEvent from pydantic_ai.tools import ToolDefinition from pydantic_ai.toolsets import AbstractToolset, ExternalToolset from pydantic_ai.ui.adapter import BaseAdapter @@ -41,6 +53,8 @@ with try_import() as starlette_import_successful: from starlette.requests import Request + from starlette.responses import StreamingResponse + pytestmark = [ pytest.mark.anyio, @@ -69,7 +83,7 @@ class UIDeps: state: UIState -class UIAdapter(BaseAdapter[UIRequest, ModelMessage, str, UIDeps]): +class UIAdapter(BaseAdapter[UIRequest, ModelMessage, str, AgentDepsT, OutputDataT]): @classmethod async def validate_request(cls, request: Request) -> UIRequest: return UIRequest.model_validate(await request.json()) @@ -79,8 +93,8 @@ def load_messages(cls, messages: Sequence[ModelMessage]) -> list[ModelMessage]: return list(messages) @property - def event_stream(self) -> UIEventStream: - return UIEventStream(self.request) + def event_stream(self) -> UIEventStream[AgentDepsT, OutputDataT]: + return UIEventStream[AgentDepsT, OutputDataT](self.request) @cached_property def messages(self) -> list[ModelMessage]: @@ -91,12 +105,16 @@ def state(self) -> dict[str, Any] | None: return self.request.state @cached_property - def toolset(self) -> AbstractToolset[UIDeps] | None: + def toolset(self) -> AbstractToolset[AgentDepsT] | None: return ExternalToolset(self.request.tool_defs) if self.request.tool_defs else None + @property + def response_headers(self) -> dict[str, str]: + return {'x-test': 'test'} + @dataclass(kw_only=True) -class UIEventStream(BaseEventStream[UIRequest, str, UIDeps]): +class UIEventStream(BaseEventStream[UIRequest, str, AgentDepsT, OutputDataT]): def encode_event(self, event: str, accept: str | None = None) -> str: return event @@ -121,22 +139,22 @@ async def handle_part_end(self, event: PartEndEvent) -> AsyncIterator[str]: yield e async def handle_text_start(self, part: TextPart, follows_text: bool = False) -> AsyncIterator[str]: - yield f'{part.content}' + yield f'{part.content}' async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[str]: yield delta.content_delta async def handle_text_end(self, part: TextPart, followed_by_text: bool = False) -> AsyncIterator[str]: - yield '' + yield f'' async def handle_thinking_start(self, part: ThinkingPart, follows_thinking: bool = False) -> AsyncIterator[str]: - yield f'{part.content}' + yield f'{part.content}' async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[str]: yield str(delta.content_delta) async def handle_thinking_end(self, part: ThinkingPart, followed_by_thinking: bool = False) -> AsyncIterator[str]: - yield '' + yield f'' async def handle_tool_call_start(self, part: ToolCallPart) -> AsyncIterator[str]: yield f'{part.args}' @@ -160,7 +178,7 @@ async def handle_file(self, part: FilePart) -> AsyncIterator[str]: yield f'' async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[str]: - yield '' + yield f'' async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[str]: yield f'{event.part.args}' @@ -193,7 +211,51 @@ async def on_error(self, error: Exception) -> AsyncIterator[str]: yield f'{str(error)}' -async def test_event_stream_builtin_tool_call(): +async def 
test_run_stream_text_and_thinking(): + async def stream_function( + messages: list[ModelMessage], agent_info: AgentInfo + ) -> AsyncIterator[DeltaThinkingCalls | str]: + yield {0: DeltaThinkingPart(content='Half of ')} + yield {0: DeltaThinkingPart(content='a thought')} + yield {1: DeltaThinkingPart(content='Another thought')} + yield {2: DeltaThinkingPart(content='And one more')} + yield 'Half of ' + yield 'some text' + yield {5: DeltaThinkingPart(content='More thinking')} + + agent = Agent(model=FunctionModel(stream_function=stream_function), deps_type=UIDeps) + + request = UIRequest(messages=[ModelRequest.user_text_prompt('Tell me about Hello World')]) + deps = UIDeps(state=UIState()) + + adapter = UIAdapter(agent, request) + events = [event async for event in adapter.run_stream(deps=deps)] + + assert events == snapshot( + [ + '', + '', + 'Half of ', + 'a thought', + '', + 'Another thought', + '', + 'And one more', + '', + 'Half of ', + '', + 'some text', + '', + 'More thinking', + '', + '', + 'Half of some text', + '', + ] + ) + + +async def test_run_stream_builtin_tool_call(): async def stream_function( messages: list[ModelMessage], agent_info: AgentInfo ) -> AsyncIterator[BuiltinToolCallsReturns | DeltaToolCalls | str]: @@ -239,30 +301,368 @@ async def stream_function( assert events == snapshot( [ '', - '', + '', '{"query":', '"Hello world"}', "", "{'results': [{'title': '\"Hello, World!\" program', 'url': 'https://en.wikipedia.org/wiki/%22Hello,_World!%22_program'}]}", - 'A "Hello, World!" program is usually a simple computer program that emits (or displays) to the screen (often the console) a message similar to "Hello, World!". ', - '', - '', + 'A "Hello, World!" program is usually a simple computer program that emits (or displays) to the screen (often the console) a message similar to "Hello, World!". ', + '', + '', + '', + 'A "Hello, World!" program is usually a simple computer program that emits (or displays) to the screen (often the console) a message similar to "Hello, World!". ', + '', + ] + ) + + +async def test_run_stream_tool_call(): + async def stream_function( + messages: list[ModelMessage], agent_info: AgentInfo + ) -> AsyncIterator[DeltaToolCalls | str]: + if len(messages) == 1: + yield { + 0: DeltaToolCall( + name='web_search', + json_args='{"query":', + tool_call_id='search_1', + ) + } + yield { + 0: DeltaToolCall( + json_args='"Hello world"}', + tool_call_id='search_1', + ) + } + else: + yield 'A "Hello, World!" program is usually a simple computer program that emits (or displays) to the screen (often the console) a message similar to "Hello, World!". ' + + agent = Agent(model=FunctionModel(stream_function=stream_function), deps_type=UIDeps) + + @agent.tool_plain + async def web_search(query: str) -> dict[str, list[dict[str, str]]]: + return { + 'results': [ + { + 'title': '"Hello, World!" program', + 'url': 'https://en.wikipedia.org/wiki/%22Hello,_World!%22_program', + } + ] + } + + request = UIRequest(messages=[ModelRequest.user_text_prompt('Tell me about Hello World')]) + deps = UIDeps(state=UIState()) + + adapter = UIAdapter(agent, request) + events = [event async for event in adapter.run_stream(deps=deps)] + + assert events == snapshot( + [ + '', + '', + '{"query":', + '"Hello world"}', + "", + '', + '', + '{"query":"Hello world"}', + "{'results': [{'title': '\"Hello, World!\" program', 'url': 'https://en.wikipedia.org/wiki/%22Hello,_World!%22_program'}]}", '', + '', + 'A "Hello, World!" 
program is usually a simple computer program that emits (or displays) to the screen (often the console) a message similar to "Hello, World!". ', + '', + '', + '', 'A "Hello, World!" program is usually a simple computer program that emits (or displays) to the screen (often the console) a message similar to "Hello, World!". ', '', ] ) -async def test_event_stream_text(): - # tool call - # error - # output tool - # - dispatch_request - # text - # - back to back - # thinking - # - back to back - # tool call - # file - pass +async def test_run_stream_file(): + agent = Agent(model=TestModel(), deps_type=UIDeps) + + async def event_generator(): + yield PartStartEvent(index=0, part=FilePart(content=BinaryImage(data=b'fake', media_type='image/png'))) + + request = UIRequest(messages=[ModelRequest.user_text_prompt('Generate an image')]) + adapter = UIAdapter(agent, request) + events = [event async for event in adapter.process_stream(event_generator())] + + assert events == snapshot( + [ + '', + '', + "", + '', + '', + ] + ) + + +async def test_run_stream_external_tools(): + agent = Agent(model=TestModel()) + + request = UIRequest( + messages=[ModelRequest.user_text_prompt('Call a tool')], + tool_defs=[ToolDefinition(name='external_tool')], + ) + adapter = UIAdapter(agent, request) + events = [event async for event in adapter.run_stream()] + + assert events == snapshot( + [ + '', + '', + "{}", + '', + "", + '', + '', + "{}", + '', + "DeferredToolRequests(calls=[ToolCallPart(tool_name='external_tool', args={}, tool_call_id='pyd_ai_tool_call_id__external_tool')], approvals=[])", + '', + ] + ) + + +async def test_run_stream_output_tool(): + async def stream_function( + messages: list[ModelMessage], agent_info: AgentInfo + ) -> AsyncIterator[DeltaToolCalls | str]: + if len(messages) == 1: + yield { + 0: DeltaToolCall( + name='final_result', + json_args='{"query":', + tool_call_id='search_1', + ) + } + yield { + 0: DeltaToolCall( + json_args='"Hello world"}', + tool_call_id='search_1', + ) + } + else: + yield 'A "Hello, World!" program is usually a simple computer program that emits (or displays) to the screen (often the console) a message similar to "Hello, World!". ' + + def web_search(query: str) -> dict[str, list[dict[str, str]]]: + return { + 'results': [ + { + 'title': '"Hello, World!" 
program', + 'url': 'https://en.wikipedia.org/wiki/%22Hello,_World!%22_program', + } + ] + } + + agent = Agent(model=FunctionModel(stream_function=stream_function), deps_type=UIDeps, output_type=web_search) + + request = UIRequest(messages=[ModelRequest.user_text_prompt('Tell me about Hello World')]) + deps = UIDeps(state=UIState()) + + adapter = UIAdapter(agent, request) + events = [event async for event in adapter.run_stream(deps=deps)] + + assert events == snapshot( + [ + '', + '', + '{"query":', + "", + '"Hello world"}', + "", + '', + '', + "Final result processed.", + '', + "{'results': [{'title': '\"Hello, World!\" program', 'url': 'https://en.wikipedia.org/wiki/%22Hello,_World!%22_program'}]}", + '', + ] + ) + + +async def test_run_stream_response_error(): + async def stream_function( + messages: list[ModelMessage], agent_info: AgentInfo + ) -> AsyncIterator[DeltaToolCalls | str]: + yield { + 0: DeltaToolCall( + name='unknown_tool', + ) + } + + agent = Agent(model=FunctionModel(stream_function=stream_function), deps_type=UIDeps) + + request = UIRequest(messages=[ModelRequest.user_text_prompt('Tell me about Hello World')]) + deps = UIDeps(state=UIState()) + + adapter = UIAdapter(agent, request) + events = [event async for event in adapter.run_stream(deps=deps)] + + assert events == snapshot( + [ + '', + '', + "None", + "", + '', + '', + "None", + "Unknown tool name: 'unknown_tool'. No tools available.", + '', + '', + "None", + "", + "Exceeded maximum retries (1) for output validation", + '', + '', + ] + ) + + +async def test_run_stream_request_error(): + agent = Agent(model=TestModel(), deps_type=UIDeps) + + @agent.tool_plain + async def tool(query: str) -> str: + raise ValueError('Unknown tool') + + request = UIRequest(messages=[ModelRequest.user_text_prompt('Hello')]) + deps = UIDeps(state=UIState()) + + adapter = UIAdapter(agent, request) + events = [event async for event in adapter.run_stream(deps=deps)] + + assert events == snapshot( + [ + '', + '', + "{'query': 'a'}", + "", + '', + '', + "{'query': 'a'}", + "Unknown tool", + '', + '', + ] + ) + + +async def test_run_stream_on_complete_error(): + agent = Agent(model=TestModel(), deps_type=UIDeps) + + request = UIRequest(messages=[ModelRequest.user_text_prompt('Hello')]) + deps = UIDeps(state=UIState()) + + def raise_error(run_result: AgentRunResult[Any]) -> None: + raise ValueError('Faulty on_complete') + + adapter = UIAdapter(agent, request) + events = [event async for event in adapter.run_stream(deps=deps, on_complete=raise_error)] + + assert events == snapshot( + [ + '', + '', + '', + '', + 'success ', + '(no ', + 'tool ', + 'calls)', + '', + '', + "Faulty on_complete", + '', + ] + ) + + +async def test_run_stream_on_complete(): + agent = Agent(model=TestModel(), deps_type=UIDeps) + + request = UIRequest(messages=[ModelRequest.user_text_prompt('Hello')]) + deps = UIDeps(state=UIState()) + + async def on_complete(run_result: AgentRunResult[Any]) -> AsyncIterator[str]: + yield '' + + adapter = UIAdapter(agent, request) + events = [event async for event in adapter.run_stream(deps=deps, on_complete=on_complete)] + + assert events == snapshot( + [ + '', + '', + '', + '', + 'success ', + '(no ', + 'tool ', + 'calls)', + '', + '', + '', + 'success (no tool calls)', + '', + ] + ) + + +@pytest.mark.skipif(not starlette_import_successful, reason='Starlette is not installed') +async def test_adapter_dispatch_request(): + agent = Agent(model=TestModel()) + + request = UIRequest(messages=[ModelRequest.user_text_prompt('Hello')]) + + async 
def receive() -> dict[str, Any]: + return {'type': 'http.request', 'body': request.model_dump_json().encode('utf-8')} + + starlette_request = Request( + scope={ + 'type': 'http', + 'method': 'POST', + 'headers': [ + (b'content-type', b'application/json'), + ], + }, + receive=receive, + ) + + response = await UIAdapter.dispatch_request(agent, starlette_request) + + assert isinstance(response, StreamingResponse) + + chunks: list[MutableMapping[str, Any]] = [] + + async def send(data: MutableMapping[str, Any]) -> None: + chunks.append(data) + + await response.stream_response(send) + + assert chunks == snapshot( + [ + {'type': 'http.response.start', 'status': 200, 'headers': [(b'x-test', b'test')]}, + {'type': 'http.response.body', 'body': b'', 'more_body': True}, + {'type': 'http.response.body', 'body': b'', 'more_body': True}, + {'type': 'http.response.body', 'body': b'', 'more_body': True}, + {'type': 'http.response.body', 'body': b'', 'more_body': True}, + {'type': 'http.response.body', 'body': b'success ', 'more_body': True}, + {'type': 'http.response.body', 'body': b'(no ', 'more_body': True}, + {'type': 'http.response.body', 'body': b'tool ', 'more_body': True}, + {'type': 'http.response.body', 'body': b'calls)', 'more_body': True}, + {'type': 'http.response.body', 'body': b'', 'more_body': True}, + {'type': 'http.response.body', 'body': b'', 'more_body': True}, + { + 'type': 'http.response.body', + 'body': b'success (no tool calls)', + 'more_body': True, + }, + {'type': 'http.response.body', 'body': b'', 'more_body': True}, + {'type': 'http.response.body', 'body': b'', 'more_body': False}, + ] + ) From 4622eb5a42e531d1f76f3b11e7d83375118ea281 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Thu, 16 Oct 2025 15:19:00 +0000 Subject: [PATCH 28/46] tests --- .../pydantic_ai/ui/event_stream.py | 6 +- tests/test_ui.py | 57 +- tests/test_vercel_ai.py | 640 +++++++++++++++++- 3 files changed, 661 insertions(+), 42 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py index e4fb0254af..fd6cf70247 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py @@ -232,7 +232,7 @@ async def handle_event(self, event: SourceEvent) -> AsyncIterator[EventT]: yield e case FunctionToolCallEvent(): async for e in self.handle_function_tool_call(event): - yield e # TODO (DouweM): coverage + yield e case FunctionToolResultEvent(): async for e in self.handle_function_tool_result(event): yield e @@ -266,7 +266,7 @@ async def handle_part_start(self, event: PartStartEvent) -> AsyncIterator[EventT case BuiltinToolReturnPart(): async for e in self.handle_builtin_tool_return(part): yield e - case FilePart(): # TODO (DouweM): coverage + case FilePart(): # pragma: no branch async for e in self.handle_file(part): yield e @@ -365,7 +365,7 @@ async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator Yields: Protocol-specific events. 
""" - return # TODO (DouweM): coverage + return # pragma: no cover yield # Make this an async generator async def handle_thinking_end( diff --git a/tests/test_ui.py b/tests/test_ui.py index ce6cc05157..ae24a2ff39 100644 --- a/tests/test_ui.py +++ b/tests/test_ui.py @@ -223,13 +223,11 @@ async def stream_function( yield 'some text' yield {5: DeltaThinkingPart(content='More thinking')} - agent = Agent(model=FunctionModel(stream_function=stream_function), deps_type=UIDeps) + agent = Agent(model=FunctionModel(stream_function=stream_function)) request = UIRequest(messages=[ModelRequest.user_text_prompt('Tell me about Hello World')]) - deps = UIDeps(state=UIState()) - adapter = UIAdapter(agent, request) - events = [event async for event in adapter.run_stream(deps=deps)] + events = [event async for event in adapter.run_stream()] assert events == snapshot( [ @@ -290,13 +288,11 @@ async def stream_function( } yield 'A "Hello, World!" program is usually a simple computer program that emits (or displays) to the screen (often the console) a message similar to "Hello, World!". ' - agent = Agent(model=FunctionModel(stream_function=stream_function), deps_type=UIDeps) + agent = Agent(model=FunctionModel(stream_function=stream_function)) request = UIRequest(messages=[ModelRequest.user_text_prompt('Tell me about Hello World')]) - deps = UIDeps(state=UIState()) - adapter = UIAdapter(agent, request) - events = [event async for event in adapter.run_stream(deps=deps)] + events = [event async for event in adapter.run_stream()] assert events == snapshot( [ @@ -337,7 +333,7 @@ async def stream_function( else: yield 'A "Hello, World!" program is usually a simple computer program that emits (or displays) to the screen (often the console) a message similar to "Hello, World!". 
' - agent = Agent(model=FunctionModel(stream_function=stream_function), deps_type=UIDeps) + agent = Agent(model=FunctionModel(stream_function=stream_function)) @agent.tool_plain async def web_search(query: str) -> dict[str, list[dict[str, str]]]: @@ -351,10 +347,8 @@ async def web_search(query: str) -> dict[str, list[dict[str, str]]]: } request = UIRequest(messages=[ModelRequest.user_text_prompt('Tell me about Hello World')]) - deps = UIDeps(state=UIState()) - adapter = UIAdapter(agent, request) - events = [event async for event in adapter.run_stream(deps=deps)] + events = [event async for event in adapter.run_stream()] assert events == snapshot( [ @@ -379,15 +373,13 @@ async def web_search(query: str) -> dict[str, list[dict[str, str]]]: ) -async def test_run_stream_file(): - agent = Agent(model=TestModel(), deps_type=UIDeps) - +async def test_event_stream_file(): async def event_generator(): yield PartStartEvent(index=0, part=FilePart(content=BinaryImage(data=b'fake', media_type='image/png'))) - request = UIRequest(messages=[ModelRequest.user_text_prompt('Generate an image')]) - adapter = UIAdapter(agent, request) - events = [event async for event in adapter.process_stream(event_generator())] + request = UIRequest(messages=[ModelRequest.user_text_prompt('Hello')]) + event_stream = UIEventStream(request=request) + events = [event async for event in event_stream.handle_stream(event_generator())] assert events == snapshot( [ @@ -458,13 +450,11 @@ def web_search(query: str) -> dict[str, list[dict[str, str]]]: ] } - agent = Agent(model=FunctionModel(stream_function=stream_function), deps_type=UIDeps, output_type=web_search) + agent = Agent(model=FunctionModel(stream_function=stream_function), output_type=web_search) request = UIRequest(messages=[ModelRequest.user_text_prompt('Tell me about Hello World')]) - deps = UIDeps(state=UIState()) - adapter = UIAdapter(agent, request) - events = [event async for event in adapter.run_stream(deps=deps)] + events = [event async for event in adapter.run_stream()] assert events == snapshot( [ @@ -494,13 +484,11 @@ async def stream_function( ) } - agent = Agent(model=FunctionModel(stream_function=stream_function), deps_type=UIDeps) + agent = Agent(model=FunctionModel(stream_function=stream_function)) request = UIRequest(messages=[ModelRequest.user_text_prompt('Tell me about Hello World')]) - deps = UIDeps(state=UIState()) - adapter = UIAdapter(agent, request) - events = [event async for event in adapter.run_stream(deps=deps)] + events = [event async for event in adapter.run_stream()] assert events == snapshot( [ @@ -524,17 +512,15 @@ async def stream_function( async def test_run_stream_request_error(): - agent = Agent(model=TestModel(), deps_type=UIDeps) + agent = Agent(model=TestModel()) @agent.tool_plain async def tool(query: str) -> str: raise ValueError('Unknown tool') request = UIRequest(messages=[ModelRequest.user_text_prompt('Hello')]) - deps = UIDeps(state=UIState()) - adapter = UIAdapter(agent, request) - events = [event async for event in adapter.run_stream(deps=deps)] + events = [event async for event in adapter.run_stream()] assert events == snapshot( [ @@ -553,16 +539,15 @@ async def tool(query: str) -> str: async def test_run_stream_on_complete_error(): - agent = Agent(model=TestModel(), deps_type=UIDeps) + agent = Agent(model=TestModel()) request = UIRequest(messages=[ModelRequest.user_text_prompt('Hello')]) - deps = UIDeps(state=UIState()) def raise_error(run_result: AgentRunResult[Any]) -> None: raise ValueError('Faulty on_complete') adapter 
= UIAdapter(agent, request) - events = [event async for event in adapter.run_stream(deps=deps, on_complete=raise_error)] + events = [event async for event in adapter.run_stream(on_complete=raise_error)] assert events == snapshot( [ @@ -583,16 +568,15 @@ def raise_error(run_result: AgentRunResult[Any]) -> None: async def test_run_stream_on_complete(): - agent = Agent(model=TestModel(), deps_type=UIDeps) + agent = Agent(model=TestModel()) request = UIRequest(messages=[ModelRequest.user_text_prompt('Hello')]) - deps = UIDeps(state=UIState()) async def on_complete(run_result: AgentRunResult[Any]) -> AsyncIterator[str]: yield '' adapter = UIAdapter(agent, request) - events = [event async for event in adapter.run_stream(deps=deps, on_complete=on_complete)] + events = [event async for event in adapter.run_stream(on_complete=on_complete)] assert events == snapshot( [ @@ -616,7 +600,6 @@ async def on_complete(run_result: AgentRunResult[Any]) -> AsyncIterator[str]: @pytest.mark.skipif(not starlette_import_successful, reason='Starlette is not installed') async def test_adapter_dispatch_request(): agent = Agent(model=TestModel()) - request = UIRequest(messages=[ModelRequest.user_text_prompt('Hello')]) async def receive() -> dict[str, Any]: diff --git a/tests/test_vercel_ai.py b/tests/test_vercel_ai.py index d6e83445b1..4097ca52aa 100644 --- a/tests/test_vercel_ai.py +++ b/tests/test_vercel_ai.py @@ -1,23 +1,54 @@ from __future__ import annotations import json +from collections.abc import AsyncIterator, MutableMapping +from typing import Any, cast import pytest from inline_snapshot import snapshot from pydantic_ai import Agent from pydantic_ai.builtin_tools import WebSearchTool -from pydantic_ai.messages import ModelRequest, ModelResponse, TextPart, ToolCallPart, ToolReturnPart, UserPromptPart -from pydantic_ai.ui.vercel_ai import VercelAIAdapter +from pydantic_ai.messages import ( + BinaryImage, + BuiltinToolCallPart, + BuiltinToolReturnPart, + FilePart, + ModelMessage, + ModelRequest, + ModelResponse, + PartStartEvent, + TextPart, + ToolCallPart, + ToolReturnPart, + UserPromptPart, +) +from pydantic_ai.models.function import ( + AgentInfo, + BuiltinToolCallsReturns, + DeltaThinkingCalls, + DeltaThinkingPart, + DeltaToolCall, + DeltaToolCalls, + FunctionModel, +) +from pydantic_ai.models.test import TestModel +from pydantic_ai.run import AgentRunResult +from pydantic_ai.ui.vercel_ai import VercelAIAdapter, VercelAIEventStream from pydantic_ai.ui.vercel_ai._request_types import ( SubmitMessage, TextUIPart, ToolOutputAvailablePart, UIMessage, ) +from pydantic_ai.ui.vercel_ai._response_types import BaseChunk, DataChunk from .conftest import IsDatetime, IsStr, try_import +with try_import() as starlette_import_successful: + from starlette.requests import Request + from starlette.responses import StreamingResponse + with try_import() as openai_import_successful: from pydantic_ai.models.openai import OpenAIResponsesModel from pydantic_ai.providers.openai import OpenAIProvider @@ -1019,3 +1050,608 @@ def client_response\ '[DONE]', ] ) + + +async def test_run_stream_text_and_thinking(): + async def stream_function( + messages: list[ModelMessage], agent_info: AgentInfo + ) -> AsyncIterator[DeltaThinkingCalls | str]: + yield {0: DeltaThinkingPart(content='Half of ')} + yield {0: DeltaThinkingPart(content='a thought')} + yield {1: DeltaThinkingPart(content='Another thought')} + yield {2: DeltaThinkingPart(content='And one more')} + yield 'Half of ' + yield 'some text' + yield {5: DeltaThinkingPart(content='More 
thinking')} + + agent = Agent(model=FunctionModel(stream_function=stream_function)) + + request = SubmitMessage( + id='foo', + messages=[ + UIMessage( + id='bar', + role='user', + parts=[TextUIPart(text='Tell me about Hello World')], + ), + ], + ) + + adapter = VercelAIAdapter(agent, request) + events = [ + '[DONE]' if '[DONE]' in event else json.loads(event.removeprefix('data: ')) + async for event in adapter.encode_stream(adapter.run_stream()) + ] + + assert events == snapshot( + [ + {'type': 'start'}, + {'type': 'start-step'}, + {'type': 'reasoning-start', 'id': IsStr()}, + {'type': 'reasoning-delta', 'id': IsStr(), 'delta': 'Half of '}, + {'type': 'reasoning-delta', 'id': IsStr(), 'delta': 'a thought'}, + {'type': 'reasoning-end', 'id': IsStr()}, + {'type': 'reasoning-start', 'id': IsStr()}, + {'type': 'reasoning-delta', 'id': IsStr(), 'delta': 'Another thought'}, + {'type': 'reasoning-end', 'id': IsStr()}, + {'type': 'reasoning-start', 'id': IsStr()}, + {'type': 'reasoning-delta', 'id': IsStr(), 'delta': 'And one more'}, + {'type': 'reasoning-end', 'id': IsStr()}, + {'type': 'text-start', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'Half of ', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'some text', 'id': IsStr()}, + {'type': 'text-end', 'id': IsStr()}, + {'type': 'reasoning-start', 'id': IsStr()}, + {'type': 'reasoning-delta', 'id': IsStr(), 'delta': 'More thinking'}, + {'type': 'reasoning-end', 'id': IsStr()}, + {'type': 'finish-step'}, + {'type': 'finish'}, + '[DONE]', + ] + ) + + +async def test_run_stream_builtin_tool_call(): + async def stream_function( + messages: list[ModelMessage], agent_info: AgentInfo + ) -> AsyncIterator[BuiltinToolCallsReturns | DeltaToolCalls | str]: + yield { + 0: BuiltinToolCallPart( + tool_name=WebSearchTool.kind, + args='{"query":', + tool_call_id='search_1', + provider_name='function', + ) + } + yield { + 0: DeltaToolCall( + json_args='"Hello world"}', + tool_call_id='search_1', + ) + } + yield { + 1: BuiltinToolReturnPart( + tool_name=WebSearchTool.kind, + content={ + 'results': [ + { + 'title': '"Hello, World!" program', + 'url': 'https://en.wikipedia.org/wiki/%22Hello,_World!%22_program', + } + ] + }, + tool_call_id='search_1', + provider_name='function', + ) + } + yield 'A "Hello, World!" program is usually a simple computer program that emits (or displays) to the screen (often the console) a message similar to "Hello, World!". 
' + + agent = Agent(model=FunctionModel(stream_function=stream_function)) + + request = SubmitMessage( + id='foo', + messages=[ + UIMessage( + id='bar', + role='user', + parts=[TextUIPart(text='Tell me about Hello World')], + ), + ], + ) + adapter = VercelAIAdapter(agent, request) + events = [ + '[DONE]' if '[DONE]' in event else json.loads(event.removeprefix('data: ')) + async for event in adapter.encode_stream(adapter.run_stream()) + ] + + assert events == snapshot( + [ + {'type': 'start'}, + {'type': 'start-step'}, + {'type': 'tool-input-start', 'toolCallId': 'search_1', 'toolName': 'web_search', 'providerExecuted': True}, + {'type': 'tool-input-delta', 'toolCallId': 'search_1', 'inputTextDelta': '{"query":'}, + {'type': 'tool-input-delta', 'toolCallId': 'search_1', 'inputTextDelta': '"Hello world"}'}, + { + 'type': 'tool-input-available', + 'toolCallId': 'search_1', + 'toolName': 'web_search', + 'input': '{"query":"Hello world"}', + 'providerExecuted': True, + 'providerMetadata': {'pydantic_ai': {'provider_name': 'function'}}, + }, + { + 'type': 'tool-output-available', + 'toolCallId': 'search_1', + 'output': { + 'results': [ + { + 'title': '"Hello, World!" program', + 'url': 'https://en.wikipedia.org/wiki/%22Hello,_World!%22_program', + } + ] + }, + 'providerExecuted': True, + }, + {'type': 'text-start', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': 'A "Hello, World!" program is usually a simple computer program that emits (or displays) to the screen (often the console) a message similar to "Hello, World!". ', + 'id': IsStr(), + }, + {'type': 'text-end', 'id': IsStr()}, + {'type': 'finish-step'}, + {'type': 'finish'}, + '[DONE]', + ] + ) + + +async def test_run_stream_tool_call(): + async def stream_function( + messages: list[ModelMessage], agent_info: AgentInfo + ) -> AsyncIterator[DeltaToolCalls | str]: + if len(messages) == 1: + yield { + 0: DeltaToolCall( + name='web_search', + json_args='{"query":', + tool_call_id='search_1', + ) + } + yield { + 0: DeltaToolCall( + json_args='"Hello world"}', + tool_call_id='search_1', + ) + } + else: + yield 'A "Hello, World!" program is usually a simple computer program that emits (or displays) to the screen (often the console) a message similar to "Hello, World!". ' + + agent = Agent(model=FunctionModel(stream_function=stream_function)) + + @agent.tool_plain + async def web_search(query: str) -> dict[str, list[dict[str, str]]]: + return { + 'results': [ + { + 'title': '"Hello, World!" 
program', + 'url': 'https://en.wikipedia.org/wiki/%22Hello,_World!%22_program', + } + ] + } + + request = SubmitMessage( + id='foo', + messages=[ + UIMessage( + id='bar', + role='user', + parts=[TextUIPart(text='Tell me about Hello World')], + ), + ], + ) + adapter = VercelAIAdapter(agent, request) + events = [ + '[DONE]' if '[DONE]' in event else json.loads(event.removeprefix('data: ')) + async for event in adapter.encode_stream(adapter.run_stream()) + ] + + assert events == snapshot( + [ + {'type': 'start'}, + {'type': 'start-step'}, + {'type': 'tool-input-start', 'toolCallId': 'search_1', 'toolName': 'web_search'}, + {'type': 'tool-input-delta', 'toolCallId': 'search_1', 'inputTextDelta': '{"query":'}, + {'type': 'tool-input-delta', 'toolCallId': 'search_1', 'inputTextDelta': '"Hello world"}'}, + { + 'type': 'tool-input-available', + 'toolCallId': 'search_1', + 'toolName': 'web_search', + 'input': '{"query":"Hello world"}', + }, + { + 'type': 'tool-output-available', + 'toolCallId': 'search_1', + 'output': { + 'results': [ + { + 'title': '"Hello, World!" program', + 'url': 'https://en.wikipedia.org/wiki/%22Hello,_World!%22_program', + } + ] + }, + }, + {'type': 'finish-step'}, + {'type': 'start-step'}, + {'type': 'text-start', 'id': IsStr()}, + { + 'type': 'text-delta', + 'delta': 'A "Hello, World!" program is usually a simple computer program that emits (or displays) to the screen (often the console) a message similar to "Hello, World!". ', + 'id': IsStr(), + }, + {'type': 'text-end', 'id': IsStr()}, + {'type': 'finish-step'}, + {'type': 'finish'}, + '[DONE]', + ] + ) + + +async def test_event_stream_file(): + async def event_generator(): + yield PartStartEvent(index=0, part=FilePart(content=BinaryImage(data=b'fake', media_type='image/png'))) + + request = SubmitMessage( + id='foo', + messages=[ + UIMessage( + id='bar', + role='user', + parts=[TextUIPart(text='Hello')], + ), + ], + ) + event_stream = VercelAIEventStream(request=request) + events = [ + '[DONE]' if '[DONE]' in event else json.loads(event.removeprefix('data: ')) + async for event in event_stream.encode_stream(event_stream.handle_stream(event_generator())) + ] + + assert events == snapshot( + [ + {'type': 'start'}, + {'type': 'start-step'}, + {'type': 'file', 'url': 'data:image/png;base64,ZmFrZQ==', 'mediaType': 'image/png'}, + {'type': 'finish-step'}, + {'type': 'finish'}, + '[DONE]', + ] + ) + + +async def test_run_stream_output_tool(): + async def stream_function( + messages: list[ModelMessage], agent_info: AgentInfo + ) -> AsyncIterator[DeltaToolCalls | str]: + if len(messages) == 1: + yield { + 0: DeltaToolCall( + name='final_result', + json_args='{"query":', + tool_call_id='search_1', + ) + } + yield { + 0: DeltaToolCall( + json_args='"Hello world"}', + tool_call_id='search_1', + ) + } + else: + yield 'A "Hello, World!" program is usually a simple computer program that emits (or displays) to the screen (often the console) a message similar to "Hello, World!". ' + + def web_search(query: str) -> dict[str, list[dict[str, str]]]: + return { + 'results': [ + { + 'title': '"Hello, World!" 
program', + 'url': 'https://en.wikipedia.org/wiki/%22Hello,_World!%22_program', + } + ] + } + + agent = Agent(model=FunctionModel(stream_function=stream_function), output_type=web_search) + + request = SubmitMessage( + id='foo', + messages=[ + UIMessage( + id='bar', + role='user', + parts=[TextUIPart(text='Tell me about Hello World')], + ), + ], + ) + adapter = VercelAIAdapter(agent, request) + events = [ + '[DONE]' if '[DONE]' in event else json.loads(event.removeprefix('data: ')) + async for event in adapter.encode_stream(adapter.run_stream()) + ] + + assert events == snapshot( + [ + {'type': 'start'}, + {'type': 'start-step'}, + {'type': 'tool-input-start', 'toolCallId': 'search_1', 'toolName': 'final_result'}, + {'type': 'tool-input-delta', 'toolCallId': 'search_1', 'inputTextDelta': '{"query":'}, + {'type': 'tool-input-delta', 'toolCallId': 'search_1', 'inputTextDelta': '"Hello world"}'}, + { + 'type': 'tool-input-available', + 'toolCallId': 'search_1', + 'toolName': 'final_result', + 'input': '{"query":"Hello world"}', + }, + {'type': 'tool-output-available', 'toolCallId': 'search_1', 'output': 'Final result processed.'}, + {'type': 'finish-step'}, + {'type': 'finish'}, + '[DONE]', + ] + ) + + +async def test_run_stream_response_error(): + async def stream_function( + messages: list[ModelMessage], agent_info: AgentInfo + ) -> AsyncIterator[DeltaToolCalls | str]: + yield { + 0: DeltaToolCall( + name='unknown_tool', + ) + } + + agent = Agent(model=FunctionModel(stream_function=stream_function)) + + request = SubmitMessage( + id='foo', + messages=[ + UIMessage( + id='bar', + role='user', + parts=[TextUIPart(text='Tell me about Hello World')], + ), + ], + ) + adapter = VercelAIAdapter(agent, request) + events = [ + '[DONE]' if '[DONE]' in event else json.loads(event.removeprefix('data: ')) + async for event in adapter.encode_stream(adapter.run_stream()) + ] + + assert events == snapshot( + [ + {'type': 'start'}, + {'type': 'start-step'}, + { + 'type': 'tool-input-start', + 'toolCallId': IsStr(), + 'toolName': 'unknown_tool', + }, + { + 'type': 'tool-input-available', + 'toolCallId': IsStr(), + 'toolName': 'unknown_tool', + }, + { + 'type': 'tool-output-error', + 'toolCallId': IsStr(), + 'errorText': """\ +Unknown tool name: 'unknown_tool'. No tools available. 
+ +Fix the errors and try again.\ +""", + }, + {'type': 'finish-step'}, + {'type': 'start-step'}, + { + 'type': 'tool-input-start', + 'toolCallId': IsStr(), + 'toolName': 'unknown_tool', + }, + { + 'type': 'tool-input-available', + 'toolCallId': IsStr(), + 'toolName': 'unknown_tool', + }, + {'type': 'error', 'errorText': 'Exceeded maximum retries (1) for output validation'}, + {'type': 'finish-step'}, + {'type': 'finish'}, + '[DONE]', + ] + ) + + +async def test_run_stream_request_error(): + agent = Agent(model=TestModel()) + + @agent.tool_plain + async def tool(query: str) -> str: + raise ValueError('Unknown tool') + + request = SubmitMessage( + id='foo', + messages=[ + UIMessage( + id='bar', + role='user', + parts=[TextUIPart(text='Hello')], + ), + ], + ) + adapter = VercelAIAdapter(agent, request) + events = [ + '[DONE]' if '[DONE]' in event else json.loads(event.removeprefix('data: ')) + async for event in adapter.encode_stream(adapter.run_stream()) + ] + + assert events == snapshot( + [ + {'type': 'start'}, + {'type': 'start-step'}, + {'type': 'tool-input-start', 'toolCallId': 'pyd_ai_tool_call_id__tool', 'toolName': 'tool'}, + {'type': 'tool-input-delta', 'toolCallId': 'pyd_ai_tool_call_id__tool', 'inputTextDelta': '{"query":"a"}'}, + { + 'type': 'tool-input-available', + 'toolCallId': 'pyd_ai_tool_call_id__tool', + 'toolName': 'tool', + 'input': {'query': 'a'}, + }, + {'type': 'error', 'errorText': 'Unknown tool'}, + {'type': 'finish-step'}, + {'type': 'finish'}, + '[DONE]', + ] + ) + + +async def test_run_stream_on_complete_error(): + agent = Agent(model=TestModel()) + + request = SubmitMessage( + id='foo', + messages=[ + UIMessage( + id='bar', + role='user', + parts=[TextUIPart(text='Hello')], + ), + ], + ) + + def raise_error(run_result: AgentRunResult[Any]) -> None: + raise ValueError('Faulty on_complete') + + adapter = VercelAIAdapter(agent, request) + events = [ + '[DONE]' if '[DONE]' in event else json.loads(event.removeprefix('data: ')) + async for event in adapter.encode_stream(adapter.run_stream(on_complete=raise_error)) + ] + + assert events == snapshot( + [ + {'type': 'start'}, + {'type': 'start-step'}, + {'type': 'text-start', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'success ', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '(no ', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'tool ', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'calls)', 'id': IsStr()}, + {'type': 'text-end', 'id': IsStr()}, + {'type': 'error', 'errorText': 'Faulty on_complete'}, + {'type': 'finish-step'}, + {'type': 'finish'}, + '[DONE]', + ] + ) + + +async def test_run_stream_on_complete(): + agent = Agent(model=TestModel()) + + request = SubmitMessage( + id='foo', + messages=[ + UIMessage( + id='bar', + role='user', + parts=[TextUIPart(text='Hello')], + ), + ], + ) + + async def on_complete(run_result: AgentRunResult[Any]) -> AsyncIterator[BaseChunk]: + yield DataChunk(type='data-custom', data={'foo': 'bar'}) + + adapter = VercelAIAdapter(agent, request) + events = [ + '[DONE]' if '[DONE]' in event else json.loads(event.removeprefix('data: ')) + async for event in adapter.encode_stream(adapter.run_stream(on_complete=on_complete)) + ] + + assert events == snapshot( + [ + {'type': 'start'}, + {'type': 'start-step'}, + {'type': 'text-start', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'success ', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '(no ', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'tool ', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'calls)', 
'id': IsStr()}, + {'type': 'text-end', 'id': IsStr()}, + {'type': 'data-custom', 'data': {'foo': 'bar'}}, + {'type': 'finish-step'}, + {'type': 'finish'}, + '[DONE]', + ] + ) + + +@pytest.mark.skipif(not starlette_import_successful, reason='Starlette is not installed') +async def test_adapter_dispatch_request(): + agent = Agent(model=TestModel()) + request = SubmitMessage( + id='foo', + messages=[ + UIMessage( + id='bar', + role='user', + parts=[TextUIPart(text='Hello')], + ), + ], + ) + + async def receive() -> dict[str, Any]: + return {'type': 'http.request', 'body': request.model_dump_json().encode('utf-8')} + + starlette_request = Request( + scope={ + 'type': 'http', + 'method': 'POST', + 'headers': [ + (b'content-type', b'application/json'), + ], + }, + receive=receive, + ) + + response = await VercelAIAdapter.dispatch_request(agent, starlette_request) + + assert isinstance(response, StreamingResponse) + + chunks: list[str | dict[str, Any]] = [] + + async def send(data: MutableMapping[str, Any]) -> None: + body = cast(bytes, data.get('body', b'')).decode('utf-8').strip().removeprefix('data: ') + if not body: + return + if body == '[DONE]': + chunks.append('[DONE]') + else: + chunks.append(json.loads(body)) + + await response.stream_response(send) + + assert chunks == snapshot( + [ + {'type': 'start'}, + {'type': 'start-step'}, + {'type': 'text-start', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'success ', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '(no ', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'tool ', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'calls)', 'id': IsStr()}, + {'type': 'text-end', 'id': IsStr()}, + {'type': 'finish-step'}, + {'type': 'finish'}, + '[DONE]', + ] + ) From bd6cbc38ec4c875a269b41763977d71fbb64e86f Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Thu, 16 Oct 2025 15:20:53 +0000 Subject: [PATCH 29/46] coverage --- pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py | 4 ++-- pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py index 58ce3685de..481947c49d 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py @@ -70,7 +70,7 @@ class VercelAIAdapter(BaseAdapter[RequestData, UIMessage, BaseChunk, AgentDepsT, @classmethod async def validate_request(cls, request: Request) -> RequestData: """Validate a Vercel AI request.""" - return request_data_ta.validate_json(await request.body()) # TODO (DouweM): coverage + return request_data_ta.validate_json(await request.body()) @property def event_stream(self) -> BaseEventStream[RequestData, BaseChunk, AgentDepsT, OutputDataT]: @@ -79,7 +79,7 @@ def event_stream(self) -> BaseEventStream[RequestData, BaseChunk, AgentDepsT, Ou @property def response_headers(self) -> Mapping[str, str] | None: """Get the response headers for the adapter.""" - return VERCEL_AI_DSP_HEADERS # TODO (DouweM): coverage + return VERCEL_AI_DSP_HEADERS @cached_property def messages(self) -> list[ModelMessage]: diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py index 09d74044f5..18fdafbfdf 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py @@ -91,7 +91,7 @@ async def after_stream(self) -> 
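# --- Illustrative sketch (not part of the patch) -----------------------------
# validate_request above parses the raw request body directly into the
# protocol's request type with a pydantic TypeAdapter. A standalone equivalent
# with a hypothetical stand-in model (the real code validates the RequestData
# union via request_data_ta):
from pydantic import BaseModel, TypeAdapter

class ExampleRequest(BaseModel):  # hypothetical stand-in for RequestData
    id: str

example_request_ta = TypeAdapter(ExampleRequest)

def parse_body(body: bytes) -> ExampleRequest:
    # validate_json accepts bytes or str and raises ValidationError on bad input
    return example_request_ta.validate_json(body)
# ------------------------------------------------------------------------------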
AsyncIterator[BaseChunk]: async def on_error(self, error: Exception) -> AsyncIterator[BaseChunk]: """Handle errors during streaming.""" - yield ErrorChunk(error_text=str(error)) # TODO (DouweM): coverage + yield ErrorChunk(error_text=str(error)) async def handle_text_start(self, part: TextPart, follows_text: bool = False) -> AsyncIterator[BaseChunk]: """Handle a TextPart at start.""" @@ -136,7 +136,7 @@ async def handle_thinking_end( def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseChunk]: """Handle a ToolCallPart or BuiltinToolCallPart at start.""" - return self._handle_tool_call_start(part) # TODO (DouweM): coverage + return self._handle_tool_call_start(part) def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseChunk]: """Handle a BuiltinToolCallEvent, emitting tool input events.""" From 5544992cbaf5d9920ea6fa137288a61ce7c3fd83 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Mon, 27 Oct 2025 19:45:05 +0000 Subject: [PATCH 30/46] Set Content-Type header on StreamingResponse --- pydantic_ai_slim/pydantic_ai/ui/__init__.py | 3 +- pydantic_ai_slim/pydantic_ai/ui/adapter.py | 29 +++++++++------ .../pydantic_ai/ui/ag_ui/_adapter.py | 16 ++++++-- .../pydantic_ai/ui/ag_ui/_event_stream.py | 25 ++++++++----- .../pydantic_ai/ui/event_stream.py | 37 ++++++++++++++----- .../pydantic_ai/ui/vercel_ai/_adapter.py | 7 ++-- .../pydantic_ai/ui/vercel_ai/_event_stream.py | 2 +- tests/test_ui.py | 13 ++++--- 8 files changed, 88 insertions(+), 44 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/__init__.py index 44c2611879..3a27b55042 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/ui/__init__.py @@ -7,11 +7,12 @@ from __future__ import annotations from .adapter import BaseAdapter, OnCompleteFunc, StateDeps, StateHandler -from .event_stream import BaseEventStream +from .event_stream import SSE_CONTENT_TYPE, BaseEventStream __all__ = [ 'BaseAdapter', 'BaseEventStream', + 'SSE_CONTENT_TYPE', 'StateDeps', 'StateHandler', 'OnCompleteFunc', diff --git a/pydantic_ai_slim/pydantic_ai/ui/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/adapter.py index e3a4bf9fe8..d35236fb4e 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/adapter.py @@ -128,10 +128,18 @@ def load_messages(cls, messages: Sequence[MessageT]) -> list[ModelMessage]: """Load messages from the request and return the loaded messages.""" raise NotImplementedError - @property @abstractmethod - def event_stream(self) -> BaseEventStream[RunRequestT, EventT, AgentDepsT, OutputDataT]: - """Create an event stream for the adapter.""" + def build_event_stream( + self, accept: str | None = None + ) -> BaseEventStream[RunRequestT, EventT, AgentDepsT, OutputDataT]: + """Create an event stream for the adapter. + + Args: + accept: The accept header value. + + Returns: + The event stream. + """ raise NotImplementedError @cached_property @@ -167,9 +175,9 @@ def encode_stream(self, stream: AsyncIterator[EventT], accept: str | None = None Args: stream: The stream of events to encode. - accept: The accept header value for encoding format. + accept: The accept header value. """ - return self.event_stream.encode_stream(stream, accept) + return self.build_event_stream(accept).encode_stream(stream) async def process_stream( self, @@ -182,7 +190,7 @@ async def process_stream( stream: The stream of events to process. 
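# --- Illustrative sketch (not part of the patch) -----------------------------
# build_event_stream above replaces the old event_stream property so the
# request's accept header can influence how events are encoded; the adapter
# then composes encoding from the factory. A stripped-down shape of that
# pattern, with hypothetical names:
from collections.abc import AsyncIterator

class ExampleEventStream:  # hypothetical minimal event stream
    def __init__(self, accept: str | None = None) -> None:
        self.accept = accept

    async def encode_stream(self, stream: AsyncIterator[str]) -> AsyncIterator[str]:
        async for event in stream:
            yield f'data: {event}\n\n'

class ExampleAdapter:
    def build_event_stream(self, accept: str | None = None) -> ExampleEventStream:
        return ExampleEventStream(accept=accept)

    def encode_stream(self, stream: AsyncIterator[str], accept: str | None = None) -> AsyncIterator[str]:
        # one factory call serves both encoding and content-type negotiation
        return self.build_event_stream(accept).encode_stream(stream)
# ------------------------------------------------------------------------------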
on_complete: Optional callback function called when the agent run completes successfully. """ - async for event in self.event_stream.handle_stream(stream, on_complete=on_complete): + async for event in self.build_event_stream().handle_stream(stream, on_complete=on_complete): yield event async def run_stream( @@ -266,7 +274,7 @@ async def stream_response(self, stream: AsyncIterator[EventT], accept: str | Non Args: stream: The stream of events to encode. - accept: The accept header value for encoding format. + accept: The accept header value. """ try: from starlette.responses import StreamingResponse @@ -276,12 +284,11 @@ async def stream_response(self, stream: AsyncIterator[EventT], accept: str | Non 'you can use the `ui` optional group — `pip install "pydantic-ai-slim[ui]"`' ) from e + event_stream = self.build_event_stream(accept) return StreamingResponse( - self.encode_stream( - stream, - accept=accept, - ), + event_stream.encode_stream(stream), headers=self.response_headers, + media_type=event_stream.content_type, ) @classmethod diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index 846bfc0729..fc2a375a04 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -101,10 +101,18 @@ async def validate_request(cls, request: Request) -> RunAgentInput: """Validate the request and return the validated request.""" return RunAgentInput.model_validate(await request.json()) - @property - def event_stream(self) -> BaseEventStream[RunAgentInput, BaseEvent, AgentDepsT, OutputDataT]: - """Create an event stream for the adapter.""" - return AGUIEventStream(self.request) + def build_event_stream( + self, accept: str | None = None + ) -> BaseEventStream[RunAgentInput, BaseEvent, AgentDepsT, OutputDataT]: + """Create an event stream for the adapter. + + Args: + accept: The accept header value. + + Returns: + The event stream. + """ + return AGUIEventStream(self.request, accept=accept) @cached_property def toolset(self) -> AbstractToolset[AgentDepsT] | None: diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index fba4650404..7c8c404df3 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -26,7 +26,7 @@ ) from ...output import OutputDataT from ...tools import AgentDepsT -from .. import BaseEventStream +from .. import SSE_CONTENT_TYPE, BaseEventStream try: from ag_ui.core import ( @@ -64,10 +64,6 @@ 'RunFinishedEvent', ] -SSE_CONTENT_TYPE: Final[str] = 'text/event-stream' -"""Content type header value for Server-Sent Events (SSE).""" - - BUILTIN_TOOL_CALL_ID_PREFIX: Final[str] = 'pyd_ai_builtin' @@ -79,18 +75,29 @@ class AGUIEventStream(BaseEventStream[RunAgentInput, BaseEvent, AgentDepsT, Outp _builtin_tool_call_ids: dict[str, str] = field(default_factory=dict) _error: bool = False - def encode_event(self, event: BaseEvent, accept: str | None = None) -> str: + @property + def _event_encoder(self) -> EventEncoder: + return EventEncoder(accept=self.accept or SSE_CONTENT_TYPE) + + @property + def content_type(self) -> str: + """Get the content type for the event stream, compatible with the accept header value. + + Args: + accept: The accept header value. + """ + return self._event_encoder.get_content_type() + + def encode_event(self, event: BaseEvent) -> str: """Encode an AG-UI event as SSE. Args: event: The AG-UI event to encode. 
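# --- Illustrative sketch (not part of the patch) -----------------------------
# stream_response above wires the negotiated content type into Starlette's
# StreamingResponse, so SSE clients get `text/event-stream` by default. A
# minimal standalone version, assuming a string-yielding iterator of
# already-encoded events; the extra header is hypothetical:
from collections.abc import AsyncIterator
from starlette.responses import StreamingResponse

def sse_response(encoded_events: AsyncIterator[str]) -> StreamingResponse:
    return StreamingResponse(
        encoded_events,
        media_type='text/event-stream',  # SSE_CONTENT_TYPE
        headers={'cache-control': 'no-store'},  # hypothetical extra header
    )
# ------------------------------------------------------------------------------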
- accept: The accept header value for encoding format. Returns: The SSE-formatted string. """ - encoder = EventEncoder(accept=accept or SSE_CONTENT_TYPE) - return encoder.encode(event) + return self._event_encoder.encode(event) async def before_stream(self) -> AsyncIterator[BaseEvent]: """Yield events before agent streaming starts.""" diff --git a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py index fd6cf70247..a998f5fd3e 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py @@ -44,6 +44,9 @@ 'BaseEventStream', ] +SSE_CONTENT_TYPE = 'text/event-stream' +"""Content type header value for Server-Sent Events (SSE).""" + EventT = TypeVar('EventT') """Type variable for protocol-specific event types.""" @@ -65,12 +68,15 @@ class BaseEventStream(ABC, Generic[RunRequestT, EventT, AgentDepsT, OutputDataT] """TODO (DouwM): Docstring.""" request: RunRequestT - result: AgentRunResult[OutputDataT] | None = None + + accept: str | None = None + """TODO (DouweM): Docstring""" message_id: str = field(default_factory=lambda: str(uuid4())) _turn: Literal['request', 'response'] | None = None + _result: AgentRunResult[OutputDataT] | None = None _final_result_event: FinalResultEvent | None = None def new_message_id(self) -> str: @@ -82,25 +88,35 @@ def new_message_id(self) -> str: self.message_id = str(uuid4()) return self.message_id + @property + def content_type(self) -> str: + """Get the content type for the event stream, compatible with the accept header value. + + By default, this returns the SSE content type (`text/event-stream`). + If a subclass supports other types as well, it should consider `self.accept` in `encode_event` and return the resulting content type here. + + Args: + accept: The accept header value. + """ + return SSE_CONTENT_TYPE + @abstractmethod - def encode_event(self, event: EventT, accept: str | None = None) -> str: + def encode_event(self, event: EventT) -> str: """Encode an event as a string. Args: event: The event to encode. - accept: The accept header value for encoding format. """ raise NotImplementedError - async def encode_stream(self, stream: AsyncIterator[EventT], accept: str | None = None) -> AsyncIterator[str]: + async def encode_stream(self, stream: AsyncIterator[EventT]) -> AsyncIterator[str]: """Encode a stream of events as SSE strings. Args: stream: The stream of events to encode. - accept: The accept header value for encoding format. 
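# --- Illustrative sketch (not part of the patch) -----------------------------
# The base content_type above defaults to SSE; a subclass that also speaks
# another encoding would branch on self.accept in encode_event and report the
# matching content type here. A hypothetical negotiation helper:
SSE_CONTENT_TYPE = 'text/event-stream'

def negotiate_content_type(accept: str | None) -> str:
    if accept is None or SSE_CONTENT_TYPE in accept or '*/*' in accept:
        return SSE_CONTENT_TYPE
    return 'application/json'  # hypothetical non-SSE fallback
# ------------------------------------------------------------------------------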
""" async for event in stream: - yield self.encode_event(event, accept) + yield self.encode_event(event) async def handle_stream( # noqa: C901 self, stream: AsyncIterator[SourceEvent], on_complete: OnCompleteFunc[EventT] | None = None @@ -147,19 +163,20 @@ async def handle_stream( # noqa: C901 async for e in self.handle_function_tool_result(output_tool_result_event): yield e - self.result = cast(AgentRunResult[OutputDataT], event.result) + result = cast(AgentRunResult[OutputDataT], event.result) + self._result = result async for e in self._turn_to(None): yield e if on_complete is not None: if inspect.isasyncgenfunction(on_complete): - async for e in on_complete(self.result): + async for e in on_complete(result): yield e elif _utils.is_async_callable(on_complete): - await on_complete(self.result) + await on_complete(result) else: - await _utils.run_in_executor(on_complete, self.result) + await _utils.run_in_executor(on_complete, result) elif isinstance(event, FinalResultEvent): self._final_result_event = event diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py index 481947c49d..425bb4e71c 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py @@ -72,9 +72,10 @@ async def validate_request(cls, request: Request) -> RequestData: """Validate a Vercel AI request.""" return request_data_ta.validate_json(await request.body()) - @property - def event_stream(self) -> BaseEventStream[RequestData, BaseChunk, AgentDepsT, OutputDataT]: - return VercelAIEventStream(self.request) + def build_event_stream( + self, accept: str | None = None + ) -> BaseEventStream[RequestData, BaseChunk, AgentDepsT, OutputDataT]: + return VercelAIEventStream(self.request, accept=accept) @property def response_headers(self) -> Mapping[str, str] | None: diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py index 18fdafbfdf..33a8fae288 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py @@ -61,7 +61,7 @@ class VercelAIEventStream(BaseEventStream[RequestData, BaseChunk, AgentDepsT, Ou _step_started: bool = False - def encode_event(self, event: BaseChunk, accept: str | None = None) -> str: + def encode_event(self, event: BaseChunk) -> str: if isinstance(event, DoneChunk): return 'data: [DONE]\n\n' return f'data: {event.model_dump_json(by_alias=True, exclude_none=True)}\n\n' diff --git a/tests/test_ui.py b/tests/test_ui.py index ae24a2ff39..cbdc06066e 100644 --- a/tests/test_ui.py +++ b/tests/test_ui.py @@ -92,9 +92,8 @@ async def validate_request(cls, request: Request) -> UIRequest: def load_messages(cls, messages: Sequence[ModelMessage]) -> list[ModelMessage]: return list(messages) - @property - def event_stream(self) -> UIEventStream[AgentDepsT, OutputDataT]: - return UIEventStream[AgentDepsT, OutputDataT](self.request) + def build_event_stream(self, accept: str | None = None) -> UIEventStream[AgentDepsT, OutputDataT]: + return UIEventStream[AgentDepsT, OutputDataT](self.request, accept=accept) @cached_property def messages(self) -> list[ModelMessage]: @@ -115,7 +114,7 @@ def response_headers(self) -> dict[str, str]: @dataclass(kw_only=True) class UIEventStream(BaseEventStream[UIRequest, str, AgentDepsT, OutputDataT]): - def encode_event(self, event: str, accept: str | None = None) -> str: + def encode_event(self, event: str) 
-> str: return event async def handle_event(self, event: SourceEvent) -> AsyncIterator[str]: @@ -629,7 +628,11 @@ async def send(data: MutableMapping[str, Any]) -> None: assert chunks == snapshot( [ - {'type': 'http.response.start', 'status': 200, 'headers': [(b'x-test', b'test')]}, + { + 'type': 'http.response.start', + 'status': 200, + 'headers': [(b'x-test', b'test'), (b'content-type', b'text/event-stream; charset=utf-8')], + }, {'type': 'http.response.body', 'body': b'', 'more_body': True}, {'type': 'http.response.body', 'body': b'', 'more_body': True}, {'type': 'http.response.body', 'body': b'', 'more_body': True}, From 2b9b830ba2a5ad7d269735e76355d689d5243a25 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Mon, 27 Oct 2025 19:46:24 +0000 Subject: [PATCH 31/46] fix snapshots --- tests/models/test_anthropic.py | 2 ++ tests/models/test_bedrock.py | 10 +++++++++- tests/models/test_openai_responses.py | 3 +++ 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/tests/models/test_anthropic.py b/tests/models/test_anthropic.py index 7199e291c4..614167bd87 100644 --- a/tests/models/test_anthropic.py +++ b/tests/models/test_anthropic.py @@ -3649,6 +3649,7 @@ async def test_anthropic_mcp_servers_stream(allow_model_requests: None, anthropi tool_call_id='mcptoolu_01FZmJ5UspaX5BB9uU339UT1', provider_name='anthropic', ), + previous_part_kind='thinking', ), PartDeltaEvent( index=1, @@ -3811,6 +3812,7 @@ async def test_anthropic_mcp_servers_stream(allow_model_requests: None, anthropi timestamp=IsDatetime(), provider_name='anthropic', ), + previous_part_kind='builtin-tool-call', ), ] ) diff --git a/tests/models/test_bedrock.py b/tests/models/test_bedrock.py index 6ad8e3499d..7915f4f680 100644 --- a/tests/models/test_bedrock.py +++ b/tests/models/test_bedrock.py @@ -1363,8 +1363,16 @@ async def test_bedrock_model_stream_empty_text_delta(allow_model_requests: None, content='The user just says "Hi". We need to respond appropriately, friendly greeting. No special instructions. Should be short.' ), ), - PartStartEvent(index=1, part=TextPart(content='Hello! How can I help')), + PartEndEvent( + index=0, + part=ThinkingPart( + content='The user just says "Hi". We need to respond appropriately, friendly greeting. No special instructions. Should be short.' + ), + next_part_kind='text', + ), + PartStartEvent(index=1, part=TextPart(content='Hello! How can I help'), previous_part_kind='thinking'), FinalResultEvent(tool_name=None, tool_call_id=None), PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' you today?')), + PartEndEvent(index=1, part=TextPart(content='Hello! 
How can I help you today?')), ] ) diff --git a/tests/models/test_openai_responses.py b/tests/models/test_openai_responses.py index 5adb0e7508..77689830e2 100644 --- a/tests/models/test_openai_responses.py +++ b/tests/models/test_openai_responses.py @@ -6960,6 +6960,7 @@ async def test_openai_responses_model_mcp_server_tool_stream(allow_model_request timestamp=IsDatetime(), provider_name='openai', ), + previous_part_kind='builtin-tool-call', ), PartStartEvent( index=3, @@ -6968,6 +6969,7 @@ async def test_openai_responses_model_mcp_server_tool_stream(allow_model_request tool_call_id='mcp_00b9cc7a23d047270068faa0e67fb0819fa9e21302c398e9ac', provider_name='openai', ), + previous_part_kind='thinking', ), PartDeltaEvent( index=3, @@ -7075,6 +7077,7 @@ async def test_openai_responses_model_mcp_server_tool_stream(allow_model_request timestamp=IsDatetime(), provider_name='openai', ), + previous_part_kind='builtin-tool-call', ), ] ) From 5bcc5974f6b887cf2e89b9a8b72f25538d29957e Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Mon, 27 Oct 2025 20:17:56 +0000 Subject: [PATCH 32/46] Fix 3.10 lint --- examples/pydantic_ai_examples/sqlite_database.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/examples/pydantic_ai_examples/sqlite_database.py b/examples/pydantic_ai_examples/sqlite_database.py index 9d470a937a..67127efeb3 100644 --- a/examples/pydantic_ai_examples/sqlite_database.py +++ b/examples/pydantic_ai_examples/sqlite_database.py @@ -8,9 +8,10 @@ from dataclasses import dataclass from functools import partial from pathlib import Path -from typing import Any, LiteralString, ParamSpec, TypeVar +from typing import Any, ParamSpec, TypeVar import logfire +from typing_extensions import LiteralString P = ParamSpec('P') R = TypeVar('R') From 0871ac702a49b85dae8ac3bfd2a1f92c4a07bd15 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Mon, 27 Oct 2025 20:50:53 +0000 Subject: [PATCH 33/46] Add UIApp, AGUIApp, VercelAIApp --- pydantic_ai_slim/pydantic_ai/ag_ui.py | 112 +-------------- .../pydantic_ai/agent/abstract.py | 1 + pydantic_ai_slim/pydantic_ai/ui/__init__.py | 10 +- pydantic_ai_slim/pydantic_ai/ui/adapter.py | 10 +- .../pydantic_ai/ui/ag_ui/__init__.py | 15 ++ .../pydantic_ai/ui/ag_ui/_adapter.py | 8 +- .../pydantic_ai/ui/ag_ui/_event_stream.py | 4 +- pydantic_ai_slim/pydantic_ai/ui/app.py | 130 ++++++++++++++++++ .../pydantic_ai/ui/event_stream.py | 4 +- .../pydantic_ai/ui/vercel_ai/__init__.py | 15 ++ .../pydantic_ai/ui/vercel_ai/_adapter.py | 8 +- .../pydantic_ai/ui/vercel_ai/_event_stream.py | 4 +- tests/test_ui.py | 8 +- 13 files changed, 196 insertions(+), 133 deletions(-) create mode 100644 pydantic_ai_slim/pydantic_ai/ui/app.py diff --git a/pydantic_ai_slim/pydantic_ai/ag_ui.py b/pydantic_ai_slim/pydantic_ai/ag_ui.py index 01968fce83..ed16e69502 100644 --- a/pydantic_ai_slim/pydantic_ai/ag_ui.py +++ b/pydantic_ai_slim/pydantic_ai/ag_ui.py @@ -6,14 +6,14 @@ from __future__ import annotations -from collections.abc import AsyncIterator, Callable, Mapping, Sequence -from typing import Any, Generic +from collections.abc import AsyncIterator, Sequence +from typing import Any from . 
import DeferredToolResults from .agent import AbstractAgent from .messages import ModelMessage from .models import KnownModelName, Model -from .output import OutputDataT, OutputSpec +from .output import OutputSpec from .settings import ModelSettings from .tools import AgentDepsT from .toolsets import AbstractToolset @@ -27,6 +27,7 @@ from .ui.ag_ui import ( SSE_CONTENT_TYPE, AGUIAdapter, + AGUIApp, ) except ImportError as e: # pragma: no cover raise ImportError( @@ -35,12 +36,8 @@ ) from e try: - from starlette.applications import Starlette - from starlette.middleware import Middleware from starlette.requests import Request from starlette.responses import Response - from starlette.routing import BaseRoute - from starlette.types import ExceptionHandler, Lifespan except ImportError as e: # pragma: no cover raise ImportError( 'Please install the `starlette` package to use `Agent.to_ag_ui()` method, ' @@ -59,105 +56,6 @@ ] -class AGUIApp(Generic[AgentDepsT, OutputDataT], Starlette): - """ASGI application for running Pydantic AI agents with AG-UI protocol support.""" - - def __init__( - self, - agent: AbstractAgent[AgentDepsT, OutputDataT], - *, - # Agent.iter parameters. - output_type: OutputSpec[Any] | None = None, - message_history: Sequence[ModelMessage] | None = None, - deferred_tool_results: DeferredToolResults | None = None, - model: Model | KnownModelName | str | None = None, - deps: AgentDepsT = None, - model_settings: ModelSettings | None = None, - usage_limits: UsageLimits | None = None, - usage: RunUsage | None = None, - infer_name: bool = True, - toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, - # Starlette parameters. - debug: bool = False, - routes: Sequence[BaseRoute] | None = None, - middleware: Sequence[Middleware] | None = None, - exception_handlers: Mapping[Any, ExceptionHandler] | None = None, - on_startup: Sequence[Callable[[], Any]] | None = None, - on_shutdown: Sequence[Callable[[], Any]] | None = None, - lifespan: Lifespan[AGUIApp[AgentDepsT, OutputDataT]] | None = None, - ) -> None: - """An ASGI application that handles every AG-UI request by running the agent. - - Note that the `deps` will be the same for each request, with the exception of the AG-UI state that's - injected into the `state` field of a `deps` object that implements the [`StateHandler`][pydantic_ai.ag_ui.StateHandler] protocol. - To provide different `deps` for each request (e.g. based on the authenticated user), - use [`pydantic_ai.ag_ui.run_ag_ui`][pydantic_ai.ag_ui.run_ag_ui] or - [`pydantic_ai.ag_ui.handle_ag_ui_request`][pydantic_ai.ag_ui.handle_ag_ui_request] instead. - - Args: - agent: The agent to run. - - output_type: Custom output type to use for this run, `output_type` may only be used if the agent has - no output validators since output validators would expect an argument that matches the agent's - output type. - message_history: History of the conversation so far. - deferred_tool_results: Optional results for deferred tool calls in the message history. - model: Optional model to use for this run, required if `model` was not set when creating the agent. - deps: Optional dependencies to use for this run. - model_settings: Optional settings to use for this model's request. - usage_limits: Optional limits on model request count or token usage. - usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. - infer_name: Whether to try to infer the agent name from the call frame if it's not set. 
- toolsets: Optional additional toolsets for this run. - - debug: Boolean indicating if debug tracebacks should be returned on errors. - routes: A list of routes to serve incoming HTTP and WebSocket requests. - middleware: A list of middleware to run for every request. A starlette application will always - automatically include two middleware classes. `ServerErrorMiddleware` is added as the very - outermost middleware, to handle any uncaught errors occurring anywhere in the entire stack. - `ExceptionMiddleware` is added as the very innermost middleware, to deal with handled - exception cases occurring in the routing or endpoints. - exception_handlers: A mapping of either integer status codes, or exception class types onto - callables which handle the exceptions. Exception handler callables should be of the form - `handler(request, exc) -> response` and may be either standard functions, or async functions. - on_startup: A list of callables to run on application startup. Startup handler callables do not - take any arguments, and may be either standard functions, or async functions. - on_shutdown: A list of callables to run on application shutdown. Shutdown handler callables do - not take any arguments, and may be either standard functions, or async functions. - lifespan: A lifespan context function, which can be used to perform startup and shutdown tasks. - This is a newer style that replaces the `on_startup` and `on_shutdown` handlers. Use one or - the other, not both. - """ - super().__init__( - debug=debug, - routes=routes, - middleware=middleware, - exception_handlers=exception_handlers, - on_startup=on_startup, - on_shutdown=on_shutdown, - lifespan=lifespan, - ) - - async def endpoint(request: Request) -> Response: - """Endpoint to run the agent with the provided input data.""" - return await handle_ag_ui_request( - agent, - request, - output_type=output_type, - message_history=message_history, - deferred_tool_results=deferred_tool_results, - model=model, - deps=deps, - model_settings=model_settings, - usage_limits=usage_limits, - usage=usage, - infer_name=infer_name, - toolsets=toolsets, - ) - - self.router.add_route('/', endpoint, methods=['POST'], name='run_agent') - - async def handle_ag_ui_request( agent: AbstractAgent[AgentDepsT, Any], request: Request, @@ -202,6 +100,8 @@ async def handle_ag_ui_request( request, deps=deps, output_type=output_type, + message_history=message_history, + deferred_tool_results=deferred_tool_results, model=model, model_settings=model_settings, usage_limits=usage_limits, diff --git a/pydantic_ai_slim/pydantic_ai/agent/abstract.py b/pydantic_ai_slim/pydantic_ai/agent/abstract.py index 2e7f8ca3d9..2a46e2cb0a 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/abstract.py +++ b/pydantic_ai_slim/pydantic_ai/agent/abstract.py @@ -993,6 +993,7 @@ async def __aenter__(self) -> AbstractAgent[AgentDepsT, OutputDataT]: async def __aexit__(self, *args: Any) -> bool | None: raise NotImplementedError + # TODO (v2): Remove in favor of using `AGUIApp` directly -- we don't have `to_temporal()` or `to_vercel_ai()` either. 
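
With `AGUIApp` now living in `pydantic_ai.ui.ag_ui` and `to_ag_ui()` marked for eventual removal, the direct construction the TODO above points at looks as follows (model name illustrative):

```python
from pydantic_ai import Agent
from pydantic_ai.ui import UIApp
from pydantic_ai.ui.ag_ui import AGUIAdapter, AGUIApp

agent = Agent('openai:gpt-4.1', instructions='Be helpful.')

# Convenience subclass: an ASGI app serving the AG-UI protocol on POST /.
app = AGUIApp(agent)

# Equivalent spelling via the generic app with an explicit adapter type:
app_generic = UIApp(AGUIAdapter, agent)

# Serve with e.g.: uvicorn my_module:app
```
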
def to_ag_ui( self, *, diff --git a/pydantic_ai_slim/pydantic_ai/ui/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/__init__.py index 3a27b55042..87c2c485e9 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/ui/__init__.py @@ -6,14 +6,16 @@ from __future__ import annotations -from .adapter import BaseAdapter, OnCompleteFunc, StateDeps, StateHandler -from .event_stream import SSE_CONTENT_TYPE, BaseEventStream +from .adapter import OnCompleteFunc, StateDeps, StateHandler, UIAdapter +from .app import UIApp +from .event_stream import SSE_CONTENT_TYPE, UIEventStream __all__ = [ - 'BaseAdapter', - 'BaseEventStream', + 'UIAdapter', + 'UIEventStream', 'SSE_CONTENT_TYPE', 'StateDeps', 'StateHandler', 'OnCompleteFunc', + 'UIApp', ] diff --git a/pydantic_ai_slim/pydantic_ai/ui/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/adapter.py index d35236fb4e..d60d365008 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/adapter.py @@ -33,7 +33,7 @@ from ..settings import ModelSettings from ..toolsets import AbstractToolset from ..usage import RunUsage, UsageLimits -from .event_stream import BaseEventStream, OnCompleteFunc, SourceEvent +from .event_stream import OnCompleteFunc, SourceEvent, UIEventStream if TYPE_CHECKING: from starlette.requests import Request @@ -41,7 +41,7 @@ __all__ = [ - 'BaseAdapter', + 'UIAdapter', ] @@ -107,7 +107,7 @@ class StateDeps(Generic[StateT]): @dataclass -class BaseAdapter(ABC, Generic[RunRequestT, MessageT, EventT, AgentDepsT, OutputDataT]): +class UIAdapter(ABC, Generic[RunRequestT, MessageT, EventT, AgentDepsT, OutputDataT]): """TODO (DouwM): Docstring.""" agent: AbstractAgent[AgentDepsT, OutputDataT] @@ -131,7 +131,7 @@ def load_messages(cls, messages: Sequence[MessageT]) -> list[ModelMessage]: @abstractmethod def build_event_stream( self, accept: str | None = None - ) -> BaseEventStream[RunRequestT, EventT, AgentDepsT, OutputDataT]: + ) -> UIEventStream[RunRequestT, EventT, AgentDepsT, OutputDataT]: """Create an event stream for the adapter. Args: @@ -235,7 +235,7 @@ async def run_stream( toolset = self.toolset if toolset: output_type = [output_type or self.agent.output_type, DeferredToolRequests] - toolsets = [*toolsets, toolset] if toolsets else [toolset] + toolsets = [*(toolsets or []), toolset] if isinstance(deps, StateHandler): raw_state = self.state or {} diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py index eccd37ed89..745d01f718 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py @@ -1,5 +1,12 @@ """AG-UI protocol integration for Pydantic AI agents.""" +from typing import Any + +from pydantic_ai.agent import AbstractAgent +from pydantic_ai.output import OutputDataT +from pydantic_ai.tools import AgentDepsT + +from .. 
import UIApp from ._adapter import AGUIAdapter from ._event_stream import SSE_CONTENT_TYPE, AGUIEventStream @@ -7,4 +14,12 @@ 'AGUIAdapter', 'AGUIEventStream', 'SSE_CONTENT_TYPE', + 'AGUIApp', ] + + +class AGUIApp(UIApp[AgentDepsT, OutputDataT]): + """ASGI application for running Pydantic AI agents with AG-UI protocol support.""" + + def __init__(self, agent: AbstractAgent[AgentDepsT, OutputDataT], **kwargs: Any) -> None: + super().__init__(AGUIAdapter[AgentDepsT, OutputDataT], agent, **kwargs) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index fc2a375a04..9c858b63bc 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -41,8 +41,8 @@ UserMessage, ) - from ..adapter import BaseAdapter - from ..event_stream import BaseEventStream + from ..adapter import UIAdapter + from ..event_stream import UIEventStream from ._event_stream import BUILTIN_TOOL_CALL_ID_PREFIX, AGUIEventStream except ImportError as e: # pragma: no cover raise ImportError( @@ -93,7 +93,7 @@ def label(self) -> str: return 'the AG-UI frontend tools' # pragma: no cover -class AGUIAdapter(BaseAdapter[RunAgentInput, Message, BaseEvent, AgentDepsT, OutputDataT]): +class AGUIAdapter(UIAdapter[RunAgentInput, Message, BaseEvent, AgentDepsT, OutputDataT]): """TODO (DouwM): Docstring.""" @classmethod @@ -103,7 +103,7 @@ async def validate_request(cls, request: Request) -> RunAgentInput: def build_event_stream( self, accept: str | None = None - ) -> BaseEventStream[RunAgentInput, BaseEvent, AgentDepsT, OutputDataT]: + ) -> UIEventStream[RunAgentInput, BaseEvent, AgentDepsT, OutputDataT]: """Create an event stream for the adapter. Args: diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index 7c8c404df3..91d93621cd 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -26,7 +26,7 @@ ) from ...output import OutputDataT from ...tools import AgentDepsT -from .. import SSE_CONTENT_TYPE, BaseEventStream +from .. import SSE_CONTENT_TYPE, UIEventStream try: from ag_ui.core import ( @@ -68,7 +68,7 @@ @dataclass -class AGUIEventStream(BaseEventStream[RunAgentInput, BaseEvent, AgentDepsT, OutputDataT]): +class AGUIEventStream(UIEventStream[RunAgentInput, BaseEvent, AgentDepsT, OutputDataT]): """TODO (DouwM): Docstring.""" _thinking_text: bool = False diff --git a/pydantic_ai_slim/pydantic_ai/ui/app.py b/pydantic_ai_slim/pydantic_ai/ui/app.py new file mode 100644 index 0000000000..d082788247 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/app.py @@ -0,0 +1,130 @@ +from __future__ import annotations + +from collections.abc import Callable, Mapping, Sequence +from typing import Any, Generic, Self + +from .. 
import DeferredToolResults +from ..agent import AbstractAgent +from ..messages import ModelMessage +from ..models import KnownModelName, Model +from ..output import OutputDataT, OutputSpec +from ..settings import ModelSettings +from ..tools import AgentDepsT +from ..toolsets import AbstractToolset +from ..usage import RunUsage, UsageLimits +from .adapter import UIAdapter + +try: + from starlette.applications import Starlette + from starlette.middleware import Middleware + from starlette.requests import Request + from starlette.responses import Response + from starlette.routing import BaseRoute + from starlette.types import ExceptionHandler, Lifespan +except ImportError as e: # pragma: no cover + raise ImportError( + 'Please install the `starlette` package to use `Agent.to_ag_ui()` method, ' + 'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`' + ) from e + + +class UIApp(Generic[AgentDepsT, OutputDataT], Starlette): + """ASGI application for running Pydantic AI agents with UI protocol support.""" + + def __init__( + self, + adapter_type: type[UIAdapter[Any, Any, Any, AgentDepsT, OutputDataT]], + agent: AbstractAgent[AgentDepsT, OutputDataT], + *, + # Agent.iter parameters. + output_type: OutputSpec[Any] | None = None, + message_history: Sequence[ModelMessage] | None = None, + deferred_tool_results: DeferredToolResults | None = None, + model: Model | KnownModelName | str | None = None, + deps: AgentDepsT = None, + model_settings: ModelSettings | None = None, + usage_limits: UsageLimits | None = None, + usage: RunUsage | None = None, + infer_name: bool = True, + toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, + # Starlette parameters. + debug: bool = False, + routes: Sequence[BaseRoute] | None = None, + middleware: Sequence[Middleware] | None = None, + exception_handlers: Mapping[Any, ExceptionHandler] | None = None, + on_startup: Sequence[Callable[[], Any]] | None = None, + on_shutdown: Sequence[Callable[[], Any]] | None = None, + lifespan: Lifespan[Self] | None = None, + ) -> None: + """An ASGI application that handles every request by running the agent and streaming the response. + + # TODO (DouweM): Docstring + Note that the `deps` will be the same for each request, with the exception of the AG-UI state that's + injected into the `state` field of a `deps` object that implements the [`StateHandler`][pydantic_ai.ag_ui.StateHandler] protocol. + To provide different `deps` for each request (e.g. based on the authenticated user), + use [`pydantic_ai.ag_ui.run_ag_ui`][pydantic_ai.ag_ui.run_ag_ui] or + [`pydantic_ai.ag_ui.handle_ag_ui_request`][pydantic_ai.ag_ui.handle_ag_ui_request] instead. + + Args: + adapter_type: The type of the UI adapter to use. + agent: The agent to run. + + output_type: Custom output type to use for this run, `output_type` may only be used if the agent has + no output validators since output validators would expect an argument that matches the agent's + output type. + message_history: History of the conversation so far. + deferred_tool_results: Optional results for deferred tool calls in the message history. + model: Optional model to use for this run, required if `model` was not set when creating the agent. + deps: Optional dependencies to use for this run. + model_settings: Optional settings to use for this model's request. + usage_limits: Optional limits on model request count or token usage. + usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. 
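
Because the class documented here is a regular `Starlette` application whose single route is registered at the end of this constructor, it can be mounted inside a larger app. A sketch, assuming FastAPI is installed and using the Vercel AI variant:

```python
import fastapi

from pydantic_ai import Agent
from pydantic_ai.ui.vercel_ai import VercelAIApp

agent = Agent('openai:gpt-4.1', instructions='You are a helpful assistant.')

app = fastapi.FastAPI()
# The sub-application's POST '/' route answers requests under the mount path.
app.mount('/api/chat', VercelAIApp(agent))
```
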
+ infer_name: Whether to try to infer the agent name from the call frame if it's not set. + toolsets: Optional additional toolsets for this run. + + debug: Boolean indicating if debug tracebacks should be returned on errors. + routes: A list of routes to serve incoming HTTP and WebSocket requests. + middleware: A list of middleware to run for every request. A starlette application will always + automatically include two middleware classes. `ServerErrorMiddleware` is added as the very + outermost middleware, to handle any uncaught errors occurring anywhere in the entire stack. + `ExceptionMiddleware` is added as the very innermost middleware, to deal with handled + exception cases occurring in the routing or endpoints. + exception_handlers: A mapping of either integer status codes, or exception class types onto + callables which handle the exceptions. Exception handler callables should be of the form + `handler(request, exc) -> response` and may be either standard functions, or async functions. + on_startup: A list of callables to run on application startup. Startup handler callables do not + take any arguments, and may be either standard functions, or async functions. + on_shutdown: A list of callables to run on application shutdown. Shutdown handler callables do + not take any arguments, and may be either standard functions, or async functions. + lifespan: A lifespan context function, which can be used to perform startup and shutdown tasks. + This is a newer style that replaces the `on_startup` and `on_shutdown` handlers. Use one or + the other, not both. + """ + super().__init__( + debug=debug, + routes=routes, + middleware=middleware, + exception_handlers=exception_handlers, + on_startup=on_startup, + on_shutdown=on_shutdown, + lifespan=lifespan, + ) + + async def run_agent(request: Request) -> Response: + """Endpoint to run the agent with the provided input data.""" + return await adapter_type.dispatch_request( + agent, + request, + output_type=output_type, + message_history=message_history, + deferred_tool_results=deferred_tool_results, + model=model, + deps=deps, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + ) + + self.router.add_route('/', run_agent, methods=['POST']) diff --git a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py index a998f5fd3e..0fce114a6f 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py @@ -41,7 +41,7 @@ from ..tools import AgentDepsT __all__ = [ - 'BaseEventStream', + 'UIEventStream', ] SSE_CONTENT_TYPE = 'text/event-stream' @@ -64,7 +64,7 @@ @dataclass -class BaseEventStream(ABC, Generic[RunRequestT, EventT, AgentDepsT, OutputDataT]): +class UIEventStream(ABC, Generic[RunRequestT, EventT, AgentDepsT, OutputDataT]): """TODO (DouwM): Docstring.""" request: RunRequestT diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py index efb5a4b7d6..d29cd06690 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py @@ -7,6 +7,13 @@ https://github.com/vercel/ai/blob/ai%405.0.34/packages/ai/src/ui/ui-messages.ts """ +from typing import Any + +from pydantic_ai.agent import AbstractAgent +from pydantic_ai.output import OutputDataT +from pydantic_ai.tools import AgentDepsT + +from .. 
import UIApp from ._adapter import VercelAIAdapter from ._event_stream import VercelAIEventStream from ._request_types import ( @@ -85,4 +92,12 @@ # Event stream and adapter 'VercelAIEventStream', 'VercelAIAdapter', + 'VercelAIApp', ] + + +class VercelAIApp(UIApp[AgentDepsT, OutputDataT]): + """ASGI application for running Pydantic AI agents with Vercel UI protocol support.""" + + def __init__(self, agent: AbstractAgent[AgentDepsT, OutputDataT], **kwargs: Any) -> None: + super().__init__(VercelAIAdapter[AgentDepsT, OutputDataT], agent, **kwargs) diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py index 425bb4e71c..5512faec50 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py @@ -30,8 +30,8 @@ VideoUrl, ) from ...output import OutputDataT -from ..adapter import BaseAdapter -from ..event_stream import BaseEventStream +from ..adapter import UIAdapter +from ..event_stream import UIEventStream from ._event_stream import VercelAIEventStream from ._request_types import ( DataUIPart, @@ -64,7 +64,7 @@ @dataclass -class VercelAIAdapter(BaseAdapter[RequestData, UIMessage, BaseChunk, AgentDepsT, OutputDataT]): +class VercelAIAdapter(UIAdapter[RequestData, UIMessage, BaseChunk, AgentDepsT, OutputDataT]): """TODO (DouwM): Docstring.""" @classmethod @@ -74,7 +74,7 @@ async def validate_request(cls, request: Request) -> RequestData: def build_event_stream( self, accept: str | None = None - ) -> BaseEventStream[RequestData, BaseChunk, AgentDepsT, OutputDataT]: + ) -> UIEventStream[RequestData, BaseChunk, AgentDepsT, OutputDataT]: return VercelAIEventStream(self.request, accept=accept) @property diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py index 33a8fae288..0e1e41d56b 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py @@ -23,7 +23,7 @@ ) from ...output import OutputDataT from ...tools import AgentDepsT -from .. import BaseEventStream +from .. 
import UIEventStream from ._request_types import RequestData from ._response_types import ( BaseChunk, @@ -56,7 +56,7 @@ def _json_dumps(obj: Any) -> str: @dataclass -class VercelAIEventStream(BaseEventStream[RequestData, BaseChunk, AgentDepsT, OutputDataT]): +class VercelAIEventStream(UIEventStream[RequestData, BaseChunk, AgentDepsT, OutputDataT]): """TODO (DouwM): Docstring.""" _step_started: bool = False diff --git a/tests/test_ui.py b/tests/test_ui.py index cbdc06066e..7a4886284d 100644 --- a/tests/test_ui.py +++ b/tests/test_ui.py @@ -46,8 +46,8 @@ from pydantic_ai.run import AgentRunResult, AgentRunResultEvent from pydantic_ai.tools import ToolDefinition from pydantic_ai.toolsets import AbstractToolset, ExternalToolset -from pydantic_ai.ui.adapter import BaseAdapter -from pydantic_ai.ui.event_stream import BaseEventStream, SourceEvent +from pydantic_ai.ui.adapter import UIAdapter +from pydantic_ai.ui.event_stream import SourceEvent, UIEventStream from .conftest import try_import @@ -83,7 +83,7 @@ class UIDeps: state: UIState -class UIAdapter(BaseAdapter[UIRequest, ModelMessage, str, AgentDepsT, OutputDataT]): +class UIAdapter(UIAdapter[UIRequest, ModelMessage, str, AgentDepsT, OutputDataT]): @classmethod async def validate_request(cls, request: Request) -> UIRequest: return UIRequest.model_validate(await request.json()) @@ -113,7 +113,7 @@ def response_headers(self) -> dict[str, str]: @dataclass(kw_only=True) -class UIEventStream(BaseEventStream[UIRequest, str, AgentDepsT, OutputDataT]): +class UIEventStream(UIEventStream[UIRequest, str, AgentDepsT, OutputDataT]): def encode_event(self, event: str) -> str: return event From d9feb52a8ae6b6a7ef09fedd56e6d5e946f9583d Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Mon, 27 Oct 2025 22:32:57 +0000 Subject: [PATCH 34/46] Refactoring --- examples/pydantic_ai_examples/chat_app.py | 2 +- pydantic_ai_slim/pydantic_ai/ag_ui.py | 12 +- .../pydantic_ai/agent/abstract.py | 6 + pydantic_ai_slim/pydantic_ai/ui/adapter.py | 268 ++++++++++-------- .../pydantic_ai/ui/ag_ui/__init__.py | 2 +- .../pydantic_ai/ui/ag_ui/_adapter.py | 27 +- .../pydantic_ai/ui/ag_ui/_event_stream.py | 14 +- pydantic_ai_slim/pydantic_ai/ui/app.py | 6 +- .../pydantic_ai/ui/event_stream.py | 57 +++- .../pydantic_ai/ui/vercel_ai/__init__.py | 2 +- .../pydantic_ai/ui/vercel_ai/_adapter.py | 20 +- .../pydantic_ai/ui/vercel_ai/_event_stream.py | 10 +- tests/test_ui.py | 79 +++--- tests/test_vercel_ai.py | 8 +- 14 files changed, 274 insertions(+), 239 deletions(-) diff --git a/examples/pydantic_ai_examples/chat_app.py b/examples/pydantic_ai_examples/chat_app.py index ea37a576e5..30ba06f3da 100644 --- a/examples/pydantic_ai_examples/chat_app.py +++ b/examples/pydantic_ai_examples/chat_app.py @@ -96,7 +96,7 @@ def options_chat(): @app.post('/api/chat') async def get_chat(request: Request, database: Database = Depends(get_db)) -> Response: return await VercelAIAdapter[Deps].dispatch_request( - chat_agent, request, deps=Deps(database, 123) + request, agent=chat_agent, deps=Deps(database, 123) ) diff --git a/pydantic_ai_slim/pydantic_ai/ag_ui.py b/pydantic_ai_slim/pydantic_ai/ag_ui.py index ed16e69502..70a4498188 100644 --- a/pydantic_ai_slim/pydantic_ai/ag_ui.py +++ b/pydantic_ai_slim/pydantic_ai/ag_ui.py @@ -96,8 +96,8 @@ async def handle_ag_ui_request( A streaming Starlette response with AG-UI protocol events. 
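
As the docstring above notes, `handle_ag_ui_request` remains the entry point when `deps` must differ per request; a sketch, assuming FastAPI:

```python
from fastapi import FastAPI, Request
from starlette.responses import Response

from pydantic_ai import Agent
from pydantic_ai.ag_ui import handle_ag_ui_request

agent = Agent('openai:gpt-4.1')
app = FastAPI()


@app.post('/agent')
async def run_agent(request: Request) -> Response:
    # Derive per-request deps here (e.g. from auth headers); None keeps the default.
    return await handle_ag_ui_request(agent, request, deps=None)
```

The next hunk also turns `run_ag_ui` from an async generator function into a plain function that builds the adapter and returns the encoded stream; call sites iterate the return value exactly as before:

```python
from ag_ui.core import RunAgentInput

from pydantic_ai import Agent
from pydantic_ai.ag_ui import run_ag_ui


async def collect_chunks(agent: Agent, run_input: RunAgentInput) -> list[str]:
    return [chunk async for chunk in run_ag_ui(agent, run_input)]
```
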
""" return await AGUIAdapter[AgentDepsT].dispatch_request( - agent, request, + agent=agent, deps=deps, output_type=output_type, message_history=message_history, @@ -112,7 +112,7 @@ async def handle_ag_ui_request( ) -async def run_ag_ui( +def run_ag_ui( agent: AbstractAgent[AgentDepsT, Any], run_input: RunAgentInput, accept: str = SSE_CONTENT_TYPE, @@ -153,8 +153,8 @@ async def run_ag_ui( Yields: Streaming event chunks encoded as strings according to the accept header value. """ - adapter = AGUIAdapter(agent=agent, request=run_input) - async for event in adapter.encode_stream( + adapter = AGUIAdapter(agent=agent, run_input=run_input, accept=accept) + return adapter.encode_stream( adapter.run_stream( output_type=output_type, message_history=message_history, @@ -168,6 +168,4 @@ async def run_ag_ui( toolsets=toolsets, on_complete=on_complete, ), - accept=accept, - ): - yield event + ) diff --git a/pydantic_ai_slim/pydantic_ai/agent/abstract.py b/pydantic_ai_slim/pydantic_ai/agent/abstract.py index 2a46e2cb0a..aa31f1e572 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/abstract.py +++ b/pydantic_ai_slim/pydantic_ai/agent/abstract.py @@ -999,6 +999,8 @@ def to_ag_ui( *, # Agent.iter parameters output_type: OutputSpec[OutputDataT] | None = None, + message_history: Sequence[_messages.ModelMessage] | None = None, + deferred_tool_results: DeferredToolResults | None = None, model: models.Model | models.KnownModelName | str | None = None, deps: AgentDepsT = None, model_settings: ModelSettings | None = None, @@ -1045,6 +1047,8 @@ def to_ag_ui( output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no output validators since output validators would expect an argument that matches the agent's output type. + message_history: History of the conversation so far. + deferred_tool_results: Optional results for deferred tool calls in the message history. model: Optional model to use for this run, required if `model` was not set when creating the agent. deps: Optional dependencies to use for this run. model_settings: Optional settings to use for this model's request. @@ -1080,6 +1084,8 @@ def to_ag_ui( agent=self, # Agent.iter parameters output_type=output_type, + message_history=message_history, + deferred_tool_results=deferred_tool_results, model=model, deps=deps, model_settings=model_settings, diff --git a/pydantic_ai_slim/pydantic_ai/ui/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/adapter.py index d60d365008..44ee061d23 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/adapter.py @@ -7,8 +7,8 @@ from __future__ import annotations from abc import ABC, abstractmethod -from collections.abc import AsyncIterator, Mapping, Sequence -from dataclasses import Field, dataclass, replace +from collections.abc import AsyncIterator, Sequence +from dataclasses import KW_ONLY, Field, dataclass, replace from functools import cached_property from http import HTTPStatus from typing import ( @@ -23,30 +23,33 @@ from pydantic import BaseModel, ValidationError -from .. 
import DeferredToolRequests, DeferredToolResults -from ..agent import AbstractAgent, AgentDepsT -from ..builtin_tools import AbstractBuiltinTool -from ..exceptions import UserError -from ..messages import ModelMessage -from ..models import KnownModelName, Model -from ..output import OutputDataT, OutputSpec -from ..settings import ModelSettings -from ..toolsets import AbstractToolset -from ..usage import RunUsage, UsageLimits -from .event_stream import OnCompleteFunc, SourceEvent, UIEventStream +from pydantic_ai import DeferredToolRequests, DeferredToolResults +from pydantic_ai.agent import AbstractAgent, AgentDepsT +from pydantic_ai.builtin_tools import AbstractBuiltinTool +from pydantic_ai.exceptions import UserError +from pydantic_ai.messages import ModelMessage +from pydantic_ai.models import KnownModelName, Model +from pydantic_ai.output import OutputDataT, OutputSpec +from pydantic_ai.settings import ModelSettings +from pydantic_ai.toolsets import AbstractToolset +from pydantic_ai.usage import RunUsage, UsageLimits + +from .event_stream import NativeEvent, OnCompleteFunc, UIEventStream if TYPE_CHECKING: from starlette.requests import Request - from starlette.responses import Response + from starlette.responses import Response, StreamingResponse __all__ = [ 'UIAdapter', + 'StateHandler', + 'StateDeps', ] -RunRequestT = TypeVar('RunRequestT') -"""Type variable for protocol-specific request types.""" +RunInputT = TypeVar('RunInputT') +"""Type variable for protocol-specific run input types.""" MessageT = TypeVar('MessageT') """Type variable for protocol-specific message types.""" @@ -55,8 +58,6 @@ """Type variable for protocol-specific event types.""" -# State management types - StateT = TypeVar('StateT', bound=BaseModel) """Type variable for the state type, which must be a subclass of `BaseModel`.""" @@ -83,16 +84,13 @@ def state(self, state: Any) -> None: Args: state: The run state. - - Raises: - InvalidStateError: If `state` does not match the expected model. """ ... @dataclass class StateDeps(Generic[StateT]): - """Provides AG-UI state management. + """Dependency type that holds state. This class is used to manage the state of an agent run. 
It allows setting the state of the agent run with a specific type of state model, which must @@ -107,93 +105,97 @@ class StateDeps(Generic[StateT]): @dataclass -class UIAdapter(ABC, Generic[RunRequestT, MessageT, EventT, AgentDepsT, OutputDataT]): +class UIAdapter(ABC, Generic[RunInputT, MessageT, EventT, AgentDepsT, OutputDataT]): """TODO (DouwM): Docstring.""" agent: AbstractAgent[AgentDepsT, OutputDataT] """The Pydantic AI agent to run.""" - request: RunRequestT - """The protocol-specific request object.""" + run_input: RunInputT + """The protocol-specific run input object.""" + + _: KW_ONLY + + accept: str | None = None + """The accept header value.""" + + @classmethod + async def from_request( + cls, request: Request, *, agent: AbstractAgent[AgentDepsT, OutputDataT] + ) -> UIAdapter[RunInputT, MessageT, EventT, AgentDepsT, OutputDataT]: + """Create an adapter from a protocol-specific request.""" + return cls( + agent=agent, + run_input=await cls.build_run_input(request), + accept=request.headers.get('accept'), + ) @classmethod @abstractmethod - async def validate_request(cls, request: Request) -> RunRequestT: - """Validate the request and return the validated request.""" + async def build_run_input(cls, request: Request) -> RunInputT: + """Validate the HTTP request and return the validated protocol-specific run input.""" raise NotImplementedError @classmethod @abstractmethod def load_messages(cls, messages: Sequence[MessageT]) -> list[ModelMessage]: - """Load messages from the request and return the loaded messages.""" + """Convert protocol-specific messages into Pydantic AI messages.""" raise NotImplementedError @abstractmethod - def build_event_stream( - self, accept: str | None = None - ) -> UIEventStream[RunRequestT, EventT, AgentDepsT, OutputDataT]: - """Create an event stream for the adapter. - - Args: - accept: The accept header value. - - Returns: - The event stream. - """ + def build_event_stream(self) -> UIEventStream[RunInputT, EventT, AgentDepsT, OutputDataT]: + """Create a protocol-specific event stream.""" raise NotImplementedError @cached_property @abstractmethod def messages(self) -> list[ModelMessage]: - """Convert protocol messages to Pydantic AI messages. - - Args: - messages: List of protocol-specific messages. - - Returns: - List of Pydantic AI ModelMessage objects. - """ + """Pydantic AI messages from the protocol-specific request.""" raise NotImplementedError @cached_property def toolset(self) -> AbstractToolset[AgentDepsT] | None: - """Get the toolset for the adapter.""" + """Toolset representing frontend tools from the protocol-specific request.""" return None @cached_property def state(self) -> dict[str, Any] | None: - """Get the state of the agent run.""" + """Run state from the protocol-specific request.""" return None - @property - def response_headers(self) -> Mapping[str, str] | None: - """Get the response headers for the adapter.""" - return None + def transform_stream( + self, + stream: AsyncIterator[NativeEvent], + on_complete: OnCompleteFunc[EventT] | None = None, + ) -> AsyncIterator[EventT]: + """Transform a stream of Pydantic AI events into protocol-specific events. - def encode_stream(self, stream: AsyncIterator[EventT], accept: str | None = None) -> AsyncIterator[str]: - """Encode a stream of events as SSE strings. + Args: + stream: The stream of Pydantic AI events to transform. + on_complete: Optional callback function called when the agent run completes successfully. 
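
A sketch of the new construction path: `from_request` validates the body via `build_run_input` and captures the accept header, after which the run and response helpers defined on this class compose by hand (the endpoint shape is illustrative):

```python
from starlette.requests import Request

from pydantic_ai import Agent
from pydantic_ai.ui.ag_ui import AGUIAdapter

agent = Agent('openai:gpt-4.1')


async def endpoint(request: Request):
    adapter = await AGUIAdapter.from_request(request, agent=agent)
    return adapter.streaming_response(adapter.run_stream())
```
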
+ """ + return self.build_event_stream().transform_stream(stream, on_complete=on_complete) + + def encode_stream(self, stream: AsyncIterator[EventT]) -> AsyncIterator[str]: + """Encode a stream of protocol-specific events as strings according to the accept header value. Args: - stream: The stream of events to encode. - accept: The accept header value. + stream: The stream of protocol-specific events to encode. """ - return self.build_event_stream(accept).encode_stream(stream) + return self.build_event_stream().encode_stream(stream) - async def process_stream( - self, - stream: AsyncIterator[SourceEvent], - on_complete: OnCompleteFunc[EventT] | None = None, - ) -> AsyncIterator[EventT]: - """Process a stream of events and return a stream of events. + def streaming_response(self, stream: AsyncIterator[EventT]) -> StreamingResponse: + """Generate a streaming response from a stream of protocol-specific events. Args: - stream: The stream of events to process. - on_complete: Optional callback function called when the agent run completes successfully. + stream: The stream of protocol-specific events to encode. + + Returns: + A streaming Starlette response with encoded protocol-specific events. """ - async for event in self.build_event_stream().handle_stream(stream, on_complete=on_complete): - yield event + return self.build_event_stream().streaming_response(stream) - async def run_stream( + def run_stream_native( self, *, output_type: OutputSpec[Any] | None = None, @@ -207,9 +209,8 @@ async def run_stream( infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, - on_complete: OnCompleteFunc[EventT] | None = None, - ) -> AsyncIterator[EventT]: - """Run the agent with the AG-UI run input and stream AG-UI protocol events. + ) -> AsyncIterator[NativeEvent]: + """Run the agent with the protocol-specific request as input and stream Pydantic AI events. Args: output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no @@ -224,11 +225,6 @@ async def run_stream( infer_name: Whether to try to infer the agent name from the call frame if it's not set. toolsets: Optional additional toolsets for this run. builtin_tools: Optional additional builtin tools to use for this run. - on_complete: Optional callback function called when the agent run completes successfully. - The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can access `all_messages()` and other result data. - - Yields: - Streaming event chunks encoded as strings according to the accept header value. """ message_history = [*(message_history or []), *self.messages] @@ -250,9 +246,56 @@ async def run_stream( f'State is provided but `deps` of type `{type(deps).__name__}` does not implement the `StateHandler` protocol: it needs to be a dataclass with a non-optional `state` field.' 
) - async for event in self.process_stream( - self.agent.run_stream_events( - user_prompt=None, + return self.agent.run_stream_events( + output_type=output_type, + message_history=message_history, + deferred_tool_results=deferred_tool_results, + model=model, + deps=deps, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + builtin_tools=builtin_tools, + ) + + def run_stream( + self, + *, + output_type: OutputSpec[Any] | None = None, + message_history: Sequence[ModelMessage] | None = None, + deferred_tool_results: DeferredToolResults | None = None, + model: Model | KnownModelName | str | None = None, + deps: AgentDepsT = None, + model_settings: ModelSettings | None = None, + usage_limits: UsageLimits | None = None, + usage: RunUsage | None = None, + infer_name: bool = True, + toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, + builtin_tools: Sequence[AbstractBuiltinTool] | None = None, + on_complete: OnCompleteFunc[EventT] | None = None, + ) -> AsyncIterator[EventT]: + """Run the agent with the protocol-specific request as input and stream protocol-specific events. + + Args: + output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no + output validators since output validators would expect an argument that matches the agent's output type. + message_history: History of the conversation so far. + deferred_tool_results: Optional results for deferred tool calls in the message history. + model: Optional model to use for this run, required if `model` was not set when creating the agent. + deps: Optional dependencies to use for this run. + model_settings: Optional settings to use for this model's request. + usage_limits: Optional limits on model request count or token usage. + usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. + infer_name: Whether to try to infer the agent name from the call frame if it's not set. + toolsets: Optional additional toolsets for this run. + builtin_tools: Optional additional builtin tools to use for this run. + on_complete: Optional callback function called when the agent run completes successfully. + The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can access `all_messages()` and other result data. + """ + return self.transform_stream( + self.run_stream_native( output_type=output_type, message_history=message_history, deferred_tool_results=deferred_tool_results, @@ -266,37 +309,14 @@ async def run_stream( builtin_tools=builtin_tools, ), on_complete=on_complete, - ): - yield event - - async def stream_response(self, stream: AsyncIterator[EventT], accept: str | None = None) -> Response: - """Stream a response to the client. - - Args: - stream: The stream of events to encode. - accept: The accept header value. 
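
`run_stream` is now a thin composition of the two layers above: `run_stream_native` yields Pydantic AI events and `transform_stream` converts them to protocol events. The layers can also be used separately, for instance to observe native events while still emitting protocol events; a sketch against the generic adapter interface:

```python
from collections.abc import AsyncIterator
from typing import Any

from pydantic_ai.ui import UIAdapter


async def run_with_logging(adapter: UIAdapter[Any, Any, Any, Any, Any]) -> AsyncIterator[Any]:
    async def logged() -> AsyncIterator[Any]:
        async for event in adapter.run_stream_native():
            print(type(event).__name__)  # e.g. PartStartEvent, AgentRunResultEvent
            yield event

    async for protocol_event in adapter.transform_stream(logged()):
        yield protocol_event
```
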
-        """
-        try:
-            from starlette.responses import StreamingResponse
-        except ImportError as e:  # pragma: no cover
-            raise ImportError(
-                'Please install the `starlette` package to use `BaseAdapter.stream_response()` method, '
-                'you can use the `ui` optional group — `pip install "pydantic-ai-slim[ui]"`'
-            ) from e
-
-        event_stream = self.build_event_stream(accept)
-        return StreamingResponse(
-            event_stream.encode_stream(stream),
-            headers=self.response_headers,
-            media_type=event_stream.content_type,
-        )
 
     @classmethod
     async def dispatch_request(
         cls,
-        agent: AbstractAgent[AgentDepsT, OutputDataT],
         request: Request,
         *,
+        agent: AbstractAgent[AgentDepsT, OutputDataT],
         message_history: Sequence[ModelMessage] | None = None,
         deferred_tool_results: DeferredToolResults | None = None,
         model: Model | KnownModelName | str | None = None,
@@ -310,7 +330,7 @@ async def dispatch_request(
         builtin_tools: Sequence[AbstractBuiltinTool] | None = None,
         on_complete: OnCompleteFunc[EventT] | None = None,
     ) -> Response:
-        """Handle an AG-UI request and return a streaming response.
+        """Handle a protocol-specific HTTP request by running the agent and return a streaming response of protocol-specific events.
 
         Args:
             agent: The agent to run.
@@ -331,18 +351,18 @@ async def dispatch_request(
             The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can access `all_messages()` and other result data.
 
         Returns:
-            A streaming Starlette response with AG-UI protocol events.
+            A streaming Starlette response with protocol-specific events encoded per the request's accept header value.
         """
         try:
             from starlette.responses import Response
         except ImportError as e:  # pragma: no cover
             raise ImportError(
-                'Please install the `starlette` package to use `BaseAdapter.dispatch_request()` method, '
+                'Please install the `starlette` package to use `dispatch_request()` method, '
                 'you can use the `ui` optional group — `pip install "pydantic-ai-slim[ui]"`'
             ) from e
 
         try:
-            request_data = await cls.validate_request(request)
+            adapter = await cls.from_request(request, agent=agent)
         except ValidationError as e:  # pragma: no cover
             return Response(
                 content=e.json(),
@@ -350,21 +370,19 @@ async def dispatch_request(
                 status_code=HTTPStatus.UNPROCESSABLE_ENTITY,
             )
 
-        adapter = cls(agent=agent, request=request_data)
-
-        run_stream = adapter.run_stream(
-            message_history=message_history,
-            deferred_tool_results=deferred_tool_results,
-            deps=deps,
-            output_type=output_type,
-            model=model,
-            model_settings=model_settings,
-            usage_limits=usage_limits,
-            usage=usage,
-            infer_name=infer_name,
-            toolsets=toolsets,
-            builtin_tools=builtin_tools,
-            on_complete=on_complete,
+        return adapter.streaming_response(
+            adapter.run_stream(
+                message_history=message_history,
+                deferred_tool_results=deferred_tool_results,
+                deps=deps,
+                output_type=output_type,
+                model=model,
+                model_settings=model_settings,
+                usage_limits=usage_limits,
+                usage=usage,
+                infer_name=infer_name,
+                toolsets=toolsets,
+                builtin_tools=builtin_tools,
+                on_complete=on_complete,
+            ),
         )
-
-        return await adapter.stream_response(run_stream, accept=request.headers.get('accept'))
diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py
index 745d01f718..a5fecc325c 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py
@@ -21,5 +21,5 @@ class AGUIApp(UIApp[AgentDepsT, OutputDataT]):
     """ASGI application for running Pydantic AI agents with AG-UI protocol 
support.""" - def __init__(self, agent: AbstractAgent[AgentDepsT, OutputDataT], **kwargs: Any) -> None: + def __init__(self, agent: AbstractAgent[AgentDepsT, OutputDataT], **kwargs: Any): super().__init__(AGUIAdapter[AgentDepsT, OutputDataT], agent, **kwargs) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index 9c858b63bc..87fa99b28b 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -97,34 +97,25 @@ class AGUIAdapter(UIAdapter[RunAgentInput, Message, BaseEvent, AgentDepsT, Outpu """TODO (DouwM): Docstring.""" @classmethod - async def validate_request(cls, request: Request) -> RunAgentInput: - """Validate the request and return the validated request.""" + async def build_run_input(cls, request: Request) -> RunAgentInput: + """Validate the request and return the validated run input.""" return RunAgentInput.model_validate(await request.json()) - def build_event_stream( - self, accept: str | None = None - ) -> UIEventStream[RunAgentInput, BaseEvent, AgentDepsT, OutputDataT]: - """Create an event stream for the adapter. - - Args: - accept: The accept header value. - - Returns: - The event stream. - """ - return AGUIEventStream(self.request, accept=accept) + def build_event_stream(self) -> UIEventStream[RunAgentInput, BaseEvent, AgentDepsT, OutputDataT]: + """Create an event stream for the adapter.""" + return AGUIEventStream(self.run_input, accept=self.accept) @cached_property def toolset(self) -> AbstractToolset[AgentDepsT] | None: """Get the toolset for the adapter.""" - if self.request.tools: - return _AGUIFrontendToolset[AgentDepsT](self.request.tools) + if self.run_input.tools: + return _AGUIFrontendToolset[AgentDepsT](self.run_input.tools) return None @cached_property def state(self) -> dict[str, Any] | None: """Get the state of the agent run.""" - return self.request.state + return self.run_input.state @cached_property def messages(self) -> list[ModelMessage]: @@ -136,7 +127,7 @@ def messages(self) -> list[ModelMessage]: Returns: List of Pydantic AI ModelMessage objects. """ - return self.load_messages(self.request.messages) + return self.load_messages(self.run_input.messages) @classmethod def load_messages(cls, messages: Sequence[Message]) -> list[ModelMessage]: diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index 91d93621cd..5e75a565fd 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -81,11 +81,7 @@ def _event_encoder(self) -> EventEncoder: @property def content_type(self) -> str: - """Get the content type for the event stream, compatible with the accept header value. - - Args: - accept: The accept header value. 
- """ + """Get the content type for the event stream, compatible with the accept header value.""" return self._event_encoder.get_content_type() def encode_event(self, event: BaseEvent) -> str: @@ -102,16 +98,16 @@ def encode_event(self, event: BaseEvent) -> str: async def before_stream(self) -> AsyncIterator[BaseEvent]: """Yield events before agent streaming starts.""" yield RunStartedEvent( - thread_id=self.request.thread_id, - run_id=self.request.run_id, + thread_id=self.run_input.thread_id, + run_id=self.run_input.run_id, ) async def after_stream(self) -> AsyncIterator[BaseEvent]: """Handle an AgentRunResultEvent, cleaning up any pending state.""" if not self._error: yield RunFinishedEvent( - thread_id=self.request.thread_id, - run_id=self.request.run_id, + thread_id=self.run_input.thread_id, + run_id=self.run_input.run_id, ) async def on_error(self, error: Exception) -> AsyncIterator[BaseEvent]: diff --git a/pydantic_ai_slim/pydantic_ai/ui/app.py b/pydantic_ai_slim/pydantic_ai/ui/app.py index d082788247..8af69db57a 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/app.py +++ b/pydantic_ai_slim/pydantic_ai/ui/app.py @@ -23,8 +23,8 @@ from starlette.types import ExceptionHandler, Lifespan except ImportError as e: # pragma: no cover raise ImportError( - 'Please install the `starlette` package to use `Agent.to_ag_ui()` method, ' - 'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`' + 'Please install the `starlette` package to use `UIApp`, ' + 'you can use the `ui` optional group — `pip install "pydantic-ai-slim[ui]"`' ) from e @@ -113,8 +113,8 @@ def __init__( async def run_agent(request: Request) -> Response: """Endpoint to run the agent with the provided input data.""" return await adapter_type.dispatch_request( - agent, request, + agent=agent, output_type=output_type, message_history=message_history, deferred_tool_results=deferred_tool_results, diff --git a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py index 0fce114a6f..d8abf61819 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py @@ -8,9 +8,9 @@ import inspect from abc import ABC, abstractmethod -from collections.abc import AsyncIterator, Awaitable, Callable +from collections.abc import AsyncIterator, Awaitable, Callable, Mapping from dataclasses import dataclass, field -from typing import Any, Generic, Literal, TypeAlias, TypeVar, cast +from typing import TYPE_CHECKING, Any, Generic, Literal, TypeAlias, TypeVar, cast from uuid import uuid4 from pydantic_ai import _utils @@ -40,6 +40,10 @@ from ..run import AgentRunResult, AgentRunResultEvent from ..tools import AgentDepsT +if TYPE_CHECKING: + from starlette.responses import StreamingResponse + + __all__ = [ 'UIEventStream', ] @@ -50,24 +54,24 @@ EventT = TypeVar('EventT') """Type variable for protocol-specific event types.""" -RunRequestT = TypeVar('RunRequestT') -"""Type variable for request types.""" +RunInputT = TypeVar('RunInputT') +"""Type variable for protocol-specific run input types.""" -SourceEvent = AgentStreamEvent | AgentRunResultEvent[Any] +NativeEvent = AgentStreamEvent | AgentRunResultEvent[Any] OnCompleteFunc: TypeAlias = ( Callable[[AgentRunResult[Any]], None] | Callable[[AgentRunResult[Any]], Awaitable[None]] | Callable[[AgentRunResult[Any]], AsyncIterator[EventT]] ) -"""Callback function type that receives the `AgentRunResult` of the completed run. 
Can be sync or async and can yield events."""
+"""Callback function type that receives the `AgentRunResult` of the completed run. Can be sync, async, or an async generator of protocol-specific events."""
 
 
 @dataclass
-class UIEventStream(ABC, Generic[RunRequestT, EventT, AgentDepsT, OutputDataT]):
+class UIEventStream(ABC, Generic[RunInputT, EventT, AgentDepsT, OutputDataT]):
     """TODO (DouwM): Docstring."""
 
-    request: RunRequestT
+    run_input: RunInputT
 
     accept: str | None = None
     """TODO (DouweM): Docstring"""
@@ -88,15 +92,17 @@ def new_message_id(self) -> str:
         self.message_id = str(uuid4())
         return self.message_id
 
+    @property
+    def response_headers(self) -> Mapping[str, str] | None:
+        """Response headers to return to the frontend."""
+        return None
+
     @property
     def content_type(self) -> str:
         """Get the content type for the event stream, compatible with the accept header value.
 
         By default, this returns the SSE content type (`text/event-stream`).
         If a subclass supports other types as well, it should consider `self.accept` in `encode_event` and return the resulting content type here.
-
-        Args:
-            accept: The accept header value.
         """
         return SSE_CONTENT_TYPE
 
@@ -118,8 +124,31 @@ async def encode_stream(self, stream: AsyncIterator[EventT]) -> AsyncIterator[st
         async for event in stream:
             yield self.encode_event(event)
 
-    async def handle_stream(  # noqa: C901
-        self, stream: AsyncIterator[SourceEvent], on_complete: OnCompleteFunc[EventT] | None = None
+    def streaming_response(self, stream: AsyncIterator[EventT]) -> StreamingResponse:
+        """Generate a streaming response from a stream of protocol-specific events.
+
+        Args:
+            stream: The stream of protocol-specific events to encode.
+
+        Returns:
+            A streaming Starlette response with encoded protocol-specific events.
+        """
+        try:
+            from starlette.responses import StreamingResponse
+        except ImportError as e:  # pragma: no cover
+            raise ImportError(
+                'Please install the `starlette` package to use the `streaming_response()` method, '
+                'you can use the `ui` optional group — `pip install "pydantic-ai-slim[ui]"`'
+            ) from e
+
+        return StreamingResponse(
+            self.encode_stream(stream),
+            headers=self.response_headers,
+            media_type=self.content_type,
+        )
+
+    async def transform_stream(  # noqa: C901
+        self, stream: AsyncIterator[NativeEvent], on_complete: OnCompleteFunc[EventT] | None = None
     ) -> AsyncIterator[EventT]:
         """Handle a stream of agent events.
@@ -222,7 +251,7 @@ async def _turn_to(self, to_turn: Literal['request', 'response'] | None) -> Asyn
             async for e in self.before_response():
                 yield e
 
-    async def handle_event(self, event: SourceEvent) -> AsyncIterator[EventT]:
+    async def handle_event(self, event: NativeEvent) -> AsyncIterator[EventT]:
         """Transform a Pydantic AI agent event into protocol-specific events.
 
         This method dispatches to specific `handle_*` methods based on event and part type.
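To make the new flow concrete: `transform_stream()` turns native Pydantic AI events into protocol-specific events, `encode_stream()` serializes them, and `streaming_response()` wraps the encoded stream in a Starlette response. A minimal sketch, modeled on the `DummyUIEventStream` in `tests/test_ui.py`; the subclass, its type parameters, and the `data:` framing are illustrative assumptions, not part of this series:

    import asyncio
    from collections.abc import AsyncIterator

    from pydantic_ai.messages import PartStartEvent, TextPart
    from pydantic_ai.ui.event_stream import NativeEvent, UIEventStream


    class PlainTextEventStream(UIEventStream[dict, str, None, str]):
        """Toy protocol (assumed): every native event becomes one string chunk."""

        def encode_event(self, event: str) -> str:
            # Assumed SSE-style framing; real protocols define their own encoding.
            return f'data: {event}\n\n'

        async def handle_event(self, event: NativeEvent) -> AsyncIterator[str]:
            # A real subclass dispatches per event/part type via the handle_* hooks.
            yield type(event).__name__


    async def demo() -> None:
        async def native_events() -> AsyncIterator[NativeEvent]:
            yield PartStartEvent(index=0, part=TextPart(content='Hello'))

        stream = PlainTextEventStream(run_input={})
        async for chunk in stream.encode_stream(stream.transform_stream(native_events())):
            print(chunk, end='')  # e.g. 'data: PartStartEvent'


    asyncio.run(demo())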
diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py index d29cd06690..b0013694b7 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py @@ -99,5 +99,5 @@ class VercelAIApp(UIApp[AgentDepsT, OutputDataT]): """ASGI application for running Pydantic AI agents with Vercel UI protocol support.""" - def __init__(self, agent: AbstractAgent[AgentDepsT, OutputDataT], **kwargs: Any) -> None: + def __init__(self, agent: AbstractAgent[AgentDepsT, OutputDataT], **kwargs: Any): super().__init__(VercelAIAdapter[AgentDepsT, OutputDataT], agent, **kwargs) diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py index 5512faec50..d90d3213d0 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py @@ -2,7 +2,7 @@ from __future__ import annotations -from collections.abc import Mapping, Sequence +from collections.abc import Sequence from dataclasses import dataclass from functools import cached_property @@ -56,9 +56,6 @@ 'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`' ) from e -# See https://ai-sdk.dev/docs/ai-sdk-ui/stream-protocol#data-stream-protocol -VERCEL_AI_DSP_HEADERS = {'x-vercel-ai-ui-message-stream': 'v1'} - __all__ = ['VercelAIAdapter'] @@ -68,19 +65,12 @@ class VercelAIAdapter(UIAdapter[RequestData, UIMessage, BaseChunk, AgentDepsT, O """TODO (DouwM): Docstring.""" @classmethod - async def validate_request(cls, request: Request) -> RequestData: + async def build_run_input(cls, request: Request) -> RequestData: """Validate a Vercel AI request.""" return request_data_ta.validate_json(await request.body()) - def build_event_stream( - self, accept: str | None = None - ) -> UIEventStream[RequestData, BaseChunk, AgentDepsT, OutputDataT]: - return VercelAIEventStream(self.request, accept=accept) - - @property - def response_headers(self) -> Mapping[str, str] | None: - """Get the response headers for the adapter.""" - return VERCEL_AI_DSP_HEADERS + def build_event_stream(self) -> UIEventStream[RequestData, BaseChunk, AgentDepsT, OutputDataT]: + return VercelAIEventStream(self.run_input, accept=self.accept) @cached_property def messages(self) -> list[ModelMessage]: @@ -95,7 +85,7 @@ def messages(self) -> list[ModelMessage]: Raises: ValueError: If message format is not supported. 
""" - return self.load_messages(self.request.messages) + return self.load_messages(self.run_input.messages) @classmethod def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: # noqa: C901 diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py index 0e1e41d56b..bdd1edd512 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py @@ -2,7 +2,7 @@ from __future__ import annotations -from collections.abc import AsyncIterator +from collections.abc import AsyncIterator, Mapping from dataclasses import dataclass from typing import Any @@ -49,6 +49,9 @@ __all__ = ['VercelAIEventStream'] +# See https://ai-sdk.dev/docs/ai-sdk-ui/stream-protocol#data-stream-protocol +VERCEL_AI_DSP_HEADERS = {'x-vercel-ai-ui-message-stream': 'v1'} + def _json_dumps(obj: Any) -> str: """Dump an object to JSON string.""" @@ -61,6 +64,11 @@ class VercelAIEventStream(UIEventStream[RequestData, BaseChunk, AgentDepsT, Outp _step_started: bool = False + @property + def response_headers(self) -> Mapping[str, str] | None: + """Get the response headers for the adapter.""" + return VERCEL_AI_DSP_HEADERS + def encode_event(self, event: BaseChunk) -> str: if isinstance(event, DoneChunk): return 'data: [DONE]\n\n' diff --git a/tests/test_ui.py b/tests/test_ui.py index 7a4886284d..ef345f6456 100644 --- a/tests/test_ui.py +++ b/tests/test_ui.py @@ -47,7 +47,7 @@ from pydantic_ai.tools import ToolDefinition from pydantic_ai.toolsets import AbstractToolset, ExternalToolset from pydantic_ai.ui.adapter import UIAdapter -from pydantic_ai.ui.event_stream import SourceEvent, UIEventStream +from pydantic_ai.ui.event_stream import NativeEvent, UIEventStream from .conftest import try_import @@ -68,56 +68,55 @@ ] -class UIRequest(BaseModel): +class DummyUIRunInput(BaseModel): messages: list[ModelMessage] = field(default_factory=list) tool_defs: list[ToolDefinition] = field(default_factory=list) state: dict[str, Any] = field(default_factory=dict) -class UIState(BaseModel): +class DummyUIState(BaseModel): country: str | None = None @dataclass -class UIDeps: - state: UIState +class DummyUIDeps: + state: DummyUIState -class UIAdapter(UIAdapter[UIRequest, ModelMessage, str, AgentDepsT, OutputDataT]): +class DummyUIAdapter(UIAdapter[DummyUIRunInput, ModelMessage, str, AgentDepsT, OutputDataT]): @classmethod - async def validate_request(cls, request: Request) -> UIRequest: - return UIRequest.model_validate(await request.json()) + async def build_run_input(cls, request: Request) -> DummyUIRunInput: + return DummyUIRunInput.model_validate(await request.json()) @classmethod def load_messages(cls, messages: Sequence[ModelMessage]) -> list[ModelMessage]: return list(messages) - def build_event_stream(self, accept: str | None = None) -> UIEventStream[AgentDepsT, OutputDataT]: - return UIEventStream[AgentDepsT, OutputDataT](self.request, accept=accept) + def build_event_stream(self) -> UIEventStream[DummyUIRunInput, str, AgentDepsT, OutputDataT]: + return DummyUIEventStream[AgentDepsT, OutputDataT](self.run_input, accept=self.accept) @cached_property def messages(self) -> list[ModelMessage]: - return self.load_messages(self.request.messages) + return self.load_messages(self.run_input.messages) @cached_property def state(self) -> dict[str, Any] | None: - return self.request.state + return self.run_input.state @cached_property def toolset(self) -> AbstractToolset[AgentDepsT] | None: - 
return ExternalToolset(self.request.tool_defs) if self.request.tool_defs else None + return ExternalToolset(self.run_input.tool_defs) if self.run_input.tool_defs else None + +class DummyUIEventStream(UIEventStream[DummyUIRunInput, str, AgentDepsT, OutputDataT]): @property def response_headers(self) -> dict[str, str]: return {'x-test': 'test'} - -@dataclass(kw_only=True) -class UIEventStream(UIEventStream[UIRequest, str, AgentDepsT, OutputDataT]): def encode_event(self, event: str) -> str: return event - async def handle_event(self, event: SourceEvent) -> AsyncIterator[str]: + async def handle_event(self, event: NativeEvent) -> AsyncIterator[str]: # yield f'[{event.event_kind}]' async for e in super().handle_event(event): yield e @@ -224,8 +223,8 @@ async def stream_function( agent = Agent(model=FunctionModel(stream_function=stream_function)) - request = UIRequest(messages=[ModelRequest.user_text_prompt('Tell me about Hello World')]) - adapter = UIAdapter(agent, request) + request = DummyUIRunInput(messages=[ModelRequest.user_text_prompt('Tell me about Hello World')]) + adapter = DummyUIAdapter(agent, request) events = [event async for event in adapter.run_stream()] assert events == snapshot( @@ -289,8 +288,8 @@ async def stream_function( agent = Agent(model=FunctionModel(stream_function=stream_function)) - request = UIRequest(messages=[ModelRequest.user_text_prompt('Tell me about Hello World')]) - adapter = UIAdapter(agent, request) + request = DummyUIRunInput(messages=[ModelRequest.user_text_prompt('Tell me about Hello World')]) + adapter = DummyUIAdapter(agent, request) events = [event async for event in adapter.run_stream()] assert events == snapshot( @@ -345,8 +344,8 @@ async def web_search(query: str) -> dict[str, list[dict[str, str]]]: ] } - request = UIRequest(messages=[ModelRequest.user_text_prompt('Tell me about Hello World')]) - adapter = UIAdapter(agent, request) + request = DummyUIRunInput(messages=[ModelRequest.user_text_prompt('Tell me about Hello World')]) + adapter = DummyUIAdapter(agent, request) events = [event async for event in adapter.run_stream()] assert events == snapshot( @@ -376,9 +375,9 @@ async def test_event_stream_file(): async def event_generator(): yield PartStartEvent(index=0, part=FilePart(content=BinaryImage(data=b'fake', media_type='image/png'))) - request = UIRequest(messages=[ModelRequest.user_text_prompt('Hello')]) - event_stream = UIEventStream(request=request) - events = [event async for event in event_stream.handle_stream(event_generator())] + request = DummyUIRunInput(messages=[ModelRequest.user_text_prompt('Hello')]) + event_stream = DummyUIEventStream(run_input=request) + events = [event async for event in event_stream.transform_stream(event_generator())] assert events == snapshot( [ @@ -394,11 +393,11 @@ async def event_generator(): async def test_run_stream_external_tools(): agent = Agent(model=TestModel()) - request = UIRequest( + request = DummyUIRunInput( messages=[ModelRequest.user_text_prompt('Call a tool')], tool_defs=[ToolDefinition(name='external_tool')], ) - adapter = UIAdapter(agent, request) + adapter = DummyUIAdapter(agent, request) events = [event async for event in adapter.run_stream()] assert events == snapshot( @@ -451,8 +450,8 @@ def web_search(query: str) -> dict[str, list[dict[str, str]]]: agent = Agent(model=FunctionModel(stream_function=stream_function), output_type=web_search) - request = UIRequest(messages=[ModelRequest.user_text_prompt('Tell me about Hello World')]) - adapter = UIAdapter(agent, request) + request = 
DummyUIRunInput(messages=[ModelRequest.user_text_prompt('Tell me about Hello World')]) + adapter = DummyUIAdapter(agent, request) events = [event async for event in adapter.run_stream()] assert events == snapshot( @@ -485,8 +484,8 @@ async def stream_function( agent = Agent(model=FunctionModel(stream_function=stream_function)) - request = UIRequest(messages=[ModelRequest.user_text_prompt('Tell me about Hello World')]) - adapter = UIAdapter(agent, request) + request = DummyUIRunInput(messages=[ModelRequest.user_text_prompt('Tell me about Hello World')]) + adapter = DummyUIAdapter(agent, request) events = [event async for event in adapter.run_stream()] assert events == snapshot( @@ -517,8 +516,8 @@ async def test_run_stream_request_error(): async def tool(query: str) -> str: raise ValueError('Unknown tool') - request = UIRequest(messages=[ModelRequest.user_text_prompt('Hello')]) - adapter = UIAdapter(agent, request) + request = DummyUIRunInput(messages=[ModelRequest.user_text_prompt('Hello')]) + adapter = DummyUIAdapter(agent, request) events = [event async for event in adapter.run_stream()] assert events == snapshot( @@ -540,12 +539,12 @@ async def tool(query: str) -> str: async def test_run_stream_on_complete_error(): agent = Agent(model=TestModel()) - request = UIRequest(messages=[ModelRequest.user_text_prompt('Hello')]) + request = DummyUIRunInput(messages=[ModelRequest.user_text_prompt('Hello')]) def raise_error(run_result: AgentRunResult[Any]) -> None: raise ValueError('Faulty on_complete') - adapter = UIAdapter(agent, request) + adapter = DummyUIAdapter(agent, request) events = [event async for event in adapter.run_stream(on_complete=raise_error)] assert events == snapshot( @@ -569,12 +568,12 @@ def raise_error(run_result: AgentRunResult[Any]) -> None: async def test_run_stream_on_complete(): agent = Agent(model=TestModel()) - request = UIRequest(messages=[ModelRequest.user_text_prompt('Hello')]) + request = DummyUIRunInput(messages=[ModelRequest.user_text_prompt('Hello')]) async def on_complete(run_result: AgentRunResult[Any]) -> AsyncIterator[str]: yield '' - adapter = UIAdapter(agent, request) + adapter = DummyUIAdapter(agent, request) events = [event async for event in adapter.run_stream(on_complete=on_complete)] assert events == snapshot( @@ -599,7 +598,7 @@ async def on_complete(run_result: AgentRunResult[Any]) -> AsyncIterator[str]: @pytest.mark.skipif(not starlette_import_successful, reason='Starlette is not installed') async def test_adapter_dispatch_request(): agent = Agent(model=TestModel()) - request = UIRequest(messages=[ModelRequest.user_text_prompt('Hello')]) + request = DummyUIRunInput(messages=[ModelRequest.user_text_prompt('Hello')]) async def receive() -> dict[str, Any]: return {'type': 'http.request', 'body': request.model_dump_json().encode('utf-8')} @@ -615,7 +614,7 @@ async def receive() -> dict[str, Any]: receive=receive, ) - response = await UIAdapter.dispatch_request(agent, starlette_request) + response = await DummyUIAdapter.dispatch_request(starlette_request, agent=agent) assert isinstance(response, StreamingResponse) diff --git a/tests/test_vercel_ai.py b/tests/test_vercel_ai.py index 4097ca52aa..e8ebd11d82 100644 --- a/tests/test_vercel_ai.py +++ b/tests/test_vercel_ai.py @@ -178,7 +178,7 @@ async def test_run(allow_model_requests: None, openai_api_key: str): ], ) - adapter = VercelAIAdapter(agent, request=data) + adapter = VercelAIAdapter(agent, run_input=data) assert adapter.messages == snapshot( [ ModelRequest( @@ -1311,10 +1311,10 @@ async def 
event_generator(): ), ], ) - event_stream = VercelAIEventStream(request=request) + event_stream = VercelAIEventStream(run_input=request) events = [ '[DONE]' if '[DONE]' in event else json.loads(event.removeprefix('data: ')) - async for event in event_stream.encode_stream(event_stream.handle_stream(event_generator())) + async for event in event_stream.encode_stream(event_stream.transform_stream(event_generator())) ] assert events == snapshot( @@ -1623,7 +1623,7 @@ async def receive() -> dict[str, Any]: receive=receive, ) - response = await VercelAIAdapter.dispatch_request(agent, starlette_request) + response = await VercelAIAdapter.dispatch_request(starlette_request, agent=agent) assert isinstance(response, StreamingResponse) From 5cf8802075588cfcf6112ea11c2c812828df87a7 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Mon, 27 Oct 2025 23:26:40 +0000 Subject: [PATCH 35/46] Clean up Pydantic AI message building --- pydantic_ai_slim/pydantic_ai/ag_ui.py | 3 +- .../pydantic_ai/agent/abstract.py | 1 - pydantic_ai_slim/pydantic_ai/ui/__init__.py | 2 + pydantic_ai_slim/pydantic_ai/ui/adapter.py | 16 ++--- .../pydantic_ai/ui/ag_ui/__init__.py | 3 +- .../pydantic_ai/ui/ag_ui/_adapter.py | 37 +++------- .../pydantic_ai/ui/ag_ui/_event_stream.py | 2 +- pydantic_ai_slim/pydantic_ai/ui/app.py | 27 +++++--- .../pydantic_ai/ui/event_stream.py | 4 +- .../pydantic_ai/ui/messages_builder.py | 26 +++++++ .../pydantic_ai/ui/vercel_ai/_adapter.py | 69 +++++++------------ .../pydantic_ai/ui/vercel_ai/_event_stream.py | 2 +- 12 files changed, 95 insertions(+), 97 deletions(-) create mode 100644 pydantic_ai_slim/pydantic_ai/ui/messages_builder.py diff --git a/pydantic_ai_slim/pydantic_ai/ag_ui.py b/pydantic_ai_slim/pydantic_ai/ag_ui.py index 70a4498188..9926e2fb7e 100644 --- a/pydantic_ai_slim/pydantic_ai/ag_ui.py +++ b/pydantic_ai_slim/pydantic_ai/ag_ui.py @@ -23,9 +23,8 @@ from ag_ui.core import BaseEvent from ag_ui.core.types import RunAgentInput - from .ui import OnCompleteFunc, StateDeps, StateHandler + from .ui import SSE_CONTENT_TYPE, OnCompleteFunc, StateDeps, StateHandler from .ui.ag_ui import ( - SSE_CONTENT_TYPE, AGUIAdapter, AGUIApp, ) diff --git a/pydantic_ai_slim/pydantic_ai/agent/abstract.py b/pydantic_ai_slim/pydantic_ai/agent/abstract.py index aa31f1e572..8c5174fc49 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/abstract.py +++ b/pydantic_ai_slim/pydantic_ai/agent/abstract.py @@ -755,7 +755,6 @@ async def run_agent() -> AgentRunResult[Any]: yield message result = await task - # TODO (DouweM): Consider adding this to every event stream, if we're adding new events anyway yield AgentRunResultEvent(result) @overload diff --git a/pydantic_ai_slim/pydantic_ai/ui/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/__init__.py index 87c2c485e9..96ef9a177d 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/ui/__init__.py @@ -9,6 +9,7 @@ from .adapter import OnCompleteFunc, StateDeps, StateHandler, UIAdapter from .app import UIApp from .event_stream import SSE_CONTENT_TYPE, UIEventStream +from .messages_builder import MessagesBuilder __all__ = [ 'UIAdapter', @@ -18,4 +19,5 @@ 'StateHandler', 'OnCompleteFunc', 'UIApp', + 'MessagesBuilder', ] diff --git a/pydantic_ai_slim/pydantic_ai/ui/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/adapter.py index 44ee061d23..97d973ec1f 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/adapter.py @@ -106,7 +106,7 @@ class StateDeps(Generic[StateT]): @dataclass class UIAdapter(ABC, 
Generic[RunInputT, MessageT, EventT, AgentDepsT, OutputDataT]): - """TODO (DouwM): Docstring.""" + """TODO (DouweM): Docstring.""" agent: AbstractAgent[AgentDepsT, OutputDataT] """The Pydantic AI agent to run.""" @@ -123,7 +123,7 @@ class UIAdapter(ABC, Generic[RunInputT, MessageT, EventT, AgentDepsT, OutputData async def from_request( cls, request: Request, *, agent: AbstractAgent[AgentDepsT, OutputDataT] ) -> UIAdapter[RunInputT, MessageT, EventT, AgentDepsT, OutputDataT]: - """Create an adapter from a protocol-specific request.""" + """Create an adapter from a protocol-specific run input.""" return cls( agent=agent, run_input=await cls.build_run_input(request), @@ -150,17 +150,17 @@ def build_event_stream(self) -> UIEventStream[RunInputT, EventT, AgentDepsT, Out @cached_property @abstractmethod def messages(self) -> list[ModelMessage]: - """Pydantic AI messages from the protocol-specific request.""" + """Pydantic AI messages from the protocol-specific run input.""" raise NotImplementedError @cached_property def toolset(self) -> AbstractToolset[AgentDepsT] | None: - """Toolset representing frontend tools from the protocol-specific request.""" + """Toolset representing frontend tools from the protocol-specific run input.""" return None @cached_property def state(self) -> dict[str, Any] | None: - """Run state from the protocol-specific request.""" + """Run state from the protocol-specific run input.""" return None def transform_stream( @@ -210,7 +210,7 @@ def run_stream_native( toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, builtin_tools: Sequence[AbstractBuiltinTool] | None = None, ) -> AsyncIterator[NativeEvent]: - """Run the agent with the protocol-specific request as input and stream Pydantic AI events. + """Run the agent with the protocol-specific run input and stream Pydantic AI events. Args: output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no @@ -276,7 +276,7 @@ def run_stream( builtin_tools: Sequence[AbstractBuiltinTool] | None = None, on_complete: OnCompleteFunc[EventT] | None = None, ) -> AsyncIterator[EventT]: - """Run the agent with the protocol-specific request as input and stream protocol-specific events. + """Run the agent with the protocol-specific run input and stream protocol-specific events. Args: output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no @@ -333,8 +333,8 @@ async def dispatch_request( """Handle an protocol-specific HTTP request by running the agent and return a streaming response of protocol-specific events. Args: - agent: The agent to run. request: The incoming Starlette/FastAPI request. + agent: The agent to run. output_type: Custom output type to use for this run, `output_type` may only be used if the agent has no output validators since output validators would expect an argument that matches the agent's output type. message_history: History of the conversation so far. diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py index a5fecc325c..45d77fce83 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py @@ -8,12 +8,11 @@ from .. 
import UIApp from ._adapter import AGUIAdapter -from ._event_stream import SSE_CONTENT_TYPE, AGUIEventStream +from ._event_stream import AGUIEventStream __all__ = [ 'AGUIAdapter', 'AGUIEventStream', - 'SSE_CONTENT_TYPE', 'AGUIApp', ] diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index 87fa99b28b..e1960f5a9a 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -15,10 +15,6 @@ BuiltinToolCallPart, BuiltinToolReturnPart, ModelMessage, - ModelRequest, - ModelRequestPart, - ModelResponse, - ModelResponsePart, SystemPromptPart, TextPart, ToolCallPart, @@ -43,6 +39,7 @@ from ..adapter import UIAdapter from ..event_stream import UIEventStream + from ..messages_builder import MessagesBuilder from ._event_stream import BUILTIN_TOOL_CALL_ID_PREFIX, AGUIEventStream except ImportError as e: # pragma: no cover raise ImportError( @@ -94,7 +91,7 @@ def label(self) -> str: class AGUIAdapter(UIAdapter[RunAgentInput, Message, BaseEvent, AgentDepsT, OutputDataT]): - """TODO (DouwM): Docstring.""" + """TODO (DouweM): Docstring.""" @classmethod async def build_run_input(cls, request: Request) -> RunAgentInput: @@ -132,31 +129,24 @@ def messages(self) -> list[ModelMessage]: @classmethod def load_messages(cls, messages: Sequence[Message]) -> list[ModelMessage]: """Load messages from the request and return the loaded messages.""" - result: list[ModelMessage] = [] + builder = MessagesBuilder() tool_calls: dict[str, str] = {} # Tool call ID to tool name mapping. - request_parts: list[ModelRequestPart] | None = None - response_parts: list[ModelResponsePart] | None = None for msg in messages: if isinstance(msg, UserMessage | SystemMessage | DeveloperMessage) or ( isinstance(msg, ToolMessage) and not msg.tool_call_id.startswith(BUILTIN_TOOL_CALL_ID_PREFIX) ): - if request_parts is None: - request_parts = [] - result.append(ModelRequest(parts=request_parts)) - response_parts = None - if isinstance(msg, UserMessage): - request_parts.append(UserPromptPart(content=msg.content)) + builder.add(UserPromptPart(content=msg.content)) elif isinstance(msg, SystemMessage | DeveloperMessage): - request_parts.append(SystemPromptPart(content=msg.content)) + builder.add(SystemPromptPart(content=msg.content)) else: tool_call_id = msg.tool_call_id tool_name = tool_calls.get(tool_call_id) if tool_name is None: # pragma: no cover raise ValueError(f'Tool call with ID {tool_call_id} not found in the history.') - request_parts.append( + builder.add( ToolReturnPart( tool_name=tool_name, content=msg.content, @@ -167,14 +157,9 @@ def load_messages(cls, messages: Sequence[Message]) -> list[ModelMessage]: elif isinstance(msg, AssistantMessage) or ( # pragma: no branch isinstance(msg, ToolMessage) and msg.tool_call_id.startswith(BUILTIN_TOOL_CALL_ID_PREFIX) ): - if response_parts is None: - response_parts = [] - result.append(ModelResponse(parts=response_parts)) - request_parts = None - if isinstance(msg, AssistantMessage): if msg.content: - response_parts.append(TextPart(content=msg.content)) + builder.add(TextPart(content=msg.content)) if msg.tool_calls: for tool_call in msg.tool_calls: @@ -184,7 +169,7 @@ def load_messages(cls, messages: Sequence[Message]) -> list[ModelMessage]: if tool_call_id.startswith(BUILTIN_TOOL_CALL_ID_PREFIX): _, provider_name, tool_call_id = tool_call_id.split('|', 2) - response_parts.append( + builder.add( BuiltinToolCallPart( tool_name=tool_name, args=tool_call.function.arguments, 
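These hunks swap the hand-rolled request/response grouping for the `MessagesBuilder` introduced later in this patch and exported from `pydantic_ai.ui`. A minimal sketch of the grouping contract: consecutive request parts share one `ModelRequest`, and the first response part starts a fresh `ModelResponse`.

    from pydantic_ai.messages import ModelRequest, ModelResponse, SystemPromptPart, TextPart, UserPromptPart
    from pydantic_ai.ui import MessagesBuilder

    builder = MessagesBuilder()
    builder.add(SystemPromptPart(content='Answer briefly.'))
    builder.add(UserPromptPart(content='Hi'))  # merged into the same ModelRequest
    builder.add(TextPart(content='Hello!'))    # response part opens a new ModelResponse

    assert isinstance(builder.messages[0], ModelRequest)
    assert len(builder.messages[0].parts) == 2
    assert isinstance(builder.messages[1], ModelResponse)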
@@ -193,7 +178,7 @@ def load_messages(cls, messages: Sequence[Message]) -> list[ModelMessage]: ) ) else: - response_parts.append( + builder.add( ToolCallPart( tool_name=tool_name, tool_call_id=tool_call_id, @@ -207,7 +192,7 @@ def load_messages(cls, messages: Sequence[Message]) -> list[ModelMessage]: raise ValueError(f'Tool call with ID {tool_call_id} not found in the history.') _, provider_name, tool_call_id = tool_call_id.split('|', 2) - response_parts.append( + builder.add( BuiltinToolReturnPart( tool_name=tool_name, content=msg.content, @@ -216,4 +201,4 @@ def load_messages(cls, messages: Sequence[Message]) -> list[ModelMessage]: ) ) - return result + return builder.messages diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index 5e75a565fd..4049384f37 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -69,7 +69,7 @@ @dataclass class AGUIEventStream(UIEventStream[RunAgentInput, BaseEvent, AgentDepsT, OutputDataT]): - """TODO (DouwM): Docstring.""" + """TODO (DouweM): Docstring.""" _thinking_text: bool = False _builtin_tool_call_ids: dict[str, str] = field(default_factory=dict) diff --git a/pydantic_ai_slim/pydantic_ai/ui/app.py b/pydantic_ai_slim/pydantic_ai/ui/app.py index 8af69db57a..d6d4cb0192 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/app.py +++ b/pydantic_ai_slim/pydantic_ai/ui/app.py @@ -1,10 +1,13 @@ from __future__ import annotations from collections.abc import Callable, Mapping, Sequence -from typing import Any, Generic, Self +from typing import Any, Generic + +from typing_extensions import Self from .. import DeferredToolResults from ..agent import AbstractAgent +from ..builtin_tools import AbstractBuiltinTool from ..messages import ModelMessage from ..models import KnownModelName, Model from ..output import OutputDataT, OutputSpec @@ -12,7 +15,7 @@ from ..tools import AgentDepsT from ..toolsets import AbstractToolset from ..usage import RunUsage, UsageLimits -from .adapter import UIAdapter +from .adapter import OnCompleteFunc, UIAdapter try: from starlette.applications import Starlette @@ -36,7 +39,7 @@ def __init__( adapter_type: type[UIAdapter[Any, Any, Any, AgentDepsT, OutputDataT]], agent: AbstractAgent[AgentDepsT, OutputDataT], *, - # Agent.iter parameters. + # UIAdapter.dispatch_request parameters output_type: OutputSpec[Any] | None = None, message_history: Sequence[ModelMessage] | None = None, deferred_tool_results: DeferredToolResults | None = None, @@ -47,7 +50,9 @@ def __init__( usage: RunUsage | None = None, infer_name: bool = True, toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, - # Starlette parameters. + builtin_tools: Sequence[AbstractBuiltinTool] | None = None, + on_complete: OnCompleteFunc[Any] | None = None, + # Starlette parameters debug: bool = False, routes: Sequence[BaseRoute] | None = None, middleware: Sequence[Middleware] | None = None, @@ -58,12 +63,11 @@ def __init__( ) -> None: """An ASGI application that handles every request by running the agent and streaming the response. - # TODO (DouweM): Docstring - Note that the `deps` will be the same for each request, with the exception of the AG-UI state that's - injected into the `state` field of a `deps` object that implements the [`StateHandler`][pydantic_ai.ag_ui.StateHandler] protocol. 
+ Note that the `deps` will be the same for each request, with the exception of the frontend state that's + injected into the `state` field of a `deps` object that implements the [`StateHandler`][pydantic_ai.ui.StateHandler] protocol. To provide different `deps` for each request (e.g. based on the authenticated user), - use [`pydantic_ai.ag_ui.run_ag_ui`][pydantic_ai.ag_ui.run_ag_ui] or - [`pydantic_ai.ag_ui.handle_ag_ui_request`][pydantic_ai.ag_ui.handle_ag_ui_request] instead. + use [`UIAdapter.run_stream()`][pydantic_ai.ui.UIAdapter.run_stream] or + [`UIAdapter.dispatch_request()`][pydantic_ai.ui.UIAdapter.dispatch_request] instead. Args: adapter_type: The type of the UI adapter to use. @@ -81,6 +85,9 @@ def __init__( usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. infer_name: Whether to try to infer the agent name from the call frame if it's not set. toolsets: Optional additional toolsets for this run. + builtin_tools: Optional additional builtin tools for this run. + on_complete: Optional callback function called when the agent run completes successfully. + The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can access `all_messages()` and other result data. debug: Boolean indicating if debug tracebacks should be returned on errors. routes: A list of routes to serve incoming HTTP and WebSocket requests. @@ -125,6 +132,8 @@ async def run_agent(request: Request) -> Response: usage=usage, infer_name=infer_name, toolsets=toolsets, + builtin_tools=builtin_tools, + on_complete=on_complete, ) self.router.add_route('/', run_agent, methods=['POST']) diff --git a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py index d8abf61819..daf66e3254 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/event_stream.py @@ -69,7 +69,7 @@ @dataclass class UIEventStream(ABC, Generic[RunInputT, EventT, AgentDepsT, OutputDataT]): - """TODO (DouwM): Docstring.""" + """TODO (DouweM): Docstring.""" run_input: RunInputT @@ -164,8 +164,6 @@ async def transform_stream( # noqa: C901 try: async for event in stream: - # TODO (DouweM): Introduce, possibly, MessageStartEvent, MessageEndEvent with ModelRequest/Response? - # People have requested these before. 
We can store Request and Response if isinstance(event, PartStartEvent): async for e in self._turn_to('response'): yield e diff --git a/pydantic_ai_slim/pydantic_ai/ui/messages_builder.py b/pydantic_ai_slim/pydantic_ai/ui/messages_builder.py new file mode 100644 index 0000000000..195fa842f6 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/messages_builder.py @@ -0,0 +1,26 @@ +from dataclasses import dataclass, field +from typing import cast + +from pydantic_ai._utils import get_union_args +from pydantic_ai.messages import ModelMessage, ModelRequest, ModelRequestPart, ModelResponse, ModelResponsePart + + +@dataclass +class MessagesBuilder: + """Helper class to build Pydantic AI messages from protocol-specific messages.""" + + messages: list[ModelMessage] = field(default_factory=list) + + def add(self, part: ModelRequest | ModelResponse | ModelRequestPart | ModelResponsePart) -> None: + """Add a new part, creating a new request or response message if necessary.""" + last_message = self.messages[-1] if self.messages else None + if isinstance(part, get_union_args(ModelRequestPart)): + if isinstance(last_message, ModelRequest): + last_message.parts = [*last_message.parts, cast(ModelRequestPart, part)] + else: + self.messages.append(ModelRequest(parts=[part])) + else: + if isinstance(last_message, ModelResponse): + last_message.parts = [*last_message.parts, part] + else: + self.messages.append(ModelResponse(parts=[part])) diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py index d90d3213d0..5adb1b76c3 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py @@ -16,10 +16,6 @@ FilePart, ImageUrl, ModelMessage, - ModelRequest, - ModelRequestPart, - ModelResponse, - ModelResponsePart, RetryPromptPart, SystemPromptPart, TextPart, @@ -32,6 +28,7 @@ from ...output import OutputDataT from ..adapter import UIAdapter from ..event_stream import UIEventStream +from ..messages_builder import MessagesBuilder from ._event_stream import VercelAIEventStream from ._request_types import ( DataUIPart, @@ -62,7 +59,7 @@ @dataclass class VercelAIAdapter(UIAdapter[RequestData, UIMessage, BaseChunk, AgentDepsT, OutputDataT]): - """TODO (DouwM): Docstring.""" + """TODO (DouweM): Docstring.""" @classmethod async def build_run_input(cls, request: Request) -> RequestData: @@ -90,23 +87,16 @@ def messages(self) -> list[ModelMessage]: @classmethod def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: # noqa: C901 """Load messages from the request and return the loaded messages.""" - result: list[ModelMessage] = [] - request_parts: list[ModelRequestPart] | None = None - response_parts: list[ModelResponsePart] | None = None + builder = MessagesBuilder() for msg in messages: if msg.role in ('system', 'user'): - if request_parts is None: # TODO (DouweM): coverage branch - request_parts = [] - result.append(ModelRequest(parts=request_parts)) - response_parts = None - for part in msg.parts: if isinstance(part, TextUIPart): if msg.role == 'system': - request_parts.append(SystemPromptPart(content=part.text)) # TODO (DouweM): coverage + builder.add(SystemPromptPart(content=part.text)) # TODO (DouweM): coverage else: - request_parts.append(UserPromptPart(content=part.text)) + builder.add(UserPromptPart(content=part.text)) elif isinstance(part, FileUIPart): # TODO (DouweM): coverage try: file = BinaryContent.from_data_uri(part.url) @@ -121,19 +111,14 @@ def 
load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: # file = AudioUrl(url=part.url, media_type=part.media_type) case _: file = DocumentUrl(url=part.url, media_type=part.media_type) - request_parts.append(UserPromptPart(content=[file])) + builder.add(UserPromptPart(content=[file])) elif msg.role == 'assistant': # TODO (DouweM): coverage branch for part in msg.parts: - if response_parts is None: - response_parts = [] - result.append(ModelResponse(parts=response_parts)) - request_parts = None - if isinstance(part, TextUIPart): - response_parts.append(TextPart(content=part.text)) + builder.add(TextPart(content=part.text)) elif isinstance(part, ReasoningUIPart): - response_parts.append(ThinkingPart(content=part.text)) # TODO (DouweM): coverage + builder.add(ThinkingPart(content=part.text)) # TODO (DouweM): coverage elif isinstance(part, FileUIPart): # TODO (DouweM): coverage try: file = BinaryContent.from_data_uri(part.url) @@ -142,7 +127,7 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: # raise ValueError( 'Vercel AI integration can currently only handle assistant file parts with data URIs.' ) from e - response_parts.append(FilePart(content=file)) + builder.add(FilePart(content=file)) elif isinstance(part, DataUIPart): # Not currently supported pass @@ -159,7 +144,7 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: # if builtin_tool: # TODO (DouweM): coverage call_part = BuiltinToolCallPart(tool_name=tool_name, tool_call_id=tool_call_id, args=args) - response_parts.append(call_part) + builder.add(call_part) if isinstance(part, ToolOutputAvailablePart | ToolOutputErrorPart): if part.state == 'output-available': @@ -172,30 +157,26 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: # ) call_part.provider_name = provider_name - return_part = BuiltinToolReturnPart( - tool_name=tool_name, - tool_call_id=tool_call_id, - content=output, - provider_name=provider_name, + builder.add( + BuiltinToolReturnPart( + tool_name=tool_name, + tool_call_id=tool_call_id, + content=output, + provider_name=provider_name, + ) ) - response_parts.append(return_part) else: - call_part = ToolCallPart(tool_name=tool_name, tool_call_id=tool_call_id, args=args) - response_parts.append(call_part) + builder.add(ToolCallPart(tool_name=tool_name, tool_call_id=tool_call_id, args=args)) - result_part: ToolReturnPart | RetryPromptPart | None = None if part.state == 'output-available': - result_part = ToolReturnPart( - tool_name=tool_name, tool_call_id=tool_call_id, content=part.output + builder.add( + ToolReturnPart(tool_name=tool_name, tool_call_id=tool_call_id, content=part.output) ) elif part.state == 'output-error': # TODO (DouweM): coverage - result_part = RetryPromptPart( - tool_name=tool_name, tool_call_id=tool_call_id, content=part.error_text + builder.add( + RetryPromptPart( + tool_name=tool_name, tool_call_id=tool_call_id, content=part.error_text + ) ) - if result_part: # TODO (DouweM): coverage branch - request_parts = [result_part] - result.append(ModelRequest(parts=request_parts)) - response_parts = None - - return result + return builder.messages diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py index bdd1edd512..3952e3f131 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py @@ -60,7 +60,7 @@ def _json_dumps(obj: Any) -> str: @dataclass class 
VercelAIEventStream(UIEventStream[RequestData, BaseChunk, AgentDepsT, OutputDataT]): - """TODO (DouwM): Docstring.""" + """TODO (DouweM): Docstring.""" _step_started: bool = False From e545e5ceb07d98f9dea0e940dc69f0ef623234a5 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Mon, 27 Oct 2025 23:57:54 +0000 Subject: [PATCH 36/46] fix lint --- pydantic_ai_slim/pydantic_ai/ui/messages_builder.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/ui/messages_builder.py b/pydantic_ai_slim/pydantic_ai/ui/messages_builder.py index 195fa842f6..edbae248cf 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/messages_builder.py +++ b/pydantic_ai_slim/pydantic_ai/ui/messages_builder.py @@ -11,15 +11,17 @@ class MessagesBuilder: messages: list[ModelMessage] = field(default_factory=list) - def add(self, part: ModelRequest | ModelResponse | ModelRequestPart | ModelResponsePart) -> None: + def add(self, part: ModelRequestPart | ModelResponsePart) -> None: """Add a new part, creating a new request or response message if necessary.""" last_message = self.messages[-1] if self.messages else None if isinstance(part, get_union_args(ModelRequestPart)): + part = cast(ModelRequestPart, part) if isinstance(last_message, ModelRequest): - last_message.parts = [*last_message.parts, cast(ModelRequestPart, part)] + last_message.parts = [*last_message.parts, part] else: self.messages.append(ModelRequest(parts=[part])) else: + part = cast(ModelResponsePart, part) if isinstance(last_message, ModelResponse): last_message.parts = [*last_message.parts, part] else: From 40f46953206fb0a61580b8c8f9a0db99181406e1 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Tue, 28 Oct 2025 20:51:52 +0000 Subject: [PATCH 37/46] coverage --- pydantic_ai_slim/pydantic_ai/messages.py | 2 +- .../pydantic_ai/ui/ag_ui/_event_stream.py | 4 +- .../pydantic_ai/ui/vercel_ai/_adapter.py | 66 ++- .../pydantic_ai/ui/vercel_ai/_event_stream.py | 38 +- tests/test_ag_ui.py | 169 ++++++- tests/test_ui.py | 58 ++- tests/test_vercel_ai.py | 443 ++++++++++++++++-- 7 files changed, 675 insertions(+), 105 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index a61b251a69..a9231c59bd 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -523,7 +523,7 @@ def from_data_uri(cls, data_uri: str) -> Self: if not data_uri.startswith(prefix): raise ValueError('Data URI must start with "data:"') # pragma: no cover media_type, data = data_uri[len(prefix) :].split(';base64,', 1) - return cls(data=base64.b64decode(data), media_type=media_type) + return cls.narrow_type(cls(data=base64.b64decode(data), media_type=media_type)) @pydantic.computed_field @property diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index 4049384f37..55d683dfe5 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -118,7 +118,7 @@ async def on_error(self, error: Exception) -> AsyncIterator[BaseEvent]: async def handle_text_start(self, part: TextPart, follows_text: bool = False) -> AsyncIterator[BaseEvent]: """Handle a TextPart at start.""" if follows_text: - message_id = self.message_id # TODO (DouweM): coverage + message_id = self.message_id else: message_id = self.new_message_id() yield TextMessageStartEvent(message_id=message_id) @@ -133,7 +133,7 @@ async def handle_text_delta(self, delta: 
TextPartDelta) -> AsyncIterator[BaseEve async def handle_text_end(self, part: TextPart, followed_by_text: bool = False) -> AsyncIterator[BaseEvent]: """Handle a TextPart at end.""" - if not followed_by_text: # TODO (DouweM): coverage branch + if not followed_by_text: yield TextMessageEndEvent(message_id=self.message_id) async def handle_thinking_start( diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py index 5adb1b76c3..3ac40f8f68 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py @@ -6,7 +6,8 @@ from dataclasses import dataclass from functools import cached_property -from ...agent import AgentDepsT +from typing_extensions import assert_never + from ...messages import ( AudioUrl, BinaryContent, @@ -22,10 +23,12 @@ ThinkingPart, ToolCallPart, ToolReturnPart, + UserContent, UserPromptPart, VideoUrl, ) from ...output import OutputDataT +from ...tools import AgentDepsT from ..adapter import UIAdapter from ..event_stream import UIEventStream from ..messages_builder import MessagesBuilder @@ -36,6 +39,9 @@ FileUIPart, ReasoningUIPart, RequestData, + SourceDocumentUIPart, + SourceUrlUIPart, + StepStartUIPart, TextUIPart, ToolOutputAvailablePart, ToolOutputErrorPart, @@ -90,14 +96,16 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: # builder = MessagesBuilder() for msg in messages: - if msg.role in ('system', 'user'): + if msg.role == 'system': + for part in msg.parts: + if isinstance(part, TextUIPart): # pragma: no branch + builder.add(SystemPromptPart(content=part.text)) + elif msg.role == 'user': + user_prompt_content: str | list[UserContent] = [] for part in msg.parts: if isinstance(part, TextUIPart): - if msg.role == 'system': - builder.add(SystemPromptPart(content=part.text)) # TODO (DouweM): coverage - else: - builder.add(UserPromptPart(content=part.text)) - elif isinstance(part, FileUIPart): # TODO (DouweM): coverage + user_prompt_content.append(part.text) + elif isinstance(part, FileUIPart): try: file = BinaryContent.from_data_uri(part.url) except ValueError: @@ -111,28 +119,30 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: # file = AudioUrl(url=part.url, media_type=part.media_type) case _: file = DocumentUrl(url=part.url, media_type=part.media_type) - builder.add(UserPromptPart(content=[file])) + user_prompt_content.append(file) - elif msg.role == 'assistant': # TODO (DouweM): coverage branch + if user_prompt_content: # pragma: no branch + if len(user_prompt_content) == 1 and isinstance(user_prompt_content[0], str): + user_prompt_content = user_prompt_content[0] + builder.add(UserPromptPart(content=user_prompt_content)) + + elif msg.role == 'assistant': for part in msg.parts: if isinstance(part, TextUIPart): builder.add(TextPart(content=part.text)) elif isinstance(part, ReasoningUIPart): - builder.add(ThinkingPart(content=part.text)) # TODO (DouweM): coverage - elif isinstance(part, FileUIPart): # TODO (DouweM): coverage + builder.add(ThinkingPart(content=part.text)) + elif isinstance(part, FileUIPart): try: file = BinaryContent.from_data_uri(part.url) - except ValueError as e: + except ValueError as e: # pragma: no cover # We don't yet handle non-data-URI file URLs returned by assistants, as no Pydantic AI models do this. raise ValueError( 'Vercel AI integration can currently only handle assistant file parts with data URIs.' 
) from e builder.add(FilePart(content=file)) - elif isinstance(part, DataUIPart): - # Not currently supported - pass - elif isinstance(part, ToolUIPart | DynamicToolUIPart): # TODO (DouweM): coverage branch - if isinstance(part, DynamicToolUIPart): # TODO (DouweM): coverage + elif isinstance(part, ToolUIPart | DynamicToolUIPart): + if isinstance(part, DynamicToolUIPart): tool_name = part.tool_name builtin_tool = False else: @@ -142,7 +152,7 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: # tool_call_id = part.tool_call_id args = part.input - if builtin_tool: # TODO (DouweM): coverage + if builtin_tool: call_part = BuiltinToolCallPart(tool_name=tool_name, tool_call_id=tool_call_id, args=args) builder.add(call_part) @@ -150,7 +160,7 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: # if part.state == 'output-available': output = part.output else: - output = part.error_text + output = {'error_text': part.error_text, 'is_error': True} provider_name = ( (part.call_provider_metadata or {}).get('pydantic_ai', {}).get('provider_name') @@ -172,11 +182,27 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: # builder.add( ToolReturnPart(tool_name=tool_name, tool_call_id=tool_call_id, content=part.output) ) - elif part.state == 'output-error': # TODO (DouweM): coverage + elif part.state == 'output-error': builder.add( RetryPromptPart( tool_name=tool_name, tool_call_id=tool_call_id, content=part.error_text ) ) + elif isinstance(part, DataUIPart): + # Contains custom data that shouldn't be sent to the model + pass + elif isinstance(part, SourceUrlUIPart): + # TODO: Once we support citations: https://github.com/pydantic/pydantic-ai/issues/3126 + pass + elif isinstance(part, SourceDocumentUIPart): + # TODO: Once we support citations: https://github.com/pydantic/pydantic-ai/issues/3126 + pass + elif isinstance(part, StepStartUIPart): + # Nothing to do here + pass + else: + assert_never(part) + else: + assert_never(msg.role) return builder.messages diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py index 3952e3f131..2b1386a00c 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py @@ -80,19 +80,15 @@ async def before_stream(self) -> AsyncIterator[BaseChunk]: async def before_response(self) -> AsyncIterator[BaseChunk]: """Yield events before the request is processed.""" + if self._step_started: + yield FinishStepChunk() + self._step_started = True yield StartStepChunk() - async def after_request(self) -> AsyncIterator[BaseChunk]: - """Yield events after the response is processed.""" - if self._step_started: # TODO (DouweM): coverage - yield FinishStepChunk() - self._step_started = False - async def after_stream(self) -> AsyncIterator[BaseChunk]: """Yield events after agent streaming completes.""" - if self._step_started: # TODO (DouweM): coverage branch - yield FinishStepChunk() + yield FinishStepChunk() yield FinishChunk() yield DoneChunk() @@ -104,22 +100,22 @@ async def on_error(self, error: Exception) -> AsyncIterator[BaseChunk]: async def handle_text_start(self, part: TextPart, follows_text: bool = False) -> AsyncIterator[BaseChunk]: """Handle a TextPart at start.""" if follows_text: - message_id = self.message_id # TODO (DouweM): coverage + message_id = self.message_id else: message_id = self.new_message_id() yield TextStartChunk(id=message_id) - if 
part.content: # TODO (DouweM): coverage branch + if part.content: yield TextDeltaChunk(id=message_id, delta=part.content) async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[BaseChunk]: """Handle a TextPartDelta.""" - if delta.content_delta: # TODO (DouweM): coverage branch + if delta.content_delta: # pragma: no branch yield TextDeltaChunk(id=self.message_id, delta=delta.content_delta) async def handle_text_end(self, part: TextPart, followed_by_text: bool = False) -> AsyncIterator[BaseChunk]: """Handle a TextPart at end.""" - if not followed_by_text: # TODO (DouweM): coverage branch + if not followed_by_text: yield TextEndChunk(id=self.message_id) async def handle_thinking_start( @@ -129,11 +125,11 @@ async def handle_thinking_start( message_id = self.new_message_id() yield ReasoningStartChunk(id=message_id) if part.content: - yield ReasoningDeltaChunk(id=message_id, delta=part.content) # TODO (DouweM): coverage + yield ReasoningDeltaChunk(id=message_id, delta=part.content) async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[BaseChunk]: """Handle a ThinkingPartDelta.""" - if delta.content_delta: # TODO (DouweM): coverage + if delta.content_delta: # pragma: no branch yield ReasoningDeltaChunk(id=self.message_id, delta=delta.content_delta) async def handle_thinking_end( @@ -164,9 +160,7 @@ async def _handle_tool_call_start( provider_executed=provider_executed, ) if part.args: - yield ToolInputDeltaChunk( - tool_call_id=tool_call_id, input_text_delta=part.args_as_json_str() - ) # TODO (DouweM): coverage + yield ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=part.args_as_json_str()) async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[BaseChunk]: """Handle a ToolCallPartDelta.""" @@ -179,9 +173,7 @@ async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterato async def handle_tool_call_end(self, part: ToolCallPart) -> AsyncIterator[BaseChunk]: """Handle a ToolCallPart at end.""" - yield ToolInputAvailableChunk( - tool_call_id=part.tool_call_id, tool_name=part.tool_name, input=part.args - ) # TODO (DouweM): coverage + yield ToolInputAvailableChunk(tool_call_id=part.tool_call_id, tool_name=part.tool_name, input=part.args) async def handle_builtin_tool_call_end(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseChunk]: """Handle a BuiltinToolCallPart at end.""" @@ -204,11 +196,9 @@ async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> Async async def handle_file(self, part: FilePart) -> AsyncIterator[BaseChunk]: """Handle a FilePart.""" file = part.content - yield FileChunk(url=file.data_uri, media_type=file.media_type) # TODO (DouweM): coverage + yield FileChunk(url=file.data_uri, media_type=file.media_type) - async def handle_function_tool_result( - self, event: FunctionToolResultEvent - ) -> AsyncIterator[BaseChunk]: # TODO (DouweM): coverage + async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[BaseChunk]: """Handle a FunctionToolResultEvent, emitting tool result events.""" result = event.result if isinstance(result, RetryPromptPart): diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index 0844b7372e..07d18bcfac 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -4,7 +4,7 @@ import json import uuid -from collections.abc import AsyncIterator +from collections.abc import AsyncIterator, MutableMapping from dataclasses import dataclass from http import HTTPStatus from typing import Any @@ -22,8 +22,12 
@@ ModelMessage, ModelRequest, ModelResponse, + PartDeltaEvent, + PartEndEvent, + PartStartEvent, SystemPromptPart, TextPart, + TextPartDelta, ToolCallPart, ToolReturn, ToolReturnPart, @@ -66,14 +70,18 @@ UserMessage, ) from ag_ui.encoder import EventEncoder + from starlette.requests import Request + from starlette.responses import StreamingResponse from pydantic_ai.ag_ui import ( SSE_CONTENT_TYPE, AGUIAdapter, OnCompleteFunc, StateDeps, + handle_ag_ui_request, run_ag_ui, ) + from pydantic_ai.ui.ag_ui import AGUIEventStream pytestmark = [ @@ -1596,3 +1604,162 @@ async def stream_function( }, ] ) + + +async def test_event_stream_back_to_back_text(): + async def event_generator(): + yield PartStartEvent(index=0, part=TextPart(content='Hello')) + yield PartDeltaEvent(index=0, delta=TextPartDelta(content_delta=' world')) + yield PartEndEvent(index=0, part=TextPart(content='Hello world'), next_part_kind='text') + yield PartStartEvent(index=1, part=TextPart(content='Goodbye'), previous_part_kind='text') + yield PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' world')) + yield PartEndEvent(index=1, part=TextPart(content='Goodbye world')) + + run_input = create_input( + UserMessage( + id='msg_1', + content='Tell me about Hello World', + ), + ) + event_stream = AGUIEventStream(run_input=run_input) + events = [ + json.loads(event.removeprefix('data: ')) + async for event in event_stream.encode_stream(event_stream.transform_stream(event_generator())) + ] + + assert events == snapshot( + [ + { + 'type': 'RUN_STARTED', + 'threadId': (thread_id := IsSameStr()), + 'runId': (run_id := IsSameStr()), + }, + {'type': 'TEXT_MESSAGE_START', 'messageId': (message_id := IsSameStr()), 'role': 'assistant'}, + {'type': 'TEXT_MESSAGE_CONTENT', 'messageId': message_id, 'delta': 'Hello'}, + {'type': 'TEXT_MESSAGE_CONTENT', 'messageId': message_id, 'delta': ' world'}, + {'type': 'TEXT_MESSAGE_CONTENT', 'messageId': message_id, 'delta': 'Goodbye'}, + {'type': 'TEXT_MESSAGE_CONTENT', 'messageId': message_id, 'delta': ' world'}, + {'type': 'TEXT_MESSAGE_END', 'messageId': message_id}, + { + 'type': 'RUN_FINISHED', + 'threadId': thread_id, + 'runId': run_id, + }, + ] + ) + + +async def test_handle_ag_ui_request(): + agent = Agent(model=TestModel()) + run_input = create_input( + UserMessage( + id='msg_1', + content='Tell me about Hello World', + ), + ) + + async def receive() -> dict[str, Any]: + return {'type': 'http.request', 'body': run_input.model_dump_json().encode('utf-8')} + + starlette_request = Request( + scope={ + 'type': 'http', + 'method': 'POST', + 'headers': [ + (b'content-type', b'application/json'), + ], + }, + receive=receive, + ) + + response = await handle_ag_ui_request(agent, starlette_request) + + assert isinstance(response, StreamingResponse) + + chunks: list[MutableMapping[str, Any]] = [] + + async def send(data: MutableMapping[str, Any]) -> None: + if body := data.get('body'): + data['body'] = json.loads(body.decode('utf-8').removeprefix('data: ')) + chunks.append(data) + + await response.stream_response(send) + + assert chunks == snapshot( + [ + { + 'type': 'http.response.start', + 'status': 200, + 'headers': [(b'content-type', b'text/event-stream; charset=utf-8')], + }, + { + 'type': 'http.response.body', + 'body': { + 'type': 'RUN_STARTED', + 'threadId': (thread_id := IsSameStr()), + 'runId': (run_id := IsSameStr()), + }, + 'more_body': True, + }, + { + 'type': 'http.response.body', + 'body': { + 'type': 'TEXT_MESSAGE_START', + 'messageId': (message_id := IsSameStr()), + 'role': 
'assistant', + }, + 'more_body': True, + }, + { + 'type': 'http.response.body', + 'body': { + 'type': 'TEXT_MESSAGE_CONTENT', + 'messageId': message_id, + 'delta': 'success ', + }, + 'more_body': True, + }, + { + 'type': 'http.response.body', + 'body': { + 'type': 'TEXT_MESSAGE_CONTENT', + 'messageId': message_id, + 'delta': '(no ', + }, + 'more_body': True, + }, + { + 'type': 'http.response.body', + 'body': { + 'type': 'TEXT_MESSAGE_CONTENT', + 'messageId': message_id, + 'delta': 'tool ', + }, + 'more_body': True, + }, + { + 'type': 'http.response.body', + 'body': { + 'type': 'TEXT_MESSAGE_CONTENT', + 'messageId': message_id, + 'delta': 'calls)', + }, + 'more_body': True, + }, + { + 'type': 'http.response.body', + 'body': {'type': 'TEXT_MESSAGE_END', 'messageId': message_id}, + 'more_body': True, + }, + { + 'type': 'http.response.body', + 'body': { + 'type': 'RUN_FINISHED', + 'threadId': thread_id, + 'runId': run_id, + }, + 'more_body': True, + }, + {'type': 'http.response.body', 'body': b'', 'more_body': False}, + ] + ) diff --git a/tests/test_ui.py b/tests/test_ui.py index ef345f6456..251b231a70 100644 --- a/tests/test_ui.py +++ b/tests/test_ui.py @@ -251,6 +251,35 @@ async def stream_function( ) +async def test_event_stream_back_to_back_text(): + async def event_generator(): + yield PartStartEvent(index=0, part=TextPart(content='Hello')) + yield PartDeltaEvent(index=0, delta=TextPartDelta(content_delta=' world')) + yield PartEndEvent(index=0, part=TextPart(content='Hello world'), next_part_kind='text') + yield PartStartEvent(index=1, part=TextPart(content='Goodbye'), previous_part_kind='text') + yield PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' world')) + yield PartEndEvent(index=1, part=TextPart(content='Goodbye world')) + + request = DummyUIRunInput(messages=[ModelRequest.user_text_prompt('Hello')]) + event_stream = DummyUIEventStream(run_input=request) + events = [event async for event in event_stream.transform_stream(event_generator())] + + assert events == snapshot( + [ + '', + '', + 'Hello', + ' world', + '', + 'Goodbye', + ' world', + '', + '', + '', + ] + ) + + async def test_run_stream_builtin_tool_call(): async def stream_function( messages: list[ModelMessage], agent_info: AgentInfo @@ -421,22 +450,19 @@ async def test_run_stream_output_tool(): async def stream_function( messages: list[ModelMessage], agent_info: AgentInfo ) -> AsyncIterator[DeltaToolCalls | str]: - if len(messages) == 1: - yield { - 0: DeltaToolCall( - name='final_result', - json_args='{"query":', - tool_call_id='search_1', - ) - } - yield { - 0: DeltaToolCall( - json_args='"Hello world"}', - tool_call_id='search_1', - ) - } - else: - yield 'A "Hello, World!" program is usually a simple computer program that emits (or displays) to the screen (often the console) a message similar to "Hello, World!". 
' + yield { + 0: DeltaToolCall( + name='final_result', + json_args='{"query":', + tool_call_id='search_1', + ) + } + yield { + 0: DeltaToolCall( + json_args='"Hello world"}', + tool_call_id='search_1', + ) + } def web_search(query: str) -> dict[str, list[dict[str, str]]]: return { diff --git a/tests/test_vercel_ai.py b/tests/test_vercel_ai.py index e8ebd11d82..3899e7d470 100644 --- a/tests/test_vercel_ai.py +++ b/tests/test_vercel_ai.py @@ -2,26 +2,40 @@ import json from collections.abc import AsyncIterator, MutableMapping +from http import HTTPStatus from typing import Any, cast +import httpx import pytest +from asgi_lifespan import LifespanManager from inline_snapshot import snapshot from pydantic_ai import Agent from pydantic_ai.builtin_tools import WebSearchTool from pydantic_ai.messages import ( + AudioUrl, + BinaryContent, BinaryImage, BuiltinToolCallPart, BuiltinToolReturnPart, + DocumentUrl, FilePart, + ImageUrl, ModelMessage, ModelRequest, ModelResponse, + PartDeltaEvent, + PartEndEvent, PartStartEvent, + RetryPromptPart, + SystemPromptPart, TextPart, + TextPartDelta, + ThinkingPart, ToolCallPart, ToolReturnPart, UserPromptPart, + VideoUrl, ) from pydantic_ai.models.function import ( AgentInfo, @@ -34,16 +48,22 @@ ) from pydantic_ai.models.test import TestModel from pydantic_ai.run import AgentRunResult -from pydantic_ai.ui.vercel_ai import VercelAIAdapter, VercelAIEventStream +from pydantic_ai.ui import SSE_CONTENT_TYPE +from pydantic_ai.ui.vercel_ai import VercelAIAdapter, VercelAIApp, VercelAIEventStream from pydantic_ai.ui.vercel_ai._request_types import ( + DynamicToolOutputAvailablePart, + FileUIPart, + ReasoningUIPart, SubmitMessage, TextUIPart, + ToolInputAvailablePart, ToolOutputAvailablePart, + ToolOutputErrorPart, UIMessage, ) from pydantic_ai.ui.vercel_ai._response_types import BaseChunk, DataChunk -from .conftest import IsDatetime, IsStr, try_import +from .conftest import IsDatetime, IsSameStr, IsStr, try_import with try_import() as starlette_import_successful: from starlette.requests import Request @@ -78,25 +98,19 @@ async def test_run(allow_model_requests: None, openai_api_key: str): UIMessage( id='BeuwNtYIjJuniHbR', role='user', - metadata=None, parts=[ TextUIPart( - type='text', text="""Use a tool """, - state=None, - provider_metadata=None, ) ], ), UIMessage( id='bylfKVeyoR901rax', role='assistant', - metadata=None, parts=[ TextUIPart( - type='text', text='''I\'d be happy to help you use a tool! However, I need more information about what you\'d like to do. I have access to tools for searching and retrieving documentation for two products: 1. **Pydantic AI** (pydantic-ai) - an open source agent framework library @@ -113,26 +127,21 @@ async def test_run(allow_model_requests: None, openai_api_key: str): For example, you could ask something like "How do I get started with Pydantic AI?" 
or "Show me the table of contents for Logfire documentation."''', state='streaming', - provider_metadata=None, ) ], ), UIMessage( id='MTdh4Ie641kDuIRh', role='user', - metadata=None, parts=[TextUIPart(type='text', text='Give me the ToCs', state=None, provider_metadata=None)], ), UIMessage( id='3XlOBgFwaf7GsS4l', role='assistant', - metadata=None, parts=[ TextUIPart( - type='text', text="I'll get the table of contents for both repositories.", state='streaming', - provider_metadata=None, ), ToolOutputAvailablePart( type='tool-get_table_of_contents', @@ -140,9 +149,6 @@ async def test_run(allow_model_requests: None, openai_api_key: str): state='output-available', input={'repo': 'pydantic-ai'}, output="[Scrubbed due to 'API Key']", - provider_executed=None, - call_provider_metadata=None, - preliminary=None, ), ToolOutputAvailablePart( type='tool-get_table_of_contents', @@ -150,28 +156,19 @@ async def test_run(allow_model_requests: None, openai_api_key: str): state='output-available', input={'repo': 'logfire'}, output="[Scrubbed due to 'Auth']", - provider_executed=None, - call_provider_metadata=None, - preliminary=None, ), TextUIPart( - type='text', text="""Here are the Table of Contents for both repositories:... Both products are designed to work together - Pydantic AI for building AI agents and Logfire for observing and monitoring them in production.""", state='streaming', - provider_metadata=None, ), ], ), UIMessage( id='QVypsUU4swQ1Loxq', role='user', - metadata=None, parts=[ TextUIPart( - type='text', text='How do I get FastAPI instrumentation to include the HTTP request and response', - state=None, - provider_metadata=None, ) ], ), @@ -1111,6 +1108,48 @@ async def stream_function( ) +async def test_event_stream_back_to_back_text(): + async def event_generator(): + yield PartStartEvent(index=0, part=TextPart(content='Hello')) + yield PartDeltaEvent(index=0, delta=TextPartDelta(content_delta=' world')) + yield PartEndEvent(index=0, part=TextPart(content='Hello world'), next_part_kind='text') + yield PartStartEvent(index=1, part=TextPart(content='Goodbye'), previous_part_kind='text') + yield PartDeltaEvent(index=1, delta=TextPartDelta(content_delta=' world')) + yield PartEndEvent(index=1, part=TextPart(content='Goodbye world')) + + request = SubmitMessage( + id='foo', + messages=[ + UIMessage( + id='bar', + role='user', + parts=[TextUIPart(text='Hello')], + ), + ], + ) + event_stream = VercelAIEventStream(run_input=request) + events = [ + '[DONE]' if '[DONE]' in event else json.loads(event.removeprefix('data: ')) + async for event in event_stream.encode_stream(event_stream.transform_stream(event_generator())) + ] + + assert events == snapshot( + [ + {'type': 'start'}, + {'type': 'start-step'}, + {'type': 'text-start', 'id': (message_id := IsSameStr())}, + {'type': 'text-delta', 'delta': 'Hello', 'id': message_id}, + {'type': 'text-delta', 'delta': ' world', 'id': message_id}, + {'type': 'text-delta', 'delta': 'Goodbye', 'id': message_id}, + {'type': 'text-delta', 'delta': ' world', 'id': message_id}, + {'type': 'text-end', 'id': message_id}, + {'type': 'finish-step'}, + {'type': 'finish'}, + '[DONE]', + ] + ) + + async def test_run_stream_builtin_tool_call(): async def stream_function( messages: list[ModelMessage], agent_info: AgentInfo @@ -1333,22 +1372,19 @@ async def test_run_stream_output_tool(): async def stream_function( messages: list[ModelMessage], agent_info: AgentInfo ) -> AsyncIterator[DeltaToolCalls | str]: - if len(messages) == 1: - yield { - 0: DeltaToolCall( - 
name='final_result', - json_args='{"query":', - tool_call_id='search_1', - ) - } - yield { - 0: DeltaToolCall( - json_args='"Hello world"}', - tool_call_id='search_1', - ) - } - else: - yield 'A "Hello, World!" program is usually a simple computer program that emits (or displays) to the screen (often the console) a message similar to "Hello, World!". ' + yield { + 0: DeltaToolCall( + name='final_result', + json_args='{"query":', + tool_call_id='search_1', + ) + } + yield { + 0: DeltaToolCall( + json_args='"Hello world"}', + tool_call_id='search_1', + ) + } def web_search(query: str) -> dict[str, list[dict[str, str]]]: return { @@ -1655,3 +1691,328 @@ async def send(data: MutableMapping[str, Any]) -> None: '[DONE]', ] ) + + +async def test_app(): + agent = Agent(model=TestModel()) + + run_input = SubmitMessage( + id='foo', + messages=[ + UIMessage( + id='bar', + role='user', + parts=[TextUIPart(text='Hello')], + ), + ], + ) + + app = VercelAIApp(agent) + async with LifespanManager(app): + transport = httpx.ASGITransport(app) + async with httpx.AsyncClient(transport=transport) as client: + client.base_url = 'http://localhost:8000' + async with client.stream( + 'POST', + '/', + content=run_input.model_dump_json(), + headers={'Content-Type': 'application/json', 'Accept': SSE_CONTENT_TYPE}, + ) as response: + assert response.status_code == HTTPStatus.OK, f'Unexpected status code: {response.status_code}' + events: list[str | dict[str, Any]] = [] + async for event in response.aiter_lines(): + if event: + events.append('[DONE]' if '[DONE]' in event else json.loads(event.removeprefix('data: '))) + + assert events == snapshot( + [ + {'type': 'start'}, + {'type': 'start-step'}, + {'type': 'text-start', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'success ', 'id': IsStr()}, + {'type': 'text-delta', 'delta': '(no ', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'tool ', 'id': IsStr()}, + {'type': 'text-delta', 'delta': 'calls)', 'id': IsStr()}, + {'type': 'text-end', 'id': IsStr()}, + {'type': 'finish-step'}, + {'type': 'finish'}, + '[DONE]', + ] + ) + + +async def test_adapter_load_messages(): + data = SubmitMessage( + trigger='submit-message', + id='bvQXcnrJ4OA2iRKU', + messages=[ + UIMessage( + id='foobar', + role='system', + parts=[ + TextUIPart( + text='You are a helpful assistant.', + ), + ], + ), + UIMessage( + id='BeuwNtYIjJuniHbR', + role='user', + parts=[ + TextUIPart( + text='Here are some files:', + ), + FileUIPart( + media_type='image/png', + url='data:image/png;base64,ZmFrZQ==', + ), + FileUIPart( + media_type='image/png', + url='https://example.com/image.png', + ), + FileUIPart( + media_type='video/mp4', + url='https://example.com/video.mp4', + ), + FileUIPart( + media_type='audio/mpeg', + url='https://example.com/audio.mp3', + ), + FileUIPart( + media_type='application/pdf', + url='https://example.com/document.pdf', + ), + ], + ), + UIMessage( + id='bylfKVeyoR901rax', + role='assistant', + parts=[ + ReasoningUIPart( + text='I should tell the user how nice those files are and share another one', + ), + TextUIPart( + text='Nice files, here is another one:', + state='streaming', + ), + FileUIPart( + media_type='image/png', + url='data:image/png;base64,ZmFrZQ==', + ), + ], + ), + UIMessage( + id='MTdh4Ie641kDuIRh', + role='user', + parts=[TextUIPart(type='text', text='Give me the ToCs', state=None, provider_metadata=None)], + ), + UIMessage( + id='3XlOBgFwaf7GsS4l', + role='assistant', + parts=[ + TextUIPart( + text="I'll get the table of contents for both repositories.", + 
state='streaming', + ), + ToolOutputAvailablePart( + type='tool-get_table_of_contents', + tool_call_id='toolu_01XX3rjFfG77h3KCbVHoYJMQ', + input={'repo': 'pydantic'}, + output="[Scrubbed due to 'API Key']", + ), + DynamicToolOutputAvailablePart( + tool_name='get_table_of_contents', + tool_call_id='toolu_01XX3rjFfG77h3KCbVHoY', + input={'repo': 'pydantic-ai'}, + output="[Scrubbed due to 'API Key']", + ), + ToolOutputErrorPart( + type='tool-get_table_of_contents', + tool_call_id='toolu_01W2yGpGQcMx7pXV2zZ4sz9g', + input={'repo': 'logfire'}, + error_text="Can't do that", + ), + ToolOutputAvailablePart( + type='tool-web_search', + tool_call_id='toolu_01W2yGpGQcMx7pXV2zZ4s', + input={'query': 'What is Logfire?'}, + output="[Scrubbed due to 'Auth']", + provider_executed=True, + call_provider_metadata={'pydantic_ai': {'provider_name': 'openai'}}, + ), + ToolOutputErrorPart( + type='tool-web_search', + tool_call_id='toolu_01W2yGpGQcMx7pXV2z', + input={'query': 'What is Logfire?'}, + error_text="Can't do that", + provider_executed=True, + call_provider_metadata={'pydantic_ai': {'provider_name': 'openai'}}, + ), + TextUIPart( + text="""Here are the Table of Contents for both repositories:... Both products are designed to work together - Pydantic AI for building AI agents and Logfire for observing and monitoring them in production.""", + state='streaming', + ), + FileUIPart( + media_type='application/pdf', + url='data:application/pdf;base64,ZmFrZQ==', + ), + ToolInputAvailablePart( + type='tool-get_table_of_contents', + tool_call_id='toolu_01XX3rjFfG77h', + input={'repo': 'pydantic'}, + ), + ToolInputAvailablePart( + type='tool-web_search', + tool_call_id='toolu_01W2yGpGQcMx7pXV2zZ4s', + input={'query': 'What is Logfire?'}, + provider_executed=True, + ), + ], + ), + ], + ) + + messages = VercelAIAdapter.load_messages(data.messages) + assert messages == snapshot( + [ + ModelRequest( + parts=[ + SystemPromptPart( + content='You are a helpful assistant.', + timestamp=IsDatetime(), + ), + UserPromptPart( + content=[ + 'Here are some files:', + BinaryImage(data=b'fake', media_type='image/png'), + ImageUrl(url='https://example.com/image.png', _media_type='image/png'), + VideoUrl(url='https://example.com/video.mp4', _media_type='video/mp4'), + AudioUrl(url='https://example.com/audio.mp3', _media_type='audio/mpeg'), + DocumentUrl(url='https://example.com/document.pdf', _media_type='application/pdf'), + ], + timestamp=IsDatetime(), + ), + ] + ), + ModelResponse( + parts=[ + ThinkingPart(content='I should tell the user how nice those files are and share another one'), + TextPart(content='Nice files, here is another one:'), + FilePart(content=BinaryImage(data=b'fake', media_type='image/png')), + ], + timestamp=IsDatetime(), + ), + ModelRequest( + parts=[ + UserPromptPart( + content='Give me the ToCs', + timestamp=IsDatetime(), + ) + ] + ), + ModelResponse( + parts=[ + TextPart(content="I'll get the table of contents for both repositories."), + ToolCallPart( + tool_name='get_table_of_contents', + args={'repo': 'pydantic'}, + tool_call_id='toolu_01XX3rjFfG77h3KCbVHoYJMQ', + ), + ], + timestamp=IsDatetime(), + ), + ModelRequest( + parts=[ + ToolReturnPart( + tool_name='get_table_of_contents', + content="[Scrubbed due to 'API Key']", + tool_call_id='toolu_01XX3rjFfG77h3KCbVHoYJMQ', + timestamp=IsDatetime(), + ) + ] + ), + ModelResponse( + parts=[ + ToolCallPart( + tool_name='get_table_of_contents', + args={'repo': 'pydantic-ai'}, + tool_call_id='toolu_01XX3rjFfG77h3KCbVHoY', + ) + ], + timestamp=IsDatetime(), + ), 
+ ModelRequest( + parts=[ + ToolReturnPart( + tool_name='get_table_of_contents', + content="[Scrubbed due to 'API Key']", + tool_call_id='toolu_01XX3rjFfG77h3KCbVHoY', + timestamp=IsDatetime(), + ) + ] + ), + ModelResponse( + parts=[ + ToolCallPart( + tool_name='get_table_of_contents', + args={'repo': 'logfire'}, + tool_call_id='toolu_01W2yGpGQcMx7pXV2zZ4sz9g', + ) + ], + timestamp=IsDatetime(), + ), + ModelRequest( + parts=[ + RetryPromptPart( + content="Can't do that", + tool_name='get_table_of_contents', + tool_call_id='toolu_01W2yGpGQcMx7pXV2zZ4sz9g', + timestamp=IsDatetime(), + ) + ] + ), + ModelResponse( + parts=[ + BuiltinToolCallPart( + tool_name='web_search', + args={'query': 'What is Logfire?'}, + tool_call_id='toolu_01W2yGpGQcMx7pXV2zZ4s', + provider_name='openai', + ), + BuiltinToolReturnPart( + tool_name='web_search', + content="[Scrubbed due to 'Auth']", + tool_call_id='toolu_01W2yGpGQcMx7pXV2zZ4s', + timestamp=IsDatetime(), + provider_name='openai', + ), + BuiltinToolCallPart( + tool_name='web_search', + args={'query': 'What is Logfire?'}, + tool_call_id='toolu_01W2yGpGQcMx7pXV2z', + provider_name='openai', + ), + BuiltinToolReturnPart( + tool_name='web_search', + content={'error_text': "Can't do that", 'is_error': True}, + tool_call_id='toolu_01W2yGpGQcMx7pXV2z', + timestamp=IsDatetime(), + provider_name='openai', + ), + TextPart( + content='Here are the Table of Contents for both repositories:... Both products are designed to work together - Pydantic AI for building AI agents and Logfire for observing and monitoring them in production.' + ), + FilePart(content=BinaryContent(data=b'fake', media_type='application/pdf')), + ToolCallPart( + tool_name='get_table_of_contents', args={'repo': 'pydantic'}, tool_call_id='toolu_01XX3rjFfG77h' + ), + BuiltinToolCallPart( + tool_name='web_search', + args={'query': 'What is Logfire?'}, + tool_call_id='toolu_01W2yGpGQcMx7pXV2zZ4s', + ), + ], + timestamp=IsDatetime(), + ), + ] + ) From f8be256234d4dda3d0bac0579181613d474b52be Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Tue, 28 Oct 2025 20:58:41 +0000 Subject: [PATCH 38/46] fix lint --- pydantic_ai_slim/pydantic_ai/messages.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index a9231c59bd..89256d626c 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -13,7 +13,7 @@ import pydantic_core from genai_prices import calc_price, types as genai_types from opentelemetry._events import Event # pyright: ignore[reportPrivateImportUsage] -from typing_extensions import Self, deprecated +from typing_extensions import deprecated from . 
import _otel_messages, _utils from ._utils import generate_tool_call_id as _generate_tool_call_id, now_utc as _now_utc @@ -517,7 +517,7 @@ def narrow_type(bc: BinaryContent) -> BinaryContent | BinaryImage: return bc # pragma: no cover @classmethod - def from_data_uri(cls, data_uri: str) -> Self: + def from_data_uri(cls, data_uri: str) -> BinaryContent: """Create a `BinaryContent` from a data URI.""" prefix = 'data:' if not data_uri.startswith(prefix): From d8345830cf06c58a64e4f136f253f3eb4125332e Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Tue, 28 Oct 2025 21:03:18 +0000 Subject: [PATCH 39/46] Reset chat app example --- examples/pydantic_ai_examples/chat_app.py | 239 +++++++++++++----- .../pydantic_ai_examples/sqlite_database.py | 82 ------ 2 files changed, 179 insertions(+), 142 deletions(-) delete mode 100644 examples/pydantic_ai_examples/sqlite_database.py diff --git a/examples/pydantic_ai_examples/chat_app.py b/examples/pydantic_ai_examples/chat_app.py index 30ba06f3da..8b1c4073a1 100644 --- a/examples/pydantic_ai_examples/chat_app.py +++ b/examples/pydantic_ai_examples/chat_app.py @@ -7,97 +7,216 @@ from __future__ import annotations as _annotations +import asyncio +import json +import sqlite3 +from collections.abc import AsyncIterator, Callable +from concurrent.futures.thread import ThreadPoolExecutor from contextlib import asynccontextmanager from dataclasses import dataclass +from datetime import datetime, timezone +from functools import partial from pathlib import Path +from typing import Annotated, Any, Literal, TypeVar import fastapi import logfire -from fastapi import Depends, Request, Response - -from pydantic_ai import Agent, RunContext -from pydantic_ai.ui.vercel_ai import VercelAIAdapter - -from .sqlite_database import Database +from fastapi import Depends, Request +from fastapi.responses import FileResponse, Response, StreamingResponse +from typing_extensions import LiteralString, ParamSpec, TypedDict + +from pydantic_ai import ( + Agent, + ModelMessage, + ModelMessagesTypeAdapter, + ModelRequest, + ModelResponse, + TextPart, + UnexpectedModelBehavior, + UserPromptPart, +) # 'if-token-present' means nothing will be sent (and the example will work) if you don't have logfire configured logfire.configure(send_to_logfire='if-token-present') logfire.instrument_pydantic_ai() +agent = Agent('openai:gpt-4o') THIS_DIR = Path(__file__).parent -sql_schema = """ -create table if not exists memory( - id integer primary key, - user_id integer not null, - value text not null, - unique(user_id, value) -);""" @asynccontextmanager async def lifespan(_app: fastapi.FastAPI): - async with Database.connect(sql_schema) as db: + async with Database.connect() as db: yield {'db': db} -@dataclass -class Deps: - conn: Database - user_id: int - - -chat_agent = Agent( - 'openai:gpt-4.1', - deps_type=Deps, - instructions=""" -You are a helpful assistant. - -Always reply with markdown. ALWAYS use code fences for code examples and lines of code. -""", -) - - -@chat_agent.tool -async def record_memory(ctx: RunContext[Deps], value: str) -> str: - """Use this tool to store information in memory.""" - await ctx.deps.conn.execute( - 'insert into memory(user_id, value) values(?, ?) on conflict do nothing', - ctx.deps.user_id, - value, - commit=True, - ) - return 'Value added to memory.' 
+app = fastapi.FastAPI(lifespan=lifespan) +logfire.instrument_fastapi(app) -@chat_agent.tool -async def retrieve_memories(ctx: RunContext[Deps], memory_contains: str) -> str: - """Get all memories about the user.""" - rows = await ctx.deps.conn.fetchall( - 'select value from memory where user_id = ? and value like ?', - ctx.deps.user_id, - f'%{memory_contains}%', - ) - return '\n'.join([row[0] for row in rows]) +@app.get('/') +async def index() -> FileResponse: + return FileResponse((THIS_DIR / 'chat_app.html'), media_type='text/html') -app = fastapi.FastAPI(lifespan=lifespan) -logfire.instrument_fastapi(app) +@app.get('/chat_app.ts') +async def main_ts() -> FileResponse: + """Get the raw typescript code, it's compiled in the browser, forgive me.""" + return FileResponse((THIS_DIR / 'chat_app.ts'), media_type='text/plain') async def get_db(request: Request) -> Database: return request.state.db -@app.options('/api/chat') -def options_chat(): - pass +@app.get('/chat/') +async def get_chat(database: Database = Depends(get_db)) -> Response: + msgs = await database.get_messages() + return Response( + b'\n'.join(json.dumps(to_chat_message(m)).encode('utf-8') for m in msgs), + media_type='text/plain', + ) -@app.post('/api/chat') -async def get_chat(request: Request, database: Database = Depends(get_db)) -> Response: - return await VercelAIAdapter[Deps].dispatch_request( - request, agent=chat_agent, deps=Deps(database, 123) - ) +class ChatMessage(TypedDict): + """Format of messages sent to the browser.""" + + role: Literal['user', 'model'] + timestamp: str + content: str + + +def to_chat_message(m: ModelMessage) -> ChatMessage: + first_part = m.parts[0] + if isinstance(m, ModelRequest): + if isinstance(first_part, UserPromptPart): + assert isinstance(first_part.content, str) + return { + 'role': 'user', + 'timestamp': first_part.timestamp.isoformat(), + 'content': first_part.content, + } + elif isinstance(m, ModelResponse): + if isinstance(first_part, TextPart): + return { + 'role': 'model', + 'timestamp': m.timestamp.isoformat(), + 'content': first_part.content, + } + raise UnexpectedModelBehavior(f'Unexpected message type for chat app: {m}') + + +@app.post('/chat/') +async def post_chat( + prompt: Annotated[str, fastapi.Form()], database: Database = Depends(get_db) +) -> StreamingResponse: + async def stream_messages(): + """Streams new line delimited JSON `Message`s to the client.""" + # stream the user prompt so that can be displayed straight away + yield ( + json.dumps( + { + 'role': 'user', + 'timestamp': datetime.now(tz=timezone.utc).isoformat(), + 'content': prompt, + } + ).encode('utf-8') + + b'\n' + ) + # get the chat history so far to pass as context to the agent + messages = await database.get_messages() + # run the agent with the user prompt and the chat history + async with agent.run_stream(prompt, message_history=messages) as result: + async for text in result.stream_output(debounce_by=0.01): + # text here is a `str` and the frontend wants + # JSON encoded ModelResponse, so we create one + m = ModelResponse(parts=[TextPart(text)], timestamp=result.timestamp()) + yield json.dumps(to_chat_message(m)).encode('utf-8') + b'\n' + + # add new messages (e.g. the user prompt and the agent response in this case) to the database + await database.add_messages(result.new_messages_json()) + + return StreamingResponse(stream_messages(), media_type='text/plain') + + +P = ParamSpec('P') +R = TypeVar('R') + + +@dataclass +class Database: + """Rudimentary database to store chat messages in SQLite. 
+ + The SQLite standard library package is synchronous, so we + use a thread pool executor to run queries asynchronously. + """ + + con: sqlite3.Connection + _loop: asyncio.AbstractEventLoop + _executor: ThreadPoolExecutor + + @classmethod + @asynccontextmanager + async def connect( + cls, file: Path = THIS_DIR / '.chat_app_messages.sqlite' + ) -> AsyncIterator[Database]: + with logfire.span('connect to DB'): + loop = asyncio.get_event_loop() + executor = ThreadPoolExecutor(max_workers=1) + con = await loop.run_in_executor(executor, cls._connect, file) + slf = cls(con, loop, executor) + try: + yield slf + finally: + await slf._asyncify(con.close) + + @staticmethod + def _connect(file: Path) -> sqlite3.Connection: + con = sqlite3.connect(str(file)) + con = logfire.instrument_sqlite3(con) + cur = con.cursor() + cur.execute( + 'CREATE TABLE IF NOT EXISTS messages (id INT PRIMARY KEY, message_list TEXT);' + ) + con.commit() + return con + + async def add_messages(self, messages: bytes): + await self._asyncify( + self._execute, + 'INSERT INTO messages (message_list) VALUES (?);', + messages, + commit=True, + ) + await self._asyncify(self.con.commit) + + async def get_messages(self) -> list[ModelMessage]: + c = await self._asyncify( + self._execute, 'SELECT message_list FROM messages order by id' + ) + rows = await self._asyncify(c.fetchall) + messages: list[ModelMessage] = [] + for row in rows: + messages.extend(ModelMessagesTypeAdapter.validate_json(row[0])) + return messages + + def _execute( + self, sql: LiteralString, *args: Any, commit: bool = False + ) -> sqlite3.Cursor: + cur = self.con.cursor() + cur.execute(sql, args) + if commit: + self.con.commit() + return cur + + async def _asyncify( + self, func: Callable[P, R], *args: P.args, **kwargs: P.kwargs + ) -> R: + return await self._loop.run_in_executor( # type: ignore + self._executor, + partial(func, **kwargs), + *args, # type: ignore + ) if __name__ == '__main__': diff --git a/examples/pydantic_ai_examples/sqlite_database.py b/examples/pydantic_ai_examples/sqlite_database.py deleted file mode 100644 index 67127efeb3..0000000000 --- a/examples/pydantic_ai_examples/sqlite_database.py +++ /dev/null @@ -1,82 +0,0 @@ -from __future__ import annotations as _annotations - -import asyncio -import sqlite3 -from collections.abc import AsyncIterator, Callable -from concurrent.futures.thread import ThreadPoolExecutor -from contextlib import asynccontextmanager -from dataclasses import dataclass -from functools import partial -from pathlib import Path -from typing import Any, ParamSpec, TypeVar - -import logfire -from typing_extensions import LiteralString - -P = ParamSpec('P') -R = TypeVar('R') - - -@dataclass -class Database: - """Rudimentary database to store chat messages in SQLite. - - The SQLite standard library package is synchronous, so we - use a thread pool executor to run queries asynchronously. 
- """ - - con: sqlite3.Connection - _loop: asyncio.AbstractEventLoop - _executor: ThreadPoolExecutor - - @classmethod - @asynccontextmanager - async def connect( - cls, schema_sql: str, file: Path = Path('.chat_app_messages.sqlite') - ) -> AsyncIterator[Database]: - with logfire.span('connect to DB'): - loop = asyncio.get_event_loop() - executor = ThreadPoolExecutor(max_workers=1) - con = await loop.run_in_executor(executor, cls._connect, schema_sql, file) - slf = cls(con, loop, executor) - try: - yield slf - finally: - await slf._asyncify(con.close) - - @staticmethod - def _connect(schema_sql: str, file: Path) -> sqlite3.Connection: - con = sqlite3.connect(str(file)) - con = logfire.instrument_sqlite3(con) - cur = con.cursor() - cur.execute(schema_sql) - con.commit() - return con - - async def execute(self, sql: LiteralString, *args: Any, commit: bool = False): - await self._asyncify(self._execute, sql, *args, commit=True) - if commit: - await self._asyncify(self.con.commit) - - async def fetchall(self, sql: LiteralString, *args: Any) -> list[tuple[str, ...]]: - c = await self._asyncify(self._execute, sql, *args) - rows = await self._asyncify(c.fetchall) - return [tuple(row) for row in rows] - - def _execute( - self, sql: LiteralString, *args: Any, commit: bool = False - ) -> sqlite3.Cursor: - cur = self.con.cursor() - cur.execute(sql, args) - if commit: - self.con.commit() - return cur - - async def _asyncify( - self, func: Callable[P, R], *args: P.args, **kwargs: P.kwargs - ) -> R: - return await self._loop.run_in_executor( # type: ignore - self._executor, - partial(func, **kwargs), - *args, # type: ignore - ) From 3d628b84fb1a863754c2fd9a526ff25b4296340f Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Tue, 28 Oct 2025 21:47:00 +0000 Subject: [PATCH 40/46] AG-UI docs --- docs/ag-ui.md | 51 +++++++++++-------- .../ag_ui/api/agentic_chat.py | 5 +- .../ag_ui/api/agentic_generative_ui.py | 3 +- .../ag_ui/api/human_in_the_loop.py | 3 +- .../ag_ui/api/predictive_state_updates.py | 5 +- .../ag_ui/api/shared_state.py | 5 +- .../ag_ui/api/tool_based_generative_ui.py | 3 +- pydantic_ai_slim/pydantic_ai/ag_ui.py | 21 +++----- .../pydantic_ai/agent/abstract.py | 4 +- pydantic_ai_slim/pydantic_ai/ui/__init__.py | 2 - pydantic_ai_slim/pydantic_ai/ui/adapter.py | 3 +- .../pydantic_ai/ui/ag_ui/__init__.py | 15 ------ .../pydantic_ai/ui/ag_ui/_adapter.py | 12 +---- pydantic_ai_slim/pydantic_ai/ui/ag_ui/app.py | 17 +++++++ .../pydantic_ai/ui/vercel_ai/__init__.py | 15 ------ .../pydantic_ai/ui/vercel_ai/_adapter.py | 22 ++++---- .../pydantic_ai/ui/vercel_ai/app.py | 17 +++++++ tests/test_vercel_ai.py | 3 +- 18 files changed, 106 insertions(+), 100 deletions(-) create mode 100644 pydantic_ai_slim/pydantic_ai/ui/ag_ui/app.py create mode 100644 pydantic_ai_slim/pydantic_ai/ui/vercel_ai/app.py diff --git a/docs/ag-ui.md b/docs/ag-ui.md index e4f75ed9ec..85441b3cc7 100644 --- a/docs/ag-ui.md +++ b/docs/ag-ui.md @@ -33,27 +33,26 @@ pip/uv-add uvicorn There are three ways to run a Pydantic AI agent based on AG-UI run input with streamed AG-UI events as output, from most to least flexible. If you're using a Starlette-based web framework like FastAPI, you'll typically want to use the second method. -1. [`run_ag_ui()`][pydantic_ai.ag_ui.run_ag_ui] takes an agent and an AG-UI [`RunAgentInput`](https://docs.ag-ui.com/sdk/python/core/types#runagentinput) object, and returns a stream of AG-UI events encoded as strings. It also takes optional [`Agent.iter()`][pydantic_ai.Agent.iter] arguments including `deps`. 
Use this if you're using a web framework not based on Starlette (e.g. Django or Flask) or want to modify the input or output some way.
-2. [`handle_ag_ui_request()`][pydantic_ai.ag_ui.handle_ag_ui_request] takes an agent and a Starlette request (e.g. from FastAPI) coming from an AG-UI frontend, and returns a streaming Starlette response of AG-UI events that you can return directly from your endpoint. It also takes optional [`Agent.iter()`][pydantic_ai.Agent.iter] arguments including `deps`, that you can vary for each request (e.g. based on the authenticated user).
-3. [`Agent.to_ag_ui()`][pydantic_ai.agent.AbstractAgent.to_ag_ui] returns an ASGI application that handles every AG-UI request by running the agent. It also takes optional [`Agent.iter()`][pydantic_ai.Agent.iter] arguments including `deps`, but these will be the same for each request, with the exception of the AG-UI state that's injected as described under [state management](#state-management). This ASGI app can be [mounted](https://fastapi.tiangolo.com/advanced/sub-applications/) at a given path in an existing FastAPI app.
+1. The [`AGUIAdapter.run_stream()`][pydantic_ai.ui.ag_ui.AGUIAdapter.run_stream] method, when called on an [`AGUIAdapter`][pydantic_ai.ui.ag_ui.AGUIAdapter] instantiated with an agent and an AG-UI [`RunAgentInput`](https://docs.ag-ui.com/sdk/python/core/types#runagentinput) object, will run the agent and return a stream of AG-UI events. It also takes optional [`Agent.iter()`][pydantic_ai.Agent.iter] arguments including `deps`. Use this if you're using a web framework not based on Starlette (e.g. Django or Flask) or want to modify the input or output in some way.
+2. The [`AGUIAdapter.dispatch_request()`][pydantic_ai.ui.ag_ui.AGUIAdapter.dispatch_request] class method takes an agent and a Starlette request (e.g. from FastAPI) coming from an AG-UI frontend, and returns a streaming Starlette response of AG-UI events that you can return directly from your endpoint. It also takes optional [`Agent.iter()`][pydantic_ai.Agent.iter] arguments including `deps`, which you can vary for each request (e.g. based on the authenticated user). This is a convenience method that combines [`AGUIAdapter.from_request()`][pydantic_ai.ui.ag_ui.AGUIAdapter.from_request], [`AGUIAdapter.run_stream()`][pydantic_ai.ui.ag_ui.AGUIAdapter.run_stream], and [`AGUIAdapter.streaming_response()`][pydantic_ai.ui.ag_ui.AGUIAdapter.streaming_response].
+3. [`AGUIApp`][pydantic_ai.ui.ag_ui.app.AGUIApp] represents an ASGI application that handles every AG-UI request by running the agent. It also takes optional [`Agent.iter()`][pydantic_ai.Agent.iter] arguments including `deps`, but these will be the same for each request, with the exception of the AG-UI state that's injected as described under [state management](#state-management). This ASGI app can be [mounted](https://fastapi.tiangolo.com/advanced/sub-applications/) at a given path in an existing FastAPI app.
 
 ### Handle run input and output directly
 
-This example uses [`run_ag_ui()`][pydantic_ai.ag_ui.run_ag_ui] and performs its own request parsing and response generation.
+This example uses [`AGUIAdapter.run_stream()`][pydantic_ai.ui.ag_ui.AGUIAdapter.run_stream] and performs its own request parsing and response generation.
 This can be modified to work with any web framework.
```py {title="run_ag_ui.py"} import json from http import HTTPStatus -from ag_ui.core import RunAgentInput from fastapi import FastAPI from fastapi.requests import Request from fastapi.responses import Response, StreamingResponse from pydantic import ValidationError from pydantic_ai import Agent -from pydantic_ai.ag_ui import SSE_CONTENT_TYPE, run_ag_ui +from pydantic_ai.ui.ag_ui import AGUIAdapter, SSE_CONTENT_TYPE agent = Agent('openai:gpt-4.1', instructions='Be fun!') @@ -64,7 +63,7 @@ app = FastAPI() async def run_agent(request: Request) -> Response: accept = request.headers.get('accept', SSE_CONTENT_TYPE) try: - run_input = RunAgentInput.model_validate(await request.json()) + run_input = AGUIAdapter.build_run_input(await request.json()) # (1) except ValidationError as e: # pragma: no cover return Response( content=json.dumps(e.json()), @@ -72,11 +71,16 @@ async def run_agent(request: Request) -> Response: status_code=HTTPStatus.UNPROCESSABLE_ENTITY, ) - event_stream = run_ag_ui(agent, run_input, accept=accept) + adapter = AGUIAdapter(agent=agent, run_input=run_input, accept=accept) + events = adapter.run_stream() # (2) - return StreamingResponse(event_stream, media_type=accept) + return StreamingResponse(adapter.encode_stream(events), media_type=accept) # (3) ``` +1. You can also use the [`AGUIAdapter.from_request()`][pydantic_ai.ui.ag_ui.AGUIAdapter.from_request] class method to build an adapter directly from a request. +2. You can also use the [`AGUIAdapter.run_stream_native()`][pydantic_ai.ui.ag_ui.AGUIAdapter.run_stream_native] method to run the agent and return a stream of Pydantic AI events instead of AG-UI events. These can then be transformed into AG-UI events using the [`AGUIAdapter.transform_stream()`][pydantic_ai.ui.ag_ui.AGUIAdapter.transform_stream] method. +3. The [`AGUIAdapter.encode_stream()`][pydantic_ai.ui.ag_ui.AGUIAdapter.encode_stream] method encodes the stream of AG-UI events as strings according to the accept header value. You can also use the [`AGUIAdapter.streaming_response()`][pydantic_ai.ui.ag_ui.AGUIAdapter.streaming_response] method to generate a streaming response directly from the AG-UI event stream returned by `run_stream()`. + Since `app` is an ASGI application, it can be used with any ASGI server: ```shell @@ -87,7 +91,7 @@ This will expose the agent as an AG-UI server, and your frontend can start sendi ### Handle a Starlette request -This example uses [`handle_ag_ui_request()`][pydantic_ai.ag_ui.run_ag_ui] to directly handle a FastAPI request and return a response. Something analogous to this will work with any Starlette-based web framework. +This example uses [`AGUIAdapter.dispatch_request()`][pydantic_ai.ui.ag_ui.AGUIAdapter.dispatch_request] to directly handle a FastAPI request and return a response. Something analogous to this will work with any Starlette-based web framework. ```py {title="handle_ag_ui_request.py"} from fastapi import FastAPI @@ -95,7 +99,7 @@ from starlette.requests import Request from starlette.responses import Response from pydantic_ai import Agent -from pydantic_ai.ag_ui import handle_ag_ui_request +from pydantic_ai.ui.ag_ui import AGUIAdapter agent = Agent('openai:gpt-4.1', instructions='Be fun!') @@ -103,9 +107,11 @@ app = FastAPI() @app.post('/') async def run_agent(request: Request) -> Response: - return await handle_ag_ui_request(agent, request) + return await AGUIAdapter.dispatch_request(request, agent=agent) # (1) ``` +1. 
This method essentially does the same as the previous example, but it's more convenient to use when you're already using a Starlette/FastAPI app. + Since `app` is an ASGI application, it can be used with any ASGI server: ```shell @@ -116,19 +122,20 @@ This will expose the agent as an AG-UI server, and your frontend can start sendi ### Stand-alone ASGI app -This example uses [`Agent.to_ag_ui()`][pydantic_ai.agent.AbstractAgent.to_ag_ui] to turn the agent into a stand-alone ASGI application: +This example uses [`AGUIApp`][pydantic_ai.ui.ag_ui.app.AGUIApp] to turn the agent into a stand-alone ASGI application: -```py {title="agent_to_ag_ui.py" hl_lines="4"} +```py {title="ag_ui_app.py" hl_lines="4"} from pydantic_ai import Agent +from pydantic_ai.ui.ag_ui.app import AGUIApp agent = Agent('openai:gpt-4.1', instructions='Be fun!') -app = agent.to_ag_ui() +app = AGUIApp(agent) ``` Since `app` is an ASGI application, it can be used with any ASGI server: ```shell -uvicorn agent_to_ag_ui:app +uvicorn ag_ui_app:app ``` This will expose the agent as an AG-UI server, and your frontend can start sending requests to it. @@ -174,7 +181,8 @@ validate state contained in [`RunAgentInput.state`](https://docs.ag-ui.com/sdk/j from pydantic import BaseModel from pydantic_ai import Agent -from pydantic_ai.ag_ui import StateDeps +from pydantic_ai.ui import StateDeps +from pydantic_ai.ui.ag_ui import AGUIApp class DocumentState(BaseModel): @@ -188,7 +196,7 @@ agent = Agent( instructions='Be fun!', deps_type=StateDeps[DocumentState], ) -app = agent.to_ag_ui(deps=StateDeps(DocumentState())) +app = AGUIApp(agent, deps=StateDeps(DocumentState())) ``` Since `app` is an ASGI application, it can be used with any ASGI server: @@ -214,7 +222,8 @@ from ag_ui.core import CustomEvent, EventType, StateSnapshotEvent from pydantic import BaseModel from pydantic_ai import Agent, RunContext, ToolReturn -from pydantic_ai.ag_ui import StateDeps +from pydantic_ai.ui import StateDeps +from pydantic_ai.ui.ag_ui import AGUIApp class DocumentState(BaseModel): @@ -228,7 +237,7 @@ agent = Agent( instructions='Be fun!', deps_type=StateDeps[DocumentState], ) -app = agent.to_ag_ui(deps=StateDeps(DocumentState())) +app = AGUIApp(agent, deps=StateDeps(DocumentState())) @agent.tool @@ -271,7 +280,7 @@ uvicorn ag_ui_tool_events:app --host 0.0.0.0 --port 9000 ## Examples -For more examples of how to use [`to_ag_ui()`][pydantic_ai.agent.AbstractAgent.to_ag_ui] see +For more examples of how to use [`AGUIApp`][pydantic_ai.ui.ag_ui.app.AGUIApp] see [`pydantic_ai_examples.ag_ui`](https://github.com/pydantic/pydantic-ai/tree/main/examples/pydantic_ai_examples/ag_ui), which includes a server for use with the [AG-UI Dojo](https://docs.ag-ui.com/tutorials/debugging#the-ag-ui-dojo). 
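The mounting mentioned under option 3 of the docs above is never shown in code. A minimal sketch, assuming only that `AGUIApp` behaves as a regular ASGI/Starlette app as defined in this PR (the `/agent` path is illustrative):

```python
from fastapi import FastAPI

from pydantic_ai import Agent
from pydantic_ai.ui.ag_ui.app import AGUIApp

agent = Agent('openai:gpt-4.1', instructions='Be fun!')

app = FastAPI()
# mount the AG-UI ASGI app under /agent; the AG-UI frontend posts run input there
app.mount('/agent', AGUIApp(agent))
```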
diff --git a/examples/pydantic_ai_examples/ag_ui/api/agentic_chat.py b/examples/pydantic_ai_examples/ag_ui/api/agentic_chat.py index c91a84ad2a..961bda426c 100644 --- a/examples/pydantic_ai_examples/ag_ui/api/agentic_chat.py +++ b/examples/pydantic_ai_examples/ag_ui/api/agentic_chat.py @@ -6,9 +6,9 @@ from zoneinfo import ZoneInfo from pydantic_ai import Agent +from pydantic_ai.ui.ag_ui.app import AGUIApp agent = Agent('openai:gpt-4o-mini') -app = agent.to_ag_ui() @agent.tool_plain @@ -23,3 +23,6 @@ async def current_time(timezone: str = 'UTC') -> str: """ tz: ZoneInfo = ZoneInfo(timezone) return datetime.now(tz=tz).isoformat() + + +app = AGUIApp(agent) diff --git a/examples/pydantic_ai_examples/ag_ui/api/agentic_generative_ui.py b/examples/pydantic_ai_examples/ag_ui/api/agentic_generative_ui.py index e6043a5540..40be79190b 100644 --- a/examples/pydantic_ai_examples/ag_ui/api/agentic_generative_ui.py +++ b/examples/pydantic_ai_examples/ag_ui/api/agentic_generative_ui.py @@ -9,6 +9,7 @@ from ag_ui.core import EventType, StateDeltaEvent, StateSnapshotEvent from pydantic_ai import Agent +from pydantic_ai.ui.ag_ui.app import AGUIApp StepStatus = Literal['pending', 'completed'] @@ -116,4 +117,4 @@ async def update_plan_step( ) -app = agent.to_ag_ui() +app = AGUIApp(agent) diff --git a/examples/pydantic_ai_examples/ag_ui/api/human_in_the_loop.py b/examples/pydantic_ai_examples/ag_ui/api/human_in_the_loop.py index 3f48462976..69b443d832 100644 --- a/examples/pydantic_ai_examples/ag_ui/api/human_in_the_loop.py +++ b/examples/pydantic_ai_examples/ag_ui/api/human_in_the_loop.py @@ -8,6 +8,7 @@ from textwrap import dedent from pydantic_ai import Agent +from pydantic_ai.ui.ag_ui.app import AGUIApp agent = Agent( 'openai:gpt-4o-mini', @@ -23,4 +24,4 @@ ), ) -app = agent.to_ag_ui() +app = AGUIApp(agent) diff --git a/examples/pydantic_ai_examples/ag_ui/api/predictive_state_updates.py b/examples/pydantic_ai_examples/ag_ui/api/predictive_state_updates.py index 2b3fddbf56..70a1ef9fba 100644 --- a/examples/pydantic_ai_examples/ag_ui/api/predictive_state_updates.py +++ b/examples/pydantic_ai_examples/ag_ui/api/predictive_state_updates.py @@ -8,7 +8,8 @@ from ag_ui.core import CustomEvent, EventType from pydantic_ai import Agent, RunContext -from pydantic_ai.ag_ui import StateDeps +from pydantic_ai.ui import StateDeps +from pydantic_ai.ui.ag_ui.app import AGUIApp class DocumentState(BaseModel): @@ -74,4 +75,4 @@ async def story_instructions(ctx: RunContext[StateDeps[DocumentState]]) -> str: ) -app = agent.to_ag_ui(deps=StateDeps(DocumentState())) +app = AGUIApp(agent, deps=StateDeps(DocumentState())) diff --git a/examples/pydantic_ai_examples/ag_ui/api/shared_state.py b/examples/pydantic_ai_examples/ag_ui/api/shared_state.py index 97fc0d99ad..ccbd78355c 100644 --- a/examples/pydantic_ai_examples/ag_ui/api/shared_state.py +++ b/examples/pydantic_ai_examples/ag_ui/api/shared_state.py @@ -9,7 +9,8 @@ from ag_ui.core import EventType, StateSnapshotEvent from pydantic_ai import Agent, RunContext -from pydantic_ai.ag_ui import StateDeps +from pydantic_ai.ui import StateDeps +from pydantic_ai.ui.ag_ui.app import AGUIApp class SkillLevel(str, Enum): @@ -135,4 +136,4 @@ async def recipe_instructions(ctx: RunContext[StateDeps[RecipeSnapshot]]) -> str ) -app = agent.to_ag_ui(deps=StateDeps(RecipeSnapshot())) +app = AGUIApp(agent, deps=StateDeps(RecipeSnapshot())) diff --git a/examples/pydantic_ai_examples/ag_ui/api/tool_based_generative_ui.py b/examples/pydantic_ai_examples/ag_ui/api/tool_based_generative_ui.py index 
88dfee0437..44727d8864 100644 --- a/examples/pydantic_ai_examples/ag_ui/api/tool_based_generative_ui.py +++ b/examples/pydantic_ai_examples/ag_ui/api/tool_based_generative_ui.py @@ -6,6 +6,7 @@ from __future__ import annotations from pydantic_ai import Agent +from pydantic_ai.ui.ag_ui.app import AGUIApp agent = Agent('openai:gpt-4o-mini') -app = agent.to_ag_ui() +app = AGUIApp(agent) diff --git a/pydantic_ai_slim/pydantic_ai/ag_ui.py b/pydantic_ai_slim/pydantic_ai/ag_ui.py index 9926e2fb7e..fb751877f5 100644 --- a/pydantic_ai_slim/pydantic_ai/ag_ui.py +++ b/pydantic_ai_slim/pydantic_ai/ag_ui.py @@ -4,6 +4,8 @@ for building interactive AI applications with streaming event-based communication. """ +# TODO (v2): Remove this module in favor of `pydantic_ai.ui.ag_ui` + from __future__ import annotations from collections.abc import AsyncIterator, Sequence @@ -22,24 +24,15 @@ try: from ag_ui.core import BaseEvent from ag_ui.core.types import RunAgentInput - - from .ui import SSE_CONTENT_TYPE, OnCompleteFunc, StateDeps, StateHandler - from .ui.ag_ui import ( - AGUIAdapter, - AGUIApp, - ) -except ImportError as e: # pragma: no cover - raise ImportError( - 'Please install the `ag-ui-protocol` package to use `Agent.to_ag_ui()` method, ' - 'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`' - ) from e - -try: from starlette.requests import Request from starlette.responses import Response + + from .ui import SSE_CONTENT_TYPE, OnCompleteFunc, StateDeps, StateHandler + from .ui.ag_ui import AGUIAdapter + from .ui.ag_ui.app import AGUIApp except ImportError as e: # pragma: no cover raise ImportError( - 'Please install the `starlette` package to use `Agent.to_ag_ui()` method, ' + 'Please install the `ag-ui-protocol` and `starlette` packages to use `AGUIAdapter`, ' 'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`' ) from e diff --git a/pydantic_ai_slim/pydantic_ai/agent/abstract.py b/pydantic_ai_slim/pydantic_ai/agent/abstract.py index 8c5174fc49..458e2af7de 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/abstract.py +++ b/pydantic_ai_slim/pydantic_ai/agent/abstract.py @@ -49,7 +49,7 @@ from starlette.routing import BaseRoute, Route from starlette.types import ExceptionHandler, Lifespan - from ..ag_ui import AGUIApp + from pydantic_ai.ui.ag_ui.app import AGUIApp T = TypeVar('T') @@ -1077,7 +1077,7 @@ def to_ag_ui( Returns: An ASGI application for running Pydantic AI agents with AG-UI protocol support. 
""" - from ..ag_ui import AGUIApp + from pydantic_ai.ui.ag_ui.app import AGUIApp return AGUIApp( agent=self, diff --git a/pydantic_ai_slim/pydantic_ai/ui/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/__init__.py index 96ef9a177d..7019688a42 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/ui/__init__.py @@ -7,7 +7,6 @@ from __future__ import annotations from .adapter import OnCompleteFunc, StateDeps, StateHandler, UIAdapter -from .app import UIApp from .event_stream import SSE_CONTENT_TYPE, UIEventStream from .messages_builder import MessagesBuilder @@ -18,6 +17,5 @@ 'StateDeps', 'StateHandler', 'OnCompleteFunc', - 'UIApp', 'MessagesBuilder', ] diff --git a/pydantic_ai_slim/pydantic_ai/ui/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/adapter.py index 97d973ec1f..83b14d50a9 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/adapter.py @@ -24,13 +24,14 @@ from pydantic import BaseModel, ValidationError from pydantic_ai import DeferredToolRequests, DeferredToolResults -from pydantic_ai.agent import AbstractAgent, AgentDepsT +from pydantic_ai.agent import AbstractAgent from pydantic_ai.builtin_tools import AbstractBuiltinTool from pydantic_ai.exceptions import UserError from pydantic_ai.messages import ModelMessage from pydantic_ai.models import KnownModelName, Model from pydantic_ai.output import OutputDataT, OutputSpec from pydantic_ai.settings import ModelSettings +from pydantic_ai.tools import AgentDepsT from pydantic_ai.toolsets import AbstractToolset from pydantic_ai.usage import RunUsage, UsageLimits diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py index 45d77fce83..6228771869 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/__init__.py @@ -1,24 +1,9 @@ """AG-UI protocol integration for Pydantic AI agents.""" -from typing import Any - -from pydantic_ai.agent import AbstractAgent -from pydantic_ai.output import OutputDataT -from pydantic_ai.tools import AgentDepsT - -from .. import UIApp from ._adapter import AGUIAdapter from ._event_stream import AGUIEventStream __all__ = [ 'AGUIAdapter', 'AGUIEventStream', - 'AGUIApp', ] - - -class AGUIApp(UIApp[AgentDepsT, OutputDataT]): - """ASGI application for running Pydantic AI agents with AG-UI protocol support.""" - - def __init__(self, agent: AbstractAgent[AgentDepsT, OutputDataT], **kwargs: Any): - super().__init__(AGUIAdapter[AgentDepsT, OutputDataT], agent, **kwargs) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index e1960f5a9a..faaba72831 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -10,7 +10,6 @@ ) from ... 
import ExternalToolset, ToolDefinition -from ...agent import AgentDepsT from ...messages import ( BuiltinToolCallPart, BuiltinToolReturnPart, @@ -22,6 +21,7 @@ UserPromptPart, ) from ...output import OutputDataT +from ...tools import AgentDepsT from ...toolsets import AbstractToolset try: @@ -47,16 +47,8 @@ 'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`' ) from e -try: - from starlette.requests import Request -except ImportError as e: # pragma: no cover - raise ImportError( - 'Please install the `starlette` package to use `Agent.to_ag_ui()` method, ' - 'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`' - ) from e - if TYPE_CHECKING: - pass + from starlette.requests import Request __all__ = ['AGUIAdapter'] diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/app.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/app.py new file mode 100644 index 0000000000..8ac2f459a0 --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/app.py @@ -0,0 +1,17 @@ +"""AG-UI protocol integration for Pydantic AI agents.""" + +from typing import Any + +from pydantic_ai.agent import AbstractAgent +from pydantic_ai.output import OutputDataT +from pydantic_ai.tools import AgentDepsT + +from ..app import UIApp +from ._adapter import AGUIAdapter + + +class AGUIApp(UIApp[AgentDepsT, OutputDataT]): + """ASGI application for running Pydantic AI agents with AG-UI protocol support.""" + + def __init__(self, agent: AbstractAgent[AgentDepsT, OutputDataT], **kwargs: Any): + super().__init__(AGUIAdapter[AgentDepsT, OutputDataT], agent, **kwargs) diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py index b0013694b7..efb5a4b7d6 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py @@ -7,13 +7,6 @@ https://github.com/vercel/ai/blob/ai%405.0.34/packages/ai/src/ui/ui-messages.ts """ -from typing import Any - -from pydantic_ai.agent import AbstractAgent -from pydantic_ai.output import OutputDataT -from pydantic_ai.tools import AgentDepsT - -from .. 
import UIApp from ._adapter import VercelAIAdapter from ._event_stream import VercelAIEventStream from ._request_types import ( @@ -92,12 +85,4 @@ # Event stream and adapter 'VercelAIEventStream', 'VercelAIAdapter', - 'VercelAIApp', ] - - -class VercelAIApp(UIApp[AgentDepsT, OutputDataT]): - """ASGI application for running Pydantic AI agents with Vercel UI protocol support.""" - - def __init__(self, agent: AbstractAgent[AgentDepsT, OutputDataT], **kwargs: Any): - super().__init__(VercelAIAdapter[AgentDepsT, OutputDataT], agent, **kwargs) diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py index 3ac40f8f68..ea7494ce9f 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py @@ -5,6 +5,7 @@ from collections.abc import Sequence from dataclasses import dataclass from functools import cached_property +from typing import TYPE_CHECKING from typing_extensions import assert_never @@ -51,13 +52,8 @@ ) from ._response_types import BaseChunk -try: +if TYPE_CHECKING: from starlette.requests import Request -except ImportError as e: # pragma: no cover - raise ImportError( - 'Please install the `starlette` package to use `Agent.to_ag_ui()` method, ' - 'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`' - ) from e __all__ = ['VercelAIAdapter'] @@ -98,8 +94,10 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: # for msg in messages: if msg.role == 'system': for part in msg.parts: - if isinstance(part, TextUIPart): # pragma: no branch + if isinstance(part, TextUIPart): builder.add(SystemPromptPart(content=part.text)) + else: # pragma: no cover + raise ValueError(f'Unsupported system message part type: {type(part)}') elif msg.role == 'user': user_prompt_content: str | list[UserContent] = [] for part in msg.parts: @@ -120,6 +118,8 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: # case _: file = DocumentUrl(url=part.url, media_type=part.media_type) user_prompt_content.append(file) + else: # pragma: no cover + raise ValueError(f'Unsupported user message part type: {type(part)}') if user_prompt_content: # pragma: no branch if len(user_prompt_content) == 1 and isinstance(user_prompt_content[0], str): @@ -188,16 +188,16 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: # tool_name=tool_name, tool_call_id=tool_call_id, content=part.error_text ) ) - elif isinstance(part, DataUIPart): + elif isinstance(part, DataUIPart): # pragma: no cover # Contains custom data that shouldn't be sent to the model pass - elif isinstance(part, SourceUrlUIPart): + elif isinstance(part, SourceUrlUIPart): # pragma: no cover # TODO: Once we support citations: https://github.com/pydantic/pydantic-ai/issues/3126 pass - elif isinstance(part, SourceDocumentUIPart): + elif isinstance(part, SourceDocumentUIPart): # pragma: no cover # TODO: Once we support citations: https://github.com/pydantic/pydantic-ai/issues/3126 pass - elif isinstance(part, StepStartUIPart): + elif isinstance(part, StepStartUIPart): # pragma: no cover # Nothing to do here pass else: diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/app.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/app.py new file mode 100644 index 0000000000..8cf41c5c8c --- /dev/null +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/app.py @@ -0,0 +1,17 @@ +"""Vercel UI protocol integration for Pydantic AI agents.""" + +from typing import Any + 
+from pydantic_ai.agent import AbstractAgent +from pydantic_ai.output import OutputDataT +from pydantic_ai.tools import AgentDepsT + +from ..app import UIApp +from ._adapter import VercelAIAdapter + + +class VercelAIApp(UIApp[AgentDepsT, OutputDataT]): + """ASGI application for running Pydantic AI agents with Vercel UI protocol support.""" + + def __init__(self, agent: AbstractAgent[AgentDepsT, OutputDataT], **kwargs: Any): + super().__init__(VercelAIAdapter[AgentDepsT, OutputDataT], agent, **kwargs) diff --git a/tests/test_vercel_ai.py b/tests/test_vercel_ai.py index 3899e7d470..24f0aef7e6 100644 --- a/tests/test_vercel_ai.py +++ b/tests/test_vercel_ai.py @@ -49,7 +49,7 @@ from pydantic_ai.models.test import TestModel from pydantic_ai.run import AgentRunResult from pydantic_ai.ui import SSE_CONTENT_TYPE -from pydantic_ai.ui.vercel_ai import VercelAIAdapter, VercelAIApp, VercelAIEventStream +from pydantic_ai.ui.vercel_ai import VercelAIAdapter, VercelAIEventStream from pydantic_ai.ui.vercel_ai._request_types import ( DynamicToolOutputAvailablePart, FileUIPart, @@ -62,6 +62,7 @@ UIMessage, ) from pydantic_ai.ui.vercel_ai._response_types import BaseChunk, DataChunk +from pydantic_ai.ui.vercel_ai.app import VercelAIApp from .conftest import IsDatetime, IsSameStr, IsStr, try_import From 7b4ad0d6092df58c03610c68a004e8fd7fa095cb Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Tue, 28 Oct 2025 23:18:58 +0000 Subject: [PATCH 41/46] Docs --- README.md | 4 +- docs-site/src/index.ts | 1 + docs/api/ui.md | 13 +++ docs/examples/ag-ui.md | 4 +- docs/index.md | 4 +- docs/{ => ui}/ag-ui.md | 26 +++--- docs/ui/overview.md | 84 +++++++++++++++++++ docs/ui/vercel-ai.md | 0 mkdocs.yml | 6 +- .../pydantic_ai/agent/abstract.py | 2 +- pydantic_ai_slim/pydantic_ai/ui/adapter.py | 6 +- .../pydantic_ai/ui/ag_ui/_adapter.py | 17 ++-- .../pydantic_ai/ui/vercel_ai/_adapter.py | 18 +--- tests/test_ui.py | 4 +- 14 files changed, 138 insertions(+), 51 deletions(-) create mode 100644 docs/api/ui.md rename docs/{ => ui}/ag-ui.md (85%) create mode 100644 docs/ui/overview.md create mode 100644 docs/ui/vercel-ai.md diff --git a/README.md b/README.md index b56142790c..0b392a3fce 100644 --- a/README.md +++ b/README.md @@ -50,8 +50,8 @@ Designed to give your IDE or AI coding agent as much context as possible for aut 5. **Powerful Evals**: Enables you to systematically test and [evaluate](https://ai.pydantic.dev/evals) the performance and accuracy of the agentic systems you build, and monitor the performance over time in Pydantic Logfire. -6. **MCP, A2A, and AG-UI**: -Integrates the [Model Context Protocol](https://ai.pydantic.dev/mcp/client), [Agent2Agent](https://ai.pydantic.dev/a2a), and [AG-UI](https://ai.pydantic.dev/ag-ui) standards to give your agent access to external tools and data, let it interoperate with other agents, and build interactive applications with streaming event-based communication. +6. **MCP, A2A, and UI**: +Integrates the [Model Context Protocol](https://ai.pydantic.dev/mcp/overview), [Agent2Agent](https://ai.pydantic.dev/a2a), and various [UI event stream](https://ai.pydantic.dev/ui/overview) standards to give your agent access to external tools and data, let it interoperate with other agents, and build interactive applications with streaming event-based communication. 7. 
**Human-in-the-Loop Tool Approval**: Easily lets you flag that certain tool calls [require approval](https://ai.pydantic.dev/deferred-tools#human-in-the-loop-tool-approval) before they can proceed, possibly depending on tool call arguments, conversation history, or user preferences. diff --git a/docs-site/src/index.ts b/docs-site/src/index.ts index 57fe78c5c8..bd85ba9d2a 100644 --- a/docs-site/src/index.ts +++ b/docs-site/src/index.ts @@ -55,6 +55,7 @@ const redirect_lookup: Record = { '/examples': 'examples/setup/', '/mcp': '/mcp/overview/', '/models': '/models/overview/', + '/ag-ui': '/ui/ag-ui/' } function redirect(pathname: string): string | null { diff --git a/docs/api/ui.md b/docs/api/ui.md new file mode 100644 index 0000000000..c3951ae769 --- /dev/null +++ b/docs/api/ui.md @@ -0,0 +1,13 @@ +# `pydantic_ai.ui` + +::: pydantic_ai.ui + +::: pydantic_ai.ui.app + +::: pydantic_ai.ui.ag_ui + +::: pydantic_ai.ui.ag_ui.app + +::: pydantic_ai.ui.vercel_ai + +::: pydantic_ai.ui.vercel_ai.app diff --git a/docs/examples/ag-ui.md b/docs/examples/ag-ui.md index 27e64886e7..c0da742538 100644 --- a/docs/examples/ag-ui.md +++ b/docs/examples/ag-ui.md @@ -2,11 +2,11 @@ Example of using Pydantic AI agents with the [AG-UI Dojo](https://github.com/ag-ui-protocol/ag-ui/tree/main/typescript-sdk/apps/dojo) example app. -See the [AG-UI docs](../ag-ui.md) for more information about the AG-UI integration. +See the [AG-UI docs](../ui/ag-ui.md) for more information about the AG-UI integration. Demonstrates: -- [AG-UI](../ag-ui.md) +- [AG-UI](../ui/ag-ui.md) - [Tools](../tools.md) ## Prerequisites diff --git a/docs/index.md b/docs/index.md index 64c0938e4a..f1c323f140 100644 --- a/docs/index.md +++ b/docs/index.md @@ -25,8 +25,8 @@ Designed to give your IDE or AI coding agent as much context as possible for aut 5. **Powerful Evals**: Enables you to systematically test and [evaluate](evals.md) the performance and accuracy of the agentic systems you build, and monitor the performance over time in Pydantic Logfire. -6. **MCP, A2A, and AG-UI**: -Integrates the [Model Context Protocol](mcp/client.md), [Agent2Agent](a2a.md), and [AG-UI](ag-ui.md) standards to give your agent access to external tools and data, let it interoperate with other agents, and build interactive applications with streaming event-based communication. +6. **MCP, A2A, and UI**: +Integrates the [Model Context Protocol](mcp/overview.md), [Agent2Agent](a2a.md), and various [UI event stream](ui/overview.md) standards to give your agent access to external tools and data, let it interoperate with other agents, and build interactive applications with streaming event-based communication. 7. **Human-in-the-Loop Tool Approval**: Easily lets you flag that certain tool calls [require approval](deferred-tools.md#human-in-the-loop-tool-approval) before they can proceed, possibly depending on tool call arguments, conversation history, or user preferences. 
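For reference, the human-in-the-loop approval flow mentioned in that bullet looks roughly like this in user code. This is a condensed sketch based on the deferred-tools docs rather than part of this diff; the `delete_file` tool is illustrative and exact names may vary between versions:

```python
from pydantic_ai import Agent, DeferredToolRequests, DeferredToolResults

# Allow the run to end early with a DeferredToolRequests output
# when a flagged tool call is pending approval.
agent = Agent('openai:gpt-5', output_type=[str, DeferredToolRequests])


@agent.tool_plain(requires_approval=True)  # flag the tool as requiring approval
def delete_file(path: str) -> str:
    return f'{path} deleted'


result = agent.run_sync('Delete the file `.env`')
if isinstance(result.output, DeferredToolRequests):
    # Ask the user, then answer each pending approval by tool call ID.
    results = DeferredToolResults()
    for call in result.output.approvals:
        results.approvals[call.tool_call_id] = True
    # Resume the run with the recorded approvals.
    result = agent.run_sync(
        message_history=result.all_messages(), deferred_tool_results=results
    )
print(result.output)
```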
diff --git a/docs/ag-ui.md b/docs/ui/ag-ui.md similarity index 85% rename from docs/ag-ui.md rename to docs/ui/ag-ui.md index 85441b3cc7..da4c4bfbd0 100644 --- a/docs/ag-ui.md +++ b/docs/ui/ag-ui.md @@ -52,7 +52,8 @@ from fastapi.responses import Response, StreamingResponse from pydantic import ValidationError from pydantic_ai import Agent -from pydantic_ai.ui.ag_ui import AGUIAdapter, SSE_CONTENT_TYPE +from pydantic_ai.ui import SSE_CONTENT_TYPE +from pydantic_ai.ui.ag_ui import AGUIAdapter agent = Agent('openai:gpt-4.1', instructions='Be fun!') @@ -63,8 +64,8 @@ app = FastAPI() async def run_agent(request: Request) -> Response: accept = request.headers.get('accept', SSE_CONTENT_TYPE) try: - run_input = AGUIAdapter.build_run_input(await request.json()) # (1) - except ValidationError as e: # pragma: no cover + run_input = AGUIAdapter.build_run_input(await request.body()) # (1) + except ValidationError as e: return Response( content=json.dumps(e.json()), media_type='application/json', @@ -72,14 +73,15 @@ async def run_agent(request: Request) -> Response: ) adapter = AGUIAdapter(agent=agent, run_input=run_input, accept=accept) - events = adapter.run_stream() # (2) + event_stream = adapter.run_stream() # (2) - return StreamingResponse(adapter.encode_stream(events), media_type=accept) # (3) + sse_event_stream = adapter.encode_stream(event_stream) + return StreamingResponse(sse_event_stream, media_type=accept) # (3) ``` -1. You can also use the [`AGUIAdapter.from_request()`][pydantic_ai.ui.ag_ui.AGUIAdapter.from_request] class method to build an adapter directly from a request. -2. You can also use the [`AGUIAdapter.run_stream_native()`][pydantic_ai.ui.ag_ui.AGUIAdapter.run_stream_native] method to run the agent and return a stream of Pydantic AI events instead of AG-UI events. These can then be transformed into AG-UI events using the [`AGUIAdapter.transform_stream()`][pydantic_ai.ui.ag_ui.AGUIAdapter.transform_stream] method. -3. The [`AGUIAdapter.encode_stream()`][pydantic_ai.ui.ag_ui.AGUIAdapter.encode_stream] method encodes the stream of AG-UI events as strings according to the accept header value. You can also use the [`AGUIAdapter.streaming_response()`][pydantic_ai.ui.ag_ui.AGUIAdapter.streaming_response] method to generate a streaming response directly from the AG-UI event stream returned by `run_stream()`. +1. [`AGUIAdapter.build_run_input()`][pydantic_ai.ui.ag_ui.AGUIAdapter.build_run_input] takes the request body as bytes and returns an AG-UI [`RunAgentInput`](https://docs.ag-ui.com/sdk/python/core/types#runagentinput) object. You can also use the [`AGUIAdapter.from_request()`][pydantic_ai.ui.ag_ui.AGUIAdapter.from_request] class method to build an adapter directly from a request. +2. [`AGUIAdapter.run_stream()`][pydantic_ai.ui.ag_ui.AGUIAdapter.run_stream] runs the agent and returns a stream of AG-UI events. It supports the same optional arguments as [`Agent.run_stream_events()`](../agents.md#running-agents), including `deps`. You can also use [`AGUIAdapter.run_stream_native()`][pydantic_ai.ui.ag_ui.AGUIAdapter.run_stream_native] to run the agent and return a stream of Pydantic AI events instead, which can then be transformed into AG-UI events using [`AGUIAdapter.transform_stream()`][pydantic_ai.ui.ag_ui.AGUIAdapter.transform_stream]. +3. [`AGUIAdapter.encode_stream()`][pydantic_ai.ui.ag_ui.AGUIAdapter.encode_stream] encodes the stream of AG-UI events as strings according to the accept header value. 
You can also use [`AGUIAdapter.streaming_response()`][pydantic_ai.ui.ag_ui.AGUIAdapter.streaming_response] to generate a streaming response directly from the AG-UI event stream returned by `run_stream()`. Since `app` is an ASGI application, it can be used with any ASGI server: @@ -167,7 +169,7 @@ The integration provides full support for real-time synchronization between agents and frontend applications. In the example below we have document state which is shared between the UI and -server using the [`StateDeps`][pydantic_ai.ag_ui.StateDeps] [dependencies type](./dependencies.md) that can be used to automatically +server using the [`StateDeps`][pydantic_ai.ag_ui.StateDeps] [dependencies type](../dependencies.md) that can be used to automatically validate state contained in [`RunAgentInput.state`](https://docs.ag-ui.com/sdk/js/core/types#runagentinput) using a Pydantic `BaseModel` specified as a generic parameter. !!! note "Custom dependencies type with AG-UI state" @@ -182,7 +184,7 @@ from pydantic import BaseModel from pydantic_ai import Agent from pydantic_ai.ui import StateDeps -from pydantic_ai.ui.ag_ui import AGUIApp +from pydantic_ai.ui.ag_ui.app import AGUIApp class DocumentState(BaseModel): @@ -213,7 +215,7 @@ user experiences with frontend user interfaces. ### Events Pydantic AI tools can send [AG-UI events](https://docs.ag-ui.com/concepts/events) simply by returning a -[`ToolReturn`](tools-advanced.md#advanced-tool-returns) object with a +[`ToolReturn`](../tools-advanced.md#advanced-tool-returns) object with a [`BaseEvent`](https://docs.ag-ui.com/sdk/python/core/events#baseevent) (or a list of events) as `metadata`, which allows for custom events and state updates. @@ -223,7 +225,7 @@ from pydantic import BaseModel from pydantic_ai import Agent, RunContext, ToolReturn from pydantic_ai.ui import StateDeps -from pydantic_ai.ui.ag_ui import AGUIApp +from pydantic_ai.ui.ag_ui.app import AGUIApp class DocumentState(BaseModel): diff --git a/docs/ui/overview.md b/docs/ui/overview.md new file mode 100644 index 0000000000..03f1f2b52b --- /dev/null +++ b/docs/ui/overview.md @@ -0,0 +1,84 @@ +# UI Event Streams + +If you're building a chat app or other interactive frontend for an AI agent, your backend will need to receive agent run input (like a chat message or full message history) from the frontend, and will need to stream the [agent's events](../agents.md#streaming-all-events) (like text, thinking, and tool calls) to the frontend so that the user knows what's happening in real time. + +While your frontend could use Pydantic AI's [`ModelRequest`s](../message-history.md) and [`AgentStreamEvent`s][pydantic_ai.messages.AgentStreamEvent] directly, you'll typically want to use a UI event stream protocol that's natively supported by your frontend framework. + +Pydantic AI natively supports two UI event stream protocols: + +- [Agent User Interaction (AG-UI) Protocol](./ag-ui.md) +- [Vercel AI Data Stream Protocol](./vercel-ai.md) + +These integrations are implemented as subclasses of the abstract [`UIAdapter`][pydantic_ai.ui.UIAdapter] class, so they also serve as a reference for integrating with other UI event stream protocols. + +## Usage + +The protocol-specific [`UIAdapter`][pydantic_ai.ui.UIAdapter] subclass (i.e. 
[`AGUIAdapter`][pydantic_ai.ui.ag_ui.AGUIAdapter] or [`VercelAIAdapter`][pydantic_ai.ui.vercel_ai.VercelAIAdapter]) is responsible for transforming agent run input received from the frontend into arguments for [`Agent.run_stream_events()`](../agents.md#running-agents), running the agent, and then transforming Pydantic AI events into protocol-specific events. The event stream transformation is handled by a protocol-specific [`UIEventStream`][pydantic_ai.ui.UIEventStream] subclass, but you typically won't use this directly. + +If you're using a Starlette-based web framework like FastAPI, you can use the [`UIAdapter.dispatch_request()`][pydantic_ai.ui.UIAdapter.dispatch_request] class method from an endpoint function to directly handle a request and return a streaming response of protocol-specific events. Besides the request, this method takes the agent and supports the same optional arguments as [`Agent.run_stream_events()`](../agents.md#running-agents), including `deps`. + +!!! note + These examples use the `VercelAIAdapter`, but the same patterns apply to all `UIAdapter` subclasses. + +```py {title="dispatch_request.py"} +from fastapi import FastAPI +from starlette.requests import Request +from starlette.responses import Response + +from pydantic_ai import Agent +from pydantic_ai.ui.vercel_ai import VercelAIAdapter + +agent = Agent('openai:gpt-5') + +app = FastAPI() + +@app.post('/chat') +async def chat(request: Request) -> Response: + return await VercelAIAdapter.dispatch_request(request, agent=agent) +``` + +If you're using a web framework not based on Starlette (e.g. Django or Flask) or want fine-grained control over the input or output, you can create and use a `UIAdapter` instance directly. + +!!! note + This example uses FastAPI, but can be modified to work with any web framework. + +```py {title="run_stream.py"} +import json +from http import HTTPStatus + +from fastapi import FastAPI +from fastapi.requests import Request +from fastapi.responses import Response, StreamingResponse +from pydantic import ValidationError + +from pydantic_ai import Agent +from pydantic_ai.ui import SSE_CONTENT_TYPE +from pydantic_ai.ui.vercel_ai import VercelAIAdapter + +agent = Agent('openai:gpt-5') + +app = FastAPI() + + +@app.post('/chat') +async def chat(request: Request) -> Response: + accept = request.headers.get('accept', SSE_CONTENT_TYPE) + try: + run_input = VercelAIAdapter.build_run_input(await request.body()) # (1) + except ValidationError as e: + return Response( + content=json.dumps(e.json()), + media_type='application/json', + status_code=HTTPStatus.UNPROCESSABLE_ENTITY, + ) + + adapter = VercelAIAdapter(agent=agent, run_input=run_input, accept=accept) + event_stream = adapter.run_stream() # (2) + + sse_event_stream = adapter.encode_stream(event_stream) + return StreamingResponse(sse_event_stream, media_type=accept) # (3) +``` + +1. [`UIAdapter.build_run_input()`][pydantic_ai.ui.UIAdapter.build_run_input] takes the request body as bytes and returns a protocol-specific run input object. You can also use the [`UIAdapter.from_request()`][pydantic_ai.ui.UIAdapter.from_request] class method to build an adapter directly from a request. +2. [`UIAdapter.run_stream()`][pydantic_ai.ui.UIAdapter.run_stream] runs the agent and returns a stream of protocol-specific events. It supports the same optional arguments as [`Agent.run_stream_events()`](../agents.md#running-agents), including `deps`. 
You can also use [`UIAdapter.run_stream_native()`][pydantic_ai.ui.UIAdapter.run_stream_native] to run the agent and return a stream of Pydantic AI events instead, which can then be transformed into protocol-specific events using [`UIAdapter.transform_stream()`][pydantic_ai.ui.UIAdapter.transform_stream]. +3. [`UIAdapter.encode_stream()`][pydantic_ai.ui.UIAdapter.encode_stream] encodes the stream of protocol-specific events as strings according to the accept header value. You can also use [`UIAdapter.streaming_response()`][pydantic_ai.ui.UIAdapter.streaming_response] to generate a streaming response directly from the protocol-specific event stream returned by `run_stream()`. diff --git a/docs/ui/vercel-ai.md b/docs/ui/vercel-ai.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/mkdocs.yml b/mkdocs.yml index e0057f5ca1..7439086641 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -92,7 +92,10 @@ nav: - Temporal: durable_execution/temporal.md - DBOS: durable_execution/dbos.md - Prefect: durable_execution/prefect.md - - Agent-User Interaction (AG-UI): ag-ui.md + - UI Event Streams: + - Overview: ui/overview.md + - AG-UI: ui/ag-ui.md + - Vercel AI: ui/vercel-ai.md - Agent2Agent (A2A): a2a.md - Related Packages: @@ -160,6 +163,7 @@ nav: - api/providers.md - api/retries.md - api/run.md + - api/ui.md - pydantic_evals: - api/pydantic_evals/dataset.md - api/pydantic_evals/evaluators.md diff --git a/pydantic_ai_slim/pydantic_ai/agent/abstract.py b/pydantic_ai_slim/pydantic_ai/agent/abstract.py index 458e2af7de..902336abb4 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/abstract.py +++ b/pydantic_ai_slim/pydantic_ai/agent/abstract.py @@ -1040,7 +1040,7 @@ def to_ag_ui( uvicorn app:app --host 0.0.0.0 --port 8000 ``` - See [AG-UI docs](../ag-ui.md) for more information. + See [AG-UI docs](../ui/ag-ui.md) for more information. 
Args: output_type: Custom output type to use for this run, `output_type` may only be used if the agent has diff --git a/pydantic_ai_slim/pydantic_ai/ui/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/adapter.py index 83b14d50a9..f488890ad2 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/adapter.py @@ -127,14 +127,14 @@ async def from_request( """Create an adapter from a protocol-specific run input.""" return cls( agent=agent, - run_input=await cls.build_run_input(request), + run_input=cls.build_run_input(await request.body()), accept=request.headers.get('accept'), ) @classmethod @abstractmethod - async def build_run_input(cls, request: Request) -> RunInputT: - """Validate the HTTP request and return the validated protocol-specific run input.""" + def build_run_input(cls, body: bytes) -> RunInputT: + """Validate the request body and return the protocol-specific run input object.""" raise NotImplementedError @classmethod diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index faaba72831..1e51831b00 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -48,7 +48,7 @@ ) from e if TYPE_CHECKING: - from starlette.requests import Request + pass __all__ = ['AGUIAdapter'] @@ -86,9 +86,9 @@ class AGUIAdapter(UIAdapter[RunAgentInput, Message, BaseEvent, AgentDepsT, Outpu """TODO (DouweM): Docstring.""" @classmethod - async def build_run_input(cls, request: Request) -> RunAgentInput: - """Validate the request and return the validated run input.""" - return RunAgentInput.model_validate(await request.json()) + def build_run_input(cls, body: bytes) -> RunAgentInput: + """Validate the request body and return the validated run input.""" + return RunAgentInput.model_validate_json(body) def build_event_stream(self) -> UIEventStream[RunAgentInput, BaseEvent, AgentDepsT, OutputDataT]: """Create an event stream for the adapter.""" @@ -108,14 +108,7 @@ def state(self) -> dict[str, Any] | None: @cached_property def messages(self) -> list[ModelMessage]: - """Convert AG-UI messages to Pydantic AI messages. - - Args: - messages: List of AG-UI messages. - - Returns: - List of Pydantic AI ModelMessage objects. - """ + """Convert AG-UI messages to Pydantic AI messages.""" return self.load_messages(self.run_input.messages) @classmethod diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py index ea7494ce9f..5a6b7d7346 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py @@ -53,7 +53,7 @@ from ._response_types import BaseChunk if TYPE_CHECKING: - from starlette.requests import Request + pass __all__ = ['VercelAIAdapter'] @@ -64,26 +64,16 @@ class VercelAIAdapter(UIAdapter[RequestData, UIMessage, BaseChunk, AgentDepsT, O """TODO (DouweM): Docstring.""" @classmethod - async def build_run_input(cls, request: Request) -> RequestData: + def build_run_input(cls, body: bytes) -> RequestData: """Validate a Vercel AI request.""" - return request_data_ta.validate_json(await request.body()) + return request_data_ta.validate_json(body) def build_event_stream(self) -> UIEventStream[RequestData, BaseChunk, AgentDepsT, OutputDataT]: return VercelAIEventStream(self.run_input, accept=self.accept) @cached_property def messages(self) -> list[ModelMessage]: - """Convert Vercel AI protocol messages to Pydantic AI messages. 
- - Args: - messages: List of Vercel AI UIMessage objects. - - Returns: - List of Pydantic AI ModelMessage objects. - - Raises: - ValueError: If message format is not supported. - """ + """Convert Vercel AI protocol messages to Pydantic AI messages.""" return self.load_messages(self.run_input.messages) @classmethod diff --git a/tests/test_ui.py b/tests/test_ui.py index 251b231a70..04c7a4bc0a 100644 --- a/tests/test_ui.py +++ b/tests/test_ui.py @@ -85,8 +85,8 @@ class DummyUIDeps: class DummyUIAdapter(UIAdapter[DummyUIRunInput, ModelMessage, str, AgentDepsT, OutputDataT]): @classmethod - async def build_run_input(cls, request: Request) -> DummyUIRunInput: - return DummyUIRunInput.model_validate(await request.json()) + def build_run_input(cls, body: bytes) -> DummyUIRunInput: + return DummyUIRunInput.model_validate_json(body) @classmethod def load_messages(cls, messages: Sequence[ModelMessage]) -> list[ModelMessage]: From d59fdac76e0c5dcffa254d5ac418b4a16ac920dc Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Wed, 29 Oct 2025 17:32:40 +0000 Subject: [PATCH 42/46] coverage --- pydantic_ai_slim/pydantic_ai/messages.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index 89256d626c..c2167daebc 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -514,14 +514,14 @@ def narrow_type(bc: BinaryContent) -> BinaryContent | BinaryImage: vendor_metadata=bc.vendor_metadata, ) else: - return bc # pragma: no cover + return bc @classmethod def from_data_uri(cls, data_uri: str) -> BinaryContent: """Create a `BinaryContent` from a data URI.""" prefix = 'data:' if not data_uri.startswith(prefix): - raise ValueError('Data URI must start with "data:"') # pragma: no cover + raise ValueError('Data URI must start with "data:"') media_type, data = data_uri[len(prefix) :].split(';base64,', 1) return cls.narrow_type(cls(data=base64.b64decode(data), media_type=media_type)) From 9dbcee8659608095b9106843cede813b343c18cd Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Wed, 29 Oct 2025 17:51:21 +0000 Subject: [PATCH 43/46] Remove UIApp --- pydantic_ai_slim/pydantic_ai/ui/__init__.py | 7 +- .../ui/{adapter.py => _adapter.py} | 2 +- .../ui/{event_stream.py => _event_stream.py} | 3 +- ...ssages_builder.py => _messages_builder.py} | 2 +- .../pydantic_ai/ui/ag_ui/_adapter.py | 4 +- pydantic_ai_slim/pydantic_ai/ui/ag_ui/app.py | 136 ++++++++++++++++- pydantic_ai_slim/pydantic_ai/ui/app.py | 139 ------------------ .../pydantic_ai/ui/vercel_ai/_adapter.py | 4 +- .../pydantic_ai/ui/vercel_ai/app.py | 17 --- tests/test_ui.py | 3 +- tests/test_vercel_ai.py | 53 ------- 11 files changed, 141 insertions(+), 229 deletions(-) rename pydantic_ai_slim/pydantic_ai/ui/{adapter.py => _adapter.py} (99%) rename pydantic_ai_slim/pydantic_ai/ui/{event_stream.py => _event_stream.py} (99%) rename pydantic_ai_slim/pydantic_ai/ui/{messages_builder.py => _messages_builder.py} (92%) delete mode 100644 pydantic_ai_slim/pydantic_ai/ui/app.py delete mode 100644 pydantic_ai_slim/pydantic_ai/ui/vercel_ai/app.py diff --git a/pydantic_ai_slim/pydantic_ai/ui/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/__init__.py index 7019688a42..0ea1826eb7 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/ui/__init__.py @@ -6,9 +6,9 @@ from __future__ import annotations -from .adapter import OnCompleteFunc, StateDeps, StateHandler, UIAdapter -from .event_stream import 
SSE_CONTENT_TYPE, UIEventStream -from .messages_builder import MessagesBuilder +from ._adapter import StateDeps, StateHandler, UIAdapter +from ._event_stream import SSE_CONTENT_TYPE, NativeEvent, OnCompleteFunc, UIEventStream +from ._messages_builder import MessagesBuilder __all__ = [ 'UIAdapter', @@ -16,6 +16,7 @@ 'SSE_CONTENT_TYPE', 'StateDeps', 'StateHandler', + 'NativeEvent', 'OnCompleteFunc', 'MessagesBuilder', ] diff --git a/pydantic_ai_slim/pydantic_ai/ui/adapter.py b/pydantic_ai_slim/pydantic_ai/ui/_adapter.py similarity index 99% rename from pydantic_ai_slim/pydantic_ai/ui/adapter.py rename to pydantic_ai_slim/pydantic_ai/ui/_adapter.py index f488890ad2..877cac5606 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/_adapter.py @@ -35,7 +35,7 @@ from pydantic_ai.toolsets import AbstractToolset from pydantic_ai.usage import RunUsage, UsageLimits -from .event_stream import NativeEvent, OnCompleteFunc, UIEventStream +from ._event_stream import NativeEvent, OnCompleteFunc, UIEventStream if TYPE_CHECKING: from starlette.requests import Request diff --git a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/_event_stream.py similarity index 99% rename from pydantic_ai_slim/pydantic_ai/ui/event_stream.py rename to pydantic_ai_slim/pydantic_ai/ui/_event_stream.py index daf66e3254..5ea65745fc 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/_event_stream.py @@ -57,7 +57,8 @@ RunInputT = TypeVar('RunInputT') """Type variable for protocol-specific run input types.""" -NativeEvent = AgentStreamEvent | AgentRunResultEvent[Any] +NativeEvent: TypeAlias = AgentStreamEvent | AgentRunResultEvent[Any] +"""Type alias for the native event type, which is either an `AgentStreamEvent` or an `AgentRunResultEvent`.""" OnCompleteFunc: TypeAlias = ( Callable[[AgentRunResult[Any]], None] diff --git a/pydantic_ai_slim/pydantic_ai/ui/messages_builder.py b/pydantic_ai_slim/pydantic_ai/ui/_messages_builder.py similarity index 92% rename from pydantic_ai_slim/pydantic_ai/ui/messages_builder.py rename to pydantic_ai_slim/pydantic_ai/ui/_messages_builder.py index edbae248cf..6a2edf1715 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/messages_builder.py +++ b/pydantic_ai_slim/pydantic_ai/ui/_messages_builder.py @@ -7,7 +7,7 @@ @dataclass class MessagesBuilder: - """Helper class to build Pydantic AI messages from protocol-specific messages.""" + """Helper class to build Pydantic AI messages from request/response parts.""" messages: list[ModelMessage] = field(default_factory=list) diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index 1e51831b00..546074175b 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -37,9 +37,7 @@ UserMessage, ) - from ..adapter import UIAdapter - from ..event_stream import UIEventStream - from ..messages_builder import MessagesBuilder + from .. 
import MessagesBuilder, UIAdapter, UIEventStream from ._event_stream import BUILTIN_TOOL_CALL_ID_PREFIX, AGUIEventStream except ImportError as e: # pragma: no cover raise ImportError( diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/app.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/app.py index 8ac2f459a0..fffb1bc843 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/app.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/app.py @@ -1,17 +1,141 @@ """AG-UI protocol integration for Pydantic AI agents.""" -from typing import Any +from __future__ import annotations +from collections.abc import Callable, Mapping, Sequence +from typing import Any, Generic + +from typing_extensions import Self + +from pydantic_ai import DeferredToolResults from pydantic_ai.agent import AbstractAgent -from pydantic_ai.output import OutputDataT +from pydantic_ai.builtin_tools import AbstractBuiltinTool +from pydantic_ai.messages import ModelMessage +from pydantic_ai.models import KnownModelName, Model +from pydantic_ai.output import OutputDataT, OutputSpec +from pydantic_ai.settings import ModelSettings from pydantic_ai.tools import AgentDepsT +from pydantic_ai.toolsets import AbstractToolset +from pydantic_ai.usage import RunUsage, UsageLimits -from ..app import UIApp +from .. import OnCompleteFunc from ._adapter import AGUIAdapter +try: + from starlette.applications import Starlette + from starlette.middleware import Middleware + from starlette.requests import Request + from starlette.responses import Response + from starlette.routing import BaseRoute + from starlette.types import ExceptionHandler, Lifespan +except ImportError as e: # pragma: no cover + raise ImportError( + 'Please install the `starlette` package to use `AGUIApp`, ' + 'you can use the `ag-ui` optional group — `pip install "pydantic-ai-slim[ag-ui]"`' + ) from e + -class AGUIApp(UIApp[AgentDepsT, OutputDataT]): +class AGUIApp(Generic[AgentDepsT, OutputDataT], Starlette): """ASGI application for running Pydantic AI agents with AG-UI protocol support.""" - def __init__(self, agent: AbstractAgent[AgentDepsT, OutputDataT], **kwargs: Any): - super().__init__(AGUIAdapter[AgentDepsT, OutputDataT], agent, **kwargs) + def __init__( + self, + agent: AbstractAgent[AgentDepsT, OutputDataT], + *, + # AGUIAdapter.dispatch_request parameters + output_type: OutputSpec[Any] | None = None, + message_history: Sequence[ModelMessage] | None = None, + deferred_tool_results: DeferredToolResults | None = None, + model: Model | KnownModelName | str | None = None, + deps: AgentDepsT = None, + model_settings: ModelSettings | None = None, + usage_limits: UsageLimits | None = None, + usage: RunUsage | None = None, + infer_name: bool = True, + toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, + builtin_tools: Sequence[AbstractBuiltinTool] | None = None, + on_complete: OnCompleteFunc[Any] | None = None, + # Starlette parameters + debug: bool = False, + routes: Sequence[BaseRoute] | None = None, + middleware: Sequence[Middleware] | None = None, + exception_handlers: Mapping[Any, ExceptionHandler] | None = None, + on_startup: Sequence[Callable[[], Any]] | None = None, + on_shutdown: Sequence[Callable[[], Any]] | None = None, + lifespan: Lifespan[Self] | None = None, + ) -> None: + """An ASGI application that handles every request by running the agent and streaming the response. 
+ + Note that the `deps` will be the same for each request, with the exception of the frontend state that's + injected into the `state` field of a `deps` object that implements the [`StateHandler`][pydantic_ai.ui.StateHandler] protocol. + To provide different `deps` for each request (e.g. based on the authenticated user), + use [`AGUIAdapter.run_stream()`][pydantic_ai.ui.ag_ui.AGUIAdapter.run_stream] or + [`AGUIAdapter.dispatch_request()`][pydantic_ai.ui.ag_ui.AGUIAdapter.dispatch_request] instead. + + Args: + agent: The agent to run. + + output_type: Custom output type to use for this run, `output_type` may only be used if the agent has + no output validators since output validators would expect an argument that matches the agent's + output type. + message_history: History of the conversation so far. + deferred_tool_results: Optional results for deferred tool calls in the message history. + model: Optional model to use for this run, required if `model` was not set when creating the agent. + deps: Optional dependencies to use for this run. + model_settings: Optional settings to use for this model's request. + usage_limits: Optional limits on model request count or token usage. + usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. + infer_name: Whether to try to infer the agent name from the call frame if it's not set. + toolsets: Optional additional toolsets for this run. + builtin_tools: Optional additional builtin tools for this run. + on_complete: Optional callback function called when the agent run completes successfully. + The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can access `all_messages()` and other result data. + + debug: Boolean indicating if debug tracebacks should be returned on errors. + routes: A list of routes to serve incoming HTTP and WebSocket requests. + middleware: A list of middleware to run for every request. A starlette application will always + automatically include two middleware classes. `ServerErrorMiddleware` is added as the very + outermost middleware, to handle any uncaught errors occurring anywhere in the entire stack. + `ExceptionMiddleware` is added as the very innermost middleware, to deal with handled + exception cases occurring in the routing or endpoints. + exception_handlers: A mapping of either integer status codes, or exception class types onto + callables which handle the exceptions. Exception handler callables should be of the form + `handler(request, exc) -> response` and may be either standard functions, or async functions. + on_startup: A list of callables to run on application startup. Startup handler callables do not + take any arguments, and may be either standard functions, or async functions. + on_shutdown: A list of callables to run on application shutdown. Shutdown handler callables do + not take any arguments, and may be either standard functions, or async functions. + lifespan: A lifespan context function, which can be used to perform startup and shutdown tasks. + This is a newer style that replaces the `on_startup` and `on_shutdown` handlers. Use one or + the other, not both. 
+ """ + super().__init__( + debug=debug, + routes=routes, + middleware=middleware, + exception_handlers=exception_handlers, + on_startup=on_startup, + on_shutdown=on_shutdown, + lifespan=lifespan, + ) + + async def run_agent(request: Request) -> Response: + """Endpoint to run the agent with the provided input data.""" + return await AGUIAdapter.dispatch_request( + request, + agent=agent, + output_type=output_type, + message_history=message_history, + deferred_tool_results=deferred_tool_results, + model=model, + deps=deps, + model_settings=model_settings, + usage_limits=usage_limits, + usage=usage, + infer_name=infer_name, + toolsets=toolsets, + builtin_tools=builtin_tools, + on_complete=on_complete, + ) + + self.router.add_route('/', run_agent, methods=['POST']) diff --git a/pydantic_ai_slim/pydantic_ai/ui/app.py b/pydantic_ai_slim/pydantic_ai/ui/app.py deleted file mode 100644 index d6d4cb0192..0000000000 --- a/pydantic_ai_slim/pydantic_ai/ui/app.py +++ /dev/null @@ -1,139 +0,0 @@ -from __future__ import annotations - -from collections.abc import Callable, Mapping, Sequence -from typing import Any, Generic - -from typing_extensions import Self - -from .. import DeferredToolResults -from ..agent import AbstractAgent -from ..builtin_tools import AbstractBuiltinTool -from ..messages import ModelMessage -from ..models import KnownModelName, Model -from ..output import OutputDataT, OutputSpec -from ..settings import ModelSettings -from ..tools import AgentDepsT -from ..toolsets import AbstractToolset -from ..usage import RunUsage, UsageLimits -from .adapter import OnCompleteFunc, UIAdapter - -try: - from starlette.applications import Starlette - from starlette.middleware import Middleware - from starlette.requests import Request - from starlette.responses import Response - from starlette.routing import BaseRoute - from starlette.types import ExceptionHandler, Lifespan -except ImportError as e: # pragma: no cover - raise ImportError( - 'Please install the `starlette` package to use `UIApp`, ' - 'you can use the `ui` optional group — `pip install "pydantic-ai-slim[ui]"`' - ) from e - - -class UIApp(Generic[AgentDepsT, OutputDataT], Starlette): - """ASGI application for running Pydantic AI agents with UI protocol support.""" - - def __init__( - self, - adapter_type: type[UIAdapter[Any, Any, Any, AgentDepsT, OutputDataT]], - agent: AbstractAgent[AgentDepsT, OutputDataT], - *, - # UIAdapter.dispatch_request parameters - output_type: OutputSpec[Any] | None = None, - message_history: Sequence[ModelMessage] | None = None, - deferred_tool_results: DeferredToolResults | None = None, - model: Model | KnownModelName | str | None = None, - deps: AgentDepsT = None, - model_settings: ModelSettings | None = None, - usage_limits: UsageLimits | None = None, - usage: RunUsage | None = None, - infer_name: bool = True, - toolsets: Sequence[AbstractToolset[AgentDepsT]] | None = None, - builtin_tools: Sequence[AbstractBuiltinTool] | None = None, - on_complete: OnCompleteFunc[Any] | None = None, - # Starlette parameters - debug: bool = False, - routes: Sequence[BaseRoute] | None = None, - middleware: Sequence[Middleware] | None = None, - exception_handlers: Mapping[Any, ExceptionHandler] | None = None, - on_startup: Sequence[Callable[[], Any]] | None = None, - on_shutdown: Sequence[Callable[[], Any]] | None = None, - lifespan: Lifespan[Self] | None = None, - ) -> None: - """An ASGI application that handles every request by running the agent and streaming the response. 
- - Note that the `deps` will be the same for each request, with the exception of the frontend state that's - injected into the `state` field of a `deps` object that implements the [`StateHandler`][pydantic_ai.ui.StateHandler] protocol. - To provide different `deps` for each request (e.g. based on the authenticated user), - use [`UIAdapter.run_stream()`][pydantic_ai.ui.UIAdapter.run_stream] or - [`UIAdapter.dispatch_request()`][pydantic_ai.ui.UIAdapter.dispatch_request] instead. - - Args: - adapter_type: The type of the UI adapter to use. - agent: The agent to run. - - output_type: Custom output type to use for this run, `output_type` may only be used if the agent has - no output validators since output validators would expect an argument that matches the agent's - output type. - message_history: History of the conversation so far. - deferred_tool_results: Optional results for deferred tool calls in the message history. - model: Optional model to use for this run, required if `model` was not set when creating the agent. - deps: Optional dependencies to use for this run. - model_settings: Optional settings to use for this model's request. - usage_limits: Optional limits on model request count or token usage. - usage: Optional usage to start with, useful for resuming a conversation or agents used in tools. - infer_name: Whether to try to infer the agent name from the call frame if it's not set. - toolsets: Optional additional toolsets for this run. - builtin_tools: Optional additional builtin tools for this run. - on_complete: Optional callback function called when the agent run completes successfully. - The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can access `all_messages()` and other result data. - - debug: Boolean indicating if debug tracebacks should be returned on errors. - routes: A list of routes to serve incoming HTTP and WebSocket requests. - middleware: A list of middleware to run for every request. A starlette application will always - automatically include two middleware classes. `ServerErrorMiddleware` is added as the very - outermost middleware, to handle any uncaught errors occurring anywhere in the entire stack. - `ExceptionMiddleware` is added as the very innermost middleware, to deal with handled - exception cases occurring in the routing or endpoints. - exception_handlers: A mapping of either integer status codes, or exception class types onto - callables which handle the exceptions. Exception handler callables should be of the form - `handler(request, exc) -> response` and may be either standard functions, or async functions. - on_startup: A list of callables to run on application startup. Startup handler callables do not - take any arguments, and may be either standard functions, or async functions. - on_shutdown: A list of callables to run on application shutdown. Shutdown handler callables do - not take any arguments, and may be either standard functions, or async functions. - lifespan: A lifespan context function, which can be used to perform startup and shutdown tasks. - This is a newer style that replaces the `on_startup` and `on_shutdown` handlers. Use one or - the other, not both. 
- """ - super().__init__( - debug=debug, - routes=routes, - middleware=middleware, - exception_handlers=exception_handlers, - on_startup=on_startup, - on_shutdown=on_shutdown, - lifespan=lifespan, - ) - - async def run_agent(request: Request) -> Response: - """Endpoint to run the agent with the provided input data.""" - return await adapter_type.dispatch_request( - request, - agent=agent, - output_type=output_type, - message_history=message_history, - deferred_tool_results=deferred_tool_results, - model=model, - deps=deps, - model_settings=model_settings, - usage_limits=usage_limits, - usage=usage, - infer_name=infer_name, - toolsets=toolsets, - builtin_tools=builtin_tools, - on_complete=on_complete, - ) - - self.router.add_route('/', run_agent, methods=['POST']) diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py index 5a6b7d7346..c226e7434e 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py @@ -30,9 +30,7 @@ ) from ...output import OutputDataT from ...tools import AgentDepsT -from ..adapter import UIAdapter -from ..event_stream import UIEventStream -from ..messages_builder import MessagesBuilder +from .. import MessagesBuilder, UIAdapter, UIEventStream from ._event_stream import VercelAIEventStream from ._request_types import ( DataUIPart, diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/app.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/app.py deleted file mode 100644 index 8cf41c5c8c..0000000000 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/app.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Vercel UI protocol integration for Pydantic AI agents.""" - -from typing import Any - -from pydantic_ai.agent import AbstractAgent -from pydantic_ai.output import OutputDataT -from pydantic_ai.tools import AgentDepsT - -from ..app import UIApp -from ._adapter import VercelAIAdapter - - -class VercelAIApp(UIApp[AgentDepsT, OutputDataT]): - """ASGI application for running Pydantic AI agents with Vercel UI protocol support.""" - - def __init__(self, agent: AbstractAgent[AgentDepsT, OutputDataT], **kwargs: Any): - super().__init__(VercelAIAdapter[AgentDepsT, OutputDataT], agent, **kwargs) diff --git a/tests/test_ui.py b/tests/test_ui.py index 04c7a4bc0a..38f9950ad5 100644 --- a/tests/test_ui.py +++ b/tests/test_ui.py @@ -46,8 +46,7 @@ from pydantic_ai.run import AgentRunResult, AgentRunResultEvent from pydantic_ai.tools import ToolDefinition from pydantic_ai.toolsets import AbstractToolset, ExternalToolset -from pydantic_ai.ui.adapter import UIAdapter -from pydantic_ai.ui.event_stream import NativeEvent, UIEventStream +from pydantic_ai.ui import NativeEvent, UIAdapter, UIEventStream from .conftest import try_import diff --git a/tests/test_vercel_ai.py b/tests/test_vercel_ai.py index 24f0aef7e6..64d8f76d0c 100644 --- a/tests/test_vercel_ai.py +++ b/tests/test_vercel_ai.py @@ -2,12 +2,9 @@ import json from collections.abc import AsyncIterator, MutableMapping -from http import HTTPStatus from typing import Any, cast -import httpx import pytest -from asgi_lifespan import LifespanManager from inline_snapshot import snapshot from pydantic_ai import Agent @@ -48,7 +45,6 @@ ) from pydantic_ai.models.test import TestModel from pydantic_ai.run import AgentRunResult -from pydantic_ai.ui import SSE_CONTENT_TYPE from pydantic_ai.ui.vercel_ai import VercelAIAdapter, VercelAIEventStream from pydantic_ai.ui.vercel_ai._request_types import ( DynamicToolOutputAvailablePart, @@ 
-62,7 +58,6 @@ UIMessage, ) from pydantic_ai.ui.vercel_ai._response_types import BaseChunk, DataChunk -from pydantic_ai.ui.vercel_ai.app import VercelAIApp from .conftest import IsDatetime, IsSameStr, IsStr, try_import @@ -1694,54 +1689,6 @@ async def send(data: MutableMapping[str, Any]) -> None: ) -async def test_app(): - agent = Agent(model=TestModel()) - - run_input = SubmitMessage( - id='foo', - messages=[ - UIMessage( - id='bar', - role='user', - parts=[TextUIPart(text='Hello')], - ), - ], - ) - - app = VercelAIApp(agent) - async with LifespanManager(app): - transport = httpx.ASGITransport(app) - async with httpx.AsyncClient(transport=transport) as client: - client.base_url = 'http://localhost:8000' - async with client.stream( - 'POST', - '/', - content=run_input.model_dump_json(), - headers={'Content-Type': 'application/json', 'Accept': SSE_CONTENT_TYPE}, - ) as response: - assert response.status_code == HTTPStatus.OK, f'Unexpected status code: {response.status_code}' - events: list[str | dict[str, Any]] = [] - async for event in response.aiter_lines(): - if event: - events.append('[DONE]' if '[DONE]' in event else json.loads(event.removeprefix('data: '))) - - assert events == snapshot( - [ - {'type': 'start'}, - {'type': 'start-step'}, - {'type': 'text-start', 'id': IsStr()}, - {'type': 'text-delta', 'delta': 'success ', 'id': IsStr()}, - {'type': 'text-delta', 'delta': '(no ', 'id': IsStr()}, - {'type': 'text-delta', 'delta': 'tool ', 'id': IsStr()}, - {'type': 'text-delta', 'delta': 'calls)', 'id': IsStr()}, - {'type': 'text-end', 'id': IsStr()}, - {'type': 'finish-step'}, - {'type': 'finish'}, - '[DONE]', - ] - ) - - async def test_adapter_load_messages(): data = SubmitMessage( trigger='submit-message', From 1d56cb2f9eb38d82da051f8996526c1ee51da099 Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Wed, 29 Oct 2025 20:08:58 +0000 Subject: [PATCH 44/46] Docs --- docs/api/ui.md | 13 - docs/api/ui/ag_ui.md | 5 + docs/api/ui/base.md | 3 + docs/api/ui/vercel_ai.md | 7 + docs/install.md | 56 ++- docs/ui/ag-ui.md | 4 +- docs/ui/overview.md | 35 +- docs/ui/vercel-ai.md | 83 ++++ mkdocs.yml | 6 +- pydantic_ai_slim/pydantic_ai/messages.py | 10 +- pydantic_ai_slim/pydantic_ai/ui/__init__.py | 6 - pydantic_ai_slim/pydantic_ai/ui/_adapter.py | 39 +- .../pydantic_ai/ui/_event_stream.py | 366 +++++++++--------- .../pydantic_ai/ui/ag_ui/_adapter.py | 8 +- .../pydantic_ai/ui/ag_ui/_event_stream.py | 28 +- pydantic_ai_slim/pydantic_ai/ui/ag_ui/app.py | 2 +- .../pydantic_ai/ui/vercel_ai/__init__.py | 72 ---- .../pydantic_ai/ui/vercel_ai/_adapter.py | 13 +- .../pydantic_ai/ui/vercel_ai/_event_stream.py | 30 +- .../pydantic_ai/ui/vercel_ai/_utils.py | 7 - .../{_request_types.py => request_types.py} | 52 +-- .../{_response_types.py => response_types.py} | 49 +-- tests/test_vercel_ai.py | 4 +- 23 files changed, 422 insertions(+), 476 deletions(-) delete mode 100644 docs/api/ui.md create mode 100644 docs/api/ui/ag_ui.md create mode 100644 docs/api/ui/base.md create mode 100644 docs/api/ui/vercel_ai.md rename pydantic_ai_slim/pydantic_ai/ui/vercel_ai/{_request_types.py => request_types.py} (85%) rename pydantic_ai_slim/pydantic_ai/ui/vercel_ai/{_response_types.py => response_types.py} (87%) diff --git a/docs/api/ui.md b/docs/api/ui.md deleted file mode 100644 index c3951ae769..0000000000 --- a/docs/api/ui.md +++ /dev/null @@ -1,13 +0,0 @@ -# `pydantic_ai.ui` - -::: pydantic_ai.ui - -::: pydantic_ai.ui.app - -::: pydantic_ai.ui.ag_ui - -::: pydantic_ai.ui.ag_ui.app - -::: pydantic_ai.ui.vercel_ai - -::: 
pydantic_ai.ui.vercel_ai.app diff --git a/docs/api/ui/ag_ui.md b/docs/api/ui/ag_ui.md new file mode 100644 index 0000000000..1515dbfd8b --- /dev/null +++ b/docs/api/ui/ag_ui.md @@ -0,0 +1,5 @@ +# `pydantic_ai.ui.ag_ui` + +::: pydantic_ai.ui.ag_ui + +::: pydantic_ai.ui.ag_ui.app diff --git a/docs/api/ui/base.md b/docs/api/ui/base.md new file mode 100644 index 0000000000..2217dac701 --- /dev/null +++ b/docs/api/ui/base.md @@ -0,0 +1,3 @@ +# `pydantic_ai.ui` + +::: pydantic_ai.ui diff --git a/docs/api/ui/vercel_ai.md b/docs/api/ui/vercel_ai.md new file mode 100644 index 0000000000..e2dd79bc6a --- /dev/null +++ b/docs/api/ui/vercel_ai.md @@ -0,0 +1,7 @@ +# `pydantic_ai.ui.vercel_ai` + +::: pydantic_ai.ui.vercel_ai + +::: pydantic_ai.ui.vercel_ai.request_types + +::: pydantic_ai.ui.vercel_ai.response_types diff --git a/docs/install.md b/docs/install.md index 0c042a0438..aafc21a5a5 100644 --- a/docs/install.md +++ b/docs/install.md @@ -40,37 +40,35 @@ pip/uv-add "pydantic-ai-slim[openai]" `pydantic-ai-slim` has the following optional groups: -* `logfire` — installs [`logfire`](logfire.md) [PyPI ↗](https://pypi.org/project/logfire){:target="_blank"} -* `evals` — installs [`pydantic-evals`](evals.md) [PyPI ↗](https://pypi.org/project/pydantic-evals){:target="_blank"} -* `openai` — installs `openai` [PyPI ↗](https://pypi.org/project/openai){:target="_blank"} -* `vertexai` — installs `google-auth` [PyPI ↗](https://pypi.org/project/google-auth){:target="_blank"} and `requests` [PyPI ↗](https://pypi.org/project/requests){:target="_blank"} -* `google` — installs `google-genai` [PyPI ↗](https://pypi.org/project/google-genai){:target="_blank"} -* `anthropic` — installs `anthropic` [PyPI ↗](https://pypi.org/project/anthropic){:target="_blank"} -* `groq` — installs `groq` [PyPI ↗](https://pypi.org/project/groq){:target="_blank"} -* `mistral` — installs `mistralai` [PyPI ↗](https://pypi.org/project/mistralai){:target="_blank"} -* `cohere` - installs `cohere` [PyPI ↗](https://pypi.org/project/cohere){:target="_blank"} -* `bedrock` - installs `boto3` [PyPI ↗](https://pypi.org/project/boto3){:target="_blank"} -* `huggingface` - installs `huggingface-hub[inference]` [PyPI ↗](https://pypi.org/project/huggingface-hub){:target="_blank"} -* `outlines-transformers` - installs `outlines[transformers]` [PyPI ↗](https://pypi.org/project/outlines){:target="_blank"} -* `outlines-llamacpp` - installs `outlines[llamacpp]` [PyPI ↗](https://pypi.org/project/outlines){:target="_blank"} -* `outlines-mlxlm` - installs `outlines[mlxlm]` [PyPI ↗](https://pypi.org/project/outlines){:target="_blank"} -* `outlines-sglang` - installs `outlines[sglang]` [PyPI ↗](https://pypi.org/project/outlines){:target="_blank"} -* `outlines-vllm-offline` - installs `outlines[vllm-offline]` [PyPI ↗](https://pypi.org/project/outlines){:target="_blank"} -* `duckduckgo` - installs `ddgs` [PyPI ↗](https://pypi.org/project/ddgs){:target="_blank"} -* `tavily` - installs `tavily-python` [PyPI ↗](https://pypi.org/project/tavily-python){:target="_blank"} -* `cli` - installs `rich` [PyPI ↗](https://pypi.org/project/rich){:target="_blank"}, `prompt-toolkit` [PyPI ↗](https://pypi.org/project/prompt-toolkit){:target="_blank"}, and `argcomplete` [PyPI ↗](https://pypi.org/project/argcomplete){:target="_blank"} -* `mcp` - installs `mcp` [PyPI ↗](https://pypi.org/project/mcp){:target="_blank"} -* `fastmcp` - installs `fastmcp` [PyPI ↗](https://pypi.org/project/fastmcp){:target="_blank"} -* `a2a` - installs `fasta2a` [PyPI 
↗](https://pypi.org/project/fasta2a){:target="_blank"}
-* `ui` - installs `starlette` [PyPI ↗](https://pypi.org/project/starlette){:target="_blank"}
-* `ag-ui` - installs `ag-ui-protocol` [PyPI ↗](https://pypi.org/project/ag-ui-protocol){:target="_blank"} and `starlette` [PyPI ↗](https://pypi.org/project/starlette){:target="_blank"}
-* `dbos` - installs [`dbos`](durable_execution/dbos.md) [PyPI ↗](https://pypi.org/project/dbos){:target="_blank"}
-* `prefect` - installs [`prefect`](durable_execution/prefect.md) [PyPI ↗](https://pypi.org/project/prefect){:target="_blank"}
-
-See the [models](models/overview.md) documentation for information on which optional dependencies are required for each model.
+* `logfire` — installs [Pydantic Logfire](logfire.md) dependency `logfire` [PyPI ↗](https://pypi.org/project/logfire){:target="_blank"}
+* `evals` — installs [Pydantic Evals](evals.md) dependency `pydantic-evals` [PyPI ↗](https://pypi.org/project/pydantic-evals){:target="_blank"}
+* `openai` — installs [OpenAI Model](models/openai.md) dependency `openai` [PyPI ↗](https://pypi.org/project/openai){:target="_blank"}
+* `vertexai` — installs [Google Vertex AI Model](models/google.md#vertex-ai) dependencies `google-auth` [PyPI ↗](https://pypi.org/project/google-auth){:target="_blank"} and `requests` [PyPI ↗](https://pypi.org/project/requests){:target="_blank"}
+* `google` — installs [Google Model](models/google.md) dependency `google-genai` [PyPI ↗](https://pypi.org/project/google-genai){:target="_blank"}
+* `anthropic` — installs [Anthropic Model](models/anthropic.md) dependency `anthropic` [PyPI ↗](https://pypi.org/project/anthropic){:target="_blank"}
+* `groq` — installs [Groq Model](models/groq.md) dependency `groq` [PyPI ↗](https://pypi.org/project/groq){:target="_blank"}
+* `mistral` — installs [Mistral Model](models/mistral.md) dependency `mistralai` [PyPI ↗](https://pypi.org/project/mistralai){:target="_blank"}
+* `cohere` - installs [Cohere Model](models/cohere.md) dependency `cohere` [PyPI ↗](https://pypi.org/project/cohere){:target="_blank"}
+* `bedrock` - installs [Bedrock Model](models/bedrock.md) dependency `boto3` [PyPI ↗](https://pypi.org/project/boto3){:target="_blank"}
+* `huggingface` - installs [Hugging Face Model](models/huggingface.md) dependency `huggingface-hub[inference]` [PyPI ↗](https://pypi.org/project/huggingface-hub){:target="_blank"}
+* `outlines-transformers` - installs [Outlines Model](models/outlines.md) dependency `outlines[transformers]` [PyPI ↗](https://pypi.org/project/outlines){:target="_blank"}
+* `outlines-llamacpp` - installs [Outlines Model](models/outlines.md) dependency `outlines[llamacpp]` [PyPI ↗](https://pypi.org/project/outlines){:target="_blank"}
+* `outlines-mlxlm` - installs [Outlines Model](models/outlines.md) dependency `outlines[mlxlm]` [PyPI ↗](https://pypi.org/project/outlines){:target="_blank"}
+* `outlines-sglang` - installs [Outlines Model](models/outlines.md) dependency `outlines[sglang]` [PyPI ↗](https://pypi.org/project/outlines){:target="_blank"}
+* `outlines-vllm-offline` - installs [Outlines Model](models/outlines.md) dependencies `outlines` [PyPI ↗](https://pypi.org/project/outlines){:target="_blank"} and `vllm` [PyPI ↗](https://pypi.org/project/vllm){:target="_blank"}
+* `duckduckgo` - installs [DuckDuckGo Search Tool](common-tools.md#duckduckgo-search-tool) dependency `ddgs` [PyPI ↗](https://pypi.org/project/ddgs){:target="_blank"}
+* `tavily` - installs [Tavily Search Tool](common-tools.md#tavily-search-tool) dependency `tavily-python` [PyPI
↗](https://pypi.org/project/tavily-python){:target="_blank"} +* `cli` - installs [CLI](cli.md) dependencies `rich` [PyPI ↗](https://pypi.org/project/rich){:target="_blank"}, `prompt-toolkit` [PyPI ↗](https://pypi.org/project/prompt-toolkit){:target="_blank"}, and `argcomplete` [PyPI ↗](https://pypi.org/project/argcomplete){:target="_blank"} +* `mcp` - installs [MCP](mcp/client.md) dependency `mcp` [PyPI ↗](https://pypi.org/project/mcp){:target="_blank"} +* `fastmcp` - installs [FastMCP](mcp/fastmcp-client.md) dependency `fastmcp` [PyPI ↗](https://pypi.org/project/fastmcp){:target="_blank"} +* `a2a` - installs [A2A](a2a.md) dependency `fasta2a` [PyPI ↗](https://pypi.org/project/fasta2a){:target="_blank"} +* `ui` - installs [UI Event Streams](ui/overview.md) dependency `starlette` [PyPI ↗](https://pypi.org/project/starlette){:target="_blank"} +* `ag-ui` - installs [AG-UI Event Stream Protocol](ui/ag-ui.md) dependencies `ag-ui-protocol` [PyPI ↗](https://pypi.org/project/ag-ui-protocol){:target="_blank"} and `starlette` [PyPI ↗](https://pypi.org/project/starlette){:target="_blank"} +* `dbos` - installs [DBOS Durable Execution](durable_execution/dbos.md) dependency `dbos` [PyPI ↗](https://pypi.org/project/dbos){:target="_blank"} +* `prefect` - installs [Prefect Durable Execution](durable_execution/prefect.md) dependency `prefect` [PyPI ↗](https://pypi.org/project/prefect){:target="_blank"} You can also install dependencies for multiple models and use cases, for example: ```bash -pip/uv-add "pydantic-ai-slim[openai,vertexai,logfire]" +pip/uv-add "pydantic-ai-slim[openai,google,logfire]" ``` diff --git a/docs/ui/ag-ui.md b/docs/ui/ag-ui.md index 6cac32a775..1e85b64760 100644 --- a/docs/ui/ag-ui.md +++ b/docs/ui/ag-ui.md @@ -1,6 +1,6 @@ -# Agent User Interaction (AG-UI) Protocol +# Agent-User Interaction (AG-UI) Protocol -The [Agent User Interaction (AG-UI) Protocol](https://docs.ag-ui.com/introduction) is an open standard introduced by the +The [Agent-User Interaction (AG-UI) Protocol](https://docs.ag-ui.com/introduction) is an open standard introduced by the [CopilotKit](https://webflow.copilotkit.ai/blog/introducing-ag-ui-the-protocol-where-agents-meet-users) team that standardises how frontend applications communicate with AI agents, with support for streaming, frontend tools, shared state, and custom events. diff --git a/docs/ui/overview.md b/docs/ui/overview.md index 03f1f2b52b..869089affb 100644 --- a/docs/ui/overview.md +++ b/docs/ui/overview.md @@ -1,12 +1,12 @@ # UI Event Streams -If you're building a chat app or other interactive frontend for an AI agent, your backend will need to receive agent run input (like a chat message or full message history) from the frontend, and will need to stream the [agent's events](../agents.md#streaming-all-events) (like text, thinking, and tool calls) to the frontend so that the user knows what's happening in real time. +If you're building a chat app or other interactive frontend for an AI agent, your backend will need to receive agent run input (like a chat message or complete [message history](../message-history.md)) from the frontend, and will need to stream the [agent's events](../agents.md#streaming-all-events) (like text, thinking, and tool calls) to the frontend so that the user knows what's happening in real time. 
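Before the overview changes below, it may help to see the native stream these protocols wrap. A minimal sketch, assuming an OpenAI model is configured: `Agent.run_stream_events()` yields `AgentStreamEvent`s followed by a terminating `AgentRunResultEvent` (the `NativeEvent` pair introduced earlier in this series), and the `UIAdapter` subclasses translate exactly this stream into protocol-specific events:

```python
import asyncio

from pydantic_ai import Agent
from pydantic_ai.run import AgentRunResultEvent

agent = Agent('openai:gpt-5')


async def main():
    # The raw event stream that UIAdapter subclasses translate for the frontend.
    async for event in agent.run_stream_events('Hello!'):
        if isinstance(event, AgentRunResultEvent):
            print('final result:', event.result.output)
        else:
            print('event:', event)


asyncio.run(main())
```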
-While your frontend could use Pydantic AI's [`ModelRequest`s](../message-history.md) and [`AgentStreamEvent`s][pydantic_ai.messages.AgentStreamEvent] directly, you'll typically want to use a UI event stream protocol that's natively supported by your frontend framework.
+While your frontend could use Pydantic AI's [`ModelRequest`][pydantic_ai.messages.ModelRequest] and [`AgentStreamEvent`][pydantic_ai.messages.AgentStreamEvent] directly, you'll typically want to use a UI event stream protocol that's natively supported by your frontend framework.
 
 Pydantic AI natively supports two UI event stream protocols:
 
-- [Agent User Interaction (AG-UI) Protocol](./ag-ui.md)
+- [Agent-User Interaction (AG-UI) Protocol](./ag-ui.md)
 - [Vercel AI Data Stream Protocol](./vercel-ai.md)
 
 These integrations are implemented as subclasses of the abstract [`UIAdapter`][pydantic_ai.ui.UIAdapter] class, so they also serve as a reference for integrating with other UI event stream protocols.
@@ -15,7 +15,13 @@ These integrations are implemented as subclasses of the abstract [`UIAdapter`][p
 
 The protocol-specific [`UIAdapter`][pydantic_ai.ui.UIAdapter] subclass (i.e. [`AGUIAdapter`][pydantic_ai.ui.ag_ui.AGUIAdapter] or [`VercelAIAdapter`][pydantic_ai.ui.vercel_ai.VercelAIAdapter]) is responsible for transforming agent run input received from the frontend into arguments for [`Agent.run_stream_events()`](../agents.md#running-agents), running the agent, and then transforming Pydantic AI events into protocol-specific events. The event stream transformation is handled by a protocol-specific [`UIEventStream`][pydantic_ai.ui.UIEventStream] subclass, but you typically won't use this directly.
 
-If you're using a Starlette-based web framework like FastAPI, you can use the [`UIAdapter.dispatch_request()`][pydantic_ai.ui.UIAdapter.dispatch_request] class method from an endpoint function to directly handle a request and return a streaming response of protocol-specific events. Besides the request, this method takes the agent and supports the same optional arguments as [`Agent.run_stream_events()`](../agents.md#running-agents), including `deps`.
+If you're using a Starlette-based web framework like FastAPI, you can use the [`UIAdapter.dispatch_request()`][pydantic_ai.ui.UIAdapter.dispatch_request] class method from an endpoint function to directly handle a request and return a streaming response of protocol-specific events. This is demonstrated in the next section.
+
+If you're using a web framework not based on Starlette (e.g. Django or Flask) or need fine-grained control over the input or output, you can create a `UIAdapter` instance and directly use its methods. This is demonstrated in the "Advanced Usage" section below.
+
+### Usage with Starlette/FastAPI
+
+Besides the request, [`UIAdapter.dispatch_request()`][pydantic_ai.ui.UIAdapter.dispatch_request] takes the agent, the same optional arguments as [`Agent.run_stream_events()`](../agents.md#running-agents), and an optional `on_complete` callback function that receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can optionally yield additional protocol-specific events.
 
 !!! note
     These examples use the `VercelAIAdapter`, but the same patterns apply to all `UIAdapter` subclasses.
@@ -37,7 +43,16 @@ async def chat(request: Request) -> Response:
     return await VercelAIAdapter.dispatch_request(request, agent=agent)
 ```
 
-If you're using a web framework not based on Starlette (e.g.
Django or Flask) or want fine-grained control over the input or output, you can create and use a `UIAdapter` instance directly. +### Advanced Usage + +If you're using a web framework not based on Starlette (e.g. Django or Flask) or need fine-grained control over the input or output, you can create a `UIAdapter` instance and directly use its methods, which can be chained to accomplish the same thing as the `UIAdapter.dispatch_request()` class method shown above: + +1. The [`UIAdapter.build_run_input()`][pydantic_ai.ui.UIAdapter.build_run_input] class method takes the request body as bytes and returns a protocol-specific run input object, which you can then pass to the [`UIAdapter()`][pydantic_ai.ui.UIAdapter] constructor along with the agent. + - You can also use the [`UIAdapter.from_request()`][pydantic_ai.ui.UIAdapter.from_request] class method to build an adapter directly from a Starlette/FastAPI request. +2. The [`UIAdapter.run_stream()`][pydantic_ai.ui.UIAdapter.run_stream] method runs the agent and returns a stream of protocol-specific events. It supports the same optional arguments as [`Agent.run_stream_events()`](../agents.md#running-agents) and an optional `on_complete` callback function that receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can optionally yield additional protocol-specific events. + - You can also use [`UIAdapter.run_stream_native()`][pydantic_ai.ui.UIAdapter.run_stream_native] to run the agent and return a stream of Pydantic AI events instead, which can then be transformed into protocol-specific events using [`UIAdapter.transform_stream()`][pydantic_ai.ui.UIAdapter.transform_stream]. +3. The [`UIAdapter.encode_stream()`][pydantic_ai.ui.UIAdapter.encode_stream] method encodes the stream of protocol-specific events as SSE (HTTP Server-Sent Events) strings, which you can then return as a streaming response. + - You can also use [`UIAdapter.streaming_response()`][pydantic_ai.ui.UIAdapter.streaming_response] to generate a Starlette/FastAPI streaming response directly from the protocol-specific event stream returned by `run_stream()`. !!! note This example uses FastAPI, but can be modified to work with any web framework. @@ -64,7 +79,7 @@ app = FastAPI() async def chat(request: Request) -> Response: accept = request.headers.get('accept', SSE_CONTENT_TYPE) try: - run_input = VercelAIAdapter.build_run_input(await request.body()) # (1) + run_input = VercelAIAdapter.build_run_input(await request.body()) except ValidationError as e: return Response( content=json.dumps(e.json()), @@ -73,12 +88,8 @@ async def chat(request: Request) -> Response: ) adapter = VercelAIAdapter(agent=agent, run_input=run_input, accept=accept) - event_stream = adapter.run_stream() # (2) + event_stream = adapter.run_stream() sse_event_stream = adapter.encode_stream(event_stream) - return StreamingResponse(sse_event_stream, media_type=accept) # (3) + return StreamingResponse(sse_event_stream, media_type=accept) ``` - -1. [`UIAdapter.build_run_input()`][pydantic_ai.ui.UIAdapter.build_run_input] takes the request body as bytes and returns a protocol-specific run input object. You can also use the [`UIAdapter.from_request()`][pydantic_ai.ui.UIAdapter.from_request] class method to build an adapter directly from a request. -2. [`UIAdapter.run_stream()`][pydantic_ai.ui.UIAdapter.run_stream] runs the agent and returns a stream of protocol-specific events. It supports the same optional arguments as [`Agent.run_stream_events()`](../agents.md#running-agents), including `deps`. 
You can also use [`UIAdapter.run_stream_native()`][pydantic_ai.ui.UIAdapter.run_stream_native] to run the agent and return a stream of Pydantic AI events instead, which can then be transformed into protocol-specific events using [`UIAdapter.transform_stream()`][pydantic_ai.ui.UIAdapter.transform_stream].
-3. [`UIAdapter.encode_stream()`][pydantic_ai.ui.UIAdapter.encode_stream] encodes the stream of protocol-specific events as strings according to the accept header value. You can also use [`UIAdapter.streaming_response()`][pydantic_ai.ui.UIAdapter.streaming_response] to generate a streaming response directly from the protocol-specific event stream returned by `run_stream()`.
diff --git a/docs/ui/vercel-ai.md b/docs/ui/vercel-ai.md
index e69de29bb2..3e9d4288b0 100644
--- a/docs/ui/vercel-ai.md
+++ b/docs/ui/vercel-ai.md
@@ -0,0 +1,83 @@
+# Vercel AI Data Stream Protocol
+
+Pydantic AI natively supports the [Vercel AI Data Stream Protocol](https://ai-sdk.dev/docs/ai-sdk-ui/stream-protocol#data-stream-protocol) to receive agent run input from, and stream events to, a [Vercel AI Elements](https://ai-sdk.dev/elements) frontend.
+
+## Usage
+
+The [`VercelAIAdapter`][pydantic_ai.ui.vercel_ai.VercelAIAdapter] class is responsible for transforming agent run input received from the frontend into arguments for [`Agent.run_stream_events()`](../agents.md#running-agents), running the agent, and then transforming Pydantic AI events into Vercel AI events. The event stream transformation is handled by the [`VercelAIEventStream`][pydantic_ai.ui.vercel_ai.VercelAIEventStream] class, but you typically won't use this directly.
+
+If you're using a Starlette-based web framework like FastAPI, you can use the [`VercelAIAdapter.dispatch_request()`][pydantic_ai.ui.vercel_ai.VercelAIAdapter.dispatch_request] class method from an endpoint function to directly handle a request and return a streaming response of Vercel AI events. This is demonstrated in the next section.
+
+If you're using a web framework not based on Starlette (e.g. Django or Flask) or need fine-grained control over the input or output, you can create a `VercelAIAdapter` instance and directly use its methods. This is demonstrated in the "Advanced Usage" section below.
+
+### Usage with Starlette/FastAPI
+
+Besides the request, [`VercelAIAdapter.dispatch_request()`][pydantic_ai.ui.vercel_ai.VercelAIAdapter.dispatch_request] takes the agent, the same optional arguments as [`Agent.run_stream_events()`](../agents.md#running-agents), and an optional `on_complete` callback function that receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can optionally yield additional Vercel AI events.
+
+```py {title="dispatch_request.py"}
+from fastapi import FastAPI
+from starlette.requests import Request
+from starlette.responses import Response
+
+from pydantic_ai import Agent
+from pydantic_ai.ui.vercel_ai import VercelAIAdapter
+
+agent = Agent('openai:gpt-5')
+
+app = FastAPI()
+
+@app.post('/chat')
+async def chat(request: Request) -> Response:
+    return await VercelAIAdapter.dispatch_request(request, agent=agent)
+```
+
+### Advanced Usage
+
+If you're using a web framework not based on Starlette (e.g. Django or Flask) or need fine-grained control over the input or output, you can create a `VercelAIAdapter` instance and directly use its methods, which can be chained to accomplish the same thing as the `VercelAIAdapter.dispatch_request()` class method shown above:
+
+1. 
The [`VercelAIAdapter.build_run_input()`][pydantic_ai.ui.vercel_ai.VercelAIAdapter.build_run_input] class method takes the request body as bytes and returns a Vercel AI [`RequestData`][pydantic_ai.ui.vercel_ai.request_types.RequestData] run input object, which you can then pass to the [`VercelAIAdapter()`][pydantic_ai.ui.vercel_ai.VercelAIAdapter] constructor along with the agent. + - You can also use the [`VercelAIAdapter.from_request()`][pydantic_ai.ui.vercel_ai.VercelAIAdapter.from_request] class method to build an adapter directly from a Starlette/FastAPI request. +2. The [`VercelAIAdapter.run_stream()`][pydantic_ai.ui.vercel_ai.VercelAIAdapter.run_stream] method runs the agent and returns a stream of Vercel AI events. It supports the same optional arguments as [`Agent.run_stream_events()`](../agents.md#running-agents) and an optional `on_complete` callback function that receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can optionally yield additional Vercel AI events. + - You can also use [`VercelAIAdapter.run_stream_native()`][pydantic_ai.ui.vercel_ai.VercelAIAdapter.run_stream_native] to run the agent and return a stream of Pydantic AI events instead, which can then be transformed into Vercel AI events using [`VercelAIAdapter.transform_stream()`][pydantic_ai.ui.vercel_ai.VercelAIAdapter.transform_stream]. +3. The [`VercelAIAdapter.encode_stream()`][pydantic_ai.ui.vercel_ai.VercelAIAdapter.encode_stream] method encodes the stream of Vercel AI events as SSE (HTTP Server-Sent Events) strings, which you can then return as a streaming response. + - You can also use [`VercelAIAdapter.streaming_response()`][pydantic_ai.ui.vercel_ai.VercelAIAdapter.streaming_response] to generate a Starlette/FastAPI streaming response directly from the Vercel AI event stream returned by `run_stream()`. + +!!! note + This example uses FastAPI, but can be modified to work with any web framework. 
+ +```py {title="run_stream.py"} +import json +from http import HTTPStatus + +from fastapi import FastAPI +from fastapi.requests import Request +from fastapi.responses import Response, StreamingResponse +from pydantic import ValidationError + +from pydantic_ai import Agent +from pydantic_ai.ui import SSE_CONTENT_TYPE +from pydantic_ai.ui.vercel_ai import VercelAIAdapter + +agent = Agent('openai:gpt-5') + +app = FastAPI() + + +@app.post('/chat') +async def chat(request: Request) -> Response: + accept = request.headers.get('accept', SSE_CONTENT_TYPE) + try: + run_input = VercelAIAdapter.build_run_input(await request.body()) + except ValidationError as e: + return Response( + content=json.dumps(e.json()), + media_type='application/json', + status_code=HTTPStatus.UNPROCESSABLE_ENTITY, + ) + + adapter = VercelAIAdapter(agent=agent, run_input=run_input, accept=accept) + event_stream = adapter.run_stream() + + sse_event_stream = adapter.encode_stream(event_stream) + return StreamingResponse(sse_event_stream, media_type=accept) +``` diff --git a/mkdocs.yml b/mkdocs.yml index 7439086641..1da603eda7 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -126,7 +126,6 @@ nav: - API Reference: - pydantic_ai: - - api/ag_ui.md - api/agent.md - api/tools.md - api/toolsets.md @@ -163,7 +162,10 @@ nav: - api/providers.md - api/retries.md - api/run.md - - api/ui.md + - api/ag_ui.md + - api/ui/base.md + - api/ui/ag_ui.md + - api/ui/vercel_ai.md - pydantic_evals: - api/pydantic_evals/dataset.md - api/pydantic_evals/evaluators.md diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index c2167daebc..5c38860eec 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -1615,7 +1615,10 @@ class PartStartEvent: previous_part_kind: ( Literal['text', 'thinking', 'tool-call', 'builtin-tool-call', 'builtin-tool-return', 'file'] | None ) = None - """The kind of the previous part, if known.""" + """The kind of the previous part, if any. + + This is useful for UI event streams to know whether to group parts of the same kind together when emitting events. + """ event_kind: Literal['part_start'] = 'part_start' """Event type identifier, used as a discriminator.""" @@ -1652,7 +1655,10 @@ class PartEndEvent: next_part_kind: ( Literal['text', 'thinking', 'tool-call', 'builtin-tool-call', 'builtin-tool-return', 'file'] | None ) = None - """The kind of the next part, if known.""" + """The kind of the next part, if any. + + This is useful for UI event streams to know whether to group parts of the same kind together when emitting events. + """ event_kind: Literal['part_end'] = 'part_end' """Event type identifier, used as a discriminator.""" diff --git a/pydantic_ai_slim/pydantic_ai/ui/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/__init__.py index 0ea1826eb7..3caca97083 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/ui/__init__.py @@ -1,9 +1,3 @@ -"""Base classes for UI event stream protocols. - -This module provides abstract base classes for implementing UI event stream adapters -that transform Pydantic AI agent events into protocol-specific events (e.g., AG-UI, Vercel AI). 
-""" - from __future__ import annotations from ._adapter import StateDeps, StateHandler, UIAdapter diff --git a/pydantic_ai_slim/pydantic_ai/ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/_adapter.py index 877cac5606..b274cfd78e 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/_adapter.py @@ -1,9 +1,3 @@ -"""Base classes for UI event stream protocols. - -This module provides abstract base classes for implementing UI event stream adapters -that transform Pydantic AI agent events into protocol-specific events (e.g., AG-UI, Vercel AI). -""" - from __future__ import annotations from abc import ABC, abstractmethod @@ -107,7 +101,12 @@ class StateDeps(Generic[StateT]): @dataclass class UIAdapter(ABC, Generic[RunInputT, MessageT, EventT, AgentDepsT, OutputDataT]): - """TODO (DouweM): Docstring.""" + """Base class for UI adapters. + + This class is responsible for transforming agent run input received from the frontend into arguments for [`Agent.run_stream_events()`][pydantic_ai.Agent.run_stream_events], running the agent, and then transforming Pydantic AI events into protocol-specific events. + + The event stream transformation is handled by a protocol-specific [`UIEventStream`][pydantic_ai.ui.UIEventStream] subclass. + """ agent: AbstractAgent[AgentDepsT, OutputDataT] """The Pydantic AI agent to run.""" @@ -118,13 +117,13 @@ class UIAdapter(ABC, Generic[RunInputT, MessageT, EventT, AgentDepsT, OutputData _: KW_ONLY accept: str | None = None - """The accept header value.""" + """The `Accept` header value of the request, used to determine how to encode the protocol-specific events for the streaming response.""" @classmethod async def from_request( cls, request: Request, *, agent: AbstractAgent[AgentDepsT, OutputDataT] ) -> UIAdapter[RunInputT, MessageT, EventT, AgentDepsT, OutputDataT]: - """Create an adapter from a protocol-specific run input.""" + """Create an adapter from a request.""" return cls( agent=agent, run_input=cls.build_run_input(await request.body()), @@ -134,18 +133,18 @@ async def from_request( @classmethod @abstractmethod def build_run_input(cls, body: bytes) -> RunInputT: - """Validate the request body and return the protocol-specific run input object.""" + """Build a protocol-specific run input object from the request body.""" raise NotImplementedError @classmethod @abstractmethod def load_messages(cls, messages: Sequence[MessageT]) -> list[ModelMessage]: - """Convert protocol-specific messages into Pydantic AI messages.""" + """Transform protocol-specific messages into Pydantic AI messages.""" raise NotImplementedError @abstractmethod def build_event_stream(self) -> UIEventStream[RunInputT, EventT, AgentDepsT, OutputDataT]: - """Create a protocol-specific event stream.""" + """Build a protocol-specific event stream.""" raise NotImplementedError @cached_property @@ -161,7 +160,7 @@ def toolset(self) -> AbstractToolset[AgentDepsT] | None: @cached_property def state(self) -> dict[str, Any] | None: - """Run state from the protocol-specific run input.""" + """Frontend state from the protocol-specific run input.""" return None def transform_stream( @@ -174,11 +173,12 @@ def transform_stream( Args: stream: The stream of Pydantic AI events to transform. on_complete: Optional callback function called when the agent run completes successfully. + The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can optionally yield additional protocol-specific events. 
""" return self.build_event_stream().transform_stream(stream, on_complete=on_complete) def encode_stream(self, stream: AsyncIterator[EventT]) -> AsyncIterator[str]: - """Encode a stream of protocol-specific events as strings according to the accept header value. + """Encode a stream of protocol-specific events as strings according to the `Accept` header value. Args: stream: The stream of protocol-specific events to encode. @@ -190,9 +190,6 @@ def streaming_response(self, stream: AsyncIterator[EventT]) -> StreamingResponse Args: stream: The stream of protocol-specific events to encode. - - Returns: - A streaming Starlette response with encoded protocol-specific events. """ return self.build_event_stream().streaming_response(stream) @@ -293,7 +290,7 @@ def run_stream( toolsets: Optional additional toolsets for this run. builtin_tools: Optional additional builtin tools to use for this run. on_complete: Optional callback function called when the agent run completes successfully. - The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can access `all_messages()` and other result data. + The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can optionally yield additional protocol-specific events. """ return self.transform_stream( self.run_stream_native( @@ -331,7 +328,7 @@ async def dispatch_request( builtin_tools: Sequence[AbstractBuiltinTool] | None = None, on_complete: OnCompleteFunc[EventT] | None = None, ) -> Response: - """Handle an protocol-specific HTTP request by running the agent and return a streaming response of protocol-specific events. + """Handle a protocol-specific HTTP request by running the agent and returning a streaming response of protocol-specific events. Args: request: The incoming Starlette/FastAPI request. @@ -349,10 +346,10 @@ async def dispatch_request( toolsets: Optional additional toolsets for this run. builtin_tools: Optional additional builtin tools to use for this run. on_complete: Optional callback function called when the agent run completes successfully. - The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can access `all_messages()` and other result data. + The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can optionally yield additional protocol-specific events. Returns: - A streaming Starlette response with protocol-specific events encoded per the request's accept header value. + A streaming Starlette response with protocol-specific events encoded per the request's `Accept` header value. """ try: from starlette.responses import Response diff --git a/pydantic_ai_slim/pydantic_ai/ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/_event_stream.py index 5ea65745fc..aa5d73c6ff 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/_event_stream.py @@ -1,9 +1,3 @@ -"""Base classes for UI event stream protocols. - -This module provides abstract base classes for implementing UI event stream adapters -that transform Pydantic AI agent events into protocol-specific events (e.g., AG-UI, Vercel AI). 
-""" - from __future__ import annotations import inspect @@ -44,10 +38,6 @@ from starlette.responses import StreamingResponse -__all__ = [ - 'UIEventStream', -] - SSE_CONTENT_TYPE = 'text/event-stream' """Content type header value for Server-Sent Events (SSE).""" @@ -65,19 +55,23 @@ | Callable[[AgentRunResult[Any]], Awaitable[None]] | Callable[[AgentRunResult[Any]], AsyncIterator[EventT]] ) -"""Callback function type that receives the `AgentRunResult` of the completed run. Can be sync, async, or an async generator of protocol-specificevents.""" +"""Callback function type that receives the `AgentRunResult` of the completed run. Can be sync, async, or an async generator of protocol-specific events.""" @dataclass class UIEventStream(ABC, Generic[RunInputT, EventT, AgentDepsT, OutputDataT]): - """TODO (DouweM): Docstring.""" + """Base class for UI event stream transformers. + + This class is responsible for transforming Pydantic AI events into protocol-specific events. + """ run_input: RunInputT accept: str | None = None - """TODO (DouweM): Docstring""" + """The `Accept` header value of the request, used to determine how to encode the protocol-specific events for the streaming response.""" message_id: str = field(default_factory=lambda: str(uuid4())) + """The message ID to use for the next event.""" _turn: Literal['request', 'response'] | None = None @@ -85,11 +79,7 @@ class UIEventStream(ABC, Generic[RunInputT, EventT, AgentDepsT, OutputDataT]): _final_result_event: FinalResultEvent | None = None def new_message_id(self) -> str: - """Generate and store a new message ID. - - Returns: - A new UUID-based message ID. - """ + """Generate and store a new message ID.""" self.message_id = str(uuid4()) return self.message_id @@ -100,40 +90,25 @@ def response_headers(self) -> Mapping[str, str] | None: @property def content_type(self) -> str: - """Get the content type for the event stream, compatible with the accept header value. + """Get the content type for the event stream, compatible with the `Accept` header value. - By default, this returns the SSE content type (`text/event-stream`). - If a subclass supports other types as well, it should consider `self.accept` in `encode_event` and return the resulting content type here. + By default, this returns the Server-Sent Events content type (`text/event-stream`). + If a subclass supports other types as well, it should consider `self.accept` in [`encode_event()`][pydantic_ai.ui.UIEventStream.encode_event] and return the resulting content type. """ return SSE_CONTENT_TYPE @abstractmethod def encode_event(self, event: EventT) -> str: - """Encode an event as a string. - - Args: - event: The event to encode. - """ + """Encode a protocol-specific event as a string.""" raise NotImplementedError async def encode_stream(self, stream: AsyncIterator[EventT]) -> AsyncIterator[str]: - """Encode a stream of events as SSE strings. - - Args: - stream: The stream of events to encode. - """ + """Encode a stream of protocol-specific events as strings according to the `Accept` header value.""" async for event in stream: yield self.encode_event(event) def streaming_response(self, stream: AsyncIterator[EventT]) -> StreamingResponse: - """Generate a streaming response from a stream of encoded protocol-specific events. - - Args: - stream: The stream of protocol-specific events to encode. - - Returns: - A streaming Starlette response with encoded protocol-specific events. 
- """ + """Generate a streaming response from a stream of protocol-specific events.""" try: from starlette.responses import StreamingResponse except ImportError as e: # pragma: no cover @@ -151,14 +126,22 @@ def streaming_response(self, stream: AsyncIterator[EventT]) -> StreamingResponse async def transform_stream( # noqa: C901 self, stream: AsyncIterator[NativeEvent], on_complete: OnCompleteFunc[EventT] | None = None ) -> AsyncIterator[EventT]: - """Handle a stream of agent events. + """Transform a stream of Pydantic AI events into protocol-specific events. + + This method dispatches to specific hooks and `handle_*` methods that subclasses can override: + - [`before_stream()`][pydantic_ai.ui.UIEventStream.before_stream] + - [`after_stream()`][pydantic_ai.ui.UIEventStream.after_stream] + - [`on_error()`][pydantic_ai.ui.UIEventStream.on_error] + - [`before_request()`][pydantic_ai.ui.UIEventStream.before_request] + - [`after_request()`][pydantic_ai.ui.UIEventStream.after_request] + - [`before_response()`][pydantic_ai.ui.UIEventStream.before_response] + - [`after_response()`][pydantic_ai.ui.UIEventStream.after_response] + - [`handle_event()`][pydantic_ai.ui.UIEventStream.handle_event] Args: - stream: The stream of agent events to handle. + stream: The stream of Pydantic AI events to transform. on_complete: Optional callback function called when the agent run completes successfully. - - Yields: - Protocol-specific events. + The callback receives the completed [`AgentRunResult`][pydantic_ai.agent.AgentRunResult] and can optionally yield additional protocol-specific events. """ async for e in self.before_stream(): yield e @@ -181,6 +164,7 @@ async def transform_stream( # noqa: C901 yield e self._final_result_event = None + # Ensure the stream does not end on a dangling tool call without a result. output_tool_result_event = FunctionToolResultEvent( result=ToolReturnPart( tool_call_id=tool_call_id, @@ -209,7 +193,7 @@ async def transform_stream( # noqa: C901 self._final_result_event = event if isinstance(event, BuiltinToolCallEvent | BuiltinToolResultEvent): # pyright: ignore[reportDeprecated] - # The events were deprecated before this feature was introduced + # These events were deprecated before this feature was introduced continue async for e in self.handle_event(event): @@ -225,12 +209,7 @@ async def transform_stream( # noqa: C901 yield e async def _turn_to(self, to_turn: Literal['request', 'response'] | None) -> AsyncIterator[EventT]: - """Handle a turn. - - Args: - from_turn: The turn to start from. - to_turn: The turn to end at. - """ + """Fire hooks when turning from request to response or vice versa.""" if to_turn == self._turn: return @@ -251,16 +230,20 @@ async def _turn_to(self, to_turn: Literal['request', 'response'] | None) -> Asyn yield e async def handle_event(self, event: NativeEvent) -> AsyncIterator[EventT]: - """Transform a Pydantic AI agent event into protocol-specific events. + """Transform a Pydantic AI event into one or more protocol-specific events. - This method dispatches to specific `handle_*` methods based on event and part type. - Subclasses should implement the individual handler methods rather than overriding this. + This method dispatches to specific `handle_*` methods based on event type: - Args: - event: The Pydantic AI agent event to transform. 
+ - [`PartStartEvent`][pydantic_ai.messages.PartStartEvent] -> [`handle_part_start()`][pydantic_ai.ui.UIEventStream.handle_part_start] + - [`PartDeltaEvent`][pydantic_ai.messages.PartDeltaEvent] -> `handle_part_delta` + - [`PartEndEvent`][pydantic_ai.messages.PartEndEvent] -> `handle_part_end` + - [`FinalResultEvent`][pydantic_ai.messages.FinalResultEvent] -> `handle_final_result` + - [`FunctionToolCallEvent`][pydantic_ai.messages.FunctionToolCallEvent] -> `handle_function_tool_call` + - [`FunctionToolResultEvent`][pydantic_ai.messages.FunctionToolResultEvent] -> `handle_function_tool_result` + - [`AgentRunResultEvent`][pydantic_ai.messages.AgentRunResultEvent] -> `handle_run_result` - Yields: - Protocol-specific events. + Subclasses are encouraged to override the individual `handle_*` methods rather than this one. + If you need specific behavior for all events, make sure you call the super method. """ match event: case PartStartEvent(): @@ -288,10 +271,22 @@ async def handle_event(self, event: NativeEvent) -> AsyncIterator[EventT]: pass async def handle_part_start(self, event: PartStartEvent) -> AsyncIterator[EventT]: - """Handle a PartStartEvent. + """Handle a `PartStartEvent`. + + This method dispatches to specific `handle_*` methods based on part type: + + - [`TextPart`][pydantic_ai.messages.TextPart] -> [`handle_text_start()`][pydantic_ai.ui.UIEventStream.handle_text_start] + - [`ThinkingPart`][pydantic_ai.messages.ThinkingPart] -> [`handle_thinking_start()`][pydantic_ai.ui.UIEventStream.handle_thinking_start] + - [`ToolCallPart`][pydantic_ai.messages.ToolCallPart] -> [`handle_tool_call_start()`][pydantic_ai.ui.UIEventStream.handle_tool_call_start] + - [`BuiltinToolCallPart`][pydantic_ai.messages.BuiltinToolCallPart] -> [`handle_builtin_tool_call_start()`][pydantic_ai.ui.UIEventStream.handle_builtin_tool_call_start] + - [`BuiltinToolReturnPart`][pydantic_ai.messages.BuiltinToolReturnPart] -> [`handle_builtin_tool_return()`][pydantic_ai.ui.UIEventStream.handle_builtin_tool_return] + - [`FilePart`][pydantic_ai.messages.FilePart] -> [`handle_file()`][pydantic_ai.ui.UIEventStream.handle_file] + + Subclasses are encouraged to override the individual `handle_*` methods rather than this one. + If you need specific behavior for all part start events, make sure you call the super method. Args: - event: The PartStartEvent. + event: The part start event. """ part = event.part previous_part_kind = event.previous_part_kind @@ -318,6 +313,15 @@ async def handle_part_start(self, event: PartStartEvent) -> AsyncIterator[EventT async def handle_part_delta(self, event: PartDeltaEvent) -> AsyncIterator[EventT]: """Handle a PartDeltaEvent. + This method dispatches to specific `handle_*_delta` methods based on part delta type: + + - [`TextPartDelta`][pydantic_ai.messages.TextPartDelta] -> [`handle_text_delta()`][pydantic_ai.ui.UIEventStream.handle_text_delta] + - [`ThinkingPartDelta`][pydantic_ai.messages.ThinkingPartDelta] -> [`handle_thinking_delta()`][pydantic_ai.ui.UIEventStream.handle_thinking_delta] + - [`ToolCallPartDelta`][pydantic_ai.messages.ToolCallPartDelta] -> [`handle_tool_call_delta()`][pydantic_ai.ui.UIEventStream.handle_tool_call_delta] + + Subclasses are encouraged to override the individual `handle_*_delta` methods rather than this one. + If you need specific behavior for all part delta events, make sure you call the super method. + Args: event: The PartDeltaEvent. 
""" @@ -334,10 +338,20 @@ async def handle_part_delta(self, event: PartDeltaEvent) -> AsyncIterator[EventT yield e async def handle_part_end(self, event: PartEndEvent) -> AsyncIterator[EventT]: - """Handle a PartEndEvent. + """Handle a `PartEndEvent`. + + This method dispatches to specific `handle_*_end` methods based on part type: + + - [`TextPart`][pydantic_ai.messages.TextPart] -> [`handle_text_end()`][pydantic_ai.ui.UIEventStream.handle_text_end] + - [`ThinkingPart`][pydantic_ai.messages.ThinkingPart] -> [`handle_thinking_end()`][pydantic_ai.ui.UIEventStream.handle_thinking_end] + - [`ToolCallPart`][pydantic_ai.messages.ToolCallPart] -> [`handle_tool_call_end()`][pydantic_ai.ui.UIEventStream.handle_tool_call_end] + - [`BuiltinToolCallPart`][pydantic_ai.messages.BuiltinToolCallPart] -> [`handle_builtin_tool_call_end()`][pydantic_ai.ui.UIEventStream.handle_builtin_tool_call_end] + + Subclasses are encouraged to override the individual `handle_*_end` methods rather than this one. + If you need specific behavior for all part end events, make sure you call the super method. Args: - event: The PartEndEvent. + event: The part end event. """ part = event.part next_part_kind = event.next_part_kind @@ -358,57 +372,109 @@ async def handle_part_end(self, event: PartEndEvent) -> AsyncIterator[EventT]: # These don't have deltas, so they don't need to be ended. pass - async def handle_text_start(self, part: TextPart, follows_text: bool = False) -> AsyncIterator[EventT]: - """Handle a TextPart at start. + async def before_stream(self) -> AsyncIterator[EventT]: + """Yield events before agent streaming starts. + + This hook is called before any agent events are processed. + Override this to inject custom events at the start of the stream. + """ + return # pragma: no cover + yield # Make this an async generator + + async def after_stream(self) -> AsyncIterator[EventT]: + """Yield events after agent streaming completes. + + This hook is called after all agent events have been processed. + Override this to inject custom events at the end of the stream. + """ + return # pragma: no cover + yield # Make this an async generator + + async def on_error(self, error: Exception) -> AsyncIterator[EventT]: + """Handle errors that occur during streaming. Args: - part: The TextPart. - follows_text: Whether the part follows a text part. + error: The error that occurred during streaming. + """ + return # pragma: no cover + yield # Make this an async generator + + async def before_request(self) -> AsyncIterator[EventT]: + """Yield events before a model request is processed. + + Override this to inject custom events at the start of the request. + """ + return + yield # Make this an async generator + + async def after_request(self) -> AsyncIterator[EventT]: + """Yield events after a model request is processed. + + Override this to inject custom events at the end of the request. + """ + return + yield # Make this an async generator + + async def before_response(self) -> AsyncIterator[EventT]: + """Yield events before a model response is processed. - Yields: - Protocol-specific events. + Override this to inject custom events at the start of the response. + """ + return + yield # Make this an async generator + + async def after_response(self) -> AsyncIterator[EventT]: + """Yield events after a model response is processed. + + Override this to inject custom events at the end of the response. 
+ """ + return + yield # Make this an async generator + + async def handle_text_start(self, part: TextPart, follows_text: bool = False) -> AsyncIterator[EventT]: + """Handle the start of a `TextPart`. + + Args: + part: The text part. + follows_text: Whether the part is directly preceded by another text part. In this case, you may want to yield a "text-delta" event instead of a "text-start" event. """ return # pragma: no cover yield # Make this an async generator async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[EventT]: - """Handle a TextPartDelta. + """Handle a `TextPartDelta`. Args: - delta: The TextPartDelta. - - Yields: - Protocol-specific events. + delta: The text part delta. """ return # pragma: no cover yield # Make this an async generator async def handle_text_end(self, part: TextPart, followed_by_text: bool = False) -> AsyncIterator[EventT]: - """Handle the end of a TextPart.""" + """Handle the end of a `TextPart`. + + Args: + part: The text part. + followed_by_text: Whether the part is directly followed by another text part. In this case, you may not want to yield a "text-end" event yet. + """ return # pragma: no cover yield # Make this an async generator async def handle_thinking_start(self, part: ThinkingPart, follows_thinking: bool = False) -> AsyncIterator[EventT]: - """Handle a ThinkingPart at start. + """Handle the start of a `ThinkingPart`. Args: - part: The ThinkingPart. - follows_thinking: Whether the part follows a thinking part. - - Yields: - Protocol-specific events. + part: The thinking part. + follows_thinking: Whether the part is directly preceded by another thinking part. In this case, you may want to yield a "thinking-delta" event instead of a "thinking-start" event. """ return # pragma: no cover yield # Make this an async generator async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[EventT]: - """Handle a ThinkingPartDelta. + """Handle a `ThinkingPartDelta`. Args: - delta: The ThinkingPartDelta. - - Yields: - Protocol-specific events. + delta: The thinking part delta. """ return # pragma: no cover yield # Make this an async generator @@ -416,182 +482,110 @@ async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator async def handle_thinking_end( self, part: ThinkingPart, followed_by_thinking: bool = False ) -> AsyncIterator[EventT]: - """Handle the end of a ThinkingPart.""" + """Handle the end of a `ThinkingPart`. + + Args: + part: The thinking part. + followed_by_thinking: Whether the part is directly followed by another thinking part. In this case, you may not want to yield a "thinking-end" event yet. + """ return # pragma: no cover yield # Make this an async generator async def handle_tool_call_start(self, part: ToolCallPart) -> AsyncIterator[EventT]: - """Handle a ToolCallPart at start. + """Handle the start of a `ToolCallPart`. Args: part: The tool call part. - - Yields: - Protocol-specific events. """ return # pragma: no cover yield # Make this an async generator async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[EventT]: - """Handle a ToolCallPartDelta. + """Handle a `ToolCallPartDelta`. Args: - delta: The ToolCallPartDelta. - - Yields: - Protocol-specific events. + delta: The tool call part delta. """ return # pragma: no cover yield # Make this an async generator async def handle_tool_call_end(self, part: ToolCallPart) -> AsyncIterator[EventT]: - """Handle the end of a ToolCallPart.""" + """Handle the end of a `ToolCallPart`. 
+ + Args: + part: The tool call part. + """ return # pragma: no cover yield # Make this an async generator async def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIterator[EventT]: - """Handle a BuiltinToolCallPart at start. + """Handle a `BuiltinToolCallPart` at start. Args: - part: The tool call part. - - Yields: - Protocol-specific events. + part: The builtin tool call part. """ return # pragma: no cover yield # Make this an async generator async def handle_builtin_tool_call_end(self, part: BuiltinToolCallPart) -> AsyncIterator[EventT]: - """Handle the end of a BuiltinToolCallPart.""" + """Handle the end of a `BuiltinToolCallPart`. + + Args: + part: The builtin tool call part. + """ return # pragma: no cover yield # Make this an async generator async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[EventT]: - """Handle a BuiltinToolReturnPart. + """Handle a `BuiltinToolReturnPart`. Args: - part: The BuiltinToolReturnPart. - - Yields: - Protocol-specific events. + part: The builtin tool return part. """ return # pragma: no cover yield # Make this an async generator async def handle_file(self, part: FilePart) -> AsyncIterator[EventT]: - """Handle a FilePart. + """Handle a `FilePart`. Args: - part: The FilePart. - - Yields: - Protocol-specific events. + part: The file part. """ return # pragma: no cover yield # Make this an async generator async def handle_final_result(self, event: FinalResultEvent) -> AsyncIterator[EventT]: - """Handle a FinalResultEvent. + """Handle a `FinalResultEvent`. Args: event: The final result event. - - Yields: - Protocol-specific events. """ return yield # Make this an async generator async def handle_function_tool_call(self, event: FunctionToolCallEvent) -> AsyncIterator[EventT]: - """Handle a FunctionToolCallEvent. + """Handle a `FunctionToolCallEvent`. Args: event: The function tool call event. - - Yields: - Protocol-specific events. """ return yield # Make this an async generator async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[EventT]: - """Handle a FunctionToolResultEvent. + """Handle a `FunctionToolResultEvent`. Args: event: The function tool result event. - - Yields: - Protocol-specific events. """ return # pragma: no cover yield # Make this an async generator async def handle_run_result(self, event: AgentRunResultEvent) -> AsyncIterator[EventT]: - """Handle an AgentRunResultEvent (final event with result/usage). + """Handle an `AgentRunResultEvent`. Args: event: The agent run result event. - - Yields: - Protocol-specific events. """ return yield # Make this an async generator - - # Lifecycle hooks (optional overrides) - - async def before_request(self) -> AsyncIterator[EventT]: - """Handle a request before it is processed.""" - return - yield # Make this an async generator - - async def after_request(self) -> AsyncIterator[EventT]: - """Handle a request after it is processed.""" - return - yield # Make this an async generator - - async def before_response(self) -> AsyncIterator[EventT]: - """Handle a response before it is processed.""" - return - yield # Make this an async generator - - async def after_response(self) -> AsyncIterator[EventT]: - """Handle a response after it is processed.""" - return - yield # Make this an async generator - - async def before_stream(self) -> AsyncIterator[EventT]: - """Yield events before agent streaming starts. - - This hook is called before any agent events are processed. 
- Override this to inject custom events at the start of the stream. - - Yields: - Protocol-specific events to emit before streaming. - """ - return # pragma: no cover - yield # Make this an async generator - - async def after_stream(self) -> AsyncIterator[EventT]: - """Yield events after agent streaming completes. - - This hook is called after all agent events have been processed. - Override this to inject custom events at the end of the stream. - - Yields: - Protocol-specific events to emit after streaming. - """ - return # pragma: no cover - yield # Make this an async generator - - async def on_error(self, error: Exception) -> AsyncIterator[EventT]: - """Handle errors that occur during streaming (after stream has started). - - Args: - error: The error that occurred during streaming. - - Yields: - Protocol-specific error events. - """ - return # pragma: no cover - yield # Make this an async generator diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index 546074175b..6d5a9e5560 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -81,37 +81,31 @@ def label(self) -> str: class AGUIAdapter(UIAdapter[RunAgentInput, Message, BaseEvent, AgentDepsT, OutputDataT]): - """TODO (DouweM): Docstring.""" + """UI adapter for the Agent-User Interaction (AG-UI) protocol.""" @classmethod def build_run_input(cls, body: bytes) -> RunAgentInput: - """Validate the request body and return the validated run input.""" return RunAgentInput.model_validate_json(body) def build_event_stream(self) -> UIEventStream[RunAgentInput, BaseEvent, AgentDepsT, OutputDataT]: - """Create an event stream for the adapter.""" return AGUIEventStream(self.run_input, accept=self.accept) @cached_property def toolset(self) -> AbstractToolset[AgentDepsT] | None: - """Get the toolset for the adapter.""" if self.run_input.tools: return _AGUIFrontendToolset[AgentDepsT](self.run_input.tools) return None @cached_property def state(self) -> dict[str, Any] | None: - """Get the state of the agent run.""" return self.run_input.state @cached_property def messages(self) -> list[ModelMessage]: - """Convert AG-UI messages to Pydantic AI messages.""" return self.load_messages(self.run_input.messages) @classmethod def load_messages(cls, messages: Sequence[Message]) -> list[ModelMessage]: - """Load messages from the request and return the loaded messages.""" builder = MessagesBuilder() tool_calls: dict[str, str] = {} # Tool call ID to tool name mapping. diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py index 55d683dfe5..0a6f354abf 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_event_stream.py @@ -69,7 +69,7 @@ @dataclass class AGUIEventStream(UIEventStream[RunAgentInput, BaseEvent, AgentDepsT, OutputDataT]): - """TODO (DouweM): Docstring.""" + """UI event stream transformer for the Agent-User Interaction (AG-UI) protocol.""" _thinking_text: bool = False _builtin_tool_call_ids: dict[str, str] = field(default_factory=dict) @@ -81,29 +81,18 @@ def _event_encoder(self) -> EventEncoder: @property def content_type(self) -> str: - """Get the content type for the event stream, compatible with the accept header value.""" return self._event_encoder.get_content_type() def encode_event(self, event: BaseEvent) -> str: - """Encode an AG-UI event as SSE. 
- - Args: - event: The AG-UI event to encode. - - Returns: - The SSE-formatted string. - """ return self._event_encoder.encode(event) async def before_stream(self) -> AsyncIterator[BaseEvent]: - """Yield events before agent streaming starts.""" yield RunStartedEvent( thread_id=self.run_input.thread_id, run_id=self.run_input.run_id, ) async def after_stream(self) -> AsyncIterator[BaseEvent]: - """Handle an AgentRunResultEvent, cleaning up any pending state.""" if not self._error: yield RunFinishedEvent( thread_id=self.run_input.thread_id, @@ -111,12 +100,10 @@ async def after_stream(self) -> AsyncIterator[BaseEvent]: ) async def on_error(self, error: Exception) -> AsyncIterator[BaseEvent]: - """Handle errors during streaming.""" self._error = True yield RunErrorEvent(message=str(error)) async def handle_text_start(self, part: TextPart, follows_text: bool = False) -> AsyncIterator[BaseEvent]: - """Handle a TextPart at start.""" if follows_text: message_id = self.message_id else: @@ -127,19 +114,16 @@ async def handle_text_start(self, part: TextPart, follows_text: bool = False) -> yield TextMessageContentEvent(message_id=message_id, delta=part.content) async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[BaseEvent]: - """Handle a TextPartDelta.""" if delta.content_delta: # pragma: no branch yield TextMessageContentEvent(message_id=self.message_id, delta=delta.content_delta) async def handle_text_end(self, part: TextPart, followed_by_text: bool = False) -> AsyncIterator[BaseEvent]: - """Handle a TextPart at end.""" if not followed_by_text: yield TextMessageEndEvent(message_id=self.message_id) async def handle_thinking_start( self, part: ThinkingPart, follows_thinking: bool = False ) -> AsyncIterator[BaseEvent]: - """Handle a ThinkingPart at start.""" if not follows_thinking: yield ThinkingStartEvent(type=EventType.THINKING_START) @@ -149,7 +133,6 @@ async def handle_thinking_start( self._thinking_text = True async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[BaseEvent]: - """Handle a ThinkingPartDelta.""" if not delta.content_delta: return # pragma: no cover @@ -162,7 +145,6 @@ async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator async def handle_thinking_end( self, part: ThinkingPart, followed_by_thinking: bool = False ) -> AsyncIterator[BaseEvent]: - """Handle a ThinkingPart at end.""" if self._thinking_text: yield ThinkingTextMessageEndEvent(type=EventType.THINKING_TEXT_MESSAGE_END) self._thinking_text = False @@ -171,11 +153,9 @@ async def handle_thinking_end( yield ThinkingEndEvent(type=EventType.THINKING_END) def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: - """Handle a ToolCallPart or BuiltinToolCallPart at start.""" return self._handle_tool_call_start(part) def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: - """Handle a BuiltinToolCallPart at start.""" tool_call_id = part.tool_call_id builtin_tool_call_id = '|'.join([BUILTIN_TOOL_CALL_ID_PREFIX, part.provider_name or '', tool_call_id]) self._builtin_tool_call_ids[tool_call_id] = builtin_tool_call_id @@ -186,7 +166,6 @@ def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIter async def _handle_tool_call_start( self, part: ToolCallPart | BuiltinToolCallPart, tool_call_id: str | None = None ) -> AsyncIterator[BaseEvent]: - """Handle a ToolCallPart or BuiltinToolCallPart at start.""" tool_call_id = tool_call_id or part.tool_call_id 
message_id = self.message_id or self.new_message_id() @@ -195,7 +174,6 @@ async def _handle_tool_call_start( yield ToolCallArgsEvent(tool_call_id=tool_call_id, delta=part.args_as_json_str()) async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[BaseEvent]: - """Handle a ToolCallPartDelta.""" tool_call_id = delta.tool_call_id assert tool_call_id, '`ToolCallPartDelta.tool_call_id` must be set' if tool_call_id in self._builtin_tool_call_ids: @@ -206,15 +184,12 @@ async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterato ) async def handle_tool_call_end(self, part: ToolCallPart) -> AsyncIterator[BaseEvent]: - """Handle a ToolCallPart at end.""" yield ToolCallEndEvent(tool_call_id=part.tool_call_id) async def handle_builtin_tool_call_end(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseEvent]: - """Handle a BuiltinToolCallPart at end.""" yield ToolCallEndEvent(tool_call_id=self._builtin_tool_call_ids[part.tool_call_id]) async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[BaseEvent]: - """Handle a BuiltinToolReturnPart.""" tool_call_id = self._builtin_tool_call_ids[part.tool_call_id] yield ToolCallResultEvent( message_id=self.new_message_id(), @@ -225,7 +200,6 @@ async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> Async ) async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[BaseEvent]: - """Handle a FunctionToolResultEvent, emitting tool result events.""" result = event.result output = result.model_response() if isinstance(result, RetryPromptPart) else result.model_response_str() diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/app.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/app.py index fffb1bc843..2d9cb434f0 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/app.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/app.py @@ -121,7 +121,7 @@ def __init__( async def run_agent(request: Request) -> Response: """Endpoint to run the agent with the provided input data.""" - return await AGUIAdapter.dispatch_request( + return await AGUIAdapter[AgentDepsT, OutputDataT].dispatch_request( request, agent=agent, output_type=output_type, diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py index efb5a4b7d6..40c9210de2 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/__init__.py @@ -9,80 +9,8 @@ from ._adapter import VercelAIAdapter from ._event_stream import VercelAIEventStream -from ._request_types import ( - RequestData, - SubmitMessage, - TextUIPart, - ToolOutputAvailablePart, - UIMessage, - UIMessagePart, - request_data_ta, -) -from ._response_types import ( - AbortChunk, - BaseChunk, - DataChunk, - ErrorChunk, - FileChunk, - FinishChunk, - FinishStepChunk, - MessageMetadataChunk, - ReasoningDeltaChunk, - ReasoningEndChunk, - ReasoningStartChunk, - SourceDocumentChunk, - SourceUrlChunk, - StartChunk, - StartStepChunk, - TextDeltaChunk, - TextEndChunk, - TextStartChunk, - ToolInputAvailableChunk, - ToolInputDeltaChunk, - ToolInputErrorChunk, - ToolInputStartChunk, - ToolOutputAvailableChunk, - ToolOutputErrorChunk, -) -from ._utils import ProviderMetadata __all__ = [ - # Utilities - 'ProviderMetadata', - # Request types - 'RequestData', - 'TextUIPart', - 'UIMessage', - 'ToolOutputAvailablePart', - 'UIMessagePart', - 'SubmitMessage', - 'request_data_ta', - # Response types - 'BaseChunk', - 'TextStartChunk', - 
'TextDeltaChunk', - 'TextEndChunk', - 'ReasoningStartChunk', - 'ReasoningDeltaChunk', - 'ReasoningEndChunk', - 'ErrorChunk', - 'ToolInputStartChunk', - 'ToolInputDeltaChunk', - 'ToolInputAvailableChunk', - 'ToolInputErrorChunk', - 'ToolOutputAvailableChunk', - 'ToolOutputErrorChunk', - 'SourceUrlChunk', - 'SourceDocumentChunk', - 'FileChunk', - 'DataChunk', - 'StartStepChunk', - 'FinishStepChunk', - 'StartChunk', - 'FinishChunk', - 'AbortChunk', - 'MessageMetadataChunk', - # Event stream and adapter 'VercelAIEventStream', 'VercelAIAdapter', ] diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py index c226e7434e..73e2f56fe7 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py @@ -7,6 +7,7 @@ from functools import cached_property from typing import TYPE_CHECKING +from pydantic import TypeAdapter from typing_extensions import assert_never from ...messages import ( @@ -32,7 +33,7 @@ from ...tools import AgentDepsT from .. import MessagesBuilder, UIAdapter, UIEventStream from ._event_stream import VercelAIEventStream -from ._request_types import ( +from .request_types import ( DataUIPart, DynamicToolUIPart, FileUIPart, @@ -46,9 +47,8 @@ ToolOutputErrorPart, ToolUIPart, UIMessage, - request_data_ta, ) -from ._response_types import BaseChunk +from .response_types import BaseChunk if TYPE_CHECKING: pass @@ -56,14 +56,15 @@ __all__ = ['VercelAIAdapter'] +request_data_ta: TypeAdapter[RequestData] = TypeAdapter(RequestData) + @dataclass class VercelAIAdapter(UIAdapter[RequestData, UIMessage, BaseChunk, AgentDepsT, OutputDataT]): - """TODO (DouweM): Docstring.""" + """UI adapter for the Vercel AI protocol.""" @classmethod def build_run_input(cls, body: bytes) -> RequestData: - """Validate a Vercel AI request.""" return request_data_ta.validate_json(body) def build_event_stream(self) -> UIEventStream[RequestData, BaseChunk, AgentDepsT, OutputDataT]: @@ -71,12 +72,10 @@ def build_event_stream(self) -> UIEventStream[RequestData, BaseChunk, AgentDepsT @cached_property def messages(self) -> list[ModelMessage]: - """Convert Vercel AI protocol messages to Pydantic AI messages.""" return self.load_messages(self.run_input.messages) @classmethod def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: # noqa: C901 - """Load messages from the request and return the loaded messages.""" builder = MessagesBuilder() for msg in messages: diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py index 2b1386a00c..b3a0e79f5c 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_event_stream.py @@ -24,8 +24,8 @@ from ...output import OutputDataT from ...tools import AgentDepsT from .. 
import UIEventStream -from ._request_types import RequestData -from ._response_types import ( +from .request_types import RequestData +from .response_types import ( BaseChunk, DoneChunk, ErrorChunk, @@ -60,26 +60,21 @@ def _json_dumps(obj: Any) -> str: @dataclass class VercelAIEventStream(UIEventStream[RequestData, BaseChunk, AgentDepsT, OutputDataT]): - """TODO (DouweM): Docstring.""" + """UI event stream transformer for the Vercel AI protocol.""" _step_started: bool = False @property def response_headers(self) -> Mapping[str, str] | None: - """Get the response headers for the adapter.""" return VERCEL_AI_DSP_HEADERS def encode_event(self, event: BaseChunk) -> str: - if isinstance(event, DoneChunk): - return 'data: [DONE]\n\n' - return f'data: {event.model_dump_json(by_alias=True, exclude_none=True)}\n\n' + return f'data: {event.encode()}\n\n' async def before_stream(self) -> AsyncIterator[BaseChunk]: - """Yield events before agent streaming starts.""" yield StartChunk() async def before_response(self) -> AsyncIterator[BaseChunk]: - """Yield events before the request is processed.""" if self._step_started: yield FinishStepChunk() @@ -87,18 +82,15 @@ async def before_response(self) -> AsyncIterator[BaseChunk]: yield StartStepChunk() async def after_stream(self) -> AsyncIterator[BaseChunk]: - """Yield events after agent streaming completes.""" yield FinishStepChunk() yield FinishChunk() yield DoneChunk() async def on_error(self, error: Exception) -> AsyncIterator[BaseChunk]: - """Handle errors during streaming.""" yield ErrorChunk(error_text=str(error)) async def handle_text_start(self, part: TextPart, follows_text: bool = False) -> AsyncIterator[BaseChunk]: - """Handle a TextPart at start.""" if follows_text: message_id = self.message_id else: @@ -109,41 +101,34 @@ async def handle_text_start(self, part: TextPart, follows_text: bool = False) -> yield TextDeltaChunk(id=message_id, delta=part.content) async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[BaseChunk]: - """Handle a TextPartDelta.""" if delta.content_delta: # pragma: no branch yield TextDeltaChunk(id=self.message_id, delta=delta.content_delta) async def handle_text_end(self, part: TextPart, followed_by_text: bool = False) -> AsyncIterator[BaseChunk]: - """Handle a TextPart at end.""" if not followed_by_text: yield TextEndChunk(id=self.message_id) async def handle_thinking_start( self, part: ThinkingPart, follows_thinking: bool = False ) -> AsyncIterator[BaseChunk]: - """Handle a ThinkingPart at start.""" message_id = self.new_message_id() yield ReasoningStartChunk(id=message_id) if part.content: yield ReasoningDeltaChunk(id=message_id, delta=part.content) async def handle_thinking_delta(self, delta: ThinkingPartDelta) -> AsyncIterator[BaseChunk]: - """Handle a ThinkingPartDelta.""" if delta.content_delta: # pragma: no branch yield ReasoningDeltaChunk(id=self.message_id, delta=delta.content_delta) async def handle_thinking_end( self, part: ThinkingPart, followed_by_thinking: bool = False ) -> AsyncIterator[BaseChunk]: - """Handle a ThinkingPart at end.""" yield ReasoningEndChunk(id=self.message_id) def handle_tool_call_start(self, part: ToolCallPart | BuiltinToolCallPart) -> AsyncIterator[BaseChunk]: - """Handle a ToolCallPart or BuiltinToolCallPart at start.""" return self._handle_tool_call_start(part) def handle_builtin_tool_call_start(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseChunk]: - """Handle a BuiltinToolCallEvent, emitting tool input events.""" return self._handle_tool_call_start(part, 
provider_executed=True) async def _handle_tool_call_start( @@ -152,7 +137,6 @@ async def _handle_tool_call_start( tool_call_id: str | None = None, provider_executed: bool | None = None, ) -> AsyncIterator[BaseChunk]: - """Handle a ToolCallPart or BuiltinToolCallPart at start.""" tool_call_id = tool_call_id or part.tool_call_id yield ToolInputStartChunk( tool_call_id=tool_call_id, @@ -163,7 +147,6 @@ async def _handle_tool_call_start( yield ToolInputDeltaChunk(tool_call_id=tool_call_id, input_text_delta=part.args_as_json_str()) async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterator[BaseChunk]: - """Handle a ToolCallPartDelta.""" tool_call_id = delta.tool_call_id or '' assert tool_call_id, '`ToolCallPartDelta.tool_call_id` must be set' yield ToolInputDeltaChunk( @@ -172,11 +155,9 @@ async def handle_tool_call_delta(self, delta: ToolCallPartDelta) -> AsyncIterato ) async def handle_tool_call_end(self, part: ToolCallPart) -> AsyncIterator[BaseChunk]: - """Handle a ToolCallPart at end.""" yield ToolInputAvailableChunk(tool_call_id=part.tool_call_id, tool_name=part.tool_name, input=part.args) async def handle_builtin_tool_call_end(self, part: BuiltinToolCallPart) -> AsyncIterator[BaseChunk]: - """Handle a BuiltinToolCallPart at end.""" yield ToolInputAvailableChunk( tool_call_id=part.tool_call_id, tool_name=part.tool_name, @@ -186,7 +167,6 @@ async def handle_builtin_tool_call_end(self, part: BuiltinToolCallPart) -> Async ) async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> AsyncIterator[BaseChunk]: - """Handle a BuiltinToolReturnPart.""" yield ToolOutputAvailableChunk( tool_call_id=part.tool_call_id, output=part.content, @@ -194,12 +174,10 @@ async def handle_builtin_tool_return(self, part: BuiltinToolReturnPart) -> Async ) async def handle_file(self, part: FilePart) -> AsyncIterator[BaseChunk]: - """Handle a FilePart.""" file = part.content yield FileChunk(url=file.data_uri, media_type=file.media_type) async def handle_function_tool_result(self, event: FunctionToolResultEvent) -> AsyncIterator[BaseChunk]: - """Handle a FunctionToolResultEvent, emitting tool result events.""" result = event.result if isinstance(result, RetryPromptPart): yield ToolOutputErrorChunk(tool_call_id=result.tool_call_id, error_text=result.model_response()) diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_utils.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_utils.py index b517a7c23d..0daf8f35de 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_utils.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_utils.py @@ -5,17 +5,10 @@ """ from abc import ABC -from typing import Any from pydantic import BaseModel, ConfigDict from pydantic.alias_generators import to_camel -__all__ = ('CamelBaseModel', 'ProviderMetadata', 'JSONValue') - -# Technically this is recursive union of JSON types; for simplicity, we call it Any -JSONValue = Any -ProviderMetadata = dict[str, dict[str, JSONValue]] - class CamelBaseModel(BaseModel, ABC): """Base model with camelCase aliases.""" diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_request_types.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/request_types.py similarity index 85% rename from pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_request_types.py rename to pydantic_ai_slim/pydantic_ai/ui/vercel_ai/request_types.py index 9df592b7f1..1fe9a593af 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_request_types.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/request_types.py @@ -4,14 +4,24 @@ 
https://github.com/vercel/ai/blob/ai%405.0.59/packages/ai/src/ui/ui-messages.ts
 """
 
+from abc import ABC
 from typing import Annotated, Any, Literal
 
-from pydantic import Discriminator, Field, TypeAdapter
+from pydantic import Discriminator, Field
 
-from ._utils import CamelBaseModel, ProviderMetadata
+from ._utils import CamelBaseModel
 
+# Technically this is a recursive union of JSON types; for simplicity, we call it Any
+JSONValue = Any
+ProviderMetadata = dict[str, dict[str, JSONValue]]
+"""Provider metadata."""
 
-class TextUIPart(CamelBaseModel):
+
+class BaseUIPart(CamelBaseModel, ABC):
+    """Abstract base class for all UI parts."""
+
+
+class TextUIPart(BaseUIPart):
     """A text part of a message."""
 
     type: Literal['text'] = 'text'
@@ -26,7 +36,7 @@ class TextUIPart(CamelBaseModel):
     """The provider metadata."""
 
 
-class ReasoningUIPart(CamelBaseModel):
+class ReasoningUIPart(BaseUIPart):
     """A reasoning part of a message."""
 
     type: Literal['reasoning'] = 'reasoning'
@@ -41,7 +51,7 @@ class ReasoningUIPart(CamelBaseModel):
     """The provider metadata."""
 
 
-class SourceUrlUIPart(CamelBaseModel):
+class SourceUrlUIPart(BaseUIPart):
     """A source part of a message."""
 
     type: Literal['source-url'] = 'source-url'
@@ -51,7 +61,7 @@ class SourceUrlUIPart(CamelBaseModel):
     provider_metadata: ProviderMetadata | None = None
 
 
-class SourceDocumentUIPart(CamelBaseModel):
+class SourceDocumentUIPart(BaseUIPart):
     """A document source part of a message."""
 
     type: Literal['source-document'] = 'source-document'
@@ -62,7 +72,7 @@ class SourceDocumentUIPart(CamelBaseModel):
     provider_metadata: ProviderMetadata | None = None
 
 
-class FileUIPart(CamelBaseModel):
+class FileUIPart(BaseUIPart):
     """A file part of a message."""
 
     type: Literal['file'] = 'file'
@@ -86,13 +96,13 @@ class FileUIPart(CamelBaseModel):
     """The provider metadata."""
 
 
-class StepStartUIPart(CamelBaseModel):
+class StepStartUIPart(BaseUIPart):
     """A step boundary part of a message."""
 
     type: Literal['step-start'] = 'step-start'
 
 
-class DataUIPart(CamelBaseModel):
+class DataUIPart(BaseUIPart):
     """Data part with dynamic type based on data name."""
 
     type: Annotated[str, Field(pattern=r'^data-')]
@@ -101,7 +111,7 @@ class DataUIPart(CamelBaseModel):
 
 
 # Tool part states as separate models
-class ToolInputStreamingPart(CamelBaseModel):
+class ToolInputStreamingPart(BaseUIPart):
     """Tool part in input-streaming state."""
 
     type: Annotated[str, Field(pattern=r'^tool-')]
@@ -111,7 +121,7 @@ class ToolInputStreamingPart(CamelBaseModel):
     provider_executed: bool | None = None
 
 
-class ToolInputAvailablePart(CamelBaseModel):
+class ToolInputAvailablePart(BaseUIPart):
     """Tool part in input-available state."""
 
     type: Annotated[str, Field(pattern=r'^tool-')]
@@ -122,7 +132,7 @@ class ToolInputAvailablePart(CamelBaseModel):
     call_provider_metadata: ProviderMetadata | None = None
 
 
-class ToolOutputAvailablePart(CamelBaseModel):
+class ToolOutputAvailablePart(BaseUIPart):
     """Tool part in output-available state."""
 
     type: Annotated[str, Field(pattern=r'^tool-')]
@@ -135,7 +145,7 @@ class ToolOutputAvailablePart(CamelBaseModel):
     preliminary: bool | None = None
 
 
-class ToolOutputErrorPart(CamelBaseModel):
+class ToolOutputErrorPart(BaseUIPart):
     """Tool part in output-error state."""
 
     type: Annotated[str, Field(pattern=r'^tool-')]
@@ -148,12 +158,12 @@ class ToolOutputErrorPart(CamelBaseModel):
     call_provider_metadata: ProviderMetadata | None = None
 
 
-# Union of all tool part states
 ToolUIPart = ToolInputStreamingPart | ToolInputAvailablePart | ToolOutputAvailablePart | ToolOutputErrorPart
+"""Union of 
all tool part types.""" # Dynamic tool part states as separate models -class DynamicToolInputStreamingPart(CamelBaseModel): +class DynamicToolInputStreamingPart(BaseUIPart): """Dynamic tool part in input-streaming state.""" type: Literal['dynamic-tool'] = 'dynamic-tool' @@ -163,7 +173,7 @@ class DynamicToolInputStreamingPart(CamelBaseModel): input: Any | None = None -class DynamicToolInputAvailablePart(CamelBaseModel): +class DynamicToolInputAvailablePart(BaseUIPart): """Dynamic tool part in input-available state.""" type: Literal['dynamic-tool'] = 'dynamic-tool' @@ -174,7 +184,7 @@ class DynamicToolInputAvailablePart(CamelBaseModel): call_provider_metadata: ProviderMetadata | None = None -class DynamicToolOutputAvailablePart(CamelBaseModel): +class DynamicToolOutputAvailablePart(BaseUIPart): """Dynamic tool part in output-available state.""" type: Literal['dynamic-tool'] = 'dynamic-tool' @@ -187,7 +197,7 @@ class DynamicToolOutputAvailablePart(CamelBaseModel): preliminary: bool | None = None -class DynamicToolOutputErrorPart(CamelBaseModel): +class DynamicToolOutputErrorPart(BaseUIPart): """Dynamic tool part in output-error state.""" type: Literal['dynamic-tool'] = 'dynamic-tool' @@ -199,13 +209,13 @@ class DynamicToolOutputErrorPart(CamelBaseModel): call_provider_metadata: ProviderMetadata | None = None -# Union of all dynamic tool part states DynamicToolUIPart = ( DynamicToolInputStreamingPart | DynamicToolInputAvailablePart | DynamicToolOutputAvailablePart | DynamicToolOutputErrorPart ) +"""Union of all dynamic tool part types.""" UIMessagePart = ( @@ -262,6 +272,4 @@ class RegenerateMessage(CamelBaseModel, extra='allow'): RequestData = Annotated[SubmitMessage | RegenerateMessage, Discriminator('trigger')] - -# Type adapter for parsing requests -request_data_ta: TypeAdapter[RequestData] = TypeAdapter(RequestData) +"""Union of all request data types.""" diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_response_types.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/response_types.py similarity index 87% rename from pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_response_types.py rename to pydantic_ai_slim/pydantic_ai/ui/vercel_ai/response_types.py index 1fb97836f2..1255503107 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_response_types.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/response_types.py @@ -1,46 +1,28 @@ """Vercel AI response types (SSE chunks). 
Converted to Python from:
-https://github.com/vercel/ai/blob/ai%405.0.59/packages/ai/src/ui/ui-messages.ts
+https://github.com/vercel/ai/blob/ai%405.0.59/packages/ai/src/ui-message-stream/ui-message-chunks.ts
 """
 
+from abc import ABC
 from typing import Annotated, Any, Literal
 
 from pydantic import Field
 
-from ._utils import CamelBaseModel, ProviderMetadata
-
-__all__ = [
-    'BaseChunk',
-    'TextStartChunk',
-    'TextDeltaChunk',
-    'TextEndChunk',
-    'ReasoningStartChunk',
-    'ReasoningDeltaChunk',
-    'ReasoningEndChunk',
-    'ErrorChunk',
-    'ToolInputStartChunk',
-    'ToolInputDeltaChunk',
-    'ToolInputAvailableChunk',
-    'ToolInputErrorChunk',
-    'ToolOutputAvailableChunk',
-    'ToolOutputErrorChunk',
-    'SourceUrlChunk',
-    'SourceDocumentChunk',
-    'FileChunk',
-    'DataChunk',
-    'StartStepChunk',
-    'FinishStepChunk',
-    'StartChunk',
-    'FinishChunk',
-    'AbortChunk',
-    'MessageMetadataChunk',
-]
-
-
-class BaseChunk(CamelBaseModel):
+from ._utils import CamelBaseModel
+
+# Technically this is a recursive union of JSON types; for simplicity, we call it Any
+JSONValue = Any
+ProviderMetadata = dict[str, dict[str, JSONValue]]
+"""Provider metadata."""
+
+
+class BaseChunk(CamelBaseModel, ABC):
     """Abstract base class for response SSE events."""
 
+    def encode(self) -> str:
+        return self.model_dump_json(by_alias=True, exclude_none=True)
+
 
 class TextStartChunk(BaseChunk):
     """Text start chunk."""
@@ -243,3 +225,6 @@ class DoneChunk(BaseChunk):
     """Done chunk."""
 
     type: Literal['done'] = 'done'
+
+    def encode(self) -> str:
+        return '[DONE]'
diff --git a/tests/test_vercel_ai.py b/tests/test_vercel_ai.py
index 64d8f76d0c..085cd38631 100644
--- a/tests/test_vercel_ai.py
+++ b/tests/test_vercel_ai.py
@@ -46,7 +46,7 @@
 from pydantic_ai.models.test import TestModel
 from pydantic_ai.run import AgentRunResult
 from pydantic_ai.ui.vercel_ai import VercelAIAdapter, VercelAIEventStream
-from pydantic_ai.ui.vercel_ai._request_types import (
+from pydantic_ai.ui.vercel_ai.request_types import (
     DynamicToolOutputAvailablePart,
     FileUIPart,
     ReasoningUIPart,
@@ -57,7 +57,7 @@
     ToolOutputErrorPart,
     UIMessage,
 )
-from pydantic_ai.ui.vercel_ai._response_types import BaseChunk, DataChunk
+from pydantic_ai.ui.vercel_ai.response_types import BaseChunk, DataChunk
 
 from .conftest import IsDatetime, IsSameStr, IsStr, try_import
 

From 81be052b50e75a87b678f2e5c9d5846184dbf836 Mon Sep 17 00:00:00 2001
From: Douwe Maan
Date: Wed, 29 Oct 2025 20:44:19 +0000
Subject: [PATCH 45/46] fix docs

---
 docs/install.md                                   |  2 +-
 pydantic_ai_slim/pydantic_ai/ui/_adapter.py       |  2 +-
 pydantic_ai_slim/pydantic_ai/ui/_event_stream.py  |  2 +-
 pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py | 14 ++++++++++----
 .../pydantic_ai/ui/vercel_ai/_adapter.py          |  4 ++++
 5 files changed, 17 insertions(+), 7 deletions(-)

diff --git a/docs/install.md b/docs/install.md
index aafc21a5a5..7739011605 100644
--- a/docs/install.md
+++ b/docs/install.md
@@ -43,7 +43,7 @@ pip/uv-add "pydantic-ai-slim[openai]"
 * `logfire` — installs [Pydantic Logfire](logfire.md) dependency `logfire` [PyPI ↗](https://pypi.org/project/logfire){:target="_blank"}
 * `evals` — installs [Pydantic Evals](evals.md) dependency `pydantic-evals` [PyPI ↗](https://pypi.org/project/pydantic-evals){:target="_blank"}
 * `openai` — installs [OpenAI Model](models/openai.md) dependency `openai` [PyPI ↗](https://pypi.org/project/openai){:target="_blank"}
-* `vertexai` — installs [Google Vertex AI Model](models/google.md#vertex-ai) dependencies `google-auth` [PyPI ↗](https://pypi.org/project/google-auth){:target="_blank"} and `requests` 
[PyPI ↗](https://pypi.org/project/requests){:target="_blank"} +* `vertexai` — installs `GoogleVertexProvider` dependencies `google-auth` [PyPI ↗](https://pypi.org/project/google-auth){:target="_blank"} and `requests` [PyPI ↗](https://pypi.org/project/requests){:target="_blank"} * `google` — installs [Google Model](models/google.md) dependency `google-genai` [PyPI ↗](https://pypi.org/project/google-genai){:target="_blank"} * `anthropic` — installs [Anthropic Model](models/anthropic.md) dependency `anthropic` [PyPI ↗](https://pypi.org/project/anthropic){:target="_blank"} * `groq` — installs [Groq Model](models/groq.md) dependency `groq` [PyPI ↗](https://pypi.org/project/groq){:target="_blank"} diff --git a/pydantic_ai_slim/pydantic_ai/ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/_adapter.py index b274cfd78e..bb03147b71 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/_adapter.py @@ -144,7 +144,7 @@ def load_messages(cls, messages: Sequence[MessageT]) -> list[ModelMessage]: @abstractmethod def build_event_stream(self) -> UIEventStream[RunInputT, EventT, AgentDepsT, OutputDataT]: - """Build a protocol-specific event stream.""" + """Build a protocol-specific event stream transformer.""" raise NotImplementedError @cached_property diff --git a/pydantic_ai_slim/pydantic_ai/ui/_event_stream.py b/pydantic_ai_slim/pydantic_ai/ui/_event_stream.py index aa5d73c6ff..c18c1f1c98 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/_event_stream.py +++ b/pydantic_ai_slim/pydantic_ai/ui/_event_stream.py @@ -240,7 +240,7 @@ async def handle_event(self, event: NativeEvent) -> AsyncIterator[EventT]: - [`FinalResultEvent`][pydantic_ai.messages.FinalResultEvent] -> `handle_final_result` - [`FunctionToolCallEvent`][pydantic_ai.messages.FunctionToolCallEvent] -> `handle_function_tool_call` - [`FunctionToolResultEvent`][pydantic_ai.messages.FunctionToolResultEvent] -> `handle_function_tool_result` - - [`AgentRunResultEvent`][pydantic_ai.messages.AgentRunResultEvent] -> `handle_run_result` + - [`AgentRunResultEvent`][pydantic_ai.run.AgentRunResultEvent] -> `handle_run_result` Subclasses are encouraged to override the individual `handle_*` methods rather than this one. If you need specific behavior for all events, make sure you call the super method. 
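
The dispatch contract documented above (each native event is fanned out by `handle_event` to a per-event `handle_*` hook) is what makes new protocols cheap to add: subclass `UIEventStream` and override only the hooks you care about. Below is a minimal sketch of such a subclass; the generic parameters, hook signatures, and the `pydantic_ai.ui` import path are assumed from the diffs in this series rather than verified against the released API.

from collections.abc import AsyncIterator
from dataclasses import dataclass

from pydantic_ai.messages import TextPart, TextPartDelta
from pydantic_ai.ui import UIEventStream  # assumed public import path


@dataclass
class PlainTextEventStream(UIEventStream[bytes, str, None, str]):
    """Hypothetical protocol that streams bare text as SSE data lines."""

    def encode_event(self, event: str) -> str:
        # Same SSE framing used by VercelAIEventStream.encode_event above.
        return f'data: {event}\n\n'

    async def handle_text_start(self, part: TextPart, follows_text: bool = False) -> AsyncIterator[str]:
        if part.content:
            yield part.content

    async def handle_text_delta(self, delta: TextPartDelta) -> AsyncIterator[str]:
        if delta.content_delta:
            yield delta.content_delta

As in VercelAIEventStream, protocol-level framing such as start/finish/done markers belongs in the `before_stream`/`after_stream` hooks rather than in the per-part handlers.
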
diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py index 6d5a9e5560..5d45f50a7b 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py @@ -85,27 +85,33 @@ class AGUIAdapter(UIAdapter[RunAgentInput, Message, BaseEvent, AgentDepsT, Outpu @classmethod def build_run_input(cls, body: bytes) -> RunAgentInput: + """Build an AG-UI run input object from the request body.""" return RunAgentInput.model_validate_json(body) def build_event_stream(self) -> UIEventStream[RunAgentInput, BaseEvent, AgentDepsT, OutputDataT]: + """Build an AG-UI event stream transformer.""" return AGUIEventStream(self.run_input, accept=self.accept) + @cached_property + def messages(self) -> list[ModelMessage]: + """Pydantic AI messages from the AG-UI run input.""" + return self.load_messages(self.run_input.messages) + @cached_property def toolset(self) -> AbstractToolset[AgentDepsT] | None: + """Toolset representing frontend tools from the AG-UI run input.""" if self.run_input.tools: return _AGUIFrontendToolset[AgentDepsT](self.run_input.tools) return None @cached_property def state(self) -> dict[str, Any] | None: + """Frontend state from the AG-UI run input.""" return self.run_input.state - @cached_property - def messages(self) -> list[ModelMessage]: - return self.load_messages(self.run_input.messages) - @classmethod def load_messages(cls, messages: Sequence[Message]) -> list[ModelMessage]: + """Transform AG-UI messages into Pydantic AI messages.""" builder = MessagesBuilder() tool_calls: dict[str, str] = {} # Tool call ID to tool name mapping. diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py index 73e2f56fe7..7eee52c419 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py +++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py @@ -65,17 +65,21 @@ class VercelAIAdapter(UIAdapter[RequestData, UIMessage, BaseChunk, AgentDepsT, O @classmethod def build_run_input(cls, body: bytes) -> RequestData: + """Build a Vercel AI run input object from the request body.""" return request_data_ta.validate_json(body) def build_event_stream(self) -> UIEventStream[RequestData, BaseChunk, AgentDepsT, OutputDataT]: + """Build a Vercel AI event stream transformer.""" return VercelAIEventStream(self.run_input, accept=self.accept) @cached_property def messages(self) -> list[ModelMessage]: + """Pydantic AI messages from the Vercel AI run input.""" return self.load_messages(self.run_input.messages) @classmethod def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]: # noqa: C901 + """Transform Vercel AI messages into Pydantic AI messages.""" builder = MessagesBuilder() for msg in messages: From e7701767edeba90274e615c13ca2352c44bc7d2c Mon Sep 17 00:00:00 2001 From: Douwe Maan Date: Wed, 29 Oct 2025 20:50:46 +0000 Subject: [PATCH 46/46] fix docs links --- docs/install.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/install.md b/docs/install.md index 7739011605..77ff4e56c1 100644 --- a/docs/install.md +++ b/docs/install.md @@ -51,11 +51,11 @@ pip/uv-add "pydantic-ai-slim[openai]" * `cohere` - installs [Cohere Model](models/cohere.md) dependency `cohere` [PyPI ↗](https://pypi.org/project/cohere){:target="_blank"} * `bedrock` - installs [Bedrock Model](models/bedrock.md) dependency `boto3` [PyPI ↗](https://pypi.org/project/boto3){:target="_blank"} * `huggingface` - installs [Hugging 
Face Model](models/huggingface.md) dependency `huggingface-hub[inference]` [PyPI ↗](https://pypi.org/project/huggingface-hub){:target="_blank"} -* `outlines-transformers` - installs [Outlines Model](outlines.md) dependency `outlines[transformers]` [PyPI ↗](https://pypi.org/project/outlines){:target="_blank"} -* `outlines-llamacpp` - installs [Outlines Model](outlines.md) dependency `outlines[llamacpp]` [PyPI ↗](https://pypi.org/project/outlines){:target="_blank"} +* `outlines-transformers` - installs [Outlines Model](models/outlines.md) dependency `outlines[transformers]` [PyPI ↗](https://pypi.org/project/outlines){:target="_blank"} +* `outlines-llamacpp` - installs [Outlines Model](models/outlines.md) dependency `outlines[llamacpp]` [PyPI ↗](https://pypi.org/project/outlines){:target="_blank"} * `outlines-mlxlm` - installs [Outlines Model](models/outlines.md) dependency `outlines[mlxlm]` [PyPI ↗](https://pypi.org/project/outlines){:target="_blank"} * `outlines-sglang` - installs [Outlines Model](models/outlines.md) dependency `outlines[sglang]` [PyPI ↗](https://pypi.org/project/outlines){:target="_blank"} -* `outlines-vllm-offline` - installs [Outlines Model](outlines.md) dependencies `outlines` [PyPI ↗](https://pypi.org/project/outlines){:target="_blank"} and `vllm` [PyPI ↗](https://pypi.org/project/vllm){:target="_blank"} +* `outlines-vllm-offline` - installs [Outlines Model](models/outlines.md) dependencies `outlines` [PyPI ↗](https://pypi.org/project/outlines){:target="_blank"} and `vllm` [PyPI ↗](https://pypi.org/project/vllm){:target="_blank"} * `duckduckgo` - installs [DuckDuckGo Search Tool](common-tools.md#duckduckgo-search-tool) dependency `ddgs` [PyPI ↗](https://pypi.org/project/ddgs){:target="_blank"} * `tavily` - installs [Tavily Search Tool](common-tools.md#tavily-search-tool) dependency `tavily-python` [PyPI ↗](https://pypi.org/project/tavily-python){:target="_blank"} * `cli` - installs [CLI](cli.md) dependencies `rich` [PyPI ↗](https://pypi.org/project/rich){:target="_blank"}, `prompt-toolkit` [PyPI ↗](https://pypi.org/project/prompt-toolkit){:target="_blank"}, and `argcomplete` [PyPI ↗](https://pypi.org/project/argcomplete){:target="_blank"}
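
Returning to the Vercel AI wire format defined earlier in this series: `BaseChunk.encode` serializes each chunk to camelCase JSON, `DoneChunk` overrides it to return the literal `[DONE]` sentinel, and `encode_event` wraps the result in an SSE `data:` line. A self-contained sketch of that framing follows; `CamelBaseModel` is re-sketched here after the camelCase-aliasing base in ui/vercel_ai/_utils.py (exact config assumed), and the field set of `TextDeltaChunk` is likewise an assumption rather than a copy of the final module.

from typing import Literal

from pydantic import BaseModel, ConfigDict
from pydantic.alias_generators import to_camel


class CamelBaseModel(BaseModel):
    # Assumed config: camelCase aliases for all fields, as the _utils.py docstring describes.
    model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)


class TextDeltaChunk(CamelBaseModel):
    # Field set is an assumption; the real model lives in response_types.py.
    type: Literal['text-delta'] = 'text-delta'
    id: str
    delta: str

    def encode(self) -> str:
        # Same dump options as BaseChunk.encode in the diff above.
        return self.model_dump_json(by_alias=True, exclude_none=True)


def encode_event(chunk: TextDeltaChunk) -> str:
    # SSE framing, as in VercelAIEventStream.encode_event.
    return f'data: {chunk.encode()}\n\n'


print(encode_event(TextDeltaChunk(id='msg_1', delta='Hello')))
# prints: data: {"type":"text-delta","id":"msg_1","delta":"Hello"}

Moving the `[DONE]` special case out of `encode_event` and into `DoneChunk.encode`, as this series does, keeps the SSE framing free of isinstance checks.
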