@@ -20,17 +20,16 @@
 from llmling_agent.common_types import EndStrategy, ModelProtocol
 from llmling_agent.log import get_logger
 from llmling_agent.messaging.messages import ChatMessage, TokenCost
-from llmling_agent.models.content import BaseContent
 from llmling_agent.observability import track_action
-from llmling_agent.prompts.convert import format_prompts
 from llmling_agent.tasks.exceptions import (
     ChainAbortedError,
     RunAbortedError,
 )
 
 from llmling_agent.utils.inspection import execute, has_argument_type
 from llmling_agent_providers.base import AgentLLMProvider, ProviderResponse, UsageLimits
 from llmling_agent_providers.pydanticai.utils import (
+    convert_prompts_to_user_content,
     format_part,
     get_tool_calls,
     to_model_message,
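
Note: the new helper's body is not part of this commit. Below is a minimal sketch of what such a conversion could look like, assuming prompts arrive either as plain strings or as content objects carrying a URL or raw bytes. ImageContent and its url/data/mime_type fields are illustrative stand-ins, not llmling-agent's actual types.

# Sketch only -- the real convert_prompts_to_user_content lives in
# llmling_agent_providers.pydanticai.utils and is not shown in this diff.
from collections.abc import Sequence
from dataclasses import dataclass

from pydantic_ai.messages import BinaryContent, ImageUrl, UserContent


@dataclass
class ImageContent:
    """Illustrative stand-in for an llmling-agent content object."""

    url: str | None = None
    data: bytes | None = None
    mime_type: str = "image/png"


async def convert_prompts_to_user_content(
    prompts: Sequence[str | ImageContent],
) -> list[UserContent]:
    """Map mixed text/content prompts onto pydantic-ai user-content parts.

    Async only to mirror the call sites in this diff; the sketch awaits nothing.
    """
    converted: list[UserContent] = []
    for p in prompts:
        if isinstance(p, str):
            converted.append(p)  # plain text passes through unchanged
        elif p.url is not None:
            converted.append(ImageUrl(url=p.url))  # URL-based content
        elif p.data is not None:
            converted.append(BinaryContent(data=p.data, media_type=p.mime_type))
    return converted
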
@@ -252,25 +251,15 @@ async def generate_response(
                 use_model = infer_model(use_model)
             self.model_changed.emit(use_model)
         try:
-            text_prompts = [p for p in prompts if isinstance(p, str)]
-            content_prompts = [p for p in prompts if isinstance(p, BaseContent)]
-
-            # Get normal text prompt
-            prompt = await format_prompts(text_prompts)
-
-            # Convert Content objects to ModelMessages
-            if content_prompts:
-                prompts_msgs = [
-                    ChatMessage(role="user", content=p) for p in content_prompts
-                ]
-                message_history = [*message_history, *prompts_msgs]
+            # Convert prompts to pydantic-ai format
+            converted_prompts = await convert_prompts_to_user_content(prompts)
 
             # Run with complete history
             to_use = model or self.model
             to_use = infer_model(to_use) if isinstance(to_use, str) else to_use
             limits = asdict(usage_limits) if usage_limits else {}
             result: AgentRunResult = await agent.run(
-                prompt,
+                converted_prompts,  # Pass converted prompts
                 deps=self._context,  # type: ignore
                 message_history=[to_model_message(m) for m in message_history],
                 model=to_use,  # type: ignore
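
Passing a list as the prompt works because pydantic-ai accepts str | Sequence[UserContent] as the user prompt. A self-contained usage example, with an illustrative model name:

import asyncio

from pydantic_ai import Agent
from pydantic_ai.messages import ImageUrl

agent = Agent("openai:gpt-4o")  # illustrative model name


async def main() -> None:
    # A sequence of content parts is valid as the user prompt.
    result = await agent.run(
        ["Describe this image.", ImageUrl(url="https://example.com/cat.png")]
    )
    print(result.data)


asyncio.run(main())
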
@@ -292,10 +281,11 @@ async def generate_response(
                 use_model.model_name if isinstance(use_model, Model) else str(use_model)
             )
             usage = result.usage()
-            cost_str = prompt + str(content_prompts)  # dirty
+            # Create input content representation for cost calculations
+            cost_input = "\n".join(str(p) for p in prompts)
             cost_info = (
                 await TokenCost.from_usage(
-                    usage, resolved_model, cost_str, str(result.data)
+                    usage, resolved_model, cost_input, str(result.data)
                 )
                 if resolved_model and usage
                 else None
@@ -376,22 +366,14 @@ async def stream_response(  # type: ignore[override]
         if model:
             self.model_changed.emit(use_model)
 
-        text_prompts = [p for p in prompts if isinstance(p, str)]
-        content_prompts = [p for p in prompts if isinstance(p, BaseContent)]
-
-        # Get normal text prompt
-        prompt = await format_prompts(text_prompts)
-
-        # Convert Content objects to ChatMessages
-        if content_prompts:
-            prompts_msgs = [ChatMessage(role="user", content=p) for p in content_prompts]
-            message_history = [*message_history, *prompts_msgs]
+        # Convert prompts to pydantic-ai format
+        converted_prompts = await convert_prompts_to_user_content(prompts)
 
         # Convert all messages to pydantic-ai format
         model_messages = [to_model_message(m) for m in message_history]
 
         async with agent.run_stream(
-            prompt,
+            converted_prompts,
             deps=self._context,
             message_history=model_messages,
             model=model or self.model,  # type: ignore
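
The same applies to the streaming path above: run_stream accepts the identical prompt shape, so it needs no separate text/content handling either. A minimal sketch, again with an illustrative model name:

import asyncio

from pydantic_ai import Agent
from pydantic_ai.messages import ImageUrl

agent = Agent("openai:gpt-4o")  # illustrative model name


async def main() -> None:
    async with agent.run_stream(
        ["Describe this image.", ImageUrl(url="https://example.com/cat.png")]
    ) as stream:
        async for chunk in stream.stream_text(delta=True):
            print(chunk, end="")


asyncio.run(main())
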