Skip to content

Commit b58b143

Browse files
fix(workflow): preserve event.output for cached LlmAgent results
The runner stripped event.output to None before persisting any non-partial event with node_info.message_as_output=True. On resume, _reconstruct_node_state fell back to event.content (a raw genai.types.Content) for the cached child output, so subscripting the result of ctx.run_node on an LlmAgent across a HITL pause crashed with "'Content' object is not subscriptable". Removing the strip lets the validated dict produced by process_llm_agent_output survive the persist/rehydrate roundtrip. Closes #5553
1 parent 02ffd98 commit b58b143

3 files changed

Lines changed: 52 additions & 6 deletions

File tree

src/google/adk/runners.py

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -611,10 +611,6 @@ async def _consume_event_queue(
611611
if event_or_done is done_sentinel:
612612
break
613613
event: Event = event_or_done
614-
if not event.partial:
615-
if event.node_info.message_as_output and event.content is not None:
616-
event = event.model_copy()
617-
event.output = None
618614

619615
_apply_run_config_custom_metadata(event, ic.run_config)
620616
modified_event = await ic.plugin_manager.run_on_event_callback(

tests/unittests/workflow/test_workflow_dynamic_nodes.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1153,8 +1153,9 @@ async def parent(*, ctx, node_input):
11531153
assert len(agent_events) > 0
11541154
agent_event = agent_events[-1]
11551155

1156-
# Verify that runners.py cleared the output
1157-
assert agent_event.output is None
1156+
# The persisted event keeps the LlmAgent's output so resume can replay
1157+
# it without re-firing the model
1158+
assert agent_event.output == 'LLM output content'
11581159

11591160
# When the workflow is resumed by resolving the interrupt
11601161
resume_events = await _resume(

tests/unittests/workflow/test_workflow_hitl.py

Lines changed: 49 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2051,3 +2051,52 @@ def node_b():
20512051
)
20522052
outputs3 = workflow_testing_utils.get_outputs(events3)
20532053
assert outputs3 == [f'c_from_b_response 2']
2054+
2055+
2056+
@pytest.mark.asyncio
2057+
async def test_cached_llm_agent_output_remains_subscriptable_after_resume(
2058+
request: pytest.FixtureRequest,
2059+
):
2060+
"""Cached LlmAgent output must stay a dict on resume."""
2061+
2062+
class Greeting(BaseModel):
2063+
text: str
2064+
2065+
greeter = LlmAgent(
2066+
name='greeter',
2067+
model=testing_utils.MockModel.create(
2068+
responses=[types.Part.from_text(text='{"text": "hi"}')],
2069+
),
2070+
mode='single_turn',
2071+
output_schema=Greeting,
2072+
instruction='Return JSON.',
2073+
)
2074+
2075+
@node(rerun_on_resume=True)
2076+
async def orchestrator(ctx: Context, node_input: Any):
2077+
out = await ctx.run_node(greeter, 'go')
2078+
if 'confirm' not in ctx.resume_inputs:
2079+
yield RequestInput(message='ok?', interrupt_id='confirm')
2080+
return
2081+
yield Event(output={'echoed': out['text']})
2082+
2083+
agent = Workflow(name='wf', edges=[(START, orchestrator)])
2084+
app = App(
2085+
name=request.function.__name__,
2086+
root_agent=agent,
2087+
resumability_config=ResumabilityConfig(is_resumable=True),
2088+
)
2089+
runner = testing_utils.InMemoryRunner(app=app)
2090+
2091+
events1 = await runner.run_async(testing_utils.get_user_content('start'))
2092+
req = workflow_testing_utils.get_request_input_events(events1)[0]
2093+
interrupt_id = get_request_input_interrupt_ids(req)[0]
2094+
invocation_id = events1[0].invocation_id
2095+
2096+
events2 = await runner.run_async(
2097+
new_message=testing_utils.UserContent(
2098+
create_request_input_response(interrupt_id, {'approved': True})
2099+
),
2100+
invocation_id=invocation_id,
2101+
)
2102+
assert {'echoed': 'hi'} in workflow_testing_utils.get_outputs(events2)

0 commit comments

Comments (0)