From d04e9640874faf86b26063e8b5ec99c984d1c002 Mon Sep 17 00:00:00 2001
From: "hgyun.lee"
Date: Fri, 24 Oct 2025 19:55:31 +0900
Subject: [PATCH 1/4] Fix: protect instruction insertion from being placed
 directly before a function call.

---
 src/google/adk/flows/llm_flows/contents.py    |  25 ++++-
 .../flows/llm_flows/test_contents.py          | 100 ++++++++++++++++++
 2 files changed, 124 insertions(+), 1 deletion(-)

diff --git a/src/google/adk/flows/llm_flows/contents.py b/src/google/adk/flows/llm_flows/contents.py
index be471a44eb..9f182ba374 100644
--- a/src/google/adk/flows/llm_flows/contents.py
+++ b/src/google/adk/flows/llm_flows/contents.py
@@ -695,7 +695,7 @@ async def _add_instructions_to_user_content(
 
   if llm_request.contents:
     for i in range(len(llm_request.contents) - 1, -1, -1):
-      if llm_request.contents[i].role != 'user':
+      if _is_valid_instruction_position(llm_request, i):
         insert_index = i + 1
         break
       elif i == 0:
@@ -708,3 +708,26 @@ async def _add_instructions_to_user_content(
 
   # Insert all instruction contents at the proper position using efficient slicing
   llm_request.contents[insert_index:insert_index] = instruction_contents
+
+def _is_valid_instruction_position(llm_request: LlmRequest, index: int) -> bool:
+  """Check if a position is valid for inserting instructions.
+
+  Instructions should not be inserted before:
+  - User content
+  - Function call content
+
+  Args:
+    llm_request: The LLM request containing contents
+    index: The index to check
+
+  Returns:
+    True if this is a valid position to insert after, False otherwise
+  """
+  user_message = llm_request.contents[index].role == 'user'
+  tool_request = False
+  for part in llm_request.contents[index].parts:
+    if part.function_call:
+      tool_request = True
+      break
+
+  return not user_message and not tool_request
diff --git a/tests/unittests/flows/llm_flows/test_contents.py b/tests/unittests/flows/llm_flows/test_contents.py
index 9e77407b27..590e08a4f6 100644
--- a/tests/unittests/flows/llm_flows/test_contents.py
+++ b/tests/unittests/flows/llm_flows/test_contents.py
@@ -427,3 +427,103 @@ async def test_events_with_empty_content_are_skipped():
       types.UserContent("Hello"),
       types.UserContent("How are you?"),
   ]
+
+
+@pytest.mark.asyncio
+async def test_add_instructions_skips_function_call_position():
+  """Test that instructions are not inserted before function_call content."""
+  agent = Agent(model="gemini-2.5-flash", name="test_agent")
+  invocation_context = await testing_utils.create_invocation_context(
+      agent=agent
+  )
+
+  # Create instruction contents to be inserted
+  instruction_contents = [
+      types.Content(
+          parts=[types.Part(text="System instruction")], role="user"
+      )
+  ]
+
+  # Create llm_request with 5 contents including function_call in the middle
+  llm_request = LlmRequest(model="gemini-2.5-flash")
+  llm_request.contents = [
+      types.UserContent("First user message"),
+      types.ModelContent("Model response"),
+      types.ModelContent(
+          [types.Part(function_call=types.FunctionCall(
+              id="fc_123",
+              name="test_tool",
+              args={"param": "value"}
+          ))]
+      ),
+      types.Content(
+          parts=[types.Part(function_response=types.FunctionResponse(
+              id="fc_123",
+              name="test_tool",
+              response={"result": "success"}
+          ))],
+          role="user"
+      ),
+      types.UserContent("Final user message"),
+  ]
+
+  # Call the function
+  await contents._add_instructions_to_user_content(
+      invocation_context, llm_request, instruction_contents
+  )
+
+  # Verify instructions are inserted after model content (not before function_call)
+  # The function walks backwards and finds the first valid position (model content
+  # without function_call), which is index 1, so it inserts at index 2
+  assert len(llm_request.contents) == 6
+  assert llm_request.contents[0] == types.UserContent("First user message")
+  assert llm_request.contents[1] == types.ModelContent("Model response")
+  # Instruction should be inserted at position 2 (after model response)
+  assert llm_request.contents[2].parts[0].text == "System instruction"
+  # Function call should be pushed to position 3
+  assert llm_request.contents[3].parts[0].function_call is not None
+  # Function response should be pushed to position 4
+  assert llm_request.contents[4].parts[0].function_response is not None
+  # Final user message should be pushed to position 5
+  assert llm_request.contents[5] == types.UserContent("Final user message")
+
+
+@pytest.mark.asyncio
+async def test_add_instructions_skips_leading_user_content():
+  """Test that instructions are not inserted before leading user content."""
+  agent = Agent(model="gemini-2.5-flash", name="test_agent")
+  invocation_context = await testing_utils.create_invocation_context(
+      agent=agent
+  )
+
+  # Create instruction contents to be inserted
+  instruction_contents = [
+      types.Content(
+          parts=[types.Part(text="System instruction")], role="user"
+      )
+  ]
+
+  # Create llm_request with 5 contents starting with user messages
+  llm_request = LlmRequest(model="gemini-2.5-flash")
+  llm_request.contents = [
+      types.UserContent("First user message"),
+      types.UserContent("Second user message"),
+      types.ModelContent("Model response"),
+      types.UserContent("Third user message"),
+      types.UserContent("Fourth user message"),
+  ]
+
+  # Call the function
+  await contents._add_instructions_to_user_content(
+      invocation_context, llm_request, instruction_contents
+  )
+
+  # Verify instructions are inserted after model content, not at the beginning
+  assert len(llm_request.contents) == 6
+  assert llm_request.contents[0] == types.UserContent("First user message")
+  assert llm_request.contents[1] == types.UserContent("Second user message")
+  assert llm_request.contents[2] == types.ModelContent("Model response")
+  # Instruction should be inserted at position 3 (after model content)
+  assert llm_request.contents[3].parts[0].text == "System instruction"
+  assert llm_request.contents[4] == types.UserContent("Third user message")
+  assert llm_request.contents[5] == types.UserContent("Fourth user message")
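The scan that PATCH 1/4 changes is easiest to see in isolation. Below is a minimal, self-contained sketch of the same backwards walk, using simplified stand-in classes rather than the real google.genai types or ADK's LlmRequest; the name find_insert_index is illustrative, not part of the ADK API.

    from dataclasses import dataclass, field
    from typing import Optional


    @dataclass
    class Part:
      text: Optional[str] = None
      function_call: Optional[dict] = None


    @dataclass
    class Content:
      role: str
      parts: list = field(default_factory=list)


    def find_insert_index(contents: list) -> int:
      """Walks backwards to the slot after the last model turn with no tool call."""
      for i in range(len(contents) - 1, -1, -1):
        is_user = contents[i].role == 'user'
        is_tool_call = any(p.function_call for p in contents[i].parts)
        if not is_user and not is_tool_call:
          return i + 1  # insert right after this plain model response
        if i == 0:
          return 0  # everything so far is user/tool content; insert at the front
      return 0  # empty history


    history = [
        Content('user', [Part(text='hi')]),
        Content('model', [Part(text='hello')]),
        Content('model', [Part(function_call={'name': 'tool'})]),
        Content('user', [Part(text='tool result')]),
    ]
    assert find_insert_index(history) == 2  # not 3: index 2 holds a function call

Before this patch, the walk only tested role != 'user', so on this history it would have returned 3 and dropped the instructions between the function call and its response.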
From 35d42442e3360b708951276cf1318724d5b8d406 Mon Sep 17 00:00:00 2001
From: "hgyun.lee"
Date: Fri, 24 Oct 2025 20:02:08 +0900
Subject: [PATCH 2/4] apply feedback

---
 src/google/adk/flows/llm_flows/contents.py | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/src/google/adk/flows/llm_flows/contents.py b/src/google/adk/flows/llm_flows/contents.py
index 9f182ba374..a1187bd877 100644
--- a/src/google/adk/flows/llm_flows/contents.py
+++ b/src/google/adk/flows/llm_flows/contents.py
@@ -725,9 +725,6 @@ def _is_valid_instruction_position(llm_request: LlmRequest, index: int) -> bool:
   """
   user_message = llm_request.contents[index].role == 'user'
   tool_request = False
-  for part in llm_request.contents[index].parts:
-    if part.function_call:
-      tool_request = True
-      break
+  tool_request = any(part.function_call for part in llm_request.contents[index].parts)
 
   return not user_message and not tool_request
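The feedback applied in PATCH 2/4 swaps the flag-and-break loop for any(), which short-circuits the same way. A tiny equivalence check, using stand-in objects for illustration only:

    from types import SimpleNamespace

    parts = [
        SimpleNamespace(function_call=None),
        SimpleNamespace(function_call={'name': 'test_tool'}),
        SimpleNamespace(function_call=None),
    ]

    # Original form: loop, flag, break on the first function call.
    tool_request = False
    for part in parts:
      if part.function_call:
        tool_request = True
        break

    # Refactored form: any() also stops at the first truthy function_call.
    assert tool_request == any(part.function_call for part in parts)

Note that any() returns a bool while the generator yields the function_call values themselves; truthiness makes the two forms agree. The now-dead tool_request = False initializer survives this commit and only disappears when PATCH 3/4 rewrites the helper.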
From 58d82234caef57d358938273c39604a7fc4661c1 Mon Sep 17 00:00:00 2001
From: Eliza Huang
Date: Wed, 29 Oct 2025 15:36:43 -0700
Subject: [PATCH 3/4] formatting

---
 src/google/adk/flows/llm_flows/contents.py    |  52 +++---
 .../flows/llm_flows/test_contents.py          | 156 ++++++++----
 2 files changed, 95 insertions(+), 113 deletions(-)

diff --git a/src/google/adk/flows/llm_flows/contents.py b/src/google/adk/flows/llm_flows/contents.py
index a1187bd877..3a03e54f9b 100644
--- a/src/google/adk/flows/llm_flows/contents.py
+++ b/src/google/adk/flows/llm_flows/contents.py
@@ -686,6 +686,29 @@ async def _add_instructions_to_user_content(
     llm_request: The LLM request to modify
     instruction_contents: List of instruction-related contents to insert
   """
+
+  def is_valid_instruction_position(
+      llm_request: LlmRequest, index: int
+  ) -> bool:
+    """Checks if instructions can be inserted after a given index.
+
+    A valid insertion point is after a model response that is not a tool call.
+    This prevents injecting instructions in the middle of a user's turn or a
+    tool-use sequence.
+
+    Args:
+      llm_request: The LLM request containing the conversation contents.
+      index: The index of the content to check.
+
+    Returns:
+      True if the position after this index is a valid insertion point.
+    """
+    content_at_index = llm_request.contents[index]
+    is_user_message = content_at_index.role == 'user'
+    is_tool_request = any(part.function_call for part in content_at_index.parts)
+
+    return not is_user_message and not is_tool_request
+
   if not instruction_contents:
     return
 
@@ -695,36 +718,9 @@ async def _add_instructions_to_user_content(
 
   if llm_request.contents:
     for i in range(len(llm_request.contents) - 1, -1, -1):
-      if _is_valid_instruction_position(llm_request, i):
+      if is_valid_instruction_position(llm_request, i):
         insert_index = i + 1
         break
       elif i == 0:
         # All content from start is user content
         insert_index = 0
-        break
-  else:
-    # No contents remaining, just append at the end
-    insert_index = 0
-
-  # Insert all instruction contents at the proper position using efficient slicing
-  llm_request.contents[insert_index:insert_index] = instruction_contents
-
-def _is_valid_instruction_position(llm_request: LlmRequest, index: int) -> bool:
-  """Check if a position is valid for inserting instructions.
-
-  Instructions should not be inserted before:
-  - User content
-  - Function call content
-
-  Args:
-    llm_request: The LLM request containing contents
-    index: The index to check
-
-  Returns:
-    True if this is a valid position to insert after, False otherwise
-  """
-  user_message = llm_request.contents[index].role == 'user'
-  tool_request = False
-  tool_request = any(part.function_call for part in llm_request.contents[index].parts)
-
-  return not user_message and not tool_request
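PATCH 3/4 also moves the helper inside _add_instructions_to_user_content instead of leaving it at module level. Nesting is a scoping choice: the predicate is only meaningful to its enclosing function, so nesting keeps it out of the module namespace and signals it is not part of the module's surface. The pattern in miniature, with hypothetical names:

    def pick_insertables(items: list[str]) -> list[str]:

      def is_insertable(item: str) -> bool:
        # Only the enclosing function ever needs this predicate.
        return not item.startswith('user:')

      return [item for item in items if is_insertable(item)]


    assert pick_insertables(['user: hi', 'model: hello']) == ['model: hello']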
diff --git a/tests/unittests/flows/llm_flows/test_contents.py b/tests/unittests/flows/llm_flows/test_contents.py
index 590e08a4f6..549c09440d 100644
--- a/tests/unittests/flows/llm_flows/test_contents.py
+++ b/tests/unittests/flows/llm_flows/test_contents.py
@@ -430,100 +430,86 @@ async def test_events_with_empty_content_are_skipped():
 
 
 @pytest.mark.asyncio
-async def test_add_instructions_skips_function_call_position():
-  """Test that instructions are not inserted before function_call content."""
+@pytest.mark.parametrize(
+    "initial_contents, expected_insertion_index",
+    [
+        (
+            [
+                types.UserContent("First user message"),
+                types.ModelContent("Model response"),
+                types.ModelContent([
+                    types.Part(
+                        function_call=types.FunctionCall(
+                            name="test_tool", args={}
+                        )
+                    )
+                ]),
+                types.Content(
+                    parts=[
+                        types.Part(
+                            function_response=types.FunctionResponse(
+                                name="test_tool", response={}
+                            )
+                        )
+                    ],
+                    role="user",
+                ),
+                types.UserContent("Final user message"),
+            ],
+            2,
+        ),
+        (
+            [
+                types.UserContent("First user message"),
+                types.UserContent("Second user message"),
+                types.ModelContent("Model response"),
+                types.UserContent("Third user message"),
+                types.UserContent("Fourth user message"),
+            ],
+            3,
+        ),
+        (
+            [
+                types.UserContent("First user message"),
+                types.UserContent("Second user message"),
+            ],
+            0,
+        ),
+        ([], 0),
+        (
+            [
+                types.UserContent("User message"),
+                types.ModelContent("Model response"),
+            ],
+            2,
+        ),
+    ],
+    ids=[
+        "skips_function_call_and_user_content",
+        "skips_trailing_user_content",
+        "inserts_at_start_when_all_user_content",
+        "inserts_at_start_for_empty_content",
+        "inserts_at_end_when_last_is_model_content",
+    ],
+)
+async def test_add_instructions_to_user_content(
+    initial_contents, expected_insertion_index
+):
+  """Tests that instructions are correctly inserted into the content list."""
   agent = Agent(model="gemini-2.5-flash", name="test_agent")
   invocation_context = await testing_utils.create_invocation_context(
       agent=agent
   )
-
-  # Create instruction contents to be inserted
   instruction_contents = [
-      types.Content(
-          parts=[types.Part(text="System instruction")], role="user"
-      )
-  ]
-
-  # Create llm_request with 5 contents including function_call in the middle
-  llm_request = LlmRequest(model="gemini-2.5-flash")
-  llm_request.contents = [
-      types.UserContent("First user message"),
-      types.ModelContent("Model response"),
-      types.ModelContent(
-          [types.Part(function_call=types.FunctionCall(
-              id="fc_123",
-              name="test_tool",
-              args={"param": "value"}
-          ))]
-      ),
-      types.Content(
-          parts=[types.Part(function_response=types.FunctionResponse(
-              id="fc_123",
-              name="test_tool",
-              response={"result": "success"}
-          ))],
-          role="user"
-      ),
-      types.UserContent("Final user message"),
+      types.Content(parts=[types.Part(text="System instruction")], role="user")
   ]
+  llm_request = LlmRequest(model="gemini-2.5-flash", contents=initial_contents)
 
-  # Call the function
   await contents._add_instructions_to_user_content(
       invocation_context, llm_request, instruction_contents
   )
 
-  # Verify instructions are inserted after model content (not before function_call)
-  # The function walks backwards and finds the first valid position (model content
-  # without function_call), which is index 1, so it inserts at index 2
-  assert len(llm_request.contents) == 6
-  assert llm_request.contents[0] == types.UserContent("First user message")
-  assert llm_request.contents[1] == types.ModelContent("Model response")
-  # Instruction should be inserted at position 2 (after model response)
-  assert llm_request.contents[2].parts[0].text == "System instruction"
-  # Function call should be pushed to position 3
-  assert llm_request.contents[3].parts[0].function_call is not None
-  # Function response should be pushed to position 4
-  assert llm_request.contents[4].parts[0].function_response is not None
-  # Final user message should be pushed to position 5
-  assert llm_request.contents[5] == types.UserContent("Final user message")
-
-
-@pytest.mark.asyncio
-async def test_add_instructions_skips_leading_user_content():
-  """Test that instructions are not inserted before leading user content."""
-  agent = Agent(model="gemini-2.5-flash", name="test_agent")
-  invocation_context = await testing_utils.create_invocation_context(
-      agent=agent
+  assert len(llm_request.contents) == len(initial_contents) + 1
+  assert (
+      llm_request.contents[expected_insertion_index] == instruction_contents[0]
   )
-
-  # Create instruction contents to be inserted
-  instruction_contents = [
-      types.Content(
-          parts=[types.Part(text="System instruction")], role="user"
-      )
-  ]
-
-  # Create llm_request with 5 contents starting with user messages
-  llm_request = LlmRequest(model="gemini-2.5-flash")
-  llm_request.contents = [
-      types.UserContent("First user message"),
-      types.UserContent("Second user message"),
-      types.ModelContent("Model response"),
-      types.UserContent("Third user message"),
-      types.UserContent("Fourth user message"),
-  ]
-
-  # Call the function
-  await contents._add_instructions_to_user_content(
-      invocation_context, llm_request, instruction_contents
-  )
-
-  # Verify instructions are inserted after model content, not at the beginning
-  assert len(llm_request.contents) == 6
-  assert llm_request.contents[0] == types.UserContent("First user message")
-  assert llm_request.contents[1] == types.UserContent("Second user message")
-  assert llm_request.contents[2] == types.ModelContent("Model response")
-  # Instruction should be inserted at position 3 (after model content)
-  assert llm_request.contents[3].parts[0].text == "System instruction"
-  assert llm_request.contents[4] == types.UserContent("Third user message")
-  assert llm_request.contents[5] == types.UserContent("Fourth user message")
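The test rewrite in this patch is a table-driven consolidation: two hand-rolled tests become one pytest.mark.parametrize with five cases and readable ids. The shape of that idiom, reduced to a hypothetical add() under test:

    import pytest


    def add(a: int, b: int) -> int:
      return a + b


    @pytest.mark.parametrize(
        "a, b, expected",
        [(1, 2, 3), (0, 0, 0), (-1, 1, 0)],
        ids=["positives", "zeros", "opposites_cancel"],
    )
    def test_add(a, b, expected):
      assert add(a, b) == expected

Each tuple becomes its own test case in the report, named by its id, so a failing edge case (say, the empty-contents case above) is identifiable without reading the test body.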
From 582661ff9f2a8d0ac6f4f6ee7b3b22a0cc85fb1e Mon Sep 17 00:00:00 2001
From: Eliza Huang
Date: Wed, 29 Oct 2025 15:40:52 -0700
Subject: [PATCH 4/4] revert removal

---
 src/google/adk/flows/llm_flows/contents.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/src/google/adk/flows/llm_flows/contents.py b/src/google/adk/flows/llm_flows/contents.py
index 3a03e54f9b..0d45950196 100644
--- a/src/google/adk/flows/llm_flows/contents.py
+++ b/src/google/adk/flows/llm_flows/contents.py
@@ -724,3 +724,10 @@ def is_valid_instruction_position(
       elif i == 0:
         # All content from start is user content
         insert_index = 0
+        break
+  else:
+    # No contents remaining, just append at the end
+    insert_index = 0
+
+  # Insert all instruction contents at the proper position using efficient slicing
+  llm_request.contents[insert_index:insert_index] = instruction_contents
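With PATCH 4/4 restoring the tail that the formatting pass dropped, the function's final shape can be reassembled from the hunks above. The sketch below stitches the surviving + lines together; type annotations are abbreviated, and any line outside the visible hunks (such as the initial insert_index default) is an assumption, marked as such in the comments:

    async def _add_instructions_to_user_content(
        invocation_context, llm_request, instruction_contents
    ) -> None:

      def is_valid_instruction_position(llm_request, index) -> bool:
        content_at_index = llm_request.contents[index]
        is_user_message = content_at_index.role == 'user'
        is_tool_request = any(
            part.function_call for part in content_at_index.parts
        )
        return not is_user_message and not is_tool_request

      if not instruction_contents:
        return

      insert_index = 0  # assumed default; this line is outside the hunks shown

      if llm_request.contents:
        for i in range(len(llm_request.contents) - 1, -1, -1):
          if is_valid_instruction_position(llm_request, i):
            insert_index = i + 1
            break
          elif i == 0:
            # All content from start is user content
            insert_index = 0
            break
      else:
        # No contents remaining, just append at the end
        insert_index = 0

      # Insert all instruction contents at the proper position using efficient slicing
      llm_request.contents[insert_index:insert_index] = instruction_contents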