3 changes: 3 additions & 0 deletions docs/agents/custom-agents.md
@@ -197,6 +197,9 @@ Let's illustrate the power of custom agents with an example pattern: a multi-sta

These are standard `LlmAgent` definitions, responsible for specific tasks. Their `output_key` parameter is crucial for placing results into `session.state`, where other agents or the custom orchestrator can access them.

!!! tip "Direct State Injection in Instructions"
Notice the `story_generator`'s instruction. The `{var}` syntax is a placeholder: before the instruction is sent to the LLM, the ADK framework automatically replaces it (for example, `{topic}`) with the value of `session.state['topic']`. This instruction templating is the recommended way to provide state-based context to an agent. For more details, see the [State documentation](../sessions/state.md#accessing-session-state-in-agent-instructions).

=== "Python"

```python
28 changes: 14 additions & 14 deletions docs/agents/multi-agents.md
@@ -91,7 +91,7 @@ ADK includes specialized agents derived from `BaseAgent` that don't perform task
from google.adk.agents import SequentialAgent, LlmAgent

step1 = LlmAgent(name="Step1_Fetch", output_key="data") # Saves output to state['data']
step2 = LlmAgent(name="Step2_Process", instruction="Process data from state key 'data'.")
step2 = LlmAgent(name="Step2_Process", instruction="Process data from {data}.")

pipeline = SequentialAgent(name="MyPipeline", sub_agents=[step1, step2])
# When pipeline runs, Step2 can access the state['data'] set by Step1.
@@ -105,7 +105,7 @@ ADK includes specialized agents derived from `BaseAgent` that don't perform task
import com.google.adk.agents.LlmAgent;

LlmAgent step1 = LlmAgent.builder().name("Step1_Fetch").outputKey("data").build(); // Saves output to state.get("data")
LlmAgent step2 = LlmAgent.builder().name("Step2_Process").instruction("Process data from state key 'data'.").build();
LlmAgent step2 = LlmAgent.builder().name("Step2_Process").instruction("Process data from {data}.").build();

SequentialAgent pipeline = SequentialAgent.builder().name("MyPipeline").subAgents(step1, step2).build();
// When pipeline runs, Step2 can access the state.get("data") set by Step1.
@@ -243,7 +243,7 @@ The most fundamental way for agents operating within the same invocation (and th
from google.adk.agents import LlmAgent, SequentialAgent

agent_A = LlmAgent(name="AgentA", instruction="Find the capital of France.", output_key="capital_city")
agent_B = LlmAgent(name="AgentB", instruction="Tell me about the city stored in state key 'capital_city'.")
agent_B = LlmAgent(name="AgentB", instruction="Tell me about the city stored in {capital_city}.")

pipeline = SequentialAgent(name="CityInfo", sub_agents=[agent_A, agent_B])
# AgentA runs, saves "Paris" to state['capital_city'].
@@ -265,7 +265,7 @@ The most fundamental way for agents operating within the same invocation (and th

LlmAgent agentB = LlmAgent.builder()
.name("AgentB")
.instruction("Tell me about the city stored in state key 'capital_city'.")
.instruction("Tell me about the city stored in {capital_city}.")
.outputKey("capital_city")
.build();

@@ -524,8 +524,8 @@ By combining ADK's composition primitives, you can implement various established
from google.adk.agents import SequentialAgent, LlmAgent

validator = LlmAgent(name="ValidateInput", instruction="Validate the input.", output_key="validation_status")
processor = LlmAgent(name="ProcessData", instruction="Process data if state key 'validation_status' is 'valid'.", output_key="result")
reporter = LlmAgent(name="ReportResult", instruction="Report the result from state key 'result'.")
processor = LlmAgent(name="ProcessData", instruction="Process data if {validation_status} is 'valid'.", output_key="result")
reporter = LlmAgent(name="ReportResult", instruction="Report the result from {result}.")

data_pipeline = SequentialAgent(
name="DataPipeline",
@@ -550,13 +550,13 @@ By combining ADK's composition primitives, you can implement various established

LlmAgent processor = LlmAgent.builder()
.name("ProcessData")
.instruction("Process data if state key 'validation_status' is 'valid'")
.instruction("Process data if {validation_status} is 'valid'")
.outputKey("result") // Saves its main text output to session.state["result"]
.build();

LlmAgent reporter = LlmAgent.builder()
.name("ReportResult")
.instruction("Report the result from state key 'result'")
.instruction("Report the result from {result}")
.build();

SequentialAgent dataPipeline = SequentialAgent.builder()
@@ -593,7 +593,7 @@ By combining ADK's composition primitives, you can implement various established

synthesizer = LlmAgent(
name="Synthesizer",
instruction="Combine results from state keys 'api1_data' and 'api2_data'."
instruction="Combine results from {api1_data} and {api2_data}."
)

overall_workflow = SequentialAgent(
@@ -630,7 +630,7 @@ By combining ADK's composition primitives, you can implement various established

LlmAgent synthesizer = LlmAgent.builder()
.name("Synthesizer")
.instruction("Combine results from state keys 'api1_data' and 'api2_data'.")
.instruction("Combine results from {api1_data} and {api2_data}.")
.build();

SequentialAgent overallWorfklow = SequentialAgent.builder()
@@ -747,7 +747,7 @@ By combining ADK's composition primitives, you can implement various established

reviewer = LlmAgent(
name="FactChecker",
instruction="Review the text in state key 'draft_text' for factual accuracy. Output 'valid' or 'invalid' with reasons.",
instruction="Review the text in {draft_text} for factual accuracy. Output 'valid' or 'invalid' with reasons.",
output_key="review_status"
)

@@ -776,7 +776,7 @@ By combining ADK's composition primitives, you can implement various established

LlmAgent reviewer = LlmAgent.builder()
.name("FactChecker")
.instruction("Review the text in state key 'draft_text' for factual accuracy. Output 'valid' or 'invalid' with reasons.")
.instruction("Review the text in {draft_text} for factual accuracy. Output 'valid' or 'invalid' with reasons.")
.outputKey("review_status")
.build();

@@ -940,7 +940,7 @@ By combining ADK's composition primitives, you can implement various established
# Agent that proceeds based on human decision
process_decision = LlmAgent(
name="ProcessDecision",
instruction="Check state key 'human_decision'. If 'approved', proceed. If 'rejected', inform user."
instruction="Check {human_decision}. If 'approved', proceed. If 'rejected', inform user."
)

approval_workflow = SequentialAgent(
Expand Down Expand Up @@ -984,7 +984,7 @@ By combining ADK's composition primitives, you can implement various established
// Agent that proceeds based on human decision
LlmAgent processDecision = LlmAgent.builder()
.name("ProcessDecision")
.instruction("Check state key 'human_decision'. If 'approved', proceed. If 'rejected', inform user.")
.instruction("Check {human_decision}. If 'approved', proceed. If 'rejected', inform user.")
.build();

SequentialAgent approvalWorkflow = SequentialAgent.builder()
93 changes: 93 additions & 0 deletions docs/sessions/state.md
@@ -67,8 +67,101 @@ Prefixes on state keys define their scope and persistence behavior, especially w

**How the Agent Sees It:** Your agent code interacts with the *combined* state through the single `session.state` collection (a Python `dict` / Java `Map`). The `SessionService` handles fetching/merging state from the correct underlying storage based on prefixes.
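
For example, inside a tool your code reads and writes session-, user-, and app-scoped values through that one collection. The following is a minimal sketch; the tool, its parameters, and the key names are illustrative, not part of the ADK API:

```python
from google.adk.tools import ToolContext

def greet_user(name: str, tool_context: ToolContext) -> dict:
    """Illustrative tool: works with the combined state through one collection."""
    visits = tool_context.state.get("user:visit_count", 0)              # user-scoped key
    greeting = tool_context.state.get("app:default_greeting", "Hello")  # app-scoped key
    tool_context.state["user:visit_count"] = visits + 1                 # tracked as a state delta
    return {"message": f"{greeting}, {name}! Visit number {visits + 1}."}
```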

### Accessing Session State in Agent Instructions

When working with `LlmAgent` instances, you can directly inject session state values into the agent's instruction string using a simple templating syntax. This allows you to create dynamic and context-aware instructions without relying solely on natural language directives.

#### Using `{key}` Templating

To inject a value from the session state, enclose the key of the desired state variable within curly braces: `{key}`. The framework will automatically replace this placeholder with the corresponding value from `session.state` before passing the instruction to the LLM.

**Example:**

```python
from google.adk.agents import LlmAgent

story_generator = LlmAgent(
name="StoryGenerator",
model="gemini-2.0-flash",
instruction="""Write a short story about a cat, focusing on the theme: {topic}."""
)

# Assuming session.state['topic'] is set to "friendship", the LLM
# will receive the following instruction:
# "Write a short story about a cat, focusing on the theme: friendship."
```

#### Important Considerations

* **Key Existence:** Ensure that the key you reference in the instruction string exists in `session.state`. If the key is missing, the agent might misbehave or throw an error. One way to guarantee this is to pre-seed the state when creating the session, as in the sketch below.
* **Data Types:** The value associated with the key should be a string or a type that can be easily converted to a string.
* **Escaping:** If you need to use literal curly braces in your instruction (e.g., for JSON formatting), you'll need to escape them.
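
As a concrete illustration of the first point, you can pre-seed the state when the session is created so the referenced key is guaranteed to exist. The sketch below assumes an `InMemorySessionService` and uses illustrative names (`story_app`, `user_1`); the exact `create_session` signature may differ slightly across ADK versions:

```python
import asyncio

from google.adk.agents import LlmAgent
from google.adk.sessions import InMemorySessionService

story_agent = LlmAgent(
    name="StoryGenerator",
    model="gemini-2.0-flash",
    instruction="Write a short story about a cat, focusing on the theme: {topic}.",
)

async def main():
    session_service = InMemorySessionService()
    # Pre-seed the state so that {topic} resolves when the instruction is built.
    await session_service.create_session(
        app_name="story_app",           # illustrative name
        user_id="user_1",               # illustrative name
        state={"topic": "friendship"},  # the key referenced in the instruction above
    )

asyncio.run(main())
```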

#### Bypassing State Injection with `InstructionProvider`

In some cases, you might want to use `{{` and `}}` literally in your instructions without triggering the state injection mechanism. For example, you might be writing instructions for an agent that helps with a templating language that uses the same syntax.

To achieve this, you can provide a function to the `instruction` parameter instead of a string. This function is called an `InstructionProvider`. When you use an `InstructionProvider`, the ADK will not attempt to inject state, and your instruction string will be passed to the model as-is.

The `InstructionProvider` function receives a `ReadonlyContext` object, which you can use to access session state or other contextual information if you need to build the instruction dynamically.

=== "Python"

```python
from google.adk.agents import LlmAgent
from google.adk.agents.readonly_context import ReadonlyContext

# This is an InstructionProvider
def my_instruction_provider(context: ReadonlyContext) -> str:
# You can optionally use the context to build the instruction
# For this example, we'll return a static string with literal braces.
return "This is an instruction with {{literal_braces}} that will not be replaced."

agent = LlmAgent(
model="gemini-2.0-flash",
name="template_helper_agent",
instruction=my_instruction_provider
)
```

If you want to both use an `InstructionProvider` *and* inject state into your instructions, you can use the `inject_session_state` utility function.

=== "Python"

```python
from google.adk.agents import LlmAgent
from google.adk.agents.readonly_context import ReadonlyContext
from google.adk.utils import instructions_utils

async def my_dynamic_instruction_provider(context: ReadonlyContext) -> str:
template = "This is a {adjective} instruction with {{literal_braces}}."
# This will inject the 'adjective' state variable but leave the literal braces.
return await instructions_utils.inject_session_state(template, context)

agent = LlmAgent(
model="gemini-2.0-flash",
name="dynamic_template_helper_agent",
instruction=my_dynamic_instruction_provider
)
```

**Benefits of Direct Injection**

* **Clarity:** Makes it explicit which parts of the instruction are dynamic and based on session state.
* **Reliability:** Avoids relying on the LLM to correctly interpret natural language instructions to access state.
* **Maintainability:** Simplifies instruction strings and reduces the risk of errors when updating state variable names.

**Relation to Other State Access Methods**

This direct injection method is specific to `LlmAgent` instructions. Refer to the following section for more information on other state access methods.

### How State is Updated: Recommended Methods

!!! note "The Right Way to Modify State"
When you need to change the session state, the correct and safest method is to **directly modify the `state` object on the `Context`** provided to your function (e.g., `callback_context.state['my_key'] = 'new_value'`). This is direct state manipulation done the right way, because the framework automatically tracks these changes.

This is critically different from directly modifying the `state` on a `Session` object you retrieve from the `SessionService` (e.g., `my_session.state['my_key'] = 'new_value'`). **You should avoid this**, as it bypasses the ADK's event tracking and can lead to lost data. The "Warning" section at the end of this page has more details on this important distinction.

State should **always** be updated as part of adding an `Event` to the session history using `session_service.append_event()`. This ensures changes are tracked, persistence works correctly, and updates are thread-safe.
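
As a minimal sketch of the recommended pattern from the note above (the callback and key names are illustrative), a callback writes through the `state` object on the context it receives, and the framework records that change with the event it appends:

```python
from typing import Optional

from google.genai import types
from google.adk.agents.callback_context import CallbackContext

def remember_topic(callback_context: CallbackContext) -> Optional[types.Content]:
    """Illustrative after_agent_callback: updates state through the context."""
    # Correct: writing through the context's state is tracked by the framework
    # and persisted with the event generated for this step.
    callback_context.state["last_topic"] = callback_context.state.get("topic", "unknown")
    return None  # returning None keeps the agent's normal output unchanged
```

Such a callback would be wired up via an agent's `after_agent_callback` parameter; contrast this with writing to `my_session.state` on a fetched `Session`, which the framework cannot track.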

**1\. The Easy Way: `output_key` (for Agent Text Responses)**
@@ -63,7 +63,7 @@ public static void main(String[] args) {
.instruction(
"""
You are a story writer. Write a short story (around 100 words) about a cat,
based on the topic provided in session state with key 'topic'
based on the topic: {topic}
""")
.inputSchema(null)
.outputKey("current_story") // Key for storing output in session state
@@ -76,8 +76,7 @@ You are a story writer. Write a short story (around 100 words) about a cat,
.description("Critiques the story.")
.instruction(
"""
You are a story critic. Review the story provided in
session state with key 'current_story'. Provide 1-2 sentences of constructive criticism
You are a story critic. Review the story: {current_story}. Provide 1-2 sentences of constructive criticism
on how to improve it. Focus on plot or character.
""")
.inputSchema(null)
@@ -91,9 +90,7 @@ You are a story writer. Write a short story (around 100 words) about a cat,
.description("Revises the story based on criticism.")
.instruction(
"""
You are a story reviser. Revise the story provided in
session state with key 'current_story', based on the criticism in
session state with key 'criticism'. Output only the revised story.
You are a story reviser. Revise the story: {current_story}, based on the criticism: {criticism}. Output only the revised story.
""")
.inputSchema(null)
.outputKey("current_story") // Overwrites the original story
@@ -106,8 +103,7 @@ You are a story writer. Write a short story (around 100 words) about a cat,
.description("Checks grammar and suggests corrections.")
.instruction(
"""
You are a grammar checker. Check the grammar of the story
provided in session state with key 'current_story'. Output only the suggested
You are a grammar checker. Check the grammar of the story: {current_story}. Output only the suggested
corrections as a list, or output 'Grammar is good!' if there are no errors.
""")
.outputKey("grammar_suggestions")
@@ -120,8 +116,7 @@ You are a story writer. Write a short story (around 100 words) about a cat,
.description("Analyzes the tone of the story.")
.instruction(
"""
You are a tone analyzer. Analyze the tone of the story
provided in session state with key 'current_story'. Output only one word: 'positive' if
You are a tone analyzer. Analyze the tone of the story: {current_story}. Output only one word: 'positive' if
the tone is generally positive, 'negative' if the tone is generally negative, or 'neutral'
otherwise.
""")
17 changes: 6 additions & 11 deletions examples/python/snippets/agents/custom-agent/storyflow_agent.py
@@ -171,17 +171,15 @@ async def _run_async_impl(
story_generator = LlmAgent(
name="StoryGenerator",
model=GEMINI_2_FLASH,
instruction="""You are a story writer. Write a short story (around 100 words) about a cat,
based on the topic provided in session state with key 'topic'""",
instruction="""You are a story writer. Write a short story (around 100 words), on the following topic: {topic}""",
input_schema=None,
output_key="current_story", # Key for storing output in session state
)

critic = LlmAgent(
name="Critic",
model=GEMINI_2_FLASH,
instruction="""You are a story critic. Review the story provided in
session state with key 'current_story'. Provide 1-2 sentences of constructive criticism
instruction="""You are a story critic. Review the story provided: {{current_story}}. Provide 1-2 sentences of constructive criticism
on how to improve it. Focus on plot or character.""",
input_schema=None,
output_key="criticism", # Key for storing criticism in session state
@@ -190,18 +188,16 @@ async def _run_async_impl(
reviser = LlmAgent(
name="Reviser",
model=GEMINI_2_FLASH,
instruction="""You are a story reviser. Revise the story provided in
session state with key 'current_story', based on the criticism in
session state with key 'criticism'. Output only the revised story.""",
instruction="""You are a story reviser. Revise the story provided: {{current_story}}, based on the criticism in
{{criticism}}. Output only the revised story.""",
input_schema=None,
output_key="current_story", # Overwrites the original story
)

grammar_check = LlmAgent(
name="GrammarCheck",
model=GEMINI_2_FLASH,
instruction="""You are a grammar checker. Check the grammar of the story
provided in session state with key 'current_story'. Output only the suggested
instruction="""You are a grammar checker. Check the grammar of the story provided: {current_story}. Output only the suggested
corrections as a list, or output 'Grammar is good!' if there are no errors.""",
input_schema=None,
output_key="grammar_suggestions",
@@ -210,8 +206,7 @@ async def _run_async_impl(
tone_check = LlmAgent(
name="ToneCheck",
model=GEMINI_2_FLASH,
instruction="""You are a tone analyzer. Analyze the tone of the story
provided in session state with key 'current_story'. Output only one word: 'positive' if
instruction="""You are a tone analyzer. Analyze the tone of the story provided: {current_story}. Output only one word: 'positive' if
the tone is generally positive, 'negative' if the tone is generally negative, or 'neutral'
otherwise.""",
input_schema=None,