Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion contributing/samples/gepa/experiment.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,6 @@
from tau_bench.types import EnvRunResult
from tau_bench.types import RunConfig
import tau_bench_agent as tau_bench_agent_lib

import utils


Expand Down
1 change: 0 additions & 1 deletion contributing/samples/gepa/run_experiment.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,6 @@
from absl import flags
import experiment
from google.genai import types

import utils

_OUTPUT_DIR = flags.DEFINE_string(
Expand Down
13 changes: 13 additions & 0 deletions contributing/samples/hello_world_anthropic_thinking/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# Copyright 2026 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
61 changes: 61 additions & 0 deletions contributing/samples/hello_world_anthropic_thinking/agent.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
# Copyright 2026 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from google.adk import Agent
from google.adk.models.anthropic_llm import AnthropicLlm
from google.adk.planners.built_in_planner import BuiltInPlanner
from google.genai import types


def check_prime(nums: list[int]) -> str:
  """Check if a given list of numbers are prime.

  Args:
    nums: The list of numbers to check.

  Returns:
    A str indicating which numbers are prime.
  """
  found: set[int] = set()
  for raw in nums:
    candidate = int(raw)  # Tolerate numeric strings / floats from the model.
    if candidate <= 1:
      # 0, 1, and negatives are never prime.
      continue
    # Trial division up to sqrt(candidate): prime iff no divisor found.
    if all(candidate % divisor for divisor in range(2, int(candidate**0.5) + 1)):
      found.add(candidate)
  if not found:
    return "No prime numbers found."
  return f"{', '.join(str(num) for num in sorted(found))} are prime numbers."


# Claude model served via the Anthropic integration.
_claude_model = AnthropicLlm(model="claude-sonnet-4-6")

# Planner that enables Claude extended thinking with a fixed token budget.
_thinking_planner = BuiltInPlanner(
    thinking_config=types.ThinkingConfig(thinking_budget=5000),
)

root_agent = Agent(
    model=_claude_model,
    name="anthropic_thinking_agent",
    description="An agent that uses Claude extended thinking via Vertex AI.",
    instruction="""
You are a helpful assistant. Use your reasoning carefully before answering.
When asked to check prime numbers, use the check_prime tool.
""",
    tools=[check_prime],
    planner=_thinking_planner,
)
103 changes: 101 additions & 2 deletions src/google/adk/models/anthropic_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,13 @@ class _ToolUseAccumulator:
args_json: str


class _ThinkingAccumulator(BaseModel):
  """Accumulates streamed thinking content block data.

  One instance tracks a single streamed `thinking` content block: text
  arrives incrementally via ThinkingDelta events and the signature arrives
  via a SignatureDelta event.
  """

  # Concatenated thinking text, appended to on each ThinkingDelta.
  thinking: str = ""
  # Signature for the thinking block, set (not appended) from SignatureDelta.
  signature: str = ""


class ClaudeRequest(BaseModel):
system_instruction: str
messages: Iterable[anthropic_types.MessageParam]
Expand Down Expand Up @@ -108,7 +115,24 @@ def part_to_message_block(
anthropic_types.DocumentBlockParam,
anthropic_types.ToolUseBlockParam,
anthropic_types.ToolResultBlockParam,
anthropic_types.ThinkingBlockParam,
anthropic_types.RedactedThinkingBlockParam,
]:
if part.thought:
signature_str = (
part.thought_signature.decode("utf-8") if part.thought_signature else ""
)
if part.text:
return anthropic_types.ThinkingBlockParam(
type="thinking",
thinking=part.text,
signature=signature_str,
)
else:
return anthropic_types.RedactedThinkingBlockParam(
type="redacted_thinking",
data=signature_str,
)
if part.text:
return anthropic_types.TextBlockParam(text=part.text, type="text")
elif part.function_call:
Expand Down Expand Up @@ -229,6 +253,18 @@ def content_block_to_part(
)
part.function_call.id = content_block.id
return part
if isinstance(content_block, anthropic_types.ThinkingBlock):
return types.Part(
text=content_block.thinking,
thought=True,
thought_signature=content_block.signature.encode("utf-8"),
)
if isinstance(content_block, anthropic_types.RedactedThinkingBlock):
return types.Part(
text="",
thought=True,
thought_signature=content_block.data.encode("utf-8"),
)
raise NotImplementedError("Not supported yet.")


Expand Down Expand Up @@ -349,6 +385,26 @@ def function_declaration_to_tool_param(
)


def _build_thinking_param(
    thinking_config: Optional[types.ThinkingConfig],
    max_tokens: int,
) -> Union[anthropic_types.ThinkingConfigEnabledParam, NotGiven]:
  """Converts ADK ThinkingConfig to Anthropic ThinkingConfigEnabledParam.

  Returns NOT_GIVEN if thinking is not configured or budget is 0.
  Clamps budget_tokens to max_tokens - 1 to satisfy the API constraint.
  """
  # No config, or a falsy budget (None/0), means thinking stays disabled.
  if thinking_config is None or not thinking_config.thinking_budget:
    return NOT_GIVEN
  # NOTE(review): a negative budget (genai uses -1 for "dynamic") passes
  # through unchanged here, and Anthropic also enforces a minimum budget —
  # confirm callers only supply positive budgets within the accepted range.
  capped_budget = min(thinking_config.thinking_budget, max_tokens - 1)
  return anthropic_types.ThinkingConfigEnabledParam(
      type="enabled",
      budget_tokens=capped_budget,
  )


class AnthropicLlm(BaseLlm):
"""Integration with Claude models via the Anthropic API.

Expand Down Expand Up @@ -401,6 +457,10 @@ async def generate_content_async(
if llm_request.tools_dict
else NOT_GIVEN
)
thinking = _build_thinking_param(
llm_request.config.thinking_config if llm_request.config else None,
self.max_tokens,
)

if not stream:
message = await self._anthropic_client.messages.create(
Expand All @@ -410,11 +470,12 @@ async def generate_content_async(
tools=tools,
tool_choice=tool_choice,
max_tokens=self.max_tokens,
thinking=thinking,
)
yield message_to_generate_content_response(message)
else:
async for response in self._generate_content_streaming(
llm_request, messages, tools, tool_choice
llm_request, messages, tools, tool_choice, thinking
):
yield response

Expand All @@ -424,6 +485,9 @@ async def _generate_content_streaming(
messages: list[anthropic_types.MessageParam],
tools: Union[Iterable[anthropic_types.ToolUnionParam], NotGiven],
tool_choice: Union[anthropic_types.ToolChoiceParam, NotGiven],
thinking: Union[
anthropic_types.ThinkingConfigEnabledParam, NotGiven
] = NOT_GIVEN,
) -> AsyncGenerator[LlmResponse, None]:
"""Handles streaming responses from Anthropic models.

Expand All @@ -439,12 +503,15 @@ async def _generate_content_streaming(
tool_choice=tool_choice,
max_tokens=self.max_tokens,
stream=True,
thinking=thinking,
)

# Track content blocks being built during streaming.
# Each entry maps a block index to its accumulated state.
text_blocks: dict[int, str] = {}
tool_use_blocks: dict[int, _ToolUseAccumulator] = {}
thinking_blocks: dict[int, _ThinkingAccumulator] = {}
redacted_thinking_blocks: dict[int, str] = {}
input_tokens = 0
output_tokens = 0

Expand All @@ -463,6 +530,10 @@ async def _generate_content_streaming(
name=block.name,
args_json="",
)
elif isinstance(block, anthropic_types.ThinkingBlock):
thinking_blocks[event.index] = _ThinkingAccumulator()
elif isinstance(block, anthropic_types.RedactedThinkingBlock):
redacted_thinking_blocks[event.index] = block.data

elif event.type == "content_block_delta":
delta = event.delta
Expand All @@ -479,16 +550,44 @@ async def _generate_content_streaming(
elif isinstance(delta, anthropic_types.InputJSONDelta):
if event.index in tool_use_blocks:
tool_use_blocks[event.index].args_json += delta.partial_json
elif isinstance(delta, anthropic_types.ThinkingDelta):
if event.index in thinking_blocks:
thinking_blocks[event.index].thinking += delta.thinking
elif isinstance(delta, anthropic_types.SignatureDelta):
if event.index in thinking_blocks:
thinking_blocks[event.index].signature = delta.signature

elif event.type == "message_delta":
output_tokens = event.usage.output_tokens

# Build the final aggregated response with all content.
all_parts: list[types.Part] = []
all_indices = sorted(
set(list(text_blocks.keys()) + list(tool_use_blocks.keys()))
set(
list(text_blocks.keys())
+ list(tool_use_blocks.keys())
+ list(thinking_blocks.keys())
+ list(redacted_thinking_blocks.keys())
)
)
for idx in all_indices:
if idx in thinking_blocks:
acc = thinking_blocks[idx]
all_parts.append(
types.Part(
text=acc.thinking,
thought=True,
thought_signature=acc.signature.encode("utf-8"),
)
)
if idx in redacted_thinking_blocks:
all_parts.append(
types.Part(
text="",
thought=True,
thought_signature=redacted_thinking_blocks[idx].encode("utf-8"),
)
)
if idx in text_blocks:
all_parts.append(types.Part.from_text(text=text_blocks[idx]))
if idx in tool_use_blocks:
Expand Down
Loading