38 changes: 38 additions & 0 deletions .github/pr_poll_3622.sh
@@ -0,0 +1,38 @@
#!/usr/bin/env bash
set -euo pipefail

PR_NUMBER=3622
REPO=google/adk-python
LOG=/workspaces/adk-python/.pr_poll_3622.log

echo "Starting PR poller for $REPO#$PR_NUMBER, logging to $LOG"

# Ensure log exists
mkdir -p "$(dirname "$LOG")"
: > "$LOG"

while true; do
ts=$(date --iso-8601=seconds)
if command -v gh >/dev/null 2>&1; then
# Query the PR status as JSON with gh; on failure, log the error output
if out=$(gh pr view "$PR_NUMBER" --repo "$REPO" --json number,title,author,headRefName,baseRefName,mergeStateStatus 2>&1); then
echo "[$ts] $out" >> "$LOG"
# append a short status
echo "[$ts] SHORT: $(echo "$out" | head -n 1)" >> "$LOG"
else
echo "[$ts] GH_ERROR: $out" >> "$LOG"
fi
elif [[ -n "${GITHUB_TOKEN:-}" ]]; then
out=$(curl -s -H "Authorization: token $GITHUB_TOKEN" "https://api.github.com/repos/$REPO/pulls/$PR_NUMBER") || out="ERROR_FROM_CURL"
echo "[$ts] $out" >> "$LOG"
echo "[$ts] SHORT: $(echo "$out" | head -c 200)" >> "$LOG"
else
echo "[$ts] ERROR: gh CLI not available and GITHUB_TOKEN not set" >> "$LOG"
fi

# NOTE: do not exit automatically here; keep polling until manually stopped.
# If desired we could exit when the PR is merged/closed, but some CI checks
# remain visible after merge, so prefer continuous monitoring.

sleep 600
done
2 changes: 2 additions & 0 deletions .pr_poll_3622.out
@@ -0,0 +1,2 @@
nohup: ignoring input
Starting PR poller for google/adk-python#3622, logging to /workspaces/adk-python/.pr_poll_3622.log
36 changes: 31 additions & 5 deletions src/google/adk/artifacts/artifact_util.py
@@ -109,8 +109,34 @@ def is_artifact_ref(artifact: types.Part) -> bool:
Returns:
True if the artifact part is an artifact reference, False otherwise.
"""
return bool(
artifact.file_data
and artifact.file_data.file_uri
and artifact.file_data.file_uri.startswith("artifact://")
)
# Support both object-like `types.Part` and plain `dict` shapes.
file_uri = get_file_uri(artifact)
return bool(file_uri and file_uri.startswith("artifact://"))


def get_file_uri(artifact: types.Part) -> Optional[str]:
"""Extracts the `file_uri` from an artifact part, supporting dict and object shapes.

Returns the file_uri string or None if not present.
"""
if isinstance(artifact, dict):
file_data = artifact.get("file_data")
if not file_data or not isinstance(file_data, dict):
return None
return file_data.get("file_uri")

# object-like: attempt attribute access
file_data = getattr(artifact, "file_data", None)
if not file_data:
return None
return getattr(file_data, "file_uri", None)


def get_part_field(artifact: types.Part, name: str):
"""Safely extracts a field from an artifact part supporting dict or object shapes.

Returns the field value or None if missing.
"""
if isinstance(artifact, dict):
return artifact.get(name)
return getattr(artifact, name, None)
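
A minimal usage sketch of the new helpers (assuming they are importable as `google.adk.artifacts.artifact_util` and that `types` is `google.genai.types`; the artifact URI below is only an illustrative value):

from google.genai import types

from google.adk.artifacts import artifact_util

# Dict-shaped part, e.g. a payload that arrived as plain JSON.
dict_part = {
    "file_data": {"file_uri": "artifact://app/user/session/report.pdf/1"}
}

# Object-shaped part built through the genai types API.
obj_part = types.Part(
    file_data=types.FileData(
        file_uri="artifact://app/user/session/report.pdf/1",
        mime_type="application/pdf",
    )
)

for part in (dict_part, obj_part):
  # Both shapes resolve to the same URI and are recognized as artifact refs.
  assert (
      artifact_util.get_file_uri(part)
      == "artifact://app/user/session/report.pdf/1"
  )
  assert artifact_util.is_artifact_ref(part)

# Missing fields fall back to None instead of raising.
assert artifact_util.get_part_field(dict_part, "inline_data") is None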
98 changes: 73 additions & 25 deletions src/google/adk/artifacts/in_memory_artifact_service.py
@@ -117,23 +117,44 @@ async def save_artifact(
)
if custom_metadata:
artifact_version.custom_metadata = custom_metadata

if artifact.inline_data is not None:
artifact_version.mime_type = artifact.inline_data.mime_type
elif artifact.text is not None:
# Use shared helpers to extract fields and URIs from both object-like
# and dict-shaped artifacts. Centralizing this logic in `artifact_util`
# avoids duplication and keeps behaviour consistent across modules.
inline_data = artifact_util.get_part_field(artifact, "inline_data")
text = artifact_util.get_part_field(artifact, "text")
file_data = artifact_util.get_part_field(artifact, "file_data")

if inline_data is not None:
# inline_data may be a dict or an object
artifact_version.mime_type = (
inline_data.get("mime_type")
if isinstance(inline_data, dict)
else inline_data.mime_type
)
elif text is not None:
artifact_version.mime_type = "text/plain"
elif artifact.file_data is not None:
elif file_data is not None:
# If the artifact is an artifact ref, validate the referenced URI.
if artifact_util.is_artifact_ref(artifact):
if not artifact_util.parse_artifact_uri(artifact.file_data.file_uri):
raise ValueError(
f"Invalid artifact reference URI: {artifact.file_data.file_uri}"
)
# If it's a valid artifact URI, we store the artifact part as-is.
# And we don't know the mime type until we load it.
file_uri = artifact_util.get_file_uri(artifact)
if not file_uri or not artifact_util.parse_artifact_uri(file_uri):
raise ValueError(f"Invalid artifact reference URI: {file_uri}")
# Valid artifact URI: keep part as-is; mime type may be resolved later.
else:
artifact_version.mime_type = artifact.file_data.mime_type
artifact_version.mime_type = (
file_data.get("mime_type")
if isinstance(file_data, dict)
else file_data.mime_type
)
else:
raise ValueError("Not supported artifact type.")
# Fallback for unknown shapes: instead of raising, store the artifact
# as-is and record a generic binary mime type.
artifact_version.mime_type = "application/octet-stream"
logger.debug(
"save_artifact: unknown artifact shape, falling back to"
" application/octet-stream for %s",
path,
)

self.artifacts[path].append(
_ArtifactEntry(data=artifact, artifact_version=artifact_version)
@@ -167,15 +188,18 @@ async def load_artifact(

# Resolve artifact reference if needed.
artifact_data = artifact_entry.data
# Resolve artifact reference if needed. Support dict-shaped stored
# artifacts as well as object-like `types.Part`.
if artifact_util.is_artifact_ref(artifact_data):
parsed_uri = artifact_util.parse_artifact_uri(
artifact_data.file_data.file_uri
)
# Extract file_uri safely for dict or object shapes.
if isinstance(artifact_data, dict):
file_uri = artifact_data.get("file_data", {}).get("file_uri")
else:
file_uri = getattr(artifact_data.file_data, "file_uri", None)

parsed_uri = artifact_util.parse_artifact_uri(file_uri)
if not parsed_uri:
raise ValueError(
"Invalid artifact reference URI:"
f" {artifact_data.file_data.file_uri}"
)
raise ValueError(f"Invalid artifact reference URI: {file_uri}")
return await self.load_artifact(
app_name=parsed_uri.app_name,
user_id=parsed_uri.user_id,
@@ -184,12 +208,36 @@
version=parsed_uri.version,
)

if (
artifact_data == types.Part()
or artifact_data == types.Part(text="")
or (artifact_data.inline_data and not artifact_data.inline_data.data)
):
# Determine emptiness for both shapes.
def _is_empty(a):
if a is None:
return True
if isinstance(a, dict):
# Empty forms: no content fields at all, or inline_data with no bytes.
if (
a.get("text") in (None, "")
and not a.get("inline_data")
and not a.get("file_data")
):
return True
inline = a.get("inline_data")
if inline and isinstance(inline, dict) and not inline.get("data"):
return True
return False
# object-like types.Part
try:
if a == types.Part() or a == types.Part(text=""):
return True
inline = getattr(a, "inline_data", None)
if inline and not getattr(inline, "data", None):
return True
except Exception:
return False
Comment on lines +228 to +235
Contributor
medium
Using except Exception: is very broad and can hide unexpected bugs. It's better to catch more specific exceptions that you anticipate might be raised from the operations in the try block, such as TypeError or AttributeError, to avoid accidentally silencing unrelated issues.
return False

if _is_empty(artifact_data):
return None

return artifact_data

@override
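As a follow-up to the review comment above, a sketch of how the object-like branch of `_is_empty` could catch only the anticipated exception types; this is a suggestion based on the reviewer's remark, not the code in this PR:

from google.genai import types


def _is_empty_object_part(a) -> bool:
  """Emptiness check for object-like types.Part values (sketch only)."""
  try:
    if a == types.Part() or a == types.Part(text=""):
      return True
    inline = getattr(a, "inline_data", None)
    if inline and not getattr(inline, "data", None):
      return True
  except (TypeError, AttributeError):
    # Comparison or attribute access failed on an unexpected shape;
    # treat the part as non-empty rather than masking unrelated errors.
    return False
  return False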
11 changes: 11 additions & 0 deletions src/google/adk/cli/cli_deploy.py
@@ -518,6 +518,17 @@ def to_cloud_run(
memory_service_uri: The URI of the memory service.
"""
app_name = app_name or os.path.basename(agent_folder)
# Validate app_name: it will be used as a package/folder name in the
# generated image. Reject names that are not valid Python identifiers
# (for example names containing dashes) and provide a helpful error
# message. This prevents confusing failures later during import or
# when copying files expecting a valid package name.
if not app_name.isidentifier():
raise click.ClickException(
f"Invalid agent folder name '{app_name}'. Agent folder names must be"
' valid Python identifiers (letters, digits and underscores). Please'
' rename the folder or pass a valid `--app_name`.'
)

click.echo(f'Start generating Cloud Run source files in {temp_folder}')

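For reference, `str.isidentifier()` is a standard-library check, so the rule can be previewed outside the CLI; the folder names below are only illustrative:

assert "my_agent".isidentifier()       # accepted
assert not "my-agent".isidentifier()   # dashes are rejected
assert not "1st_agent".isidentifier()  # cannot start with a digit
assert not "my agent".isidentifier()   # spaces are rejected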
11 changes: 10 additions & 1 deletion src/google/adk/utils/instructions_utils.py
@@ -79,7 +79,16 @@ async def _async_sub(pattern, repl_async_fn, string) -> str:
return ''.join(result)

async def _replace_match(match) -> str:
var_name = match.group().lstrip('{').rstrip('}').strip()
matched_text = match.group()

# If the pattern is escaped using double braces (e.g. '{{var}}'),
# treat it as a literal and unescape to single braces: '{var}'.
# This allows instruction text to include code examples like
# f"User: {{user_id}}" without attempting substitution.
if matched_text.startswith('{{') and matched_text.endswith('}}'):
return matched_text[1:-1]

var_name = matched_text.lstrip('{').rstrip('}').strip()
optional = False
if var_name.endswith('?'):
optional = True
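A standalone sketch of the escaping rule: double braces are unescaped to a literal single-brace form, while single braces are substituted from state. This mimics the behavior described above rather than calling the ADK helper itself, and the `render` function, regex, template, and state values are all made up for illustration:

import re


def render(template: str, state: dict) -> str:
  """Toy renderer reproducing the '{{var}}' escaping rule (sketch only)."""

  def repl(match: re.Match) -> str:
    text = match.group()
    if text.startswith("{{") and text.endswith("}}"):
      # Escaped: strip one brace from each side and keep it literal.
      return text[1:-1]
    name = text.lstrip("{").rstrip("}").strip()
    return str(state.get(name, text))

  # Match either an escaped '{{name}}' or a plain '{name}' placeholder.
  return re.sub(r"\{\{[^{}]*\}\}|\{[^{}]*\}", repl, template)


print(render("User: {user_id}, example: {{user_id}}", {"user_id": "u123"}))
# -> User: u123, example: {user_id}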