1 change: 1 addition & 0 deletions CLI_REFERENCE.md
@@ -356,6 +356,7 @@ $ aignostics application run describe [OPTIONS] RUN_ID
**Options**:

* `--format TEXT`: Output format: 'text' (default) or 'json' [default: text]
* `--summarize, -s`: Show only run and item status summary (external ID, state, error message)
* `--help`: Show this message and exit.

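For illustration, a hedged sketch of driving the documented command from a Python script via `subprocess`; the run ID below is a placeholder, and the `aignostics` CLI is assumed to be installed and on PATH:

```python
import subprocess

# Placeholder run ID; substitute a real one.
run_id = "run-123"

# Full details (default output), then the condensed view added by --summarize / -s.
subprocess.run(["aignostics", "application", "run", "describe", run_id], check=True)
subprocess.run(["aignostics", "application", "run", "describe", run_id, "--summarize"], check=True)
```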
#### `aignostics application run dump-metadata`
12 changes: 11 additions & 1 deletion src/aignostics/application/_cli.py
@@ -919,6 +919,14 @@ def run_describe(
str,
typer.Option(help="Output format: 'text' (default) or 'json'"),
] = "text",
summarize: Annotated[
bool,
typer.Option(
"--summarize",
"-s",
help="Show only run and item status summary (external ID, state, error message)",
),
] = False,
) -> None:
"""Describe run."""
logger.trace("Describing run with ID '{}'", run_id)
@@ -931,7 +939,9 @@ def run_describe(
run_details = run.details(hide_platform_queue_position=not user_info.is_internal_user)
print(json.dumps(run_details.model_dump(mode="json"), indent=2, default=str))
else:
retrieve_and_print_run_details(run, hide_platform_queue_position=not user_info.is_internal_user)
retrieve_and_print_run_details(
run, hide_platform_queue_position=not user_info.is_internal_user, summarize=summarize
)
logger.debug("Described run with ID '{}'", run_id)
except NotFoundException:
logger.warning(f"Run with ID '{run_id}' not found.")
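As a companion to the CLI wiring above, a minimal sketch of exercising the new flag with Typer's test runner; the import path of the root Typer app is assumed (adjust to the actual package layout), and in a real test the platform and authentication calls behind `run describe` would be patched:

```python
from typer.testing import CliRunner

from aignostics.cli import cli  # assumed import path for the root Typer app

runner = CliRunner()

# Invoke the describe command with the new --summarize / -s flag.
# "run-123" is a placeholder run ID; platform/auth calls are not mocked here.
result = runner.invoke(cli, ["application", "run", "describe", "run-123", "--summarize"])
print(result.exit_code)
print(result.output)
```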
20 changes: 12 additions & 8 deletions src/aignostics/application/_utils.py
@@ -26,6 +26,7 @@
)
from aignostics.platform import (
InputArtifactData,
ItemState,
OutputArtifactData,
OutputArtifactElement,
Run,
@@ -174,17 +175,17 @@ class OutputFormat(StrEnum):
JSON = "json"


def _format_status_string(state: RunState, termination_reason: str | None = None) -> str:
def _format_status_string(state: RunState | ItemState, termination_reason: str | None = None) -> str:
"""Format status string with optional termination reason.

Args:
state (RunState): The run state
state (RunState | ItemState): The run or item state
termination_reason (str | None): Optional termination reason

Returns:
str: Formatted status string
"""
if state is RunState.TERMINATED and termination_reason:
if (state.value == RunState.TERMINATED or state.value == ItemState.TERMINATED) and termination_reason:
Inline review comment:
Bug: The comparison state.value == RunState.TERMINATED incorrectly compares a string to an enum object, which will always evaluate to false, hiding the termination reason.
Severity: MEDIUM

Suggested fix: Change the condition to compare enum members directly, for example: `if (state is RunState.TERMINATED or state is ItemState.TERMINATED) and termination_reason:`. Alternatively, compare the string values consistently: `if state.value == 'TERMINATED' and termination_reason:`.

Location: src/aignostics/application/_utils.py#L188

Potential issue: In the `_format_status_string` function, the condition `state.value == RunState.TERMINATED` incorrectly compares a string value (e.g., `'TERMINATED'`) with an enum member object (`RunState.TERMINATED`). Because `RunState` and `ItemState` inherit from `(str, Enum)` and not the more modern `StrEnum`, this comparison will always evaluate to `False`. As a result, the termination reason is never appended to the status string for terminated runs or items, preventing users from seeing why a process was terminated and degrading the utility of status summaries.


return f"{state.value} ({termination_reason})"
return f"{state.value}"

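To make the reviewer's concern concrete, here is a self-contained sketch using hypothetical stand-in enums (the real `RunState`/`ItemState` definitions live in the generated platform models): comparing `state.value` against an enum member depends on whether the enum carries a `str` mixin, while comparing against the member itself does not.

```python
from enum import Enum


class PlainState(Enum):  # stand-in for a plain Enum with string values
    TERMINATED = "TERMINATED"


class StrState(str, Enum):  # stand-in for a str-mixed Enum
    TERMINATED = "TERMINATED"


state = PlainState.TERMINATED
print(state.value == PlainState.TERMINATED)  # False: plain str vs. Enum member
print(state is PlainState.TERMINATED)        # True

state = StrState.TERMINATED
print(state.value == StrState.TERMINATED)    # True: the str mixin compares by value
print(state is StrState.TERMINATED)          # True
```

Comparing against the member itself (with `is` or `==`), as the suggested fix does, behaves the same in both cases.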
@@ -277,28 +278,31 @@ def _format_run_details(run: RunData) -> str:
return output


def retrieve_and_print_run_details(run_handle: Run, hide_platform_queue_position: bool) -> None:
def retrieve_and_print_run_details(
run_handle: Run, hide_platform_queue_position: bool, *, summarize: bool = False
) -> None:
"""Retrieve and print detailed information about a run.

Args:
run_handle (Run): The Run handle
hide_platform_queue_position (bool): Whether to hide platform-wide queue position
summarize (bool): If True, show only status summary (external ID, state, error message)

"""
run = run_handle.details(hide_platform_queue_position=hide_platform_queue_position)

run_details = _format_run_details(run)
output = f"[bold]Run Details for {run.run_id}[/bold]\n{'=' * 80}\n{run_details}\n\n[bold]Items:[/bold]"

console.print(output)
_retrieve_and_print_run_items(run_handle)
_retrieve_and_print_run_items(run_handle, summarize)


def _retrieve_and_print_run_items(run_handle: Run) -> None:
def _retrieve_and_print_run_items(run_handle: Run, summarize: bool = False) -> None:
"""Retrieve and print information about items in a run.

Args:
run_handle (Run): The Run handle
summarize (bool): If True, show only status summary without output artifacts
"""
results = run_handle.results()
if not results:
@@ -314,7 +318,7 @@ def _retrieve_and_print_run_items(run_handle: Run) -> None:
f" [bold]Custom Metadata:[/bold] {item.custom_metadata or 'None'}"
)

if item.output_artifacts:
if not summarize and item.output_artifacts:
artifacts_output = "\n [bold]Output Artifacts:[/bold]"
for artifact in item.output_artifacts:
artifacts_output += (
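Distilled from the change above, a minimal, self-contained sketch (simplified dataclasses rather than the actual `ItemResult`/`OutputArtifactElement` models) of how the `summarize` flag gates per-item artifact output:

```python
from dataclasses import dataclass, field


@dataclass
class Artifact:  # simplified stand-in for OutputArtifactElement
    name: str
    download_url: str


@dataclass
class Item:  # simplified stand-in for ItemResult
    external_id: str
    state: str
    error_message: str | None = None
    output_artifacts: list[Artifact] = field(default_factory=list)


def format_item(item: Item, summarize: bool = False) -> str:
    """Render one item; omit artifact details when summarize is True."""
    lines = [f"{item.external_id}: {item.state}"]
    if item.error_message:
        lines.append(f"  error: {item.error_message}")
    if not summarize and item.output_artifacts:
        for artifact in item.output_artifacts:
            lines.append(f"  {artifact.name}: {artifact.download_url}")
    return "\n".join(lines)


item = Item(
    "slide-001.svs",
    "TERMINATED",
    output_artifacts=[Artifact("result.parquet", "https://example.com/result.parquet")],
)
print(format_item(item))                  # artifact line shown
print(format_item(item, summarize=True))  # status only
```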
245 changes: 245 additions & 0 deletions tests/aignostics/application/utils_test.py
@@ -787,3 +787,248 @@ def test_queue_position_string_from_run_with_only_platform_position() -> None:
num_preceding_items_platform=15,
)
assert queue_position_string_from_run(run) == "15 items ahead across the entire platform"


# Tests for retrieve_and_print_run_details with summarize option


@pytest.mark.unit
@patch("aignostics.application._utils.console")
def test_retrieve_and_print_run_details_summarize_mode(mock_console: Mock) -> None:
"""Test summarize mode shows concise output with external ID, state, and errors."""
submitted_at = datetime(2025, 1, 1, 12, 0, 0, tzinfo=UTC)
terminated_at = datetime(2025, 1, 1, 13, 0, 0, tzinfo=UTC)

run_data = RunData(
run_id="run-summarize-test",
application_id="he-tme",
version_number="1.0.0",
state=RunState.TERMINATED,
termination_reason=RunTerminationReason.ALL_ITEMS_PROCESSED,
output=RunOutput.FULL,
statistics=RunItemStatistics(
item_count=2,
item_pending_count=0,
item_processing_count=0,
item_skipped_count=0,
item_succeeded_count=1,
item_user_error_count=1,
item_system_error_count=0,
),
submitted_at=submitted_at,
submitted_by="user@example.com",
terminated_at=terminated_at,
custom_metadata=None,
error_message=None,
error_code=None,
)

from aignx.codegen.models import ItemOutput

item_success = ItemResult(
item_id="item-001",
external_id="slide-success.svs",
state=ItemState.TERMINATED,
termination_reason=ItemTerminationReason.SUCCEEDED,
output=ItemOutput.FULL,
error_message=None,
error_code=None,
custom_metadata=None,
custom_metadata_checksum=None,
terminated_at=terminated_at,
output_artifacts=[],
)

item_error = ItemResult(
item_id="item-002",
external_id="slide-error.svs",
state=ItemState.TERMINATED,
termination_reason=ItemTerminationReason.USER_ERROR,
output=ItemOutput.NONE,
error_message="Invalid file format",
error_code="INVALID_FORMAT",
custom_metadata=None,
custom_metadata_checksum=None,
terminated_at=terminated_at,
output_artifacts=[],
)

mock_run = MagicMock()
mock_run.details.return_value = run_data
mock_run.results.return_value = [item_success, item_error]

retrieve_and_print_run_details(mock_run, hide_platform_queue_position=False, summarize=True)

# Collect all printed output
all_output = " ".join(str(call) for call in mock_console.print.call_args_list)

# Verify run details header is present
assert "Run Details for run-summarize-test" in all_output
# Verify application info is present
assert "he-tme" in all_output
# Verify items are listed with external IDs
assert "slide-success.svs" in all_output
assert "slide-error.svs" in all_output
# Verify error message is shown for failed item
assert "Invalid file format" in all_output
# Verify artifact details are NOT shown (they are omitted in summary)
assert "Download URL" not in all_output
assert "Artifact ID" not in all_output


@pytest.mark.unit
@patch("aignostics.application._utils.console")
def test_retrieve_and_print_run_details_summarize_no_items(mock_console: Mock) -> None:
"""Test summarize mode with no items shows appropriate message."""
submitted_at = datetime(2025, 1, 1, 12, 0, 0, tzinfo=UTC)

run_data = RunData(
run_id="run-no-items",
application_id="test-app",
version_number="0.0.1",
state=RunState.PENDING,
termination_reason=None,
output=RunOutput.NONE,
statistics=RunItemStatistics(
item_count=0,
item_pending_count=0,
item_processing_count=0,
item_skipped_count=0,
item_succeeded_count=0,
item_user_error_count=0,
item_system_error_count=0,
),
submitted_at=submitted_at,
submitted_by="user@example.com",
terminated_at=None,
custom_metadata=None,
error_message=None,
error_code=None,
)

mock_run = MagicMock()
mock_run.details.return_value = run_data
mock_run.results.return_value = []

retrieve_and_print_run_details(mock_run, hide_platform_queue_position=False, summarize=True)

all_output = " ".join(str(call) for call in mock_console.print.call_args_list)
assert "Run Details for run-no-items" in all_output
assert "No item results available" in all_output


@pytest.mark.unit
@patch("aignostics.application._utils.console")
def test_retrieve_and_print_run_details_summarize_with_run_error(mock_console: Mock) -> None:
"""Test summarize mode shows run-level errors."""
submitted_at = datetime(2025, 1, 1, 12, 0, 0, tzinfo=UTC)
terminated_at = datetime(2025, 1, 1, 12, 5, 0, tzinfo=UTC)

run_data = RunData(
run_id="run-with-error",
application_id="test-app",
version_number="0.0.1",
state=RunState.TERMINATED,
termination_reason=RunTerminationReason.CANCELED_BY_SYSTEM,
output=RunOutput.NONE,
statistics=RunItemStatistics(
item_count=1,
item_pending_count=0,
item_processing_count=0,
item_skipped_count=0,
item_succeeded_count=0,
item_user_error_count=0,
item_system_error_count=1,
),
submitted_at=submitted_at,
submitted_by="user@example.com",
terminated_at=terminated_at,
custom_metadata=None,
error_message="System error occurred",
error_code="SYS_ERROR",
)

mock_run = MagicMock()
mock_run.details.return_value = run_data
mock_run.results.return_value = []

retrieve_and_print_run_details(mock_run, hide_platform_queue_position=False, summarize=True)

all_output = " ".join(str(call) for call in mock_console.print.call_args_list)
assert "System error occurred" in all_output
assert "SYS_ERROR" in all_output


@pytest.mark.unit
@patch("aignostics.application._utils.console")
def test_retrieve_and_print_run_details_default_is_detailed(mock_console: Mock) -> None:
"""Test that default mode (summarize=False) shows detailed output with artifacts."""
submitted_at = datetime(2025, 1, 1, 12, 0, 0, tzinfo=UTC)
terminated_at = datetime(2025, 1, 1, 13, 0, 0, tzinfo=UTC)

run_data = RunData(
run_id="run-detailed-test",
application_id="he-tme",
version_number="1.0.0",
state=RunState.TERMINATED,
termination_reason=RunTerminationReason.ALL_ITEMS_PROCESSED,
output=RunOutput.FULL,
statistics=RunItemStatistics(
item_count=1,
item_pending_count=0,
item_processing_count=0,
item_skipped_count=0,
item_succeeded_count=1,
item_user_error_count=0,
item_system_error_count=0,
),
submitted_at=submitted_at,
submitted_by="user@example.com",
terminated_at=terminated_at,
custom_metadata=None,
error_message=None,
error_code=None,
)

from aignx.codegen.models import ArtifactOutput, ArtifactState, ArtifactTerminationReason, ItemOutput

item_result = ItemResult(
item_id="item-123",
external_id="slide-001.svs",
state=ItemState.TERMINATED,
termination_reason=ItemTerminationReason.SUCCEEDED,
output=ItemOutput.FULL,
error_message=None,
error_code=None,
custom_metadata=None,
custom_metadata_checksum=None,
terminated_at=terminated_at,
output_artifacts=[
OutputArtifactElement(
output_artifact_id="artifact-abc",
name="result.parquet",
download_url="https://example.com/result.parquet",
metadata={"media_type": "application/vnd.apache.parquet"},
state=ArtifactState.TERMINATED,
termination_reason=ArtifactTerminationReason.SUCCEEDED,
output=ArtifactOutput.AVAILABLE,
error_code=None,
error_message=None,
)
],
)

mock_run = MagicMock()
mock_run.details.return_value = run_data
mock_run.results.return_value = [item_result]

# Call without summarize parameter (default is False)
retrieve_and_print_run_details(mock_run, hide_platform_queue_position=False)

all_output = " ".join(str(call) for call in mock_console.print.call_args_list)

# Verify detailed output shows "Run Details" not "Run Summary"
assert "Run Details for run-detailed-test" in all_output
# Verify artifact details ARE shown in detailed mode
assert "Download URL" in all_output
assert "Artifact ID" in all_output