diff --git a/.github/.env.base b/.github/.env.base
index a9b7038..ccd77c1 100644
--- a/.github/.env.base
+++ b/.github/.env.base
@@ -137,15 +137,6 @@ TEST_TIMEOUT_RACE_COVER=30m # Timeout for tests with race+coverage (m
TEST_TIMEOUT_UNIT=20m # Timeout for unit tests only
TEST_TIMEOUT_FUZZ=5m # Timeout for fuzz tests
-# ================================================================================================
-# ๐ก GO-BROADCAST CONFIGURATION
-# ================================================================================================
-
-# Automerge Labels Configuration
-# When using --automerge flag, these labels will be added to created PRs
-# Comma-separated list of labels to apply for automatic merging
-GO_BROADCAST_AUTOMERGE_LABELS=automerge
-
# ================================================================================================
# ๐ GO-COVERAGE SYSTEM CONFIGURATION
# ================================================================================================
@@ -244,8 +235,9 @@ REDIS_CACHE_FORCE_PULL=false # Force pull Redis images even when cache
# ๐ช MAGE-X CONFIGURATION
# ================================================================================================
-MAGE_X_VERSION=v1.8.14 # https://github.com/mrz1836/mage-x/releases
+MAGE_X_VERSION=v1.10.3 # https://github.com/mrz1836/mage-x/releases
MAGE_X_USE_LOCAL=false # Use local version for development
+MAGE_X_CI_SKIP_STEP_SUMMARY=true # Skip duplicate test results in step summary (already in test validation summary)
MAGE_X_AUTO_DISCOVER_BUILD_TAGS=true # Enable auto-discovery of build tags
MAGE_X_AUTO_DISCOVER_BUILD_TAGS_EXCLUDE=race,custom # Comma-separated list of tags to exclude
MAGE_X_FORMAT_EXCLUDE_PATHS=vendor,node_modules,.git,.idea # Format exclusion paths (comma-separated directories to exclude from formatting)
@@ -284,7 +276,7 @@ MAGE_X_YAMLFMT_VERSION=v0.20.0 # https://github.c
# MAGE_X_DOWNLOAD_TIMEOUT=5000
# MAGE_X_DOWNLOAD_USER_AGENT=MAGE-X-Agent
# MAGE_X_PARALLEL=3
-# MAGE_X_TEST_EXCLUDE_MODULES=module1,module2
+# MAGE_X_TEST_EXCLUDE_MODULES=module1,module2
# MAGE_X_TEST_RACE=false
# MAGE_X_VERBOSE=true
@@ -458,3 +450,53 @@ PR_MANAGEMENT_SIZE_XS_THRESHOLD=10
PR_MANAGEMENT_SIZE_S_THRESHOLD=50
PR_MANAGEMENT_SIZE_M_THRESHOLD=200
PR_MANAGEMENT_SIZE_L_THRESHOLD=500
+
+# ================================================================================================
+# ๐ก GO-BROADCAST CONFIGURATION & AI-POWERED TEXT GENERATION
+# ================================================================================================
+
+# Automerge Labels Configuration
+# When using --automerge flag, these labels will be added to created PRs
+# Comma-separated list of labels to apply for automatic merging
+GO_BROADCAST_AUTOMERGE_LABELS=automerge
+
+# AI generates intelligent PR descriptions and commit messages based on diff analysis.
+# Disabled by default. All AI failures fall back to static templates silently.
+# Uses Google Genkit SDK with support for Anthropic, OpenAI, and Google providers.
+
+# Master switch - enables AI infrastructure (disabled by default)
+GO_BROADCAST_AI_ENABLED=false
+
+# Granular controls (default to GO_BROADCAST_AI_ENABLED value)
+GO_BROADCAST_AI_PR_ENABLED= # Enable AI for PR body generation
+GO_BROADCAST_AI_COMMIT_ENABLED= # Enable AI for commit message generation
+
+# Provider: anthropic, openai, google
+GO_BROADCAST_AI_PROVIDER=anthropic
+
+# API key (or use provider-specific: ANTHROPIC_API_KEY, OPENAI_API_KEY, GEMINI_API_KEY)
+# DO NOT USE THIS IN PUBLIC REPOSITORIES, USE ENVIRONMENT SECRETS INSTEAD
+# GO_BROADCAST_AI_API_KEY=
+
+# Model override (uses provider defaults if empty)
+# anthropic: claude-sonnet-4-5-20250929 | openai: gpt-5.2 | google: gemini-3-pro-preview
+GO_BROADCAST_AI_MODEL=
+
+# Generation parameters
+GO_BROADCAST_AI_MAX_TOKENS=2000
+GO_BROADCAST_AI_TIMEOUT=30
+GO_BROADCAST_AI_TEMPERATURE=0.3
+
+# Diff truncation (prevents token limit issues)
+GO_BROADCAST_AI_DIFF_MAX_CHARS=4000
+GO_BROADCAST_AI_DIFF_MAX_LINES_PER_FILE=50
+
+# Response caching (reduces API calls for identical diffs across repos)
+GO_BROADCAST_AI_CACHE_ENABLED=true
+GO_BROADCAST_AI_CACHE_TTL=3600
+GO_BROADCAST_AI_CACHE_MAX_SIZE=1000
+
+# Retry settings (handles transient failures)
+GO_BROADCAST_AI_RETRY_MAX_ATTEMPTS=3
+GO_BROADCAST_AI_RETRY_INITIAL_DELAY=1
+GO_BROADCAST_AI_RETRY_MAX_DELAY=10
diff --git a/.github/actions/test-failure-detection/action.yml b/.github/actions/test-failure-detection/action.yml
deleted file mode 100644
index 3bbb0cb..0000000
--- a/.github/actions/test-failure-detection/action.yml
+++ /dev/null
@@ -1,541 +0,0 @@
-# ------------------------------------------------------------------------------------
-# Test Failure Detection (Composite Action)
-#
-# Purpose: Define and provide reusable test failure detection functions for
-# robust test output parsing across different formats and test types.
-#
-# This action provides sophisticated failure detection capabilities:
-# - JSON-based test output parsing (fast single-pass)
-# - Text-based failure detection with multiple patterns
-# - Detailed error capture and context preservation
-# - Fallback detection for edge cases
-#
-# Maintainer: @mrz1836
-#
-# ------------------------------------------------------------------------------------
-
-name: "Test Failure Detection"
-description: "Provides robust test failure detection functions for JSON and text output parsing"
-
-inputs:
- output-file:
- description: "Test output file to analyze"
- required: false
- default: "test-output.log"
- exit-code:
- description: "Test command exit code"
- required: false
- default: "0"
- mode:
- description: "Detection mode (json or text)"
- required: false
- default: "text"
- failures-file:
- description: "Output file for detected failures"
- required: false
- default: "test-failures.txt"
-
-outputs:
- failure-count:
- description: "Number of detected test failures"
- value: ${{ steps.detect-failures.outputs.failure-count }}
- has-failures:
- description: "Boolean indicating if failures were detected"
- value: ${{ steps.detect-failures.outputs.has-failures }}
- detailed-failures-file:
- description: "Path to detailed failures file"
- value: ${{ steps.detect-failures.outputs.detailed-failures-file }}
-
-runs:
- using: "composite"
- steps:
- - name: ๐ง Define failure detection functions
- shell: bash
- run: |
- # Define reusable function for robust test failure detection
- cat > test-failure-functions.sh << 'DETECTION_FUNCTIONS_EOF'
- #!/bin/bash
-
- # Robust test failure detection function
- detect_test_failures() {
- local output_file="$1"
- local exit_code="${2:-0}"
- local mode="${3:-text}"
- local failures_file="${4:-test-failures.txt}"
-
- echo "๐ Detecting test failures with exit code: $exit_code, mode: $mode"
-
- # Primary check: exit code indicates failure
- if [[ "$exit_code" -ne 0 ]]; then
- echo "โ Exit code $exit_code indicates test failure"
-
- if [[ -f "$output_file" ]]; then
- case "$mode" in
- "json")
- # Enhanced JSON-based detection
- detect_failures_from_json "$output_file" "$failures_file"
- ;;
- "text"|*)
- # Enhanced text-based detection
- detect_failures_from_text "$output_file" "$failures_file"
- ;;
- esac
-
- # Count detected failures
- if [[ -f "$failures_file" ]]; then
- DETECTED_FAILURES=$(wc -l < "$failures_file" 2>/dev/null || echo "0")
- echo "๐ Detected $DETECTED_FAILURES specific failures"
- return $DETECTED_FAILURES
- fi
- else
- echo "โ ๏ธ Output file '$output_file' not found, relying on exit code"
- echo "Exit code indicates failure but no output file found" > "$failures_file"
- return 1
- fi
- else
- echo "โ
Exit code 0 indicates success"
- touch "$failures_file" # Create empty failures file
- return 0
- fi
- }
-
- # Smart and efficient JSON failure detection with unique signatures
- detect_failures_from_json() {
- local json_file="$1"
- local failures_file="$2"
- local signatures_file="${failures_file%.txt}-signatures.json"
-
- echo "๐ Using smart JSON-based failure detection on $json_file"
-
- # Quick JSON validation (< 0.1s) - check if file contains JSON test output
- if ! grep -q '^{.*"Action"' "$json_file" 2>/dev/null; then
- echo "โ ๏ธ No JSON content detected, using text fallback"
- detect_failures_from_text "$json_file" "$failures_file"
- return
- fi
-
- echo "โ
JSON content detected, processing efficiently..."
-
- # Initialize JSON array for structured failures with signatures
- echo '[]' > "$signatures_file"
-
- # Define common test validation filter to avoid duplication
- local test_failure_filter='select(.Action == "fail" and .Test != null and .Test != "" and .Test != "null" and (.Test | test("^Test[A-Za-z].*")))'
-
- # Fast single-pass JSON extraction for test failures (< 1s for 10K lines)
- # Filter JSON lines and parse in one pass - eliminates 2-minute hang
- # Note: Line numbers aren't available in Go test JSON output
- # IMPORTANT: Only detect actual test function failures, not package/suite completion events
- grep '^{' "$json_file" 2>/dev/null | \
- jq -r "$test_failure_filter"' |
- "--- FAIL: \(.Test) (\(.Package))"' \
- 2>/dev/null > "$failures_file"
-
- # Create structured test failure entries with unique signatures
- if grep '^{' "$json_file" 2>/dev/null | jq -r "$test_failure_filter" 2>/dev/null | head -1 | grep -q .; then
- echo "๐ Creating structured test failure entries with enhanced output..."
-
- # First pass: Extract test failure basic info from Action == "fail" entries
- local temp_failures
- temp_failures=$(mktemp)
- grep '^{' "$json_file" 2>/dev/null | \
- jq -r "$test_failure_filter"' | {
- type: "test",
- package: .Package,
- test: (if (.Test and .Test != null and .Test != "null") then .Test else "unknown" end),
- signature: (.Package + ":" + (if (.Test and .Test != null and .Test != "null") then .Test else "unknown" end)),
- unique_id: (.Package + ":" + (if (.Test and .Test != null and .Test != "null") then .Test else "unknown" end) | gsub("[^a-zA-Z0-9_/.-]"; "_"))
- }' 2>/dev/null | jq -s '.' > "$temp_failures"
-
- # Second pass: Extract failure output from Action == "output" entries containing "--- FAIL:"
- local temp_outputs
- temp_outputs=$(mktemp)
- grep '^{' "$json_file" 2>/dev/null | \
- jq -r 'select(.Action == "output" and (.Output // "") | contains("--- FAIL:")) | {
- package: .Package,
- test: (if (.Test and .Test != null and .Test != "null") then .Test else "unknown" end),
- output: (.Output // ""),
- signature: (.Package + ":" + (if (.Test and .Test != null and .Test != "null") then .Test else "unknown" end))
- }' 2>/dev/null | jq -s '.' > "$temp_outputs"
-
- # Third pass: Merge failure info with outputs using signature as key
- jq -n --slurpfile failures "$temp_failures" --slurpfile outputs "$temp_outputs" '
- ($failures[0] // []) as $fail_list |
- ($outputs[0] // []) as $output_list |
- ($output_list | group_by(.signature) | map({key: .[0].signature, value: (map(.output) | join("\n"))})) as $output_map |
- $fail_list | map(. + {
- output: (($output_map | map(select(.key == .signature)) | .[0].value) // ""),
- line_number: null
- })' > "$signatures_file.tmp" 2>/dev/null && \
- mv "$signatures_file.tmp" "$signatures_file" || echo '[]' > "$signatures_file"
-
- # Cleanup temp files
- rm -f "$temp_failures" "$temp_outputs"
- fi
-
- # Also check for build failures which have FailedBuild field but no Test field
- local build_failures
- build_failures=$(grep '^{' "$json_file" 2>/dev/null | \
- jq -r 'select(.FailedBuild) | .Package // .ImportPath' 2>/dev/null | sort -u)
-
- if [[ -n "$build_failures" ]]; then
- echo "๐จ Processing build failures with signatures..."
-
- # Create temporary array for build failures
- echo '[]' > "${signatures_file}.build"
-
- # Extract all build-output entries once before the loop for better performance
- local all_build_outputs
- all_build_outputs=$(grep '^{' "$json_file" 2>/dev/null | \
- jq -r 'select(.Action == "build-output" and .ImportPath) |
- (.ImportPath | split(" ")[0]) + "\t" + .Output' 2>/dev/null)
-
- while IFS= read -r pkg; do
- if [[ -n "$pkg" ]]; then
- echo "--- BUILD FAILED: $pkg" >> "$failures_file"
-
- # Extract build error messages for this package from pre-extracted data
- local build_errors
- build_errors=$(echo "$all_build_outputs" | grep "^$pkg " | cut -f2 | \
- grep -E "^[^[:space:]]" | head -10) # Limit to first 10 error lines
-
- local error_output=""
- if [[ -n "$build_errors" ]]; then
- echo "$build_errors" | sed 's/^/ /' >> "$failures_file"
- error_output="$build_errors"
- else
- echo " Build failed (no detailed error available)" >> "$failures_file"
- error_output="Build failed (no detailed error available)"
- fi
-
- # Create structured build failure entry
- jq -n --arg pkg "$pkg" --arg errors "$error_output" '{
- type: "build",
- package: $pkg,
- test: "build_compilation",
- output: $errors,
- signature: ($pkg + ":build_compilation"),
- unique_id: ($pkg + ":build_compilation" | gsub("[^a-zA-Z0-9_/.-]"; "_"))
- }' >> "${signatures_file}.build" 2>/dev/null
- fi
- done <<< "$build_failures"
-
- # Merge build failures into main signatures file
- if [[ -s "${signatures_file}.build" ]]; then
- jq -s 'add' "$signatures_file" "${signatures_file}.build" > "${signatures_file}.tmp" 2>/dev/null && \
- mv "${signatures_file}.tmp" "$signatures_file" || echo '[]' > "$signatures_file"
- fi
- rm -f "${signatures_file}.build"
- fi
-
- local failure_count
- failure_count=$(wc -l < "$failures_file" 2>/dev/null | tr -d '\n\r' | xargs)
- [[ "$failure_count" =~ ^[0-9]+$ ]] || failure_count=0
-
- # Count distinct failures (test failures + build failures)
- local test_failure_count build_failure_count
- test_failure_count=$(grep -c "^--- FAIL:" "$failures_file" 2>/dev/null || echo "0")
- build_failure_count=$(grep -c "^--- BUILD FAILED:" "$failures_file" 2>/dev/null || echo "0")
-
- # Validate signatures file
- local unique_failure_count=0
- if [[ -s "$signatures_file" ]]; then
- unique_failure_count=$(jq 'length' "$signatures_file" 2>/dev/null || echo "0")
- echo "๐ Generated $unique_failure_count unique failure signatures"
- fi
-
- if [[ $test_failure_count -gt 0 ]] || [[ $build_failure_count -gt 0 ]]; then
- echo "โ
Found $test_failure_count test failures and $build_failure_count build failures in JSON output"
- echo "๐ Created $unique_failure_count unique signatures for deduplication"
- return 0
- else
- echo "โน๏ธ No failures detected in JSON output"
- return 0
- fi
- }
-
- # Enhanced text-based failure detection with signatures
- detect_failures_from_text() {
- local text_file="$1"
- local failures_file="$2"
- local detailed_failures_file="${failures_file%.txt}-detailed.txt"
- local signatures_file="${failures_file%.txt}-signatures.json"
-
- echo "๐ Using enhanced text-based failure detection on $text_file"
-
- # Initialize JSON array for structured failures with signatures
- echo '[]' > "$signatures_file"
-
- # Enhanced pattern matching for actual test failures only
- # Exclude standalone FAIL lines, package summaries, and exit status indicators
- local patterns=(
- '^---[[:space:]]*FAIL:[[:space:]]*[A-Za-z][A-Za-z0-9_]*'
- '^--[[:space:]]*FAIL:[[:space:]]*[A-Za-z][A-Za-z0-9_]*'
- '^\[?FAIL\]?[[:space:]]*[A-Za-z][A-Za-z0-9_]*'
- )
-
- local temp_failures=$(mktemp)
- local temp_detailed=$(mktemp)
- local found_any=false
-
- # First pass: Find all failure lines and capture context
- for pattern in "${patterns[@]}"; do
- if grep -E -A 15 "$pattern" "$text_file" >> "$temp_detailed" 2>/dev/null; then
- found_any=true
- # Also capture just the failure line for the summary
- grep -E "$pattern" "$text_file" >> "$temp_failures" 2>/dev/null || true
- fi
- done
-
- if [[ "$found_any" == "true" ]]; then
- # Process the detailed failures to create structured output
- echo "๐ Processing detailed failure output..."
-
- # Create a structured detailed failures file with error messages
- awk '
- BEGIN {
- current_test = ""
- capture_output = 0
- output_buffer = ""
- }
- /^(FAIL|---.*FAIL|--.*FAIL|\[?FAIL\]?)/ {
- # If we were capturing output, save it
- if (current_test != "" && output_buffer != "") {
- print "TEST:" current_test
- print "ERROR:" output_buffer
- print "---SEPARATOR---"
- }
-
- # Start new test capture
- current_test = $0
- output_buffer = ""
- capture_output = 1
- next
- }
- capture_output == 1 && /^[[:space:]]*$/ {
- # Empty line might end the error context
- if (length(output_buffer) > 100) capture_output = 0
- next
- }
- capture_output == 1 && !/^(PASS|ok |FAIL|---.*FAIL|--.*FAIL)/ {
- # Capture error output lines
- if (output_buffer == "") {
- output_buffer = $0
- } else {
- output_buffer = output_buffer "\n" $0
- }
- # Stop if we have captured enough context
- if (length(output_buffer) > 1500) capture_output = 0
- }
- /^(PASS|ok )/ && capture_output == 1 {
- # Another test started, stop capturing
- capture_output = 0
- }
- END {
- # Save the last test if we were capturing
- if (current_test != "" && output_buffer != "") {
- print "TEST:" current_test
- print "ERROR:" output_buffer
- print "---SEPARATOR---"
- }
- }
- ' "$temp_detailed" > "$detailed_failures_file" 2>/dev/null || true
-
- # Remove duplicates and sort for the summary file
- sort -u "$temp_failures" > "$failures_file"
-
- # Generate signatures from text failures
- echo "๐ Generating failure signatures from text output..."
- local temp_signatures=$(mktemp)
- echo '[]' > "$temp_signatures"
-
- while IFS= read -r failure_line; do
- # Skip empty lines and generic failures
- if [[ -n "$failure_line" && "$failure_line" != *"Generic test failure"* ]]; then
- # Skip standalone FAIL lines, package summaries, and exit status indicators
- if [[ "$failure_line" =~ ^(FAIL[[:space:]]*$|FAIL[[:space:]]+github\.com|exit[[:space:]]+status) ]]; then
- continue
- fi
-
- # Extract package and test info from failure line
- # Pattern: --- FAIL: TestName (package.name)
- if [[ "$failure_line" =~ "FAIL:" ]]; then
- local test_name=$(echo "$failure_line" | sed -E 's/^.*FAIL: ([^ ]+).*$/\1/' | head -c 200)
- local package_name=$(echo "$failure_line" | sed -E 's/^.*\(([^):]+)[^)]*\).*$/\1/' | head -c 200)
-
- # Handle build failures
- if [[ "$failure_line" =~ "BUILD FAILED:" ]]; then
- package_name=$(echo "$failure_line" | sed 's/^--- BUILD FAILED: //' | head -c 200)
- test_name="build_compilation"
-
- jq -n --arg pkg "$package_name" --arg test "$test_name" --arg output "$failure_line" '{
- type: "build",
- package: $pkg,
- test: $test,
- output: $output,
- line_number: null,
- signature: ($pkg + ":" + $test),
- unique_id: (($pkg + ":" + $test) | gsub("[^a-zA-Z0-9_/.-]"; "_"))
- }' >> "$temp_signatures.items"
- else
- # Handle fuzz test failures specifically
- if [[ "$test_name" =~ ^Fuzz[A-Za-z0-9_]+ ]]; then
- # For fuzz tests, create a more specific signature to avoid duplicates
- # Only count the main fuzz test, not nested failures
- if [[ ! "$failure_line" =~ "#[0-9]+" ]]; then
- jq -n --arg pkg "$package_name" --arg test "$test_name" --arg output "$failure_line" '{
- type: "fuzz_test",
- package: $pkg,
- test: $test,
- output: $output,
- line_number: null,
- signature: ($pkg + ":" + $test + ":fuzz"),
- unique_id: (($pkg + ":" + $test + ":fuzz") | gsub("[^a-zA-Z0-9_/.-]"; "_"))
- }' >> "$temp_signatures.items"
- fi
- else
- # Regular test failure
- if [[ -n "$test_name" && -n "$package_name" && "$test_name" != "$package_name" ]]; then
- jq -n --arg pkg "$package_name" --arg test "$test_name" --arg output "$failure_line" '{
- type: "test",
- package: $pkg,
- test: $test,
- output: $output,
- line_number: null,
- signature: ($pkg + ":" + $test),
- unique_id: (($pkg + ":" + $test) | gsub("[^a-zA-Z0-9_/.-]"; "_"))
- }' >> "$temp_signatures.items"
- fi
- fi
- fi
- fi
- fi
- done < "$failures_file"
-
- # Combine signatures into array
- if [[ -f "$temp_signatures.items" ]]; then
- jq -s '.' "$temp_signatures.items" > "$signatures_file" 2>/dev/null || echo '[]' > "$signatures_file"
- fi
- rm -f "$temp_signatures" "$temp_signatures.items"
-
- local unique_signature_count=$(jq 'length' "$signatures_file" 2>/dev/null || echo "0")
- echo "โ
Text parsing found $(wc -l < "$failures_file") failures with $unique_signature_count unique signatures"
-
- # Clean up
- rm -f "$temp_failures" "$temp_detailed"
- return 0
- fi
-
- # Fallback: look for any error indicators
- echo "โ ๏ธ Standard failure patterns not found, checking for error indicators"
- local error_patterns=(
- 'panic:'
- 'fatal error:'
- 'build failed'
- 'compilation error'
- 'timeout'
- 'killed'
- 'error:'
- )
-
- for pattern in "${error_patterns[@]}"; do
- if grep -i -A 5 "$pattern" "$text_file" >> "$temp_detailed" 2>/dev/null; then
- found_any=true
- grep -i "$pattern" "$text_file" >> "$temp_failures" 2>/dev/null || true
- fi
- done
-
- if [[ "$found_any" == "true" ]]; then
- sort -u "$temp_failures" > "$failures_file"
- cp "$temp_detailed" "$detailed_failures_file" 2>/dev/null || true
- echo "โ ๏ธ Found $(wc -l < "$failures_file") error indicators (not standard test failures)"
- rm -f "$temp_failures" "$temp_detailed"
- return 0
- fi
-
- rm -f "$temp_failures" "$temp_detailed"
-
- # If exit code indicated failure but no patterns found, create generic entry
- if [[ "${TEST_EXIT_CODE:-0}" -ne 0 ]]; then
- echo "Generic test failure (exit code ${TEST_EXIT_CODE:-0}) - pattern detection failed" > "$failures_file"
- echo "โ ๏ธ Exit code indicates failure but no recognizable patterns found"
- return 1
- else
- touch "$failures_file" # Create empty failures file
- echo "โ
No failures detected and exit code is 0"
- return 0
- fi
- }
-
- # Utility function for safe numeric validation
- sanitize_numeric() {
- local value="$1"
- value=$(echo "$value" | tr -d '\n\r' | xargs)
- if [[ "$value" =~ ^[0-9]+$ ]]; then
- echo "$value"
- else
- echo "0"
- fi
- }
-
- # Export functions for use in other steps
- export -f detect_test_failures
- export -f detect_failures_from_json
- export -f detect_failures_from_text
- export -f sanitize_numeric
- DETECTION_FUNCTIONS_EOF
-
- # Source the functions to make them available
- source test-failure-functions.sh
- echo "โ
Failure detection functions defined and loaded"
-
- - name: ๐ Detect test failures
- id: detect-failures
- shell: bash
- env:
- INPUT_OUTPUT_FILE: ${{ inputs.output-file }}
- INPUT_EXIT_CODE: ${{ inputs.exit-code }}
- INPUT_MODE: ${{ inputs.mode }}
- INPUT_FAILURES_FILE: ${{ inputs.failures-file }}
- run: |
- # Source the functions
- source test-failure-functions.sh
-
- # Run detection with provided inputs
- OUTPUT_FILE="$INPUT_OUTPUT_FILE"
- EXIT_CODE="$INPUT_EXIT_CODE"
- MODE="$INPUT_MODE"
- FAILURES_FILE="$INPUT_FAILURES_FILE"
-
- # Detect failures
- detect_test_failures "$OUTPUT_FILE" "$EXIT_CODE" "$MODE" "$FAILURES_FILE"
- detection_result=$?
-
- # Calculate outputs
- if [[ -f "$FAILURES_FILE" ]]; then
- FAILURE_COUNT=$(wc -l < "$FAILURES_FILE" 2>/dev/null | tr -d '\n\r' | xargs)
- FAILURE_COUNT=$(sanitize_numeric "$FAILURE_COUNT")
- else
- FAILURE_COUNT=0
- fi
-
- HAS_FAILURES="false"
- if [[ "$FAILURE_COUNT" -gt 0 ]] || [[ "$detection_result" -ne 0 ]]; then
- HAS_FAILURES="true"
- fi
-
- DETAILED_FILE="${FAILURES_FILE%.txt}-detailed.txt"
-
- # Set outputs
- echo "failure-count=$FAILURE_COUNT" >> $GITHUB_OUTPUT
- echo "has-failures=$HAS_FAILURES" >> $GITHUB_OUTPUT
- echo "detailed-failures-file=$DETAILED_FILE" >> $GITHUB_OUTPUT
-
- echo "๐ Failure detection results:"
- echo " โข Failure count: $FAILURE_COUNT"
- echo " โข Has failures: $HAS_FAILURES"
- echo " โข Detailed file: $DETAILED_FILE"
-
- # Always exit with 0 to not fail the workflow step
- # The outputs contain the failure information for downstream steps
- exit 0
diff --git a/.github/actions/test-statistics/action.yml b/.github/actions/test-statistics/action.yml
index 827b29a..d4563a9 100644
--- a/.github/actions/test-statistics/action.yml
+++ b/.github/actions/test-statistics/action.yml
@@ -1,7 +1,22 @@
# ------------------------------------------------------------------------------------
# Test Statistics Collection (Composite Action)
#
-# Purpose: Calculate and collect comprehensive test statistics including test counts,
+# โ ๏ธ DEPRECATED: This action is deprecated in favor of magex native CI mode.
+#
+# magex CI mode automatically produces .mage-x/ci-results.jsonl which includes:
+# - Test pass/fail counts
+# - Duration metrics
+# - Failure details with file:line locations
+# - Summary statistics
+#
+# Usage: Simply run `magex test:*` commands - CI mode auto-detects GitHub Actions.
+# The validation workflow reads results from ci-results.jsonl instead of this action.
+#
+# This action is kept for backwards compatibility but will be removed in a future release.
+#
+# ---------------------------------------------------------------------------------
+#
+# Original Purpose: Calculate and collect comprehensive test statistics including test counts,
# failure metrics, performance data, and lines of code metrics.
#
# This action provides standardized statistics collection:
diff --git a/.github/scripts/parse-test-label.sh b/.github/scripts/parse-test-label.sh
new file mode 100644
index 0000000..f536050
--- /dev/null
+++ b/.github/scripts/parse-test-label.sh
@@ -0,0 +1,86 @@
+#!/usr/bin/env bash
+# ------------------------------------------------------------------------------------
+# Parse Test Label Script
+#
+# Helper function to generate human-readable test labels from artifact names.
+# Sourced by workflow steps that need consistent test labeling.
+#
+# Usage: parse_test_label "artifact-name" "jsonl-filename"
+# Output: "Unit Tests (Ubuntu, Go 1.22)" or similar
+# ------------------------------------------------------------------------------------
+
+parse_test_label() {
+ local artifact_name="$1"
+ local jsonl_name="$2"
+
+ # Classify the test type: fuzz markers (artifact prefix or JSONL name) win over unit CI results
+ local test_type="Tests"
+ if [[ "$artifact_name" == test-results-fuzz-* ]] || [[ "$jsonl_name" == *fuzz* ]]; then
+ test_type="Fuzz Tests"
+ elif [[ "$artifact_name" == ci-results-* ]]; then
+ test_type="Unit Tests"
+ fi
+
+ # Detect the runner OS from substrings of the artifact name (left empty when none match)
+ local os_name=""
+ if [[ "$artifact_name" =~ ubuntu ]]; then
+ os_name="Ubuntu"
+ elif [[ "$artifact_name" =~ windows ]]; then
+ os_name="Windows"
+ elif [[ "$artifact_name" =~ macos ]]; then
+ os_name="macOS"
+ fi
+
+ # Extract the Go version: last version-looking token (e.g. "1.22", "1.24.x"); "go1.22" yields "1.22"
+ local go_version=""
+ go_version=$(echo "$artifact_name" | grep -oE '[0-9]+\.[0-9]+(\.[x0-9]+)?' | tail -1 || echo "")
+
+ # Assemble the label from whichever parts were detected above
+ if [[ -n "$os_name" && -n "$go_version" ]]; then
+ echo "$test_type ($os_name, Go $go_version)"
+ elif [[ -n "$os_name" ]]; then
+ echo "$test_type ($os_name)"
+ elif [[ -n "$go_version" ]]; then
+ echo "$test_type (Go $go_version)"
+ else
+ echo "$test_type"
+ fi
+}
+
+# Copy a CI artifact file into the CWD under a unique, prefixed name.
+# Usage: copy_ci_artifact "source_file" ["ci"|"fuzz"]
+# Example: copy_ci_artifact "/path/to/ci-artifacts/artifact-name/.mage-x/ci-results.jsonl" "ci"
+copy_ci_artifact() {
+ local file="$1"
+ local prefix="${2:-ci}"
+
+ # Validate input file exists
+ if [[ ! -f "$file" ]]; then
+ echo "โ ๏ธ Warning: File not found: $file" >&2
+ return 1
+ fi
+
+ # Extract artifact directory name for unique naming
+ local parent_dir=$(dirname "$file")
+ local parent_basename=$(basename "$parent_dir")
+ local artifact_dir
+
+ # Detect which structure we have by checking parent directory
+ # Expected: *-artifacts/ARTIFACT_NAME/.mage-x/ci-results.jsonl
+ if [[ "$parent_basename" == ".mage-x" ]]; then
+ # Expected structure: use grandparent as artifact dir (nested form avoids xargs word-splitting on spaces)
+ artifact_dir=$(basename "$(dirname "$parent_dir")")
+ else
+ # Fallback: parent is the artifact dir (not grandparent)
+ echo " Warning: Unexpected artifact structure for: $file" >&2
+ artifact_dir="$parent_basename"
+ fi
+ local filename=$(basename "$file")
+ local dest="${prefix}-${artifact_dir}-${filename}" # fix: was "$(unknown)", a nonexistent command
+
+ echo "Copying $prefix results $file to ./$dest"
+ if ! cp "$file" "./$dest"; then
+ echo "โ ๏ธ Warning: Failed to copy $file to $dest" >&2
+ return 1
+ fi
+}
diff --git a/.github/workflows/fortress-completion-statistics.yml b/.github/workflows/fortress-completion-statistics.yml
index 64f7608..1414cd3 100644
--- a/.github/workflows/fortress-completion-statistics.yml
+++ b/.github/workflows/fortress-completion-statistics.yml
@@ -99,18 +99,6 @@ jobs:
# --------------------------------------------------------------------
# Download specific artifacts needed for statistics processing
# --------------------------------------------------------------------
- - name: ๐ฅ Download test statistics
- if: always() && env.ENABLE_GO_TESTS == 'true'
- uses: ./.github/actions/download-artifact-resilient
- with:
- pattern: "test-stats-*"
- path: ./artifacts/
- merge-multiple: true
- max-retries: ${{ env.ARTIFACT_DOWNLOAD_RETRIES }}
- retry-delay: ${{ env.ARTIFACT_DOWNLOAD_RETRY_DELAY }}
- timeout: ${{ env.ARTIFACT_DOWNLOAD_TIMEOUT }}
- continue-on-error: ${{ env.ARTIFACT_DOWNLOAD_CONTINUE_ON_ERROR }}
-
- name: ๐ฅ Download benchmark statistics
if: always()
uses: ./.github/actions/download-artifact-resilient
@@ -175,6 +163,16 @@ jobs:
echo "โ ๏ธ No artifacts directory found"
fi
+ # --------------------------------------------------------------------
+ # Setup MAGE-X for LOC metrics
+ # --------------------------------------------------------------------
+ - name: ๐ง Setup MAGE-X for LOC metrics
+ uses: ./.github/actions/setup-magex
+ with:
+ magex-version: ${{ env.MAGE_X_VERSION }}
+ runner-os: ${{ runner.os }}
+ use-local: ${{ env.MAGE_X_USE_LOCAL }}
+
# --------------------------------------------------------------------
# Initialize statistics report section
# --------------------------------------------------------------------
@@ -533,69 +531,67 @@ jobs:
- name: ๐ Generate Lines of Code Summary
id: process-loc
run: |
- # Try to get LOC from stats files first
+ echo "๐ Running magex metrics:loc json..."
+
+ # Run magex metrics:loc json and capture output
+ LOC_OUTPUT=$(magex metrics:loc json 2>&1 || true)
LOC_FOUND=false
- TEST_FILES_COUNT=""
- GO_FILES_COUNT=""
- TOTAL_LOC=""
- LOC_DATE=""
-
- if compgen -G "*-stats-*.json" >/dev/null 2>&1; then
- echo "๐ Looking for LOC data in stats files..."
- for stats_file in *-stats-*.json; do
- if [ -f "$stats_file" ]; then
- echo "๐ Checking $stats_file for LOC data..."
- TEST_FILES_COUNT=$(jq -r '.loc_test_files // "null"' "$stats_file")
- GO_FILES_COUNT=$(jq -r '.loc_go_files // "null"' "$stats_file")
- TOTAL_LOC=$(jq -r '.loc_total // "null"' "$stats_file")
-
- echo " - Test Files: '$TEST_FILES_COUNT'"
- echo " - Go Files: '$GO_FILES_COUNT'"
- echo " - Total: '$TOTAL_LOC'"
-
- # Check if we have valid LOC data (not null and not empty string)
- if [[ "$TEST_FILES_COUNT" != "null" ]] && [[ "$TEST_FILES_COUNT" != "" ]] && \
- [[ "$GO_FILES_COUNT" != "null" ]] && [[ "$GO_FILES_COUNT" != "" ]] && \
- [[ "$TOTAL_LOC" != "null" ]] && [[ "$TOTAL_LOC" != "" ]]; then
- LOC_DATE=$(jq -r '.loc_date // "unknown"' "$stats_file")
- LOC_FOUND=true
- echo "โ
Found valid LOC data in $stats_file: $TOTAL_LOC total lines"
- break
- fi
- fi
- done
+
+ if [[ -n "$LOC_OUTPUT" ]]; then
+ echo "๐ magex metrics:loc json output:"
+ echo "$LOC_OUTPUT"
+
+ # Parse JSON output using jq
+ TEST_FILES_LOC=$(echo "$LOC_OUTPUT" | jq -r '.test_files_loc // empty')
+ TEST_FILES_COUNT=$(echo "$LOC_OUTPUT" | jq -r '.test_files_count // empty')
+ GO_FILES_LOC=$(echo "$LOC_OUTPUT" | jq -r '.go_files_loc // empty')
+ GO_FILES_COUNT=$(echo "$LOC_OUTPUT" | jq -r '.go_files_count // empty')
+ TOTAL_LOC=$(echo "$LOC_OUTPUT" | jq -r '.total_loc // empty')
+ TOTAL_FILES_COUNT=$(echo "$LOC_OUTPUT" | jq -r '.total_files_count // empty')
+ LOC_DATE=$(echo "$LOC_OUTPUT" | jq -r '.date // empty')
+
+ echo " - Test Files LOC: '$TEST_FILES_LOC' (count: $TEST_FILES_COUNT)"
+ echo " - Go Files LOC: '$GO_FILES_LOC' (count: $GO_FILES_COUNT)"
+ echo " - Total LOC: '$TOTAL_LOC' (files: $TOTAL_FILES_COUNT)"
+ echo " - Date: '$LOC_DATE'"
+
+ # Check if we have valid LOC data
+ if [[ -n "$TEST_FILES_LOC" ]] && [[ -n "$GO_FILES_LOC" ]] && [[ -n "$TOTAL_LOC" ]]; then
+ LOC_FOUND=true
+ echo "โ
Successfully parsed LOC JSON data"
+ fi
+ else
+ echo "โ ๏ธ No output from magex metrics:loc json"
fi
- # Display LOC section if we have data
- if [[ "$LOC_FOUND" == "true" ]] && [[ -n "$TOTAL_LOC" ]]; then
- # Ensure no empty values in table
- DISPLAY_TEST_FILES="${TEST_FILES_COUNT:-N/A}"
- DISPLAY_GO_FILES="${GO_FILES_COUNT:-N/A}"
+ # Display LOC section
+ if [[ "$LOC_FOUND" == "true" ]]; then
+ # Format numbers with commas for display
+ DISPLAY_TEST_LOC=$(LC_NUMERIC=en_US.UTF-8 printf "%'d" "${TEST_FILES_LOC:-0}")
+ DISPLAY_TEST_COUNT="${TEST_FILES_COUNT:-N/A}"
+ DISPLAY_GO_LOC=$(LC_NUMERIC=en_US.UTF-8 printf "%'d" "${GO_FILES_LOC:-0}")
+ DISPLAY_GO_COUNT="${GO_FILES_COUNT:-N/A}"
+ DISPLAY_TOTAL_LOC=$(LC_NUMERIC=en_US.UTF-8 printf "%'d" "${TOTAL_LOC:-0}")
+ DISPLAY_TOTAL_FILES="${TOTAL_FILES_COUNT:-N/A}"
DISPLAY_LOC_DATE="${LOC_DATE:-N/A}"
- DISPLAY_TOTAL="${TOTAL_LOC:-N/A}"
-
- # Double-check for empty strings and replace with N/A
- [[ -z "$DISPLAY_TEST_FILES" ]] && DISPLAY_TEST_FILES="N/A"
- [[ -z "$DISPLAY_GO_FILES" ]] && DISPLAY_GO_FILES="N/A"
- [[ -z "$DISPLAY_LOC_DATE" ]] && DISPLAY_LOC_DATE="N/A"
- [[ -z "$DISPLAY_TOTAL" ]] && DISPLAY_TOTAL="N/A"
{
echo ""
echo "
"
echo ""
echo "### ๐ Lines of Code Summary"
- echo "| Type | Total Lines | Date |"
- echo "|------|-------------|------|"
- echo "| Test Files | $DISPLAY_TEST_FILES | $DISPLAY_LOC_DATE |"
- echo "| Go Files | $DISPLAY_GO_FILES | $DISPLAY_LOC_DATE |"
+ echo "| Type | Lines of Code | Files | Date |"
+ echo "|------|---------------|-------|------|"
+ echo "| Test Files | $DISPLAY_TEST_LOC | $DISPLAY_TEST_COUNT | $DISPLAY_LOC_DATE |"
+ echo "| Go Files | $DISPLAY_GO_LOC | $DISPLAY_GO_COUNT | $DISPLAY_LOC_DATE |"
+ echo "| **Total** | **$DISPLAY_TOTAL_LOC** | **$DISPLAY_TOTAL_FILES** | |"
echo ""
- echo "**Total lines of code: $DISPLAY_TOTAL**"
+ echo "
"
} >> statistics-section.md
- echo "โ
LOC section added to report with values: Test=$DISPLAY_TEST_FILES, Go=$DISPLAY_GO_FILES, Total=$DISPLAY_TOTAL"
+ echo "โ
LOC section added: Test=$DISPLAY_TEST_LOC ($DISPLAY_TEST_COUNT files), Go=$DISPLAY_GO_LOC ($DISPLAY_GO_COUNT files), Total=$DISPLAY_TOTAL_LOC ($DISPLAY_TOTAL_FILES files)"
else
- echo "โ ๏ธ No valid LOC data available to display"
+ echo "โ ๏ธ Could not collect LOC data"
{
echo ""
echo "
"
@@ -604,7 +600,9 @@ jobs:
echo "| Status | Details |"
echo "|--------|---------|"
            echo "| **Lines of Code** | ❌ Data not available |"
- echo "| **Reason** | magex metrics:loc not executed or parsing failed |"
+ echo "| **Reason** | magex metrics:loc json command failed or produced unexpected output |"
+ echo ""
+ echo "
"
} >> statistics-section.md
fi
diff --git a/.github/workflows/fortress-completion-tests.yml b/.github/workflows/fortress-completion-tests.yml
index 9d16d07..198edd7 100644
--- a/.github/workflows/fortress-completion-tests.yml
+++ b/.github/workflows/fortress-completion-tests.yml
@@ -77,18 +77,6 @@ jobs:
# --------------------------------------------------------------------
# Download specific artifacts needed for test analysis
# --------------------------------------------------------------------
- - name: ๐ฅ Download test statistics
- if: always() && env.ENABLE_GO_TESTS == 'true'
- uses: ./.github/actions/download-artifact-resilient
- with:
- pattern: "test-stats-*"
- path: ./artifacts/
- merge-multiple: true
- max-retries: ${{ env.ARTIFACT_DOWNLOAD_RETRIES }}
- retry-delay: ${{ env.ARTIFACT_DOWNLOAD_RETRY_DELAY }}
- timeout: ${{ env.ARTIFACT_DOWNLOAD_TIMEOUT }}
- continue-on-error: ${{ env.ARTIFACT_DOWNLOAD_CONTINUE_ON_ERROR }}
-
- name: ๐ฅ Download benchmark statistics
if: always()
uses: ./.github/actions/download-artifact-resilient
@@ -113,11 +101,11 @@ jobs:
timeout: ${{ env.ARTIFACT_DOWNLOAD_TIMEOUT }}
continue-on-error: ${{ env.ARTIFACT_DOWNLOAD_CONTINUE_ON_ERROR }}
- - name: ๐ฅ Download test failure artifacts
- if: always() && env.ENABLE_GO_TESTS == 'true'
+ - name: ๐ฅ Download fuzz test failure artifacts
+ if: always() && env.ENABLE_GO_TESTS == 'true' && env.ENABLE_FUZZ_TESTING == 'true'
uses: ./.github/actions/download-artifact-resilient
with:
- pattern: "test-results-unit-*"
+ pattern: "test-results-fuzz-*"
path: ./test-artifacts/
merge-multiple: true
max-retries: ${{ env.ARTIFACT_DOWNLOAD_RETRIES }}
@@ -125,12 +113,12 @@ jobs:
timeout: ${{ env.ARTIFACT_DOWNLOAD_TIMEOUT }}
continue-on-error: ${{ env.ARTIFACT_DOWNLOAD_CONTINUE_ON_ERROR }}
- - name: ๐ฅ Download fuzz test failure artifacts
- if: always() && env.ENABLE_GO_TESTS == 'true' && env.ENABLE_FUZZ_TESTING == 'true'
+ - name: ๐ฅ Download CI results (native mode)
+ if: always() && env.ENABLE_GO_TESTS == 'true'
uses: ./.github/actions/download-artifact-resilient
with:
- pattern: "test-results-fuzz-*"
- path: ./test-artifacts/
+ pattern: "ci-results-*"
+ path: ./ci-artifacts/
merge-multiple: true
max-retries: ${{ env.ARTIFACT_DOWNLOAD_RETRIES }}
retry-delay: ${{ env.ARTIFACT_DOWNLOAD_RETRY_DELAY }}
@@ -142,7 +130,16 @@ jobs:
run: |
echo "๐๏ธ Flattening downloaded artifacts..."
- # Process stats artifacts
+ # Source shared helper functions for artifact processing
+          source .github/scripts/parse-test-label.sh || { echo "❌ Failed to source parse-test-label.sh"; exit 1; }
+
+ # Verify critical function is available
+ if ! type copy_ci_artifact &>/dev/null; then
+            echo "❌ Error: copy_ci_artifact function not found after sourcing"
+ exit 1
+ fi
+
+ # Process stats artifacts (bench-stats, cache-stats JSON files)
if [ -d "./artifacts/" ]; then
find ./artifacts/ -name "*.json" -type f | while read -r file; do
filename=$(basename "$file")
@@ -152,22 +149,29 @@ jobs:
echo "๐ Available stats files:"
ls -la *-stats-*.json 2>/dev/null || echo "No stats files found"
else
-            echo "⚠️ No artifacts directory found"
+            echo "ℹ️ No artifacts directory found"
+ fi
+
+ # Process CI results from ci-artifacts (unit tests)
+ if [ -d "./ci-artifacts/" ]; then
+ echo "๐ Processing unit test CI results..."
+ while IFS= read -r -d '' file; do
+ copy_ci_artifact "$file" "ci" || true
+ done < <(find ./ci-artifacts/ -name "*.jsonl" -type f -print0 2>/dev/null)
fi
- # Process test failure artifacts
+ # Process CI results from test-artifacts (fuzz tests)
if [ -d "./test-artifacts/" ]; then
- find ./test-artifacts/ -name "*.json" -type f | while read -r file; do
- filename=$(basename "$file")
- echo "Moving test artifact $file to ./$filename"
- cp "$file" "./$filename"
- done
- echo "๐ Available test artifacts:"
- ls -la test-failures*.json test-results*.json 2>/dev/null || echo "No test artifacts found"
- else
-            echo "⚠️ No test-artifacts directory found"
+ echo "๐ Processing fuzz test CI results..."
+ while IFS= read -r -d '' file; do
+ copy_ci_artifact "$file" "ci" || true
+ done < <(find ./test-artifacts/ -name "*.jsonl" -type f -print0 2>/dev/null)
fi
+ # Show all available JSONL files
+ echo "๐ Available CI results JSONL files:"
+ ls -la ci-*.jsonl 2>/dev/null || echo "No CI results JSONL files found"
+
# --------------------------------------------------------------------
# Initialize test analysis section
# --------------------------------------------------------------------
@@ -181,60 +185,71 @@ jobs:
- name: ๐งช Process Test Statistics
id: process-tests
run: |
- # Process test statistics if available
- if compgen -G "test-stats-*.json" >/dev/null 2>&1; then
+ # Source shared helper function for generating test labels
+          source .github/scripts/parse-test-label.sh || { echo "❌ Failed to source parse-test-label.sh"; exit 1; }
+
+ # Enable nullglob so "for f in *.jsonl" loops safely skip when no files match
+ # (prevents iterating with literal pattern string "ci-*.jsonl")
+ shopt -s nullglob
+
+ # Initialize totals for summary
+ TOTAL_TESTS=0
+ TOTAL_FAILURES=0
+ TOTAL_PASSED=0
+ TOTAL_SKIPPED=0
+ SUITE_COUNT=0
+ HAS_DATA=false
+
+ # Check for native CI mode JSONL files first (preferred)
+ if compgen -G "ci-*.jsonl" >/dev/null 2>&1; then
+ echo "๐ Processing native CI mode JSONL files..."
+ HAS_DATA=true
+
{
echo ""
echo ""
echo "### ๐งช Test Results Summary"
- echo "| Test Suite | Mode | Duration | Tests | Failed | Packages | Status | Race | Coverage |"
- echo "|------------|------|----------|-------|--------|----------|--------|------|----------|"
+ echo "| Test Suite | Duration | Tests | Runs | Passed | Failed | Skipped | Status |"
+ echo "|------------|----------|-------|------|--------|--------|---------|--------|"
} >> tests-section.md
- # Initialize totals for summary
- TOTAL_TESTS=0
- TOTAL_FAILURES=0
- TOTAL_AFFECTED_PACKAGES=0
- SUITE_COUNT=0
-
- for stats_file in test-stats-*.json; do
- if [ -f "$stats_file" ]; then
- NAME=$(jq -r '.name' "$stats_file")
- DURATION=$(jq -r '.duration_seconds' "$stats_file")
- TEST_COUNT=$(jq -r '.test_count' "$stats_file")
- STATUS=$(jq -r '.status' "$stats_file")
- RACE_ENABLED=$(jq -r '.race_enabled' "$stats_file")
- COVERAGE_ENABLED=$(jq -r '.coverage_enabled' "$stats_file")
-
- # New enhanced fields
- TEST_MODE=$(jq -r '.test_mode // "unknown"' "$stats_file")
- SUITE_FAILURES=$(jq -r '.total_failures // 0' "$stats_file")
- AFFECTED_PACKAGES=$(jq -r '.affected_packages // 0' "$stats_file")
-
- DURATION_MIN=$((DURATION / 60))
- DURATION_SEC=$((DURATION % 60))
-
-              COVERAGE_ICON=$([[ "$COVERAGE_ENABLED" == "true" ]] && echo "✅" || echo "❌")
-              RACE_ICON=$([[ "$RACE_ENABLED" == "true" ]] && echo "✅" || echo "❌")
-              STATUS_ICON=$([[ "$STATUS" == "success" ]] && echo "✅" || echo "❌")
-
- # Show package count or dash
- PACKAGE_DISPLAY=$([[ "$AFFECTED_PACKAGES" -gt 0 ]] && echo "$AFFECTED_PACKAGES" || echo "-")
-
- echo "| $NAME | $TEST_MODE | ${DURATION_MIN}m ${DURATION_SEC}s | $TEST_COUNT | $SUITE_FAILURES | $PACKAGE_DISPLAY | $STATUS_ICON | $RACE_ICON | $COVERAGE_ICON |" >> tests-section.md
-
- # Accumulate totals
- TOTAL_TESTS=$((TOTAL_TESTS + TEST_COUNT))
- TOTAL_FAILURES=$((TOTAL_FAILURES + SUITE_FAILURES))
- TOTAL_AFFECTED_PACKAGES=$((TOTAL_AFFECTED_PACKAGES + AFFECTED_PACKAGES))
- SUITE_COUNT=$((SUITE_COUNT + 1))
+ # Process each JSONL file
+ for jsonl_file in ci-*.jsonl; do
+ if [ -f "$jsonl_file" ]; then
+ # Extract artifact name from filename (ci-ARTIFACT_NAME-ci-results.jsonl)
+ ARTIFACT_NAME=$(echo "$jsonl_file" | sed 's/^ci-//' | sed 's/-ci-results\.jsonl$//')
+ SUITE_LABEL=$(parse_test_label "$ARTIFACT_NAME")
+
+ # Extract summary line
+ SUMMARY=$(grep '"type":"summary"' "$jsonl_file" 2>/dev/null | head -1 || echo "")
+
+ if [[ -n "$SUMMARY" ]]; then
+ STATUS=$(echo "$SUMMARY" | jq -r '.summary.status // "unknown"')
+ PASSED=$(echo "$SUMMARY" | jq -r '.summary.passed // 0')
+ FAILED=$(echo "$SUMMARY" | jq -r '.summary.failed // 0')
+ SKIPPED=$(echo "$SUMMARY" | jq -r '.summary.skipped // 0')
+ TOTAL=$(echo "$SUMMARY" | jq -r '.summary.total // 0')
+ UNIQUE=$(echo "$SUMMARY" | jq -r '.summary.unique_total // .summary.total // 0')
+ DURATION=$(echo "$SUMMARY" | jq -r '.summary.duration // "0s"')
+
+                STATUS_ICON=$([[ "$STATUS" == "passed" ]] && echo "✅" || echo "❌")
+
+ echo "| $SUITE_LABEL | $DURATION | $UNIQUE | $TOTAL | $PASSED | $FAILED | $SKIPPED | $STATUS_ICON |" >> tests-section.md
+
+ # Accumulate totals (use unique for primary test count)
+ TOTAL_TESTS=$((TOTAL_TESTS + UNIQUE))
+ TOTAL_PASSED=$((TOTAL_PASSED + PASSED))
+ TOTAL_FAILURES=$((TOTAL_FAILURES + FAILED))
+ TOTAL_SKIPPED=$((TOTAL_SKIPPED + SKIPPED))
+ SUITE_COUNT=$((SUITE_COUNT + 1))
+ fi
fi
done
- # Store totals as outputs for later use
+ # Store totals as outputs
echo "test-metrics={\"total_tests\":$TOTAL_TESTS,\"total_failures\":$TOTAL_FAILURES,\"suite_count\":$SUITE_COUNT}" >> $GITHUB_OUTPUT
- # Add test failure analysis if any failures exist
+ # Add failure analysis if any failures exist
if [[ $TOTAL_FAILURES -gt 0 ]]; then
{
echo ""
@@ -242,43 +257,56 @@ jobs:
echo "### โ Test Failure Analysis"
echo "**Total Failures**: $TOTAL_FAILURES across $SUITE_COUNT test suite(s)"
echo ""
- echo "#### ๐ Failures by Test Suite:"
} >> tests-section.md
- for stats_file in test-stats-*.json; do
- if [ -f "$stats_file" ]; then
- SUITE_NAME=$(jq -r '.name' "$stats_file")
- SUITE_FAILURES=$(jq -r '.total_failures // 0' "$stats_file")
- SUITE_PACKAGES=$(jq -r '.affected_packages // 0' "$stats_file")
-
- if [[ $SUITE_FAILURES -gt 0 ]]; then
- echo "- **$SUITE_NAME**: $SUITE_FAILURES failures across $SUITE_PACKAGES packages" >> tests-section.md
+ # Show failures by suite
+ echo "#### ๐ Failures by Test Suite:" >> tests-section.md
+ for jsonl_file in ci-*.jsonl; do
+ if [ -f "$jsonl_file" ]; then
+ ARTIFACT_NAME=$(echo "$jsonl_file" | sed 's/^ci-//' | sed 's/-ci-results\.jsonl$//')
+ SUITE_LABEL=$(parse_test_label "$ARTIFACT_NAME")
+ SUMMARY=$(grep '"type":"summary"' "$jsonl_file" 2>/dev/null | head -1 || echo "")
+
+ if [[ -n "$SUMMARY" ]]; then
+ FAILED=$(echo "$SUMMARY" | jq -r '.summary.failed // 0')
+ if [[ $FAILED -gt 0 ]]; then
+ echo "- **$SUITE_LABEL**: $FAILED failures" >> tests-section.md
+ fi
fi
fi
done
+ # Add collapsible section for failed tests
{
echo ""
echo ""
- echo "๐ Top Failed Tests (click to expand)
"
+ echo "๐ Failed Tests (click to expand)
"
echo ""
- echo "| Test Name | Package | Duration | Suite |"
- echo "|-----------|---------|----------|-------|"
+ echo "| Test Name | Package | Error |"
+ echo "|-----------|---------|-------|"
} >> tests-section.md
- # Extract detailed failure information from all suites
+ # Extract failure details from all JSONL files
FAILURE_COUNT=0
- for stats_file in test-stats-*.json; do
- if [ -f "$stats_file" ] && [[ $FAILURE_COUNT -lt 20 ]]; then
- SUITE_NAME=$(jq -r '.name' "$stats_file")
- FAILURE_DETAILS=$(jq -r '.failure_details // null' "$stats_file")
-
- if [[ "$FAILURE_DETAILS" != "null" ]] && [[ "$FAILURE_DETAILS" != "[]" ]]; then
- echo "$FAILURE_DETAILS" | jq -r --arg suite "$SUITE_NAME" \
- '.[] | "| \(.Test) | \(.Package | split("/") | .[-1] // .[-2] // .) | \(.Duration // "unknown")s | \($suite) |"' 2>/dev/null | \
- head -10 >> tests-section.md || true
- FAILURE_COUNT=$((FAILURE_COUNT + 10))
- fi
+ for jsonl_file in ci-*.jsonl; do
+ if [ -f "$jsonl_file" ] && [[ $FAILURE_COUNT -lt 20 ]]; then
+ while read -r line; do
+ if [[ $FAILURE_COUNT -ge 20 ]]; then
+ break
+ fi
+
+ TEST=$(echo "$line" | jq -r '.failure.test // "unknown"')
+ PKG=$(echo "$line" | jq -r '.failure.package // "unknown"' | sed 's|.*/||')
+ ERROR=$(echo "$line" | jq -r '.failure.error // ""' | head -c 100 | tr '\n' ' ')
+
+ # Truncate error message for table display (max 80 chars: 77 + "...")
+ if [[ ${#ERROR} -gt 80 ]]; then
+ ERROR="${ERROR:0:77}..."
+ fi
+
+ echo "| \`$TEST\` | $PKG | ${ERROR:-_no message_} |"
+ FAILURE_COUNT=$((FAILURE_COUNT + 1))
+ done < <(grep '"type":"failure"' "$jsonl_file" 2>/dev/null) >> tests-section.md || true
fi
done
@@ -287,53 +315,13 @@ jobs:
echo " "
} >> tests-section.md
- # Add error details section for failed tests
- HAS_ERROR_OUTPUT=false
- for stats_file in test-stats-*.json; do
- if [ -f "$stats_file" ]; then
- FAILURE_DETAILS=$(jq -r '.failure_details // null' "$stats_file")
- if [[ "$FAILURE_DETAILS" != "null" ]] && [[ "$FAILURE_DETAILS" != "[]" ]]; then
- # Check if any failure has non-empty output
- HAS_OUTPUT=$(echo "$FAILURE_DETAILS" | jq -r 'map(select(.Output != "" and .Output != null)) | length > 0' 2>/dev/null)
- if [[ "$HAS_OUTPUT" == "true" ]]; then
- HAS_ERROR_OUTPUT=true
- break
- fi
- fi
- fi
- done
-
- if [[ "$HAS_ERROR_OUTPUT" == "true" ]]; then
- {
- echo ""
- echo ""
- echo "### ๐ Test Error Messages"
- echo ""
- } >> tests-section.md
-
- ERROR_COUNT=0
- for stats_file in test-stats-*.json; do
- if [ -f "$stats_file" ] && [[ $ERROR_COUNT -lt 10 ]]; then
- SUITE_NAME=$(jq -r '.name' "$stats_file")
- FAILURE_DETAILS=$(jq -r '.failure_details // null' "$stats_file")
-
- if [[ "$FAILURE_DETAILS" != "null" ]] && [[ "$FAILURE_DETAILS" != "[]" ]]; then
- # Display failures with non-empty outputs using smart truncation
- echo "$FAILURE_DETAILS" | jq -r --arg suite "$SUITE_NAME" \
- '.[] | select(.Output != "" and .Output != null) |
- "#### \(.Test) (\(.Package | split("/") | .[-1] // .[-2] // .))\n\n```\n\(.Output | if length > 1500 then .[0:1500] + "\n... (truncated)" else . end)\n```\n"' 2>/dev/null | \
- head -c 4000 >> tests-section.md || true
- ERROR_COUNT=$((ERROR_COUNT + 3))
- fi
- fi
- done
- fi
-
# Store failure metrics
- echo "failure-metrics={\"total_failures\":$TOTAL_FAILURES,\"has_error_output\":$HAS_ERROR_OUTPUT}" >> $GITHUB_OUTPUT
+ echo "failure-metrics={\"total_failures\":$TOTAL_FAILURES,\"has_error_output\":true}" >> $GITHUB_OUTPUT
fi
- else
- # No test statistics available - check if tests were disabled or fork PR
+ fi
+
+ # No test statistics available
+ if [[ "$HAS_DATA" == "false" ]]; then
{
echo ""
echo ""
@@ -362,49 +350,26 @@ jobs:
id: add-test-config
run: |
# Add test output configuration section
- if compgen -G "test-stats-*.json" >/dev/null 2>&1; then
+ HAS_CONFIG_DATA=false
+
+ # Check for native CI mode JSONL files
+ if compgen -G "ci-*.jsonl" >/dev/null 2>&1; then
+ HAS_CONFIG_DATA=true
{
echo ""
echo "
"
echo ""
echo "### ๐๏ธ Test Output Configuration"
+ echo ""
+ echo "**Output Mode**: Native CI Mode (JSONL)"
+ echo ""
+ echo "- Tests executed with magex native CI mode"
+ echo "- Structured output in .mage-x/ci-results.jsonl"
+ echo "- Automatic GitHub annotations for failures"
} >> tests-section.md
+ fi
- # Show output strategy summary
- SUITE_COUNT=0
- for stats_file in test-stats-*.json; do
- if [ -f "$stats_file" ]; then
- SUITE_COUNT=$((SUITE_COUNT + 1))
- fi
- done
-
- if [[ $SUITE_COUNT -gt 0 ]]; then
- FULL_MODE_COUNT=0
- FAILURES_ONLY_COUNT=0
-
- for stats_file in test-stats-*.json; do
- if [ -f "$stats_file" ]; then
- MODE=$(jq -r '.test_mode // "unknown"' "$stats_file")
- if [[ "$MODE" == "FULL" ]]; then
- FULL_MODE_COUNT=$((FULL_MODE_COUNT + 1))
- elif [[ "$MODE" == "FAILURES_ONLY" ]]; then
- FAILURES_ONLY_COUNT=$((FAILURES_ONLY_COUNT + 1))
- fi
- fi
- done
-
- {
- echo ""
- echo "**Output Strategy Summary:**"
- echo "- $FULL_MODE_COUNT suite(s) used FULL mode (complete output)"
- echo "- $FAILURES_ONLY_COUNT suite(s) used FAILURES_ONLY mode (efficient extraction)"
- } >> tests-section.md
-
- if [[ $FAILURES_ONLY_COUNT -gt 0 ]]; then
- echo "- Estimated output size reduction: ~80-90% for large test suites" >> tests-section.md
- fi
- fi
- else
+ if [[ "$HAS_CONFIG_DATA" == "false" ]]; then
# No test configuration to display - test stats not available
echo "" >> tests-section.md
echo "โน๏ธ _Test configuration section skipped - no test data available_" >> tests-section.md
@@ -425,69 +390,43 @@ jobs:
# Check if fuzz testing is enabled in environment
if [[ "${{ env.ENABLE_FUZZ_TESTING }}" == "true" ]]; then
- # Fuzz testing is enabled, check for stats files
- if compgen -G "fuzz-stats-*.json" >/dev/null 2>&1; then
- # Check if we have actual fuzz stats data before creating table header
- HAS_FUZZ_DATA=false
- for stats_file in fuzz-stats-*.json; do
- if [ -f "$stats_file" ]; then
- NAME=$(jq -r '.name' "$stats_file")
- if [[ "$NAME" != "null" ]] && [[ -n "$NAME" ]]; then
- HAS_FUZZ_DATA=true
- break
- fi
- fi
- done
+ # Look for fuzz test JSONL files (native CI mode)
+ FUZZ_JSONL=$(ls ci-*-ci-results-fuzz.jsonl 2>/dev/null | head -1 || echo "")
- if [[ "$HAS_FUZZ_DATA" == "true" ]]; then
- # Create table header only when we have actual data
- echo "| Fuzz Suite | Duration | Fuzz Tests | Status | Enabled |" >> tests-section.md
- echo "|------------|----------|------------|--------|---------|" >> tests-section.md
-
- # Process fuzz stats files only if they exist
- ROW_ADDED=false
- if compgen -G "fuzz-stats-*.json" >/dev/null 2>&1; then
- for stats_file in fuzz-stats-*.json; do
- if [ -f "$stats_file" ] && [[ "$ROW_ADDED" == "false" ]]; then
- NAME=$(jq -r '.name' "$stats_file")
- DURATION=$(jq -r '.duration_seconds' "$stats_file")
- FUZZ_TEST_COUNT=$(jq -r '.fuzz_test_count' "$stats_file")
- STATUS=$(jq -r '.status' "$stats_file")
-
- # Only add a table row if we have valid, complete data
- if [[ "$NAME" != "null" ]] && [[ -n "$NAME" ]] && \
- [[ "$DURATION" != "null" ]] && [[ -n "$DURATION" ]] && \
- [[ "$FUZZ_TEST_COUNT" != "null" ]] && [[ -n "$FUZZ_TEST_COUNT" ]] && \
- [[ "$STATUS" != "null" ]] && [[ -n "$STATUS" ]]; then
-
- DURATION_MIN=$((DURATION / 60))
- DURATION_SEC=$((DURATION % 60))
-
-                    STATUS_ICON=$([[ "$STATUS" == "success" ]] && echo "✅" || echo "❌")
-
- echo "| $NAME | ${DURATION_MIN}m ${DURATION_SEC}s | $FUZZ_TEST_COUNT | $STATUS_ICON | ๐ฏ |" >> tests-section.md
- ROW_ADDED=true
- fi
- fi
- done
- fi
+ if [[ -n "$FUZZ_JSONL" ]] && [[ -f "$FUZZ_JSONL" ]]; then
+ # Extract summary from fuzz JSONL
+ SUMMARY=$(grep '"type":"summary"' "$FUZZ_JSONL" 2>/dev/null | head -1 || echo "")
+
+ if [[ -n "$SUMMARY" ]]; then
+ STATUS=$(echo "$SUMMARY" | jq -r '.summary.status // "unknown"')
+ TOTAL=$(echo "$SUMMARY" | jq -r '.summary.total // 0')
+ DURATION=$(echo "$SUMMARY" | jq -r '.summary.duration // "0s"')
+
+              STATUS_ICON=$([[ "$STATUS" == "passed" ]] && echo "✅" || echo "❌")
+
+ # Create table with fuzz test data from JSONL
+ {
+ echo "| Fuzz Suite | Duration | Fuzz Tests | Status | Enabled |"
+ echo "|------------|----------|------------|--------|---------|"
+ echo "| Fuzz Tests | $DURATION | $TOTAL | $STATUS_ICON | ๐ฏ |"
+ } >> tests-section.md
else
- # Fuzz testing enabled but no valid stats data
+ # JSONL found but no summary record
{
echo "| Status | Details |"
echo "|--------|---------|"
echo "| **Fuzz Testing** | โ
Enabled |"
- echo "| **Execution** | โ ๏ธ No valid fuzz stats found - check job logs |"
+ echo "| **Execution** | โ ๏ธ No fuzz summary found in JSONL - check job logs |"
echo "| **Platform** | Linux with primary Go version |"
} >> tests-section.md
fi
else
- # Fuzz testing enabled but no stats files found
+ # No fuzz JSONL found
{
echo "| Status | Details |"
echo "|--------|---------|"
echo "| **Fuzz Testing** | โ
Enabled |"
- echo "| **Execution** | โ ๏ธ No fuzz stats found - check job logs |"
+ echo "| **Execution** | โ ๏ธ No fuzz results found - check job logs |"
echo "| **Platform** | Linux with primary Go version |"
} >> tests-section.md
fi
diff --git a/.github/workflows/fortress-coverage.yml b/.github/workflows/fortress-coverage.yml
index 28a13bc..35db336 100644
--- a/.github/workflows/fortress-coverage.yml
+++ b/.github/workflows/fortress-coverage.yml
@@ -451,14 +451,14 @@ jobs:
# Validate extracted history files contain actual coverage data
EXTRACTED_FILES=$(find . -name "*.json" -type f 2>/dev/null | wc -l || echo "0")
VALID_FILES=0
- for json_file in $(find . -name "*.json" -type f 2>/dev/null); do
+ while IFS= read -r -d '' json_file; do
if [[ -s "$json_file" ]] && grep -q "coverage" "$json_file" 2>/dev/null; then
VALID_FILES=$((VALID_FILES + 1))
else
echo " โ ๏ธ Removing invalid history file: $(basename "$json_file")"
rm -f "$json_file"
fi
- done
+ done < <(find . -name "*.json" -type f -print0 2>/dev/null)
echo " ๐ Extracted $EXTRACTED_FILES files, $VALID_FILES valid history entries"
else
echo " โ ๏ธ Failed to extract history-$run_id.zip (possibly corrupted)"
diff --git a/.github/workflows/fortress-test-fuzz.yml b/.github/workflows/fortress-test-fuzz.yml
index a197961..16d2a5f 100644
--- a/.github/workflows/fortress-test-fuzz.yml
+++ b/.github/workflows/fortress-test-fuzz.yml
@@ -117,15 +117,6 @@ jobs:
echo "fuzz-start=$(date +%s)" >> $GITHUB_OUTPUT
echo "๐ Fuzz test timer started at: $(date -u +%Y-%m-%dT%H:%M:%SZ)"
- # --------------------------------------------------------------------
- # Setup fuzz test failure detection functions
- # --------------------------------------------------------------------
- - name: ๐ง Setup failure detection for fuzz tests
- uses: ./.github/actions/test-failure-detection
- with:
- output-file: "fuzz-output.log"
- mode: "text"
-
# --------------------------------------------------------------------
# Run fuzz tests
# --------------------------------------------------------------------
@@ -159,290 +150,7 @@ jobs:
fi
# --------------------------------------------------------------------
- # Analyze fuzz test failures using detection functions
- # --------------------------------------------------------------------
- - name: ๐ Analyze fuzz test failures
- if: always() && steps.run-fuzz-tests.outputs.fuzz-exit-code != '0'
- continue-on-error: true
- uses: ./.github/actions/test-failure-detection
- with:
- output-file: "fuzz-output.log"
- exit-code: ${{ steps.run-fuzz-tests.outputs.fuzz-exit-code }}
- mode: "text"
- failures-file: "fuzz-failures.txt"
-
- # --------------------------------------------------------------------
- # Create structured fuzz test failure summary for validation workflow
- # --------------------------------------------------------------------
- - name: ๐ Create structured fuzz test failure summary
- if: always() && steps.run-fuzz-tests.outputs.fuzz-exit-code != '0'
- continue-on-error: true
- run: |
- echo "๐ Creating structured fuzz test failure summary..."
-
- # Initialize the JSON structure
- echo '[]' > test-failures-summary.json
-
- if [ -f fuzz-output.log ]; then
- echo "๐ Processing fuzz test output for structured failures..."
-
- # Extract failed fuzz tests from output log
- # Pattern: --- FAIL: FuzzTestName (0.34s)
- # Use process substitution to avoid the pipeline subshell issue
- # Filter out nested/indented failures to avoid duplicate counting
- # Only capture top-level fuzz test failures, not nested test failures
- FAILED_TESTS_LIST=$(mktemp)
- # Match pattern: "--- FAIL: FuzzTestName" but exclude lines that contain "#01" or similar nested indicators
- grep -E "^--- FAIL: Fuzz[A-Za-z0-9_]+[[:space:]]*\(" fuzz-output.log | grep -E -v "#[0-9]+" > "$FAILED_TESTS_LIST" || true
-
- if [[ -s "$FAILED_TESTS_LIST" ]]; then
- echo "๐ Found $(wc -l < "$FAILED_TESTS_LIST") failed fuzz tests to process"
-
- while IFS= read -r fail_line; do
- echo "๐ Processing failure line: $fail_line"
-
- # Extract test name from the failure line
- # Format: --- FAIL: FuzzGetTokenFromHeader (0.34s)
- if [[ "$fail_line" =~ ^---[[:space:]]*FAIL:[[:space:]]*([^[:space:]]+) ]]; then
- FUZZ_TEST_NAME="${BASH_REMATCH[1]}"
- echo "๐ Found failed fuzz test: $FUZZ_TEST_NAME"
-
- # Extract detailed error output for this specific test
- # Start from the failure line and capture subsequent error details
- ERROR_OUTPUT=$(awk "
- BEGIN { capture = 0; found_start = 0; }
- /^--- FAIL: $FUZZ_TEST_NAME/ {
- found_start = 1;
- capture = 1;
- output = \$0;
- next;
- }
- found_start && /^--- FAIL:/ && !/^--- FAIL: $FUZZ_TEST_NAME/ {
- # Another test failure started, stop capturing
- exit;
- }
- found_start && /^(PASS|ok |FAIL)/ && capture {
- # Next test result line, stop capturing
- exit;
- }
- found_start && capture && /^[[:space:]]/ {
- # Indented lines are part of the test output
- if (length(output) < 2000) {
- output = output \"\\n\" \$0;
- }
- }
- found_start && capture && /^[[:space:]]*$/ {
- # Empty line, continue capturing briefly
- next;
- }
- found_start && capture && !/^[[:space:]]/ && !/^(To re-run:|Failing input)/ {
- # Non-indented line that's not our test, stop capturing unless it's re-run info
- if (\$0 !~ /^github\.com/) exit;
- }
- END {
- if (found_start) print output;
- }
- " fuzz-output.log)
-
- # Get package name from go.mod or use git context fallback
- PACKAGE_NAME="${{ github.repository }}"
- if [[ -f go.mod ]]; then
- PACKAGE_NAME=$(head -1 go.mod | awk '{print $2}')
- elif [[ -n "${{ github.server_url }}" && -n "${{ github.repository }}" ]]; then
- # Use GitHub context as fallback
- PACKAGE_NAME="${{ github.server_url }}/${{ github.repository }}"
- PACKAGE_NAME=${PACKAGE_NAME#https://}
- fi
-
- # Calculate elapsed time from the failure line (extract from parentheses)
- ELAPSED="unknown"
- if [[ "$fail_line" =~ \(([0-9.]+[a-z]*)\) ]]; then
- ELAPSED="${BASH_REMATCH[1]}"
- fi
-
- # Create JSON entry for this failed fuzz test
- FUZZ_JSON=$(jq -n \
- --arg pkg "$PACKAGE_NAME" \
- --arg test "$FUZZ_TEST_NAME" \
- --arg output "$ERROR_OUTPUT" \
- --arg elapsed "$ELAPSED" \
- '{
- Package: $pkg,
- Type: "test",
- failures: [{
- Test: $test,
- Elapsed: $elapsed,
- Output: $output
- }]
- }')
-
- echo "๐ Adding fuzz test failure to summary:"
- echo "$FUZZ_JSON" | jq '.'
-
- # Add to summary file
- jq --argjson new_entry "$FUZZ_JSON" '. += [$new_entry]' test-failures-summary.json > test-failures-summary.json.tmp
- mv test-failures-summary.json.tmp test-failures-summary.json
- fi
- done < "$FAILED_TESTS_LIST"
-
- # Deduplicate fuzz test failures (handle nested test failures with same name)
- # Keep only the entry with the longest elapsed time (parent test)
- if [[ -f test-failures-summary.json ]] && [[ -s test-failures-summary.json ]]; then
- echo "๐ Deduplicating fuzz test failures within each package..."
-
- # Create a deduplicated version by flattening all failures and grouping by test name only
- jq '
- # First, flatten all failures from all packages
- [.[] | {Package: .Package, Type: .Type} as $parent | .failures[] | . + {PackageName: $parent.Package, Type: $parent.Type}] |
-
- # Then group by test name across all packages
- group_by(.Test) |
-
- # For each test name, pick the entry with longest elapsed time
- map(
- sort_by(.Elapsed |
- if . == "unknown" then -1
- elif test("^[0-9.]+[a-z]*$") then
- (. | gsub("[a-z]"; "") | tonumber)
- else 0
- end
- ) |
- .[-1]
- ) |
-
- # Reconstruct the package structure
- group_by(.PackageName) |
- map({
- Package: .[0].PackageName,
- Type: .[0].Type,
- failures: [.[] | {Test: .Test, Elapsed: .Elapsed, Output: .Output}]
- })
- ' test-failures-summary.json > test-failures-summary.json.dedup
-
- mv test-failures-summary.json.dedup test-failures-summary.json
-              echo "✅ Deduplicated fuzz test failures - kept entries with longest elapsed time"
- fi
-
- # Cleanup temp file
- rm -f "$FAILED_TESTS_LIST"
- else
- echo "โ ๏ธ No failed fuzz tests found in fuzz-output.log"
- fi
-
- # Also update the signatures file if it exists but is empty
- if [[ -f fuzz-failures-signatures.json ]] && [[ $(jq 'length' fuzz-failures-signatures.json 2>/dev/null || echo "0") -eq 0 ]]; then
- echo "๐ Updating empty signatures file with fuzz test failures..."
-
- # Get package name from go.mod or use git context fallback
- PACKAGE_NAME="${{ github.repository }}"
- if [[ -f go.mod ]]; then
- PACKAGE_NAME=$(head -1 go.mod | awk '{print $2}')
- elif [[ -n "${{ github.server_url }}" && -n "${{ github.repository }}" ]]; then
- # Use GitHub context as fallback
- PACKAGE_NAME="${{ github.server_url }}/${{ github.repository }}"
- PACKAGE_NAME=${PACKAGE_NAME#https://}
- fi
-
- # Create signature entries from the failures
- SIGNATURE_ENTRIES='[]'
- if [[ -f test-failures-summary.json ]] && [[ $(jq 'length' test-failures-summary.json 2>/dev/null || echo "0") -gt 0 ]]; then
- # Store GitHub Actions values in bash variable for proper escaping
- MATRIX_JOB_ID="fuzz-${{ inputs.primary-runner }}-${{ inputs.go-primary-version }}"
-
- SIGNATURE_ENTRIES=$(jq --arg matrix_job "$MATRIX_JOB_ID" '[
- .[] as $parent | $parent.failures[] | {
- type: "test",
- package: $parent.Package,
- test: .Test,
- output: .Output,
- signature: ($parent.Package + ":" + .Test),
- unique_id: (($parent.Package + ":" + .Test) | gsub("[^a-zA-Z0-9_/.-]"; "_")),
- matrix_job: $matrix_job
- }
- ]' test-failures-summary.json)
- fi
-
- # Deduplicate signature entries by test name (handle any remaining duplicates)
- # Group by test name only and prefer entries with valid package names
- SIGNATURE_ENTRIES_DEDUP=$(echo "$SIGNATURE_ENTRIES" | jq '[
- group_by(.test) |
- .[] |
- # Sort by package validity: valid package names last (highest priority)
- sort_by(.package |
- if test("^[0-9.]+[a-z]*$") then 0 # Elapsed time pattern (lowest priority)
- else 1 # Valid package name (highest priority)
- end
- ) |
- .[-1] # Take the last entry (valid package name if available)
- ]')
-
- echo "$SIGNATURE_ENTRIES_DEDUP" > fuzz-failures-signatures.json
-            echo "✅ Updated signatures file with $(echo "$SIGNATURE_ENTRIES_DEDUP" | jq 'length') deduplicated entries"
- fi
-
-          echo "✅ Structured failure summary created with $(jq 'length' test-failures-summary.json 2>/dev/null || echo "0") fuzz test packages"
-
- # Debug: Show the created summary
- if [[ -f test-failures-summary.json ]]; then
- echo "๐ Final fuzz failure summary:"
- jq . test-failures-summary.json
- fi
-
- # If no structured failures were found but exit code indicates failure, add generic entry
- if [[ $(jq 'length' test-failures-summary.json 2>/dev/null || echo "0") -eq 0 ]] && [[ "${{ steps.run-fuzz-tests.outputs.fuzz-exit-code }}" != "0" ]]; then
- echo "โ ๏ธ Exit code indicates failure but no structured failures found - adding generic fuzz failure entry"
-
- # Get package name from go.mod or use git context fallback
- PACKAGE_NAME="${{ github.repository }}"
- if [[ -f go.mod ]]; then
- PACKAGE_NAME=$(head -1 go.mod | awk '{print $2}')
- elif [[ -n "${{ github.server_url }}" && -n "${{ github.repository }}" ]]; then
- PACKAGE_NAME="${{ github.server_url }}/${{ github.repository }}"
- PACKAGE_NAME=${PACKAGE_NAME#https://}
- fi
-
- # Capture last 20 lines of output as error context
- LAST_OUTPUT=$(tail -20 fuzz-output.log 2>/dev/null || echo "No output available")
-
- jq -n --arg pkg "$PACKAGE_NAME" --arg output "$LAST_OUTPUT" '[{
- Package: $pkg,
- Type: "test",
- failures: [{
- Test: "fuzz_test_generic_failure",
- Elapsed: "unknown",
- Output: $output
- }]
- }]' > test-failures-summary.json
-
-            echo "✅ Added generic fuzz failure entry to summary"
- fi
- else
- echo "โ ๏ธ No fuzz-output.log found, creating minimal failure entry"
-
- # Create minimal entry when output log is missing
- # Get package name from go.mod or use git context fallback
- PACKAGE_NAME="${{ github.repository }}"
- if [[ -f go.mod ]]; then
- PACKAGE_NAME=$(head -1 go.mod | awk '{print $2}')
- elif [[ -n "${{ github.server_url }}" && -n "${{ github.repository }}" ]]; then
- # Use GitHub context as fallback
- PACKAGE_NAME="${{ github.server_url }}/${{ github.repository }}"
- PACKAGE_NAME=${PACKAGE_NAME#https://}
- fi
-
- jq -n --arg pkg "$PACKAGE_NAME" '[{
- Package: $pkg,
- Type: "test",
- failures: [{
- Test: "unknown_fuzz_test",
- Elapsed: "unknown",
- Output: "Fuzz test failed but no output log available (exit code: ${{ steps.run-fuzz-tests.outputs.fuzz-exit-code }})"
- }]
- }]' > test-failures-summary.json
- fi
-
- # --------------------------------------------------------------------
- # Fuzz test failure analysis and reporting
+ # Fuzz test failure analysis and reporting (using magex CI mode JSONL output)
# --------------------------------------------------------------------
- name: ๐จ Create Fuzz Test Failure Summary
if: failure()
@@ -453,52 +161,33 @@ jobs:
echo "- **Go Version**: ${{ inputs.go-primary-version }}" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
- if [ -f fuzz-output.log ]; then
- # Use robust failure detection for fuzz tests
- if command -v detect_failures_from_text >/dev/null 2>&1; then
- # Use robust detection if functions are available
- detect_failures_from_text "fuzz-output.log" "fuzz-failures.txt" || true
- FAIL_COUNT=$(wc -l < fuzz-failures.txt 2>/dev/null || echo "0")
- echo "๐ Using robust fuzz failure detection: $FAIL_COUNT"
- else
- # Fallback: use refined patterns that only match actual test failures
- FAIL_COUNT=$(grep -c -E "^---[[:space:]]*FAIL:[[:space:]]*[A-Za-z][A-Za-z0-9_]*" fuzz-output.log 2>/dev/null || echo "0")
- FAIL_COUNT=$(echo "$FAIL_COUNT" | tr -d '\n\r' | xargs)
- [[ "$FAIL_COUNT" =~ ^[0-9]+$ ]] || FAIL_COUNT=0
- echo "โ ๏ธ Using refined patterns for fuzz failure detection: $FAIL_COUNT"
- fi
-
- # Enhanced panic detection with numeric validation
- PANIC_COUNT=$(grep -c -E "panic:|fatal error:|runtime error:" fuzz-output.log 2>/dev/null || echo "0")
- PANIC_COUNT=$(echo "$PANIC_COUNT" | tr -d '\n\r' | xargs)
- [[ "$PANIC_COUNT" =~ ^[0-9]+$ ]] || PANIC_COUNT=0
-
+ # Use magex CI mode JSONL output if available
+ JSONL_FILE=".mage-x/ci-results-fuzz.jsonl"
+ if [ -f "$JSONL_FILE" ]; then
+ FAIL_COUNT=$(grep -c '"type":"failure"' "$JSONL_FILE" 2>/dev/null || echo "0")
echo "- **Failed Fuzz Tests**: $FAIL_COUNT" >> $GITHUB_STEP_SUMMARY
- echo "- **Panics**: $PANIC_COUNT" >> $GITHUB_STEP_SUMMARY
- echo "" >> $GITHUB_STEP_SUMMARY
if [ "$FAIL_COUNT" -gt 0 ]; then
- echo "### ๐ Failed Fuzz Test Summary (First 5)" >> $GITHUB_STEP_SUMMARY
- echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
- if [[ -f fuzz-failures.txt ]] && [[ -s fuzz-failures.txt ]]; then
- head -5 fuzz-failures.txt >> $GITHUB_STEP_SUMMARY
- else
- # Fallback: use refined patterns that only match test failures
- grep -E "^---[[:space:]]*FAIL:[[:space:]]*[A-Za-z][A-Za-z0-9_]*" fuzz-output.log | head -5 >> $GITHUB_STEP_SUMMARY
- fi
- echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
+ echo "### ๐ Failed Fuzz Tests" >> $GITHUB_STEP_SUMMARY
+ echo '```' >> $GITHUB_STEP_SUMMARY
+ grep '"type":"failure"' "$JSONL_FILE" | head -5 | jq -r '.failure.test + " - " + (.failure.error // "test failed")' >> $GITHUB_STEP_SUMMARY
+ echo '```' >> $GITHUB_STEP_SUMMARY
fi
+ elif [ -f fuzz-output.log ]; then
+ # Fallback to log file parsing
+ FAIL_COUNT=$(grep -c -E "^--- FAIL:" fuzz-output.log 2>/dev/null || echo "0")
+ echo "- **Failed Fuzz Tests**: $FAIL_COUNT" >> $GITHUB_STEP_SUMMARY
- if [ "$PANIC_COUNT" -gt 0 ]; then
- echo "### ๐จ Fuzz Test Panic/Error Summary" >> $GITHUB_STEP_SUMMARY
- echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
- # Enhanced panic/error detection patterns
- grep -A 2 -B 1 -E "panic:|fatal error:|runtime error:" fuzz-output.log | head -20 >> $GITHUB_STEP_SUMMARY
- echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
+ if [ "$FAIL_COUNT" -gt 0 ]; then
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "### ๐ Failed Fuzz Tests" >> $GITHUB_STEP_SUMMARY
+ echo '```' >> $GITHUB_STEP_SUMMARY
+ grep -E "^--- FAIL:" fuzz-output.log | head -5 >> $GITHUB_STEP_SUMMARY
+ echo '```' >> $GITHUB_STEP_SUMMARY
fi
else
- echo "โ ๏ธ Fuzz test output log not found" >> $GITHUB_STEP_SUMMARY
+ echo "โ ๏ธ No fuzz test output found" >> $GITHUB_STEP_SUMMARY
fi
# --------------------------------------------------------------------
@@ -507,50 +196,30 @@ jobs:
- name: ๐ Annotate Key Fuzz Test Failures
if: failure()
run: |
- if [ -f fuzz-output.log ]; then
- echo "::group::๐ Fuzz Test Failure Analysis (Enhanced)"
+ # Use magex CI mode JSONL output if available
+ JSONL_FILE=".mage-x/ci-results-fuzz.jsonl"
+ if [ -f "$JSONL_FILE" ]; then
+ FAIL_COUNT=$(grep -c '"type":"failure"' "$JSONL_FILE" 2>/dev/null || echo "0")
+ echo "::error title=Fuzz Test Suite Failed::$FAIL_COUNT fuzz tests failed on ${{ inputs.primary-runner }} Go ${{ inputs.go-primary-version }}"
- # Count and report overall statistics using robust detection
- if [[ -f fuzz-failures.txt ]] && [[ -s fuzz-failures.txt ]]; then
- FAIL_COUNT=$(wc -l < fuzz-failures.txt 2>/dev/null || echo "0")
- echo "๐ Using robust fuzz failure count for annotation: $FAIL_COUNT"
- else
- # Fallback: use refined patterns that only match test failures
- FAIL_COUNT=$(grep -c -E "^---[[:space:]]*FAIL:[[:space:]]*[A-Za-z][A-Za-z0-9_]*" fuzz-output.log 2>/dev/null || echo "0")
- FAIL_COUNT=$(echo "$FAIL_COUNT" | tr -d '\n\r' | xargs)
- [[ "$FAIL_COUNT" =~ ^[0-9]+$ ]] || FAIL_COUNT=0
- echo "โ ๏ธ Using refined patterns for fuzz annotation: $FAIL_COUNT"
- fi
-
- # Enhanced panic detection with numeric validation
- PANIC_COUNT=$(grep -c -E "panic:|fatal error:|runtime error:" fuzz-output.log 2>/dev/null || echo "0")
- PANIC_COUNT=$(echo "$PANIC_COUNT" | tr -d '\n\r' | xargs)
- [[ "$PANIC_COUNT" =~ ^[0-9]+$ ]] || PANIC_COUNT=0
-
- echo "::error title=Fuzz Test Suite Failed::$FAIL_COUNT fuzz tests failed, $PANIC_COUNT panics/errors detected on ${{ inputs.primary-runner }} Go ${{ inputs.go-primary-version }}"
-
- # Annotate first 3 failed fuzz tests using robust detection
- if [[ "$FAIL_COUNT" -gt 0 ]]; then
- if [[ -f fuzz-failures.txt ]] && [[ -s fuzz-failures.txt ]]; then
- head -3 fuzz-failures.txt | while IFS= read -r line; do
- echo "::error title=Failed Fuzz Test::$line"
- done
- else
- # Fallback: use refined patterns that only match test failures
- grep -E "^---[[:space:]]*FAIL:[[:space:]]*[A-Za-z][A-Za-z0-9_]*" fuzz-output.log | head -3 | while IFS= read -r line; do
- echo "::error title=Failed Fuzz Test::$line"
- done
- fi
+ # Annotate first 3 failures from JSONL
+ if [ "$FAIL_COUNT" -gt 0 ]; then
+ grep '"type":"failure"' "$JSONL_FILE" | head -3 | while read -r line; do
+ TEST=$(echo "$line" | jq -r '.failure.test')
+ ERROR=$(echo "$line" | jq -r '.failure.error // "test failed"')
+ echo "::error title=Failed Fuzz Test::$TEST - $ERROR"
+ done
fi
+ elif [ -f fuzz-output.log ]; then
+ # Fallback to log file parsing
+ FAIL_COUNT=$(grep -c -E "^--- FAIL:" fuzz-output.log 2>/dev/null || echo "0")
+ echo "::error title=Fuzz Test Suite Failed::$FAIL_COUNT fuzz tests failed on ${{ inputs.primary-runner }} Go ${{ inputs.go-primary-version }}"
- # Annotate panics/errors with enhanced patterns
- if [ "$PANIC_COUNT" -gt 0 ]; then
- grep -B 1 -E "panic:|fatal error:|runtime error:" fuzz-output.log | head -2 | while IFS= read -r line; do
- echo "::error title=Fuzz Test Panic/Error::$line"
+ if [ "$FAIL_COUNT" -gt 0 ]; then
+ grep -E "^--- FAIL:" fuzz-output.log | head -3 | while read -r line; do
+ echo "::error title=Failed Fuzz Test::$line"
done
fi
-
- echo "::endgroup::"
fi
# --------------------------------------------------------------------
@@ -608,22 +277,7 @@ jobs:
with:
name: test-results-fuzz-${{ inputs.primary-runner }}-${{ inputs.go-primary-version }}
path: |
+ .mage-x/ci-results-fuzz.jsonl
fuzz-output.log
- fuzz-failures.txt
- fuzz-failures-detailed.txt
- test-failures-summary.json
- fuzz-failures-signatures.json
retention-days: 1
if-no-files-found: ignore
-
- # --------------------------------------------------------------------
- # Upload fuzz test statistics for completion report
- # --------------------------------------------------------------------
- - name: ๐ค Upload fuzz test statistics
- if: always() && steps.fuzz-summary.outputs.statistics-file != ''
- uses: ./.github/actions/upload-statistics
- with:
- artifact-name: test-stats-fuzz-${{ inputs.primary-runner }}-${{ inputs.go-primary-version }}
- artifact-path: ${{ steps.fuzz-summary.outputs.statistics-file }}
- retention-days: "1"
- if-no-files-found: "ignore"
diff --git a/.github/workflows/fortress-test-matrix.yml b/.github/workflows/fortress-test-matrix.yml
index 1875709..7f9baa4 100644
--- a/.github/workflows/fortress-test-matrix.yml
+++ b/.github/workflows/fortress-test-matrix.yml
@@ -2,16 +2,20 @@
# Test Matrix Execution (Reusable Workflow) (GoFortress)
#
# Purpose: Execute Go tests across multiple operating systems and Go versions
-# in a matrix strategy with comprehensive failure detection and statistics collection.
+# in a matrix strategy with native CI mode for failure detection and reporting.
#
# This workflow handles:
# - Multi-platform test execution (ubuntu, windows, macOS)
# - Multiple Go version testing (primary, secondary)
# - Race detection and code coverage
-# - Test failure detection and reporting
-# - Statistics collection and artifact uploading
+# - Native CI mode for GitHub annotations and JSONL output
# - Cache performance tracking
#
+# CI Mode: magex automatically detects GitHub Actions and produces:
+# - GitHub annotations with file:line locations
+# - Step summary written to $GITHUB_STEP_SUMMARY
+# - Structured output at .mage-x/ci-results.jsonl
+#
# Maintainer: @mrz1836
#
# ------------------------------------------------------------------------------------
@@ -167,60 +171,6 @@ jobs:
runner-os: ${{ matrix.os }}
use-local: ${{ env.MAGE_X_USE_LOCAL }}
- # --------------------------------------------------------------------
- # Start test timer
- # --------------------------------------------------------------------
- - name: โฑ๏ธ Start test timer
- id: test-timer
- run: |
- TEST_START=$(date +%s)
- echo "test-start=$TEST_START" >> $GITHUB_OUTPUT
- echo "๐ Test timer started at: $(date -u +%Y-%m-%dT%H:%M:%SZ)"
-
- # --------------------------------------------------------------------
- # Detect test output mode based on test count and configuration
- # --------------------------------------------------------------------
- - name: ๐ Detect test output mode
- id: detect-mode
- run: |
- # Count total tests in the repository (excluding vendor and testdata)
- GO_MODULE_DIR="${{ env.GO_MODULE_DIR }}"
- if [ -n "$GO_MODULE_DIR" ]; then
- TEST_COUNT=$(cd "$GO_MODULE_DIR" && find . -type f -name '*_test.go' \
- -not -path './vendor/*' \
- -not -path './third_party/*' \
- -not -path './testdata/*' \
- -exec grep -hE '^\s*func (\([^)]+\) )?Test[A-Z0-9_]' {} + | wc -l | xargs)
- else
- TEST_COUNT=$(find . -type f -name '*_test.go' \
- -not -path './vendor/*' \
- -not -path './third_party/*' \
- -not -path './testdata/*' \
- -exec grep -hE '^\s*func (\([^)]+\) )?Test[A-Z0-9_]' {} + | wc -l | xargs)
- fi
-
- # Sanitize count (ensure it's numeric)
- TEST_COUNT=${TEST_COUNT:-0}
- if ! [[ "$TEST_COUNT" =~ ^[0-9]+$ ]]; then
- TEST_COUNT=0
- fi
-
- echo "๐ Total tests found: $TEST_COUNT"
-
- # Determine output mode based on test count and failure patterns
- # For large test suites (>100 tests), use FAILURES_ONLY to reduce noise
- # For smaller test suites, use FULL to see all output
- if [[ "$TEST_COUNT" -gt 100 ]]; then
- DETECTED_MODE="FAILURES_ONLY"
- echo "๐๏ธ Using FAILURES_ONLY mode for large test suite ($TEST_COUNT tests)"
- else
- DETECTED_MODE="FULL"
- echo "๐๏ธ Using FULL mode for manageable test suite ($TEST_COUNT tests)"
- fi
-
- echo "detected-mode=$DETECTED_MODE" >> $GITHUB_OUTPUT
- echo "test-count=$TEST_COUNT" >> $GITHUB_OUTPUT
-
# --------------------------------------------------------------------
# Setup Redis service using composite action with caching
# --------------------------------------------------------------------
@@ -237,40 +187,32 @@ jobs:
trust-service-health: ${{ inputs.redis-trust-service-health }}
# --------------------------------------------------------------------
- # Setup test failure detection functions
+ # Start test timer
# --------------------------------------------------------------------
- - name: ๐ง Setup failure detection
- uses: ./.github/actions/test-failure-detection
- with:
- output-file: "test-output.log"
- mode: "text" # Will be overridden during actual detection
+ - name: โฑ๏ธ Start test timer
+ id: test-timer
+ run: |
+ TEST_START=$(date +%s)
+ echo "test-start=$TEST_START" >> $GITHUB_OUTPUT
+ echo "๐ Test timer started at: $(date -u +%Y-%m-%dT%H:%M:%SZ)"
# --------------------------------------------------------------------
- # Run tests with appropriate configuration
+ # Run tests with native CI mode
+ # CI mode auto-detects GitHub Actions and produces:
+ # - GitHub annotations (::error file=path,line=N::)
+ # - Step summary ($GITHUB_STEP_SUMMARY)
+ # - Structured output (.mage-x/ci-results.jsonl)
# --------------------------------------------------------------------
- name: ๐งช Run tests
id: run-tests
continue-on-error: true
run: |
- # Sanitize inherited environment variables first
- TEST_COUNT=${TEST_COUNT:-0}
- TEST_COUNT=$(echo "$TEST_COUNT" | xargs)
- if ! [[ "$TEST_COUNT" =~ ^[0-9]+$ ]]; then
- TEST_COUNT=0
- fi
- export TEST_COUNT
-
- # Safely assign values to shell vars with defaults
+ # Determine test type based on inputs
RACE="${{ inputs.race-detection-enabled || 'false' }}"
COVER="${{ inputs.code-coverage-enabled || 'false' }}"
- MODE="${{ steps.detect-mode.outputs.detected-mode || 'FULL' }}"
echo "๐ Race Detection Enabled: $RACE"
echo "๐ Code Coverage Enabled: $COVER"
- echo "๐๏ธ Output Mode: $MODE"
-
- # Initialize test exit code
- TEST_EXIT_CODE=0
# Build unified magex command with timeout and appropriate test type
if [[ "$RACE" == "true" && "$COVER" == "true" ]]; then
@@ -291,109 +233,60 @@ jobs:
echo "๐ Running tests without coverage or race detection (timeout: $TEST_TIMEOUT)..."
fi
- # Build command with JSON flag for FAILURES_ONLY mode
+ # magex CI mode auto-detects GitHub Actions and produces structured output
MAGEX_CMD="magex test:${TEST_TYPE} -timeout $TEST_TIMEOUT"
- if [[ "$MODE" == "FAILURES_ONLY" ]]; then
- MAGEX_CMD="$MAGEX_CMD -json"
- fi
-
- echo "๐ง Running: $MAGEX_CMD (timeout: $TEST_TIMEOUT)"
+ echo "๐ง Running: $MAGEX_CMD"
+ echo ""
- # Pre-execution diagnostic info for better visibility
- if [ -n "$GO_MODULE_DIR" ]; then
- PACKAGE_COUNT=$(cd "$GO_MODULE_DIR" && find . -name '*.go' -not -path './vendor/*' -not -path './third_party/*' | xargs dirname | sort -u | wc -l | xargs)
+ # Execute tests - CI mode automatically:
+ # - Parses test output in real-time
+ # - Emits GitHub annotations for failures
+ # - Writes step summary
+ # - Creates .mage-x/ci-results.jsonl
+ set +e
+ if [ "$ENABLE_MULTI_MODULE_TESTING" == "true" ]; then
+ echo "๐ง Multi-module testing enabled - running from repository root"
+ $MAGEX_CMD 2>&1 | tee test-output.log
+ elif [ -n "$GO_MODULE_DIR" ]; then
+ echo "๐ง Running from directory: $GO_MODULE_DIR"
+ (cd "$GO_MODULE_DIR" && $MAGEX_CMD) 2>&1 | tee test-output.log
else
- PACKAGE_COUNT=$(find . -name '*.go' -not -path './vendor/*' -not -path './third_party/*' | xargs dirname | sort -u | wc -l | xargs)
+ echo "๐ง Running from repository root"
+ $MAGEX_CMD 2>&1 | tee test-output.log
fi
- PACKAGE_COUNT=${PACKAGE_COUNT:-0}
- echo "๐ Starting test execution:"
- echo " โข Total tests: $TEST_COUNT"
- echo " โข Test packages: $PACKAGE_COUNT"
- echo " โข Test mode: $TEST_TYPE"
- echo " โข Output mode: $MODE"
- ESTIMATED_TIME=$(( ($TEST_COUNT + 49) / 50 ))
- echo " โข Estimated time: $ESTIMATED_TIME minutes (based on ~50 tests/minute)"
- echo " โข Timeout: $TEST_TIMEOUT"
- echo ""
-
- START_TIME=$(date +%s)
- export START_TIME
-
- # Execute based on detected mode with simplified processing
- set +e # Don't exit on error to capture exit code properly
- if [[ "$MODE" == "FULL" ]]; then
- echo "๐ Using FULL output mode - showing all test output"
- echo "๐ Executing: $MAGEX_CMD"
- if [ "$ENABLE_MULTI_MODULE_TESTING" == "true" ]; then
- echo "๐ง Multi-module testing enabled - running from repository root"
- echo "๐ฆ magex will discover all Go modules and merge coverage"
- $MAGEX_CMD 2>&1 | tee test-output.log
- elif [ -n "$GO_MODULE_DIR" ]; then
- echo "๐ง Running from directory: $GO_MODULE_DIR"
- (cd "$GO_MODULE_DIR" && $MAGEX_CMD) 2>&1 | tee test-output.log
- else
- echo "๐ง Running from repository root"
- $MAGEX_CMD 2>&1 | tee test-output.log
- fi
- TEST_EXIT_CODE=${PIPESTATUS[0]}
- echo "๐ Magex command exit code: $TEST_EXIT_CODE"
-
- # Extract failures for summary (even in full mode) using robust detection
- if [[ $TEST_EXIT_CODE -ne 0 ]]; then
- source test-failure-functions.sh 2>/dev/null || true
- detect_failures_from_text "test-output.log" "test-failures.txt" || true
- fi
-
- elif [[ "$MODE" == "FAILURES_ONLY" ]]; then
- echo "๐ Using FAILURES_ONLY mode - JSON output with failure extraction"
- echo "๐ Executing: $MAGEX_CMD"
- if [ "$ENABLE_MULTI_MODULE_TESTING" == "true" ]; then
- echo "๐ง Multi-module testing enabled - running from repository root"
- echo "๐ฆ magex will discover all Go modules and merge coverage"
- $MAGEX_CMD 2>&1 | tee test-output.log
- elif [ -n "$GO_MODULE_DIR" ]; then
- echo "๐ง Running from directory: $GO_MODULE_DIR"
- (cd "$GO_MODULE_DIR" && $MAGEX_CMD) 2>&1 | tee test-output.log
- else
- echo "๐ง Running from repository root"
- $MAGEX_CMD 2>&1 | tee test-output.log
- fi
- TEST_EXIT_CODE=${PIPESTATUS[0]}
- echo "๐ Magex command exit code: $TEST_EXIT_CODE"
-
- # Always extract failures in JSON mode to check for test failures
- source test-failure-functions.sh 2>/dev/null || true
- detect_failures_from_json "test-output.log" "test-failures.txt" || true
+ TEST_EXIT_CODE=${PIPESTATUS[0]}
+ set -e
- # JSON failure override: Check if JSON contains ACTUAL test failures when exit code is 0
- if [[ $TEST_EXIT_CODE -eq 0 ]] && [[ -f test-output.log ]]; then
- echo "๐ Checking JSON output for actual test failures despite exit code 0..."
+ # Set outputs
+ echo "test-exit-code=$TEST_EXIT_CODE" >> $GITHUB_OUTPUT
+ echo "๐ Test execution completed with exit code: $TEST_EXIT_CODE"
- # Check for JSON test failure entries or error patterns
- JSON_FAILURES=0
- if grep -q '^{' test-output.log 2>/dev/null; then
- # Count ONLY failed test events, not package/suite failures which are expected
- # Look for Action="fail" AND Package field AND Test field (indicating actual test failure)
- JSON_FAILURES=$(grep '^{' test-output.log 2>/dev/null | \
- jq -r 'select(.Action == "fail" and .Test != null and .Test != "" and (.Package // "") != "" and (.Test | test("^Test[A-Za-z].*"))) | .Test' 2>/dev/null | wc -l | xargs || echo "0")
+ # Calculate duration
+ TEST_END=$(date +%s)
+ TEST_DURATION=$((TEST_END - ${{ steps.test-timer.outputs.test-start }}))
+ echo "test-duration=$TEST_DURATION" >> $GITHUB_OUTPUT
+ echo "โฑ๏ธ Test duration: ${TEST_DURATION}s"
- if [[ $JSON_FAILURES -gt 0 ]]; then
- echo "โ ๏ธ Found $JSON_FAILURES actual failing test functions in JSON output"
- echo "๐ง Overriding exit code from 0 to 1 due to detected test failures"
- TEST_EXIT_CODE=1
- else
- echo "✅ JSON output contains no actual test failures (exit code 0 is correct)"
- fi
- fi
+ # --------------------------------------------------------------------
+ # Normalize CI results location for multi-module projects
+ # --------------------------------------------------------------------
+ - name: ๐ Normalize CI results location
+ if: always()
+ run: |
+ # Ensure CI results are in expected location for artifact upload
+ # When running from GO_MODULE_DIR, results are created there
+ if [ -n "$GO_MODULE_DIR" ] && [ -f "$GO_MODULE_DIR/.mage-x/ci-results.jsonl" ]; then
+ mkdir -p .mage-x
+ if ! cp "$GO_MODULE_DIR/.mage-x/ci-results.jsonl" .mage-x/; then
+ echo "โ Failed to copy CI results from $GO_MODULE_DIR/.mage-x/ci-results.jsonl to .mage-x/"
+ exit 1
fi
+ echo "✅ Copied CI results from module directory: $GO_MODULE_DIR"
+ elif [ -f ".mage-x/ci-results.jsonl" ]; then
+ echo "✅ CI results already in expected location"
+ else
+ echo "โ ๏ธ No CI results file found (tests may have passed with no failures)"
fi
- set -e
-
- # Set outputs for other steps
- echo "test-exit-code=$TEST_EXIT_CODE" >> $GITHUB_OUTPUT
- echo "output-mode=$MODE" >> $GITHUB_OUTPUT
-
- echo "๐ Test execution completed with exit code: $TEST_EXIT_CODE"
# --------------------------------------------------------------------
# Normalize coverage file name if coverage was generated
@@ -401,15 +294,14 @@ jobs:
- name: ๐ Normalize coverage file name
if: inputs.code-coverage-enabled == 'true' && steps.run-tests.outputs.test-exit-code == '0'
run: |
- # Check if we need to look in the module directory first
GO_MODULE_DIR="${{ env.GO_MODULE_DIR }}"
+ # Check module directory first
if [ -n "$GO_MODULE_DIR" ]; then
echo "๐ Looking for coverage files in module directory: $GO_MODULE_DIR"
- # Move coverage file from module directory to root if it exists there
for coverage_file in coverage.out coverage.txt cover.out cover.txt profile.out profile.txt; do
if [[ -f "$GO_MODULE_DIR/$coverage_file" ]]; then
- echo "๐ Found coverage file in module directory: $GO_MODULE_DIR/$coverage_file"
+ echo "๐ Found coverage file: $GO_MODULE_DIR/$coverage_file"
echo "๐ Moving to repository root as coverage.txt"
mv "$GO_MODULE_DIR/$coverage_file" coverage.txt
break
@@ -417,12 +309,12 @@ jobs:
done
fi
- # Look for coverage files in root directory (either originally there or just moved)
+ # Check root directory
for coverage_file in coverage.out coverage.txt cover.out cover.txt profile.out profile.txt; do
if [[ -f "$coverage_file" ]]; then
echo "๐ Found coverage file: $coverage_file"
if [[ "$coverage_file" != "coverage.txt" ]]; then
- echo "๐ Renaming $coverage_file to coverage.txt for consistency"
+ echo "๐ Renaming to coverage.txt"
mv "$coverage_file" coverage.txt
fi
echo "✅ Coverage file normalized to coverage.txt"
@@ -430,235 +322,13 @@ jobs:
fi
done
- # Verify coverage file exists and has content
+ # Verify
if [[ -f coverage.txt ]] && [[ -s coverage.txt ]]; then
- COVERAGE_LINES=$(wc -l < coverage.txt)
- echo "✅ Coverage file verified: $COVERAGE_LINES lines"
+ echo "✅ Coverage file verified: $(wc -l < coverage.txt) lines"
else
echo "โ ๏ธ No coverage file found or file is empty"
fi
- # --------------------------------------------------------------------
- # Inject matrix job info into failure signatures for deduplication tracking
- # --------------------------------------------------------------------
- - name: ๐ท๏ธ Add matrix job info to signatures
- if: always()
- run: |
- # Add matrix job information to signatures for tracking across matrix jobs
- if [[ -f test-failures-signatures.json ]]; then
- echo "๐ท๏ธ Adding matrix job information to failure signatures..."
- MATRIX_JOB_ID="${{ matrix.name }}"
-
- # Debug: Show signature file before processing
- echo "๐ Signature file before adding matrix info:"
- echo " โข File size: $(wc -c < test-failures-signatures.json) bytes"
- echo " โข Entry count: $(jq 'length' test-failures-signatures.json 2>/dev/null || echo 'invalid')"
- if [[ -s test-failures-signatures.json ]]; then
- echo " โข First entry:"
- jq '.[0]' test-failures-signatures.json 2>/dev/null | head -5 | sed 's/^/ /' || echo " (invalid JSON)"
- fi
-
- # Add matrix_job field to each signature entry
- jq --arg matrix_job "$MATRIX_JOB_ID" 'map(. + {matrix_job: $matrix_job})' test-failures-signatures.json > test-failures-signatures.tmp && \
- mv test-failures-signatures.tmp test-failures-signatures.json
-
- echo "✅ Added matrix job info ($MATRIX_JOB_ID) to $(jq 'length' test-failures-signatures.json 2>/dev/null || echo '0') signatures"
- else
- echo "โน๏ธ No signatures file found - creating empty file for matrix job ${{ matrix.name }}"
- echo '[]' > test-failures-signatures.json
- fi
-
- # --------------------------------------------------------------------
- # Create test failure summary if failures occurred
- # --------------------------------------------------------------------
- - name: ๐จ Create Test Failure Summary
- if: always() && steps.run-tests.outputs.test-exit-code != '0'
- run: |
- echo "๐จ Processing test failures for enhanced reporting..."
-
- FAILURES_EXIST=false
-
- # Check if we have failure details to process
- if [[ -f test-failures.txt ]] && [[ -s test-failures.txt ]]; then
- FAILURES_EXIST=true
- echo "๐ Found $(wc -l < test-failures.txt) test failures to process"
-
- # Create enhanced failure summary with structured JSON output
- echo "๐ Creating enhanced test failure summary..."
-
- # Initialize the JSON structure
- echo '[]' > test-failures-summary.json
-
- # Process each unique package that had failures
- TEMP_PACKAGES=$(mktemp)
-
- # Process both test failures and build failures
-
- # Extract package names from test failures using a named regex and
- # intermediate steps
- FAIL_REGEX='FAIL:.*\(([^)]+)\)'
- PACKAGE_LINE_REGEX='.*\(([^):]+)[^)]*\).*'
-
- grep -oE "$FAIL_REGEX" test-failures.txt 2>/dev/null > temp-fail-lines.txt
- sed -E "s/$PACKAGE_LINE_REGEX/\1/" temp-fail-lines.txt | sort -u > "$TEMP_PACKAGES"
- rm temp-fail-lines.txt
-
- if [[ -s "$TEMP_PACKAGES" ]]; then
- echo "๐ฆ Found packages with test failures:"
- cat "$TEMP_PACKAGES"
-
- # Process each package with test failures
- while IFS= read -r package; do
- if [[ -n "$package" ]]; then
- echo "๐ Processing test failures for package: $package"
-
- # Extract failures for this specific package
- PACKAGE_FAILURES=$(grep "($package)" test-failures.txt 2>/dev/null || true)
- if [[ -n "$PACKAGE_FAILURES" ]]; then
- # Create package failure entry for test failures
- PACKAGE_JSON=$(jq -n \
- --arg pkg "$package" \
- --arg failures "$PACKAGE_FAILURES" \
- '{
- Package: $pkg,
- Type: "test",
- failures: [
- $failures | split("\n")[] | select(length > 0) | {
- Test: (. | gsub("^.*FAIL: "; "") | gsub(" \\(.*\\)$"; "")),
- Output: .,
- Elapsed: "unknown"
- }
- ]
- }')
-
- # Add to summary
- jq --argjson new_entry "$PACKAGE_JSON" '. += [$new_entry]' test-failures-summary.json > test-failures-summary.json.tmp
- mv test-failures-summary.json.tmp test-failures-summary.json
- fi
- fi
- done < "$TEMP_PACKAGES"
- fi
-
- rm -f "$TEMP_PACKAGES"
-
- # Now handle build failures separately
- TEMP_BUILD_PACKAGES=$(mktemp)
-
- if grep "^--- BUILD FAILED:" test-failures.txt 2>/dev/null | sed 's/^--- BUILD FAILED: //' | sort -u > "$TEMP_BUILD_PACKAGES"; then
- echo "๐ฆ Found packages with build failures:"
- cat "$TEMP_BUILD_PACKAGES"
-
- # Process each package with build failures
- while IFS= read -r package; do
- if [[ -n "$package" ]]; then
- echo "๐จ Processing build failures for package: $package"
-
- # Extract build error details for this package
- BUILD_ERRORS=$(awk -v pkg="$package" '
- BEGIN { capture = 0; errors = "" }
- $0 == "--- BUILD FAILED: " pkg { capture = 1; next }
- /^--- (FAIL|BUILD FAILED):/ && $0 != "--- BUILD FAILED: " pkg { capture = 0 }
- capture && /^ / {
- if (errors) errors = errors "\n" substr($0, 5)
- else errors = substr($0, 5)
- }
- END { print errors }
- ' test-failures.txt)
-
- # Create package failure entry for build failures
- BUILD_JSON=$(jq -n \
- --arg pkg "$package" \
- --arg errors "$BUILD_ERRORS" \
- '{
- Package: $pkg,
- Type: "build",
- BuildErrors: ($errors | split("\n") | map(select(length > 0))),
- failures: [{
- Test: "Build compilation",
- Output: ("--- BUILD FAILED: " + $pkg + "\n" + $errors),
- Elapsed: "unknown"
- }]
- }')
-
- # Add to summary
- jq --argjson new_entry "$BUILD_JSON" '. += [$new_entry]' test-failures-summary.json > test-failures-summary.json.tmp
- mv test-failures-summary.json.tmp test-failures-summary.json
- fi
- done < "$TEMP_BUILD_PACKAGES"
- fi
-
- rm -f "$TEMP_BUILD_PACKAGES"
-
- echo "✅ Test failure summary created with $(jq 'length' test-failures-summary.json) packages"
-
- else
- echo "โน๏ธ No specific test failures detected, creating generic failure entry"
- # Create generic failure entry when exit code indicates failure but no specific patterns found
- jq -n \
- --arg exit_code "${{ steps.run-tests.outputs.test-exit-code }}" \
- '[{
- Package: "unknown",
- failures: [{
- Test: "Generic failure",
- Output: ("Test execution failed with exit code " + $exit_code),
- Elapsed: "unknown"
- }]
- }]' > test-failures-summary.json
- fi
-
- # Verify the summary file
- if [[ -f test-failures-summary.json ]]; then
- echo "๐ Final failure summary:"
- jq . test-failures-summary.json
- fi
-
- # --------------------------------------------------------------------
- # Create test failure annotations for GitHub PR/commit view
- # --------------------------------------------------------------------
- - name: ๐ Annotate Key Test Failures
- if: always() && steps.run-tests.outputs.test-exit-code != '0'
- run: |
- if [[ -f test-failures.txt ]] && [[ -s test-failures.txt ]]; then
- echo "๐ Creating GitHub annotations for key test failures..."
-
- # Limit to first 10 failures to avoid annotation overload
- head -10 test-failures.txt | while IFS= read -r failure_line; do
- if [[ -n "$failure_line" ]]; then
- # Extract test name and create annotation
- TEST_NAME=$(echo "$failure_line" | sed -E 's/^.*FAIL: ([^ ]+).*$/\1/' | head -c 100)
- echo "::error title=Test Failure::โ $TEST_NAME failed in ${{ matrix.name }}"
- fi
- done
-
- TOTAL_FAILURES=$(wc -l < test-failures.txt)
- if [[ "$TOTAL_FAILURES" -gt 10 ]]; then
- echo "::warning title=Additional Failures::โ ๏ธ $((TOTAL_FAILURES - 10)) additional test failures not shown in annotations"
- fi
-
- echo "✅ Created annotations for up to 10 test failures"
- else
- echo "::error title=Test Suite Failed::โ Test suite failed but no specific failure patterns detected"
- fi
-
- # --------------------------------------------------------------------
- # Generate comprehensive test statistics
- # --------------------------------------------------------------------
- - name: ๐ Calculate test statistics
- id: test-summary
- if: always()
- uses: ./.github/actions/test-statistics
- with:
- matrix-name: ${{ matrix.name }}
- matrix-os: ${{ matrix.os }}
- matrix-go-version: ${{ matrix.go-version }}
- test-exit-code: ${{ steps.run-tests.outputs.test-exit-code || '0' }}
- output-mode: ${{ steps.run-tests.outputs.output-mode || 'FULL' }}
- job-status: ${{ job.status }}
- test-start-time: ${{ steps.test-timer.outputs.test-start || '0' }}
- race-detection-enabled: ${{ inputs.race-detection-enabled }}
- code-coverage-enabled: ${{ inputs.code-coverage-enabled }}
- fuzz-run: "false"
-
# --------------------------------------------------------------------
# Collect cache performance statistics
# --------------------------------------------------------------------
@@ -691,21 +361,6 @@ jobs:
echo "โข Image Size: ${{ steps.setup-redis.outputs.image-size }}MB"
echo "โข Cache Operation Time: ${{ steps.setup-redis.outputs.cache-operation-time }}s"
echo "โข Connection Time: ${{ steps.setup-redis.outputs.connection-time }}s"
- echo "โข Installation Method: ${{ steps.setup-redis.outputs.installation-method }}"
-
- # Calculate total Redis setup time
- CACHE_TIME="${{ steps.setup-redis.outputs.cache-operation-time || '0' }}"
- CONNECTION_TIME="${{ steps.setup-redis.outputs.connection-time || '0' }}"
- TOTAL_TIME=$((CACHE_TIME + CONNECTION_TIME))
-
- echo "โข Total Setup Time: ${TOTAL_TIME}s"
-
- # Performance assessment
- if [[ "${{ steps.setup-redis.outputs.cache-hit }}" == "true" ]]; then
- echo "๐ Performance: Redis cache hit - faster startup achieved!"
- else
- echo "๐ฅ Performance: Redis pulled from Docker Hub - consider cache warming"
- fi
# --------------------------------------------------------------------
# Upload performance cache statistics for completion report
@@ -718,32 +373,19 @@ jobs:
retention-days: "1"
# --------------------------------------------------------------------
- # Upload test outputs and failure details for validation
+ # Upload CI results (native CI mode output)
# --------------------------------------------------------------------
- - name: ๐ค Upload test outputs and statistics
+ - name: ๐ค Upload CI results
if: always()
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
with:
- name: test-results-unit-${{ matrix.os }}-${{ matrix.go-version }}
+ name: ci-results-${{ matrix.os }}-${{ matrix.go-version }}
path: |
+ .mage-x/ci-results.jsonl
test-output.log
- test-failures.txt
- test-failures-detailed.txt
- test-failures-summary.json
- test-failures-signatures.json
retention-days: 1
if-no-files-found: ignore
- # --------------------------------------------------------------------
- # Upload test statistics for completion report
- # --------------------------------------------------------------------
- - name: ๐ค Upload test statistics
- uses: ./.github/actions/upload-statistics
- with:
- artifact-name: test-stats-${{ matrix.os }}-${{ matrix.go-version }}
- artifact-path: ${{ steps.test-summary.outputs.statistics-file }}
- retention-days: "1"
-
# --------------------------------------------------------------------
# Verify coverage file exists and upload for processing
# --------------------------------------------------------------------
@@ -755,7 +397,7 @@ jobs:
echo "๐ Coverage file size: $(wc -c < coverage.txt) bytes"
echo "๐ Coverage entries: $(wc -l < coverage.txt) lines"
- # Basic validation - ensure it looks like Go coverage format
+ # Basic validation
if head -1 coverage.txt | grep -q "mode:"; then
            echo "✅ Coverage file format validation passed"
else
diff --git a/.github/workflows/fortress-test-validation.yml b/.github/workflows/fortress-test-validation.yml
index cf5b3c8..319efc6 100644
--- a/.github/workflows/fortress-test-validation.yml
+++ b/.github/workflows/fortress-test-validation.yml
@@ -1,16 +1,18 @@
# ------------------------------------------------------------------------------------
# Test Results Validation (Reusable Workflow) (GoFortress)
#
-# Purpose: Validate and aggregate test results from all test workflows including
-# matrix tests, fuzz tests, and provide comprehensive failure analysis.
+# Purpose: Validate and aggregate test results from all test workflows using
+# native CI mode output (.mage-x/ci-results.jsonl).
#
# This workflow handles:
-# - Downloading test result artifacts from all test workflows
-# - Validating test statistics and exit codes
-# - Aggregating failure information across test types
-# - Providing detailed failure analysis with smart filtering
+# - Downloading CI results artifacts from test workflows
+# - Validating test exit codes and failure counts
+# - Aggregating failures across matrix jobs
# - Creating comprehensive validation reports
#
+# CI Mode Integration: magex CI mode produces .mage-x/ci-results.jsonl which
+# contains structured failure data with built-in deduplication.
+#
# Maintainer: @mrz1836
#
# ------------------------------------------------------------------------------------
@@ -66,36 +68,28 @@ jobs:
env-json: ${{ inputs.env-json }}
# --------------------------------------------------------------------
- # Download unit test result artifacts with resilience
+ # Download CI results from test matrix
# --------------------------------------------------------------------
- - name: ๐ฅ Download unit test results (with retry)
- uses: ./.github/actions/download-artifact-resilient
- with:
- pattern: "test-results-unit-*"
- path: test-results/
- merge-multiple: true
- max-retries: ${{ env.ARTIFACT_DOWNLOAD_RETRIES }}
- retry-delay: ${{ env.ARTIFACT_DOWNLOAD_RETRY_DELAY }}
- timeout: ${{ env.ARTIFACT_DOWNLOAD_TIMEOUT }}
- continue-on-error: ${{ env.ARTIFACT_DOWNLOAD_CONTINUE_ON_ERROR }}
-
- - name: ๐ฅ Download test statistics (with retry)
+ - name: ๐ฅ Download CI results
uses: ./.github/actions/download-artifact-resilient
with:
- pattern: "test-stats-*"
- path: test-results/
+ pattern: "ci-results-*"
+ path: ci-results/
merge-multiple: true
max-retries: ${{ env.ARTIFACT_DOWNLOAD_RETRIES }}
retry-delay: ${{ env.ARTIFACT_DOWNLOAD_RETRY_DELAY }}
timeout: ${{ env.ARTIFACT_DOWNLOAD_TIMEOUT }}
continue-on-error: ${{ env.ARTIFACT_DOWNLOAD_CONTINUE_ON_ERROR }}
- - name: ๐ฅ Download fuzz results (if enabled, with retry)
+ # --------------------------------------------------------------------
+ # Download fuzz test results if enabled
+ # --------------------------------------------------------------------
+ - name: ๐ฅ Download fuzz test results
if: inputs.fuzz-testing-enabled == 'true'
uses: ./.github/actions/download-artifact-resilient
with:
pattern: "test-results-fuzz-*"
- path: test-results/
+ path: ci-results/
merge-multiple: true
max-retries: ${{ env.ARTIFACT_DOWNLOAD_RETRIES }}
retry-delay: ${{ env.ARTIFACT_DOWNLOAD_RETRY_DELAY }}
@@ -103,708 +97,326 @@ jobs:
continue-on-error: ${{ env.ARTIFACT_DOWNLOAD_CONTINUE_ON_ERROR }}
# --------------------------------------------------------------------
- # Flatten downloaded artifacts to expected directory structure
- # --------------------------------------------------------------------
- - name: ๐๏ธ Flatten downloaded artifacts
- if: always()
- run: |
- echo "๐๏ธ Flattening downloaded artifacts..."
-
- # Process test statistics (move from subdirectories to test-results/)
- if [ -d "test-results/" ]; then
- echo "๐ Directory structure before flattening:"
- find test-results/ -name "*.json" -type f | head -10 | sed 's/^/ โข /' || echo " No JSON files found"
-
- # Process all JSON files including signatures
- find test-results/ -name "*.json" -type f | while read -r file; do
- filename=$(basename "$file")
- echo "Moving $file to test-results/$filename"
- cp "$file" "test-results/$filename"
- done
-
- echo "๐ Available files after flattening:"
- echo " โข Statistics files:"
- ls -la test-results/*-stats-*.json 2>/dev/null || echo " No statistics files found"
- echo " โข Signature files:"
- ls -la test-results/*test-failures-signatures.json 2>/dev/null || echo " No signature files found"
- echo " โข Summary files:"
- ls -la test-results/*test-failures-summary.json 2>/dev/null || echo " No summary files found"
- else
- echo "โ ๏ธ No test-results directory found"
- fi
-
- # --------------------------------------------------------------------
- # Deduplicate test failures across matrix jobs
- # --------------------------------------------------------------------
- - name: ๐ Deduplicate test failures
- run: |
- echo "๐ Deduplicating test failures across matrix jobs..."
-
- # Initialize deduplicated results
- echo '[]' > deduplicated-failures.json
-
- TOTAL_RAW_FAILURES=0
- TOTAL_UNIQUE_FAILURES=0
-
- # Collect all signature files from different matrix jobs (including subdirectories)
- # Check for both regular test signatures and fuzz test signatures
- SIGNATURE_PATTERN_1="test-results/*test-failures-signatures.json"
- SIGNATURE_PATTERN_2="test-results/*fuzz-failures-signatures.json"
-
- if compgen -G "$SIGNATURE_PATTERN_1" >/dev/null 2>&1 || compgen -G "$SIGNATURE_PATTERN_2" >/dev/null 2>&1; then
- echo "๐ Found failure signature files:"
- ls -la test-results/*test-failures-signatures.json test-results/*fuzz-failures-signatures.json 2>/dev/null || echo "No signature files found"
-
- # Debug: Show content of signature files
- echo "๐ Debugging signature file contents:"
- for sig_file in test-results/*test-failures-signatures.json test-results/*fuzz-failures-signatures.json; do
- if [[ -f "$sig_file" ]]; then
- echo "๐ File: $sig_file"
- echo " โข File size: $(wc -c < "$sig_file") bytes"
- echo " โข First few lines:"
- head -5 "$sig_file" | sed 's/^/ /' || echo " (empty or invalid)"
- fi
- done
-
- # Merge all signature files and deduplicate by signature
- for sig_file in test-results/*test-failures-signatures.json test-results/*fuzz-failures-signatures.json; do
- if [[ -s "$sig_file" ]]; then
- echo "๐ Processing: $sig_file"
- RAW_COUNT=$(jq 'length' "$sig_file" 2>/dev/null || echo "0")
- TOTAL_RAW_FAILURES=$((TOTAL_RAW_FAILURES + RAW_COUNT))
- echo " โข Raw failures in this file: $RAW_COUNT"
- else
- echo "๐ Skipping empty file: $sig_file"
- fi
- done
-
- # Combine all signature files and deduplicate
- # Create a list of all signature files that exist
- SIGNATURE_FILES=""
- for pattern in test-results/*test-failures-signatures.json test-results/*fuzz-failures-signatures.json; do
- if [[ -f "$pattern" ]]; then
- SIGNATURE_FILES="$SIGNATURE_FILES $pattern"
- fi
- done
-
- if [[ -n "$SIGNATURE_FILES" ]]; then
- jq -s 'add | group_by(.signature) | map({
- signature: .[0].signature,
- unique_id: .[0].unique_id,
- type: .[0].type,
- package: .[0].package,
- test: .[0].test,
- output: .[0].output,
- occurrences: length,
- matrix_jobs: [.[].matrix_job // "unknown"] | unique
- })' $SIGNATURE_FILES > deduplicated-failures.json 2>/dev/null || echo '[]' > deduplicated-failures.json
- else
- echo '[]' > deduplicated-failures.json
- fi
-
- TOTAL_UNIQUE_FAILURES=$(jq 'length' deduplicated-failures.json 2>/dev/null || echo "0")
-
- echo "๐ Deduplication Results:"
- echo " โข Total raw failure reports: $TOTAL_RAW_FAILURES"
- echo " โข Total unique failures: $TOTAL_UNIQUE_FAILURES"
-
- if [[ $TOTAL_RAW_FAILURES -gt $TOTAL_UNIQUE_FAILURES ]]; then
- DUPLICATE_COUNT=$((TOTAL_RAW_FAILURES - TOTAL_UNIQUE_FAILURES))
- echo " โข Duplicates eliminated: $DUPLICATE_COUNT"
- fi
- else
- echo "โ ๏ธ No signature files found - falling back to legacy detection"
- TOTAL_RAW_FAILURES=0
- TOTAL_UNIQUE_FAILURES=0
-
- # Count failures from legacy summary files when signature files are missing
- if compgen -G "test-results/*test-failures-summary.json" >/dev/null 2>&1; then
- echo "๐ Counting failures from legacy summary files..."
- for summary_file in test-results/*test-failures-summary.json; do
- if [[ -s "$summary_file" ]]; then
- echo "๐ Processing legacy file: $summary_file"
- # Count both test and build failures from the summary file
- LEGACY_TEST_FAILURES=$(jq '[.[] | select(.Type == "test" or .Type == null) | .failures[]?] | length' "$summary_file" 2>/dev/null || echo "0")
- LEGACY_BUILD_FAILURES=$(jq '[.[] | select(.Type == "build")] | length' "$summary_file" 2>/dev/null || echo "0")
- LEGACY_TOTAL=$((LEGACY_TEST_FAILURES + LEGACY_BUILD_FAILURES))
- TOTAL_UNIQUE_FAILURES=$((TOTAL_UNIQUE_FAILURES + LEGACY_TOTAL))
- echo " โข Test failures: $LEGACY_TEST_FAILURES"
- echo " โข Build failures: $LEGACY_BUILD_FAILURES"
- echo " โข Total from this file: $LEGACY_TOTAL"
- fi
- done
- echo "๐ Legacy detection found: $TOTAL_UNIQUE_FAILURES total failures"
- else
- echo "โ ๏ธ No legacy summary files found either"
- fi
- fi
-
- # Save deduplication stats for reporting
- jq -n --arg raw "$TOTAL_RAW_FAILURES" --arg unique "$TOTAL_UNIQUE_FAILURES" '{
- raw_failure_count: ($raw | tonumber),
- unique_failure_count: ($unique | tonumber),
- duplicates_eliminated: (($raw | tonumber) - ($unique | tonumber))
- }' > deduplication-stats.json
-
- # --------------------------------------------------------------------
- # Enhanced test results validation with deduplication
+ # Validate test results from CI mode JSONL output
# --------------------------------------------------------------------
- name: ๐ Validate test results
run: |
- echo "๐ Validating test results using deduplicated failure analysis..."
+ echo "๐ Validating test results from CI mode output..."
+ source .github/scripts/parse-test-label.sh
+
VALIDATION_FAILED=false
+ TOTAL_FAILURES=0
+ TOTAL_UNIQUE=0
TOTAL_TESTS=0
-
- # Load deduplication statistics
- RAW_FAILURES=0
- UNIQUE_FAILURES=0
- if [[ -f deduplication-stats.json ]]; then
- RAW_FAILURES=$(jq -r '.raw_failure_count // 0' deduplication-stats.json)
- UNIQUE_FAILURES=$(jq -r '.unique_failure_count // 0' deduplication-stats.json)
- DUPLICATES_ELIMINATED=$(jq -r '.duplicates_eliminated // 0' deduplication-stats.json)
- fi
-
- # Check regular test results for overall test counts and exit codes
- if compgen -G "test-results/test-stats-*.json" >/dev/null 2>&1; then
- echo "๐ Found test statistics files:"
- ls -la test-results/test-stats-*.json
-
- for stats_file in test-results/test-stats-*.json; do
- echo "๐ Checking $stats_file..."
-
- # Extract basic test results
- TEST_PASSED=$(jq -r '.test_passed // empty' "$stats_file")
- TEST_EXIT_CODE=$(jq -r '.test_exit_code // empty' "$stats_file")
- TEST_NAME=$(jq -r '.name // empty' "$stats_file")
- TEST_MODE=$(jq -r '.test_mode // "unknown"' "$stats_file")
- TEST_COUNT=$(jq -r '.test_count // 0' "$stats_file")
-
- echo " โข Test Suite: $TEST_NAME"
- echo " โข Mode: $TEST_MODE"
- echo " โข Tests: $TEST_COUNT"
- echo " โข Exit Code: $TEST_EXIT_CODE"
- echo " โข Passed: $TEST_PASSED"
-
- if [[ "$TEST_PASSED" == "false" ]] || [[ "$TEST_EXIT_CODE" != "0" ]]; then
- echo "โ Test suite '$TEST_NAME' failed with exit code $TEST_EXIT_CODE"
- VALIDATION_FAILED=true
- else
-            echo "✅ Test suite '$TEST_NAME' passed"
+ TOTAL_SKIPPED=0
+
+ # Find all CI results files
+ echo "๐ Looking for CI results files..."
+ find ci-results/ -name "ci-results.jsonl" -o -name "*.jsonl" 2>/dev/null | head -20
+
+ # Process each CI results file
+ # Note: Using find for recursive directory traversal to locate all matching files
+ if find ci-results/ -name "*.jsonl" 2>/dev/null | grep -q .; then
+          echo "✅ Found CI results JSONL files"
+
+ while IFS= read -r -d '' jsonl_file; do
+ # Extract artifact directory name from JSONL file path
+ # Supported directory structures:
+ # 1. Expected: ci-results/ARTIFACT_NAME/.mage-x/ci-results.jsonl
+ # โ Use grandparent (skip .mage-x) to get ARTIFACT_NAME
+ # 2. Fallback: ci-results/ARTIFACT_NAME/ci-results.jsonl
+ # โ Use parent directory as ARTIFACT_NAME
+ ARTIFACT_DIR=$(dirname "$(dirname "$jsonl_file")" | xargs basename)
+ JSONL_NAME=$(basename "$jsonl_file")
+
+ # Detect which structure we have by checking parent directory
+ PARENT_DIR=$(basename "$(dirname "$jsonl_file")")
+ if [[ "$PARENT_DIR" != ".mage-x" ]]; then
+ echo " Warning: Unexpected artifact structure for: $jsonl_file"
+ echo " Expected: ci-results/ARTIFACT_NAME/.mage-x/ci-results.jsonl"
+ # Fallback: parent is the artifact dir (not grandparent)
+ ARTIFACT_DIR=$(basename "$(dirname "$jsonl_file")")
fi
- TOTAL_TESTS=$((TOTAL_TESTS + TEST_COUNT))
- done
- else
- echo "โ ๏ธ No regular test statistics found"
- fi
-
- # Use deduplicated failure count as the authoritative failure count
- # If deduplication found failures, use that count
- # If deduplication failed but validation failed, use a fallback count
- if [[ $UNIQUE_FAILURES -gt 0 ]]; then
- TOTAL_FAILURES=$UNIQUE_FAILURES
-        echo "✅ Using deduplicated failure count: $TOTAL_FAILURES"
- elif [[ "$VALIDATION_FAILED" == "true" ]]; then
- # Count from legacy files if deduplication failed
- TOTAL_FAILURES=0
- if compgen -G "test-results/*test-failures-summary.json" >/dev/null 2>&1; then
- for summary_file in test-results/*test-failures-summary.json; do
- if [[ -s "$summary_file" ]]; then
- echo "๐ Processing summary file: $summary_file for failure counting"
-
- # Count test failures - handle both regular test format and fuzz test format
- # Regular test format: .failures array contains individual test failures (excluding fuzz tests)
- LEGACY_TEST_COUNT=$(jq '[.[] | select(.failures and ((.Type // null) != "test")) | .failures[]] | length' "$summary_file" 2>/dev/null || echo "0")
-
- # Count fuzz test failures - fuzz tests have Type field and failures array
- # This counts the number of individual test failures within fuzz test entries
- FUZZ_TEST_FAILURES=$(jq '[.[] | select((.Type // null) == "test") | (.failures // [])[]] | length' "$summary_file" 2>/dev/null || echo "0")
-
- # Count build failures
- BUILD_COUNT=$(jq '[.[] | select((.Type // null) == "build")] | length' "$summary_file" 2>/dev/null || echo "0")
-
- LEGACY_COUNT=$((LEGACY_TEST_COUNT + FUZZ_TEST_FAILURES))
- TOTAL_FAILURES=$((TOTAL_FAILURES + LEGACY_COUNT + BUILD_COUNT))
-
- echo " โข Test failures: $LEGACY_TEST_COUNT, Fuzz failures: $FUZZ_TEST_FAILURES, Build failures: $BUILD_COUNT"
- echo " โข Total from this file: $((LEGACY_COUNT + BUILD_COUNT))"
- fi
- done
- fi
- # Fallback: if still 0 but validation failed, report at least 1 failure
- if [[ $TOTAL_FAILURES -eq 0 ]]; then
- TOTAL_FAILURES=1
- echo "โ ๏ธ Using fallback failure count (validation failed but no specific failures counted)"
- else
-          echo "✅ Using legacy failure count: $TOTAL_FAILURES"
- fi
- else
- TOTAL_FAILURES=0
- fi
-
- # Check fuzz test results if enabled
- if [[ "${{ inputs.fuzz-testing-enabled }}" == "true" ]]; then
- if compgen -G "test-results/fuzz-stats-*.json" >/dev/null 2>&1; then
- echo "๐ Found fuzz test statistics files:"
- ls -la test-results/fuzz-stats-*.json
-
- for stats_file in test-results/fuzz-stats-*.json; do
- echo "๐ Checking $stats_file..."
+ TEST_LABEL=$(parse_test_label "$ARTIFACT_DIR" "$JSONL_NAME")
+
+ echo ""
+ echo "๐ Processing: $TEST_LABEL"
+
+ # Extract summary line (type: summary)
+ SUMMARY=$(grep '"type":"summary"' "$jsonl_file" 2>/dev/null | head -1 || echo "")
+
+ if [[ -n "$SUMMARY" ]]; then
+ # Parse summary data
+ STATUS=$(echo "$SUMMARY" | jq -r '.summary.status // "unknown"')
+ PASSED=$(echo "$SUMMARY" | jq -r '.summary.passed // 0')
+ FAILED=$(echo "$SUMMARY" | jq -r '.summary.failed // 0')
+ SKIPPED=$(echo "$SUMMARY" | jq -r '.summary.skipped // 0')
+ UNIQUE=$(echo "$SUMMARY" | jq -r '.summary.unique_total // 0')
+ TOTAL=$(echo "$SUMMARY" | jq -r '.summary.total // 0')
+ DURATION=$(echo "$SUMMARY" | jq -r '.summary.duration // "unknown"')
+
+ echo " โข Status: $STATUS"
+ echo " โข Passed: $PASSED"
+ echo " โข Failed: $FAILED"
+ echo " โข Skipped: $SKIPPED"
+ echo " โข Unique Tests: $UNIQUE"
+ echo " โข Test Runs: $TOTAL"
+ echo " โข Duration: $DURATION"
+
+ TOTAL_UNIQUE=$((TOTAL_UNIQUE + UNIQUE))
+ TOTAL_TESTS=$((TOTAL_TESTS + TOTAL))
+ TOTAL_SKIPPED=$((TOTAL_SKIPPED + SKIPPED))
+
+ if [[ "$STATUS" == "failed" ]] || [[ "$FAILED" -gt 0 ]]; then
+ VALIDATION_FAILED=true
+ TOTAL_FAILURES=$((TOTAL_FAILURES + FAILED))
- # Extract fuzz test results
- FUZZ_PASSED=$(jq -r '.test_passed // false' "$stats_file")
- FUZZ_EXIT_CODE=$(jq -r '.test_exit_code // empty' "$stats_file")
- FUZZ_NAME=$(jq -r '.name // empty' "$stats_file")
+ # Extract failure details
+ echo ""
+ echo " ๐จ Failures in this file:"
+ grep '"type":"failure"' "$jsonl_file" 2>/dev/null | while read -r line; do
+ TEST=$(echo "$line" | jq -r '.failure.test // "unknown"')
+ PKG=$(echo "$line" | jq -r '.failure.package // "unknown"' | sed 's|.*/||')
+ FILE=$(echo "$line" | jq -r '.failure.file // ""')
+ LINE_NUM=$(echo "$line" | jq -r '.failure.line // ""')
+ FAIL_TYPE=$(echo "$line" | jq -r '.failure.type // "test"')
+ ERROR_MSG=$(echo "$line" | jq -r '.failure.error // ""')
+
+ # Show test name with type and location
+ if [[ -n "$FILE" && -n "$LINE_NUM" && "$LINE_NUM" != "0" ]]; then
+ echo " โ [$FAIL_TYPE] $TEST ($PKG) at $FILE:$LINE_NUM"
+ else
+ echo " โ [$FAIL_TYPE] $TEST ($PKG)"
+ fi
- echo " โข Fuzz Test: $FUZZ_NAME"
- echo " โข Exit Code: $FUZZ_EXIT_CODE"
- echo " โข Passed: $FUZZ_PASSED"
+ # Show error message if available (truncated for readability)
+ if [[ -n "$ERROR_MSG" && "$ERROR_MSG" != "null" ]]; then
+ echo " โ ${ERROR_MSG:0:200}"
+ fi
+ done | head -30
+ fi
+ else
+ echo " โ ๏ธ No summary found in JSONL file"
- if [[ "$FUZZ_PASSED" == "false" ]] || [[ "$FUZZ_EXIT_CODE" != "0" ]]; then
- echo "โ Fuzz test suite '$FUZZ_NAME' failed with exit code $FUZZ_EXIT_CODE"
+ # Try to count failures directly
+ FAILURE_COUNT=$(grep -c '"type":"failure"' "$jsonl_file" 2>/dev/null || echo "0")
+ if [[ "$FAILURE_COUNT" -gt 0 ]]; then
+ echo " โข Found $FAILURE_COUNT failure entries"
VALIDATION_FAILED=true
- else
-            echo "✅ Fuzz test suite '$FUZZ_NAME' passed"
+ TOTAL_FAILURES=$((TOTAL_FAILURES + FAILURE_COUNT))
fi
+ fi
+ done < <(find ci-results/ -name "*.jsonl" -print0 2>/dev/null)
+ else
+ echo "โ ๏ธ No JSONL files found - checking for test-output.log files..."
- # Add fuzz test count to total
- FUZZ_TEST_COUNT=$(jq -r '.fuzz_test_count // 0' "$stats_file")
- echo " โข Fuzz Tests: $FUZZ_TEST_COUNT"
- TOTAL_TESTS=$((TOTAL_TESTS + FUZZ_TEST_COUNT))
- done
- else
- echo "โ ๏ธ No fuzz test statistics found (fuzz testing was enabled)"
- fi
- fi
-
- # Fix: Update TOTAL_FAILURES if validation failed but we haven't counted any failures yet
- # This handles the case where fuzz tests failed but TOTAL_FAILURES is still 0
- if [[ "$VALIDATION_FAILED" == "true" ]] && [[ $TOTAL_FAILURES -eq 0 ]]; then
- echo "๐ง Detected validation failure with 0 counted failures - recounting..."
-
- # Recount failures including fuzz test failures that were just detected
- if compgen -G "test-results/*test-failures-summary.json" >/dev/null 2>&1; then
- for summary_file in test-results/*test-failures-summary.json; do
- if [[ -s "$summary_file" ]]; then
- echo "๐ Recounting failures from: $summary_file"
-
- # Count all failure types
- RECOUNT_TEST=$(jq '[.[] | select(.failures) | .failures[]] | length' "$summary_file" 2>/dev/null || echo "0")
- RECOUNT_FUZZ=$(jq '[.[] | select((.Type // null) == "test") | (.failures // [])[]] | length' "$summary_file" 2>/dev/null || echo "0")
- RECOUNT_BUILD=$(jq '[.[] | select((.Type // null) == "build")] | length' "$summary_file" 2>/dev/null || echo "0")
-
- FILE_FAILURES=$((RECOUNT_TEST + RECOUNT_FUZZ + RECOUNT_BUILD))
- TOTAL_FAILURES=$((TOTAL_FAILURES + FILE_FAILURES))
-
- echo " โข Found $FILE_FAILURES failures in this file (test:$RECOUNT_TEST, fuzz:$RECOUNT_FUZZ, build:$RECOUNT_BUILD)"
- fi
- done
- fi
+ # Fallback: check test-output.log files for exit codes
+ while IFS= read -r -d '' log_file; do
+ echo "๐ Checking: $log_file"
- # If still no failures found but validation failed, use fallback
- if [[ $TOTAL_FAILURES -eq 0 ]]; then
- TOTAL_FAILURES=1
- echo "โ ๏ธ Using fallback count: validation failed but no structured failures found"
- else
-          echo "✅ Recount successful: found $TOTAL_FAILURES total failures"
- fi
+ # Look for FAIL indicators
+ if grep -q "^FAIL" "$log_file" 2>/dev/null || grep -q "--- FAIL:" "$log_file" 2>/dev/null; then
+ echo " โ Found test failures in log file"
+ VALIDATION_FAILED=true
+ FAIL_COUNT=$(grep -c "^--- FAIL:" "$log_file" 2>/dev/null || echo "1")
+ TOTAL_FAILURES=$((TOTAL_FAILURES + FAIL_COUNT))
+ fi
+ done < <(find ci-results/ -name "test-output.log" -print0 2>/dev/null)
fi
- # Enhanced validation summary with deduplication info
+ # Final validation result
echo ""
echo "๐ Validation Summary:"
- echo " โข Total Tests: $TOTAL_TESTS"
- echo " โข Unique Failures: $TOTAL_FAILURES"
- if [[ $RAW_FAILURES -gt 0 && $RAW_FAILURES -gt $UNIQUE_FAILURES ]]; then
- echo " โข Raw Reports: $RAW_FAILURES (eliminated $DUPLICATES_ELIMINATED duplicates)"
- fi
+ echo " โข Unique Tests: $TOTAL_UNIQUE"
+ echo " โข Test Runs: $TOTAL_TESTS"
+ echo " โข Total Failures: $TOTAL_FAILURES"
+ echo " โข Total Skipped: $TOTAL_SKIPPED"
+ echo " โข Validation Status: $(if [[ "$VALIDATION_FAILED" == "true" ]]; then echo "FAILED"; else echo "PASSED"; fi)"
- # Display detailed failure information if tests failed
if [[ "$VALIDATION_FAILED" == "true" ]]; then
echo ""
- echo "๐ Detailed Failure Analysis (Deduplicated):"
- echo "==============================================="
-
- # Use deduplicated failures if available
- if [[ -f deduplicated-failures.json ]] && [[ $(jq 'length' deduplicated-failures.json 2>/dev/null || echo "0") -gt 0 ]]; then
- echo "๐ Found deduplicated failure details:"
- DEDUP_COUNT=$(jq 'length' deduplicated-failures.json)
- echo " โข Unique failures after deduplication: $DEDUP_COUNT"
-
- # Show build failures first
- BUILD_FAILURES=$(jq -r '.[] | select(.type == "build") | " ๐จ " + (.package | split("/") | .[-1] // .[-2] // .) + ": Build compilation failed (" + (.occurrences | tostring) + " matrix jobs)"' deduplicated-failures.json 2>/dev/null)
- if [[ -n "$BUILD_FAILURES" ]]; then
- echo ""
- echo "๐จ Build Failures:"
- echo "------------------"
- echo "$BUILD_FAILURES"
-
- # Show detailed build errors with output
- jq -r '.[] | select(.type == "build") |
- "๐ฆ " + (.package | split("/") | .[-1] // .[-2] // .) + " (detected in " + (.occurrences | tostring) + " matrix jobs):" +
- "\n" + (.output // "No detailed error available")' deduplicated-failures.json 2>/dev/null | sed 's/^/ /' | head -c 2000
- fi
-
- # Show test failures with enhanced details
- TEST_FAILURES=$(jq -r '.[] | select(.type == "test" and (.test | startswith("Fuzz") | not)) |
- " ๐ฆ " + (.package | split("/") | .[-1] // .[-2] // .) + ": " + .test + " (" + (.occurrences | tostring) + " matrix jobs)"' deduplicated-failures.json 2>/dev/null)
- if [[ -n "$TEST_FAILURES" ]]; then
- echo ""
- echo "๐งช Test Failures:"
- echo "-----------------"
- echo "$TEST_FAILURES"
-
- # Show detailed test outputs
- echo ""
- echo "๐ Test Error Details:"
- echo "---------------------"
- jq -r '.[] | select(.type == "test" and (.test | startswith("Fuzz") | not)) |
- "โ " + .test + " (" + (.package | split("/") | .[-1] // .[-2] // .) + "):" +
- "\n" + (.output // "No error output available")' deduplicated-failures.json 2>/dev/null | head -c 3000
- fi
-
- # Show fuzz test failures with special formatting
- FUZZ_FAILURES=$(jq -r '.[] | select(.type == "test" and (.test | startswith("Fuzz"))) |
- " ๐ฏ " + (.package | split("/") | .[-1] // .[-2] // .) + ": " + .test + " (" + (.occurrences | tostring) + " matrix jobs)"' deduplicated-failures.json 2>/dev/null)
- if [[ -n "$FUZZ_FAILURES" ]]; then
- echo ""
- echo "๐ฏ Fuzz Test Failures:"
- echo "---------------------"
- echo "$FUZZ_FAILURES"
-
- # Show detailed fuzz test outputs with enhanced formatting
- echo ""
- echo "๐ Fuzz Test Error Details:"
- echo "--------------------------"
- jq -r '.[] | select(.type == "test" and (.test | startswith("Fuzz"))) |
- "๐ฏ " + .test + " (" + (.package | split("/") | .[-1] // .[-2] // .) + "):" +
- "\n๐ Error Output:" +
- "\n" + (.output // "No fuzz test output available") +
- "\n" +
- (if (.output | contains("Failing input written to")) then
- "\n๐ก Re-run command: " + ((.output | split("\n")[] | select(. | contains("go test -run="))) // "Check output above for re-run command")
- else "" end)' deduplicated-failures.json 2>/dev/null | head -c 4000
- fi
-
- # Fallback to old structured failure details
- elif compgen -G "test-results/*test-failures-summary.json" >/dev/null 2>&1; then
- echo "๐ Found legacy structured failure details:"
- for failure_file in test-results/*test-failures-summary.json; do
- echo ""
- echo "๐ Processing: $failure_file"
-
- # Extract and display package-level failures
- PACKAGES=$(jq -r 'length' "$failure_file" 2>/dev/null || echo "0")
- if [[ "$PACKAGES" -gt 0 ]]; then
- echo " โข Affected Packages: $PACKAGES"
-
- # Show package summary grouped by failure type
- echo "๐ฆ Package Summary:"
-
- # Show build failures first
- BUILD_PACKAGES=$(jq -r '.[] | select((.Type // null) == "build") | select(.Package | test("^[a-zA-Z0-9/_.-]+$")) | " ๐จ \(.Package | split("/") | .[-1] // .[-2] // .): Build failed"' "$failure_file" 2>/dev/null || true)
- if [[ -n "$BUILD_PACKAGES" ]]; then
- echo "$BUILD_PACKAGES"
- fi
-
- # Show regular test failures
- TEST_PACKAGES=$(jq -r '.[] | select((.Type // null) == null) | select(.Package | test("^[a-zA-Z0-9/_.-]+$")) | " ๐ฆ \(.Package | split("/") | .[-1] // .[-2] // .): \(.failures | length) test failure(s)"' "$failure_file" 2>/dev/null || true)
- if [[ -n "$TEST_PACKAGES" ]]; then
- echo "$TEST_PACKAGES"
- fi
-
- # Show fuzz test failures
- FUZZ_PACKAGES=$(jq -r '.[] | select((.Type // null) == "test") | select(.Package | test("^[a-zA-Z0-9/_.-]+$")) | " ๐ฏ \(.Package | split("/") | .[-1] // .[-2] // .): \(.failures | length) fuzz test failure(s)"' "$failure_file" 2>/dev/null || true)
- if [[ -n "$FUZZ_PACKAGES" ]]; then
- echo "$FUZZ_PACKAGES"
- fi
-
- # Show build failures in detail
- BUILD_FAILURE_COUNT=$(jq -r '[.[] | select((.Type // null) == "build")] | length' "$failure_file" 2>/dev/null || echo "0")
- if [[ "$BUILD_FAILURE_COUNT" -gt 0 ]]; then
- echo ""
- echo "๐จ Build Failures:"
- echo "------------------"
-
- jq -r '.[] | select((.Type // null) == "build") | select(.Package | test("^[a-zA-Z0-9/_.-]+$")) |
- "๐ฆ " + (.Package | split("/") | .[-1] // .[-2] // .) + ":" +
- ((.BuildErrors[]? // []) | "\n โ " + .)' "$failure_file" 2>/dev/null || true
- fi
-
- # Show regular test failures
- TEST_FAILURE_COUNT=$(jq -r '[.[] | select((.Type // null) == null)] | length' "$failure_file" 2>/dev/null || echo "0")
- if [[ "$TEST_FAILURE_COUNT" -gt 0 ]]; then
- echo ""
- echo "๐งช Test Failures:"
- echo "-----------------"
-
- # Show detailed failed tests with package context (leaf failures only) - only for test failures
- #
- # Problem: Go's nested test structure (TestA/TestB/TestC) reports failures for
- # all parent tests when a leaf test fails, causing confusing output like:
- # โ TestNetworkEdgeCases/concurrent_api_operations/concurrency_3 (integration) <- actual failure
- # โ TestNetworkEdgeCases/concurrent_api_operations (integration) <- parent (redundant)
- # โ TestNetworkEdgeCases (integration) <- parent (redundant)
- # โ (integration) <- empty (artifact)
- #
- # Solution: Extract and deduplicate to show only the actual failed leaf tests
- # This reduces "4 failures" to "1 actual failure" for better clarity.
- RAW_FAILED_TESTS=$(jq -r '.[] as $parent | select(($parent.Type // null) == null) | select($parent.Package | test("^[a-zA-Z0-9/_.-]+$")) | $parent.failures[] | .Test + " (" + ($parent.Package | split("/") | .[-1] // .[-2] // .) + ")"' "$failure_file" 2>/dev/null)
-
- # Smart filtering: Only show the most specific (deepest nested) test failures
- FAILED_TESTS=$(echo "$RAW_FAILED_TESTS" | awk '
- {
- # Skip empty lines
- if ($0 == "" || $0 ~ /^[[:space:]]*$/) next
-
- # Extract test name before package info
- if (match($0, /^([^(]*[^[:space:]]) \(.*\)$/)) {
- testname = substr($0, RSTART, RLENGTH)
- gsub(/ \(.*\)$/, "", testname)
- # Remove leading/trailing whitespace
- gsub(/^[[:space:]]+|[[:space:]]+$/, "", testname)
- # Skip if testname is empty
- if (testname == "") next
-
- # Count depth by number of "/" characters
- depth_counter = testname
- gsub(/[^\/]/, "", depth_counter)
- depth = length(depth_counter)
- tests[NR] = $0
- depths[NR] = depth
- names[NR] = testname
- }
- }
- END {
- # For each test, check if there is a more specific (deeper) version
- for (i in tests) {
- is_leaf = 1
- for (j in tests) {
- if (i != j && depths[j] > depths[i] && index(names[j], names[i]) == 1) {
- is_leaf = 0
- break
- }
- }
- if (is_leaf && names[i] != "") print tests[i]
- }
- }
- ' | head -20)
-
- if [[ -n "$FAILED_TESTS" ]]; then
- echo "$FAILED_TESTS" | sed 's/^/ โ /'
-
- # Update failure count to reflect actual unique failures
- ACTUAL_UNIQUE_FAILURES=$(echo "$FAILED_TESTS" | grep -v '^[[:space:]]*$' | wc -l)
- RAW_FAILURE_COUNT=$(echo "$RAW_FAILED_TESTS" | grep -v '^[[:space:]]*$' | wc -l)
- if [[ $RAW_FAILURE_COUNT -gt $ACTUAL_UNIQUE_FAILURES ]]; then
- echo ""
- echo " ๐ Note: Showing $ACTUAL_UNIQUE_FAILURES actual failures"
- echo " (filtered from $RAW_FAILURE_COUNT nested test hierarchy entries)"
- fi
- else
- echo " โ ๏ธ No test failures found in JSON structure"
- echo " ๐ Raw JSON content:"
- head -c 2000 "$failure_file" | sed 's/^/ /'
- fi
-
- # Show any error outputs if available from the enhanced failure details (for test failures only)
- ERROR_OUTPUTS=$(jq -r '.[] as $package | select(($package.Type // null) == null) | select($package.Package | test("^[a-zA-Z0-9/_.-]+$")) | $package.failures[] | select(.Output and .Output != "" and .Output != null) | "โ \(.Test) (\($package.Package | split("/") | .[-1] // .[-2] // .))\n\(.Output)\n"' "$failure_file" 2>/dev/null | head -c 3000)
- if [[ -n "$ERROR_OUTPUTS" ]]; then
- echo ""
- echo "๐ Test Error Messages:"
- echo "----------------------"
- echo "$ERROR_OUTPUTS"
- fi
- fi # End of regular test failures section
-
- # Show fuzz test failures
- FUZZ_FAILURE_COUNT=$(jq -r '[.[] | select((.Type // null) == "test")] | length' "$failure_file" 2>/dev/null || echo "0")
- if [[ "$FUZZ_FAILURE_COUNT" -gt 0 ]]; then
- echo ""
- echo "๐ฏ Fuzz Test Failures:"
- echo "---------------------"
-
- # Extract fuzz test failures
- FUZZ_FAILED_TESTS=$(jq -r '.[] as $parent | select(($parent.Type // null) == "test") | select($parent.Package | test("^[a-zA-Z0-9/_.-]+$")) | $parent.failures[] | .Test + " (" + ($parent.Package | split("/") | .[-1] // .[-2] // .) + ")"' "$failure_file" 2>/dev/null)
-
- if [[ -n "$FUZZ_FAILED_TESTS" ]]; then
- echo "$FUZZ_FAILED_TESTS" | sed 's/^/ ๐ฏ /'
-
- # Show detailed fuzz test error outputs with re-run commands
- FUZZ_ERROR_OUTPUTS=$(jq -r '.[] as $package | select(($package.Type // null) == "test") | select($package.Package | test("^[a-zA-Z0-9/_.-]+$")) | $package.failures[] | select(.Output and .Output != "" and .Output != null) |
- "๐ฏ \(.Test) (\($package.Package | split("/") | .[-1] // .[-2] // .)):\n๐ Error Output:\n\(.Output)" +
- (if (.Output | contains("Failing input written to")) then "\n๐ก Re-run command: Check output above for re-run command" else "" end) + "\n"' "$failure_file" 2>/dev/null | head -c 4000)
-
- if [[ -n "$FUZZ_ERROR_OUTPUTS" ]]; then
- echo ""
- echo "๐ Fuzz Test Error Details:"
- echo "--------------------------"
- echo "$FUZZ_ERROR_OUTPUTS"
- fi
- else
- echo " โ ๏ธ No fuzz test failures found in JSON structure"
- fi
- fi # End of fuzz test failures section
- else
- echo " • No structured failure data found in JSON"
- fi
- done
-
- # Fallback to simple text files if JSON not available
- elif compgen -G "test-results/*test-failures.txt" >/dev/null 2>&1; then
- echo "📋 Found text failure details:"
- for failure_file in test-results/*test-failures.txt; do
- if [[ -s "$failure_file" ]]; then
- echo ""
- echo "📋 Processing: $failure_file"
- echo "🚨 Failed Tests:"
- echo "---------------"
- head -20 "$failure_file" | while IFS= read -r line; do
- echo " ❌ $line"
- done
- fi
- done
-
- else
- echo "⚠️ No detailed failure information found in downloaded artifacts"
- echo " Available files:"
- ls -la test-results/ 2>/dev/null | head -10 || echo " No files found"
- fi
-
- echo ""
- echo "==============================================="
- if [[ $RAW_FAILURES -gt $UNIQUE_FAILURES ]]; then
- echo "❌ Test validation failed - $TOTAL_FAILURES unique failure(s) (from $RAW_FAILURES reports across matrix jobs)"
- echo "::error title=Test Validation Failed::$TOTAL_FAILURES unique test failure(s) detected (eliminated $DUPLICATES_ELIMINATED duplicates from $RAW_FAILURES total reports). Check deduplicated failure analysis above."
- else
- echo "❌ Test validation failed - $TOTAL_FAILURES test(s) failed"
- echo "::error title=Test Validation Failed::$TOTAL_FAILURES test(s) failed across all test suites. Check failure details above."
- fi
+ echo "❌ Test validation failed - $TOTAL_FAILURES failure(s) detected"
+ echo "::error title=Test Validation Failed::$TOTAL_FAILURES test failure(s) detected. Check the CI results above for details."
exit 1
else
- echo "✅ All $TOTAL_TESTS tests passed validation"
+ echo ""
+ echo "✅ All tests passed validation"
fi
# --------------------------------------------------------------------
- # Create validation summary for GitHub UI with deduplication info
+ # Create validation summary for GitHub UI
# --------------------------------------------------------------------
- name: 📊 Create validation summary
if: always()
run: |
- echo "## 📊 Test Validation Summary (With Deduplication)" >> $GITHUB_STEP_SUMMARY
+ source .github/scripts/parse-test-label.sh
+
+ echo "## 📊 Test Validation Summary" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
- # Load deduplication stats for summary
- if [[ -f deduplication-stats.json ]]; then
- RAW_COUNT=$(jq -r '.raw_failure_count // 0' deduplication-stats.json)
- UNIQUE_COUNT=$(jq -r '.unique_failure_count // 0' deduplication-stats.json)
- ELIMINATED_COUNT=$(jq -r '.duplicates_eliminated // 0' deduplication-stats.json)
-
- if [[ $RAW_COUNT -gt $UNIQUE_COUNT ]]; then
- echo "### 🎯 Deduplication Results" >> $GITHUB_STEP_SUMMARY
- echo "- **Raw Failure Reports**: $RAW_COUNT" >> $GITHUB_STEP_SUMMARY
- echo "- **Unique Failures**: $UNIQUE_COUNT" >> $GITHUB_STEP_SUMMARY
- echo "- **Duplicates Eliminated**: $ELIMINATED_COUNT" >> $GITHUB_STEP_SUMMARY
- echo "" >> $GITHUB_STEP_SUMMARY
+ # Count artifacts
+ MATRIX_JOBS=$(find ci-results/ -name "*.jsonl" 2>/dev/null | wc -l || echo "0")
+ echo "- **Matrix Jobs Validated**: $MATRIX_JOBS" >> $GITHUB_STEP_SUMMARY
+
+ # Aggregate results from JSONL files
+ TOTAL_PASSED=0
+ TOTAL_FAILED=0
+ TOTAL_SKIPPED=0
+ TOTAL_UNIQUE=0
+ TOTAL_TESTS=0
+
+ while IFS= read -r -d '' jsonl_file; do
+ SUMMARY=$(grep '"type":"summary"' "$jsonl_file" 2>/dev/null | head -1 || echo "")
+ if [[ -n "$SUMMARY" ]]; then
+ PASSED=$(echo "$SUMMARY" | jq -r '.summary.passed // 0')
+ FAILED=$(echo "$SUMMARY" | jq -r '.summary.failed // 0')
+ SKIPPED=$(echo "$SUMMARY" | jq -r '.summary.skipped // 0')
+ UNIQUE=$(echo "$SUMMARY" | jq -r '.summary.unique_total // 0')
+ TOTAL=$(echo "$SUMMARY" | jq -r '.summary.total // 0')
+ TOTAL_PASSED=$((TOTAL_PASSED + PASSED))
+ TOTAL_FAILED=$((TOTAL_FAILED + FAILED))
+ TOTAL_SKIPPED=$((TOTAL_SKIPPED + SKIPPED))
+ TOTAL_UNIQUE=$((TOTAL_UNIQUE + UNIQUE))
+ TOTAL_TESTS=$((TOTAL_TESTS + TOTAL))
fi
- fi
+ done < <(find ci-results/ -name "*.jsonl" -print0 2>/dev/null)
- # Count test suites and statistics (using reliable file counting)
- TEST_SUITE_COUNT=0
- if ls test-results/test-stats-*.json >/dev/null 2>&1; then
- TEST_SUITE_COUNT=$(ls test-results/test-stats-*.json | wc -l)
- fi
+ echo "- **Unique Tests**: $TOTAL_UNIQUE" >> $GITHUB_STEP_SUMMARY
+ echo "- **Test Runs**: $TOTAL_TESTS" >> $GITHUB_STEP_SUMMARY
+ echo "- **Passed**: $TOTAL_PASSED" >> $GITHUB_STEP_SUMMARY
+ echo "- **Failed**: $TOTAL_FAILED" >> $GITHUB_STEP_SUMMARY
+ echo "- **Skipped**: $TOTAL_SKIPPED" >> $GITHUB_STEP_SUMMARY
+ echo "- **Validation Status**: ${{ job.status }}" >> $GITHUB_STEP_SUMMARY
- FUZZ_SUITE_COUNT=0
if [[ "${{ inputs.fuzz-testing-enabled }}" == "true" ]]; then
- if ls test-results/fuzz-stats-*.json >/dev/null 2>&1; then
- FUZZ_SUITE_COUNT=$(ls test-results/fuzz-stats-*.json | wc -l)
- fi
+ echo "- **Fuzz Testing**: Enabled" >> $GITHUB_STEP_SUMMARY
fi
- # Calculate total test counts across all suites
- TOTAL_REGULAR_TESTS=0
- TOTAL_FUZZ_TESTS=0
+ echo "" >> $GITHUB_STEP_SUMMARY
- # Aggregate regular test counts
- if [[ $TEST_SUITE_COUNT -gt 0 ]]; then
- for stats_file in test-results/test-stats-*.json; do
- if [[ -f "$stats_file" ]]; then
- TEST_COUNT=$(jq -r '.test_count // 0' "$stats_file")
- TOTAL_REGULAR_TESTS=$((TOTAL_REGULAR_TESTS + TEST_COUNT))
- fi
- done
- fi
+ # Show per-job breakdown
+ if [[ $MATRIX_JOBS -gt 0 ]]; then
+ echo "### Test Matrix Results" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
- # Aggregate fuzz test counts
- if [[ $FUZZ_SUITE_COUNT -gt 0 ]]; then
- for stats_file in test-results/fuzz-stats-*.json; do
- if [[ -f "$stats_file" ]]; then
- FUZZ_COUNT=$(jq -r '.fuzz_test_count // 0' "$stats_file")
- TOTAL_FUZZ_TESTS=$((TOTAL_FUZZ_TESTS + FUZZ_COUNT))
+ while IFS= read -r -d '' jsonl_file; do
+ # Extract artifact directory name from JSONL file path
+ # Supported directory structures:
+ # 1. Expected: ci-results/ARTIFACT_NAME/.mage-x/ci-results.jsonl
+ # → Use grandparent (skip .mage-x) to get ARTIFACT_NAME
+ # 2. Fallback: ci-results/ARTIFACT_NAME/ci-results.jsonl
+ # → Use parent directory as ARTIFACT_NAME
+ ARTIFACT_DIR=$(dirname "$(dirname "$jsonl_file")" | xargs basename)
+ JSONL_NAME=$(basename "$jsonl_file")
+
+ # Detect which structure we have by checking parent directory
+ PARENT_DIR=$(basename "$(dirname "$jsonl_file")")
+ if [[ "$PARENT_DIR" != ".mage-x" ]]; then
+ # Fallback: parent is the artifact dir (not grandparent)
+ ARTIFACT_DIR=$(basename "$(dirname "$jsonl_file")")
fi
- done
- fi
- echo "- **Test Matrix Jobs**: $TEST_SUITE_COUNT" >> $GITHUB_STEP_SUMMARY
- echo "- **Total Tests**: $TOTAL_REGULAR_TESTS" >> $GITHUB_STEP_SUMMARY
- echo "- **Fuzz Test Jobs**: $FUZZ_SUITE_COUNT" >> $GITHUB_STEP_SUMMARY
- echo "- **Total Fuzz Tests**: $TOTAL_FUZZ_TESTS" >> $GITHUB_STEP_SUMMARY
- echo "- **Validation Status**: ${{ job.status }}" >> $GITHUB_STEP_SUMMARY
- echo "" >> $GITHUB_STEP_SUMMARY
+ TEST_LABEL=$(parse_test_label "$ARTIFACT_DIR" "$JSONL_NAME")
- # Show per-suite breakdown if statistics available
- if [[ $TEST_SUITE_COUNT -gt 0 ]]; then
- echo "### Test Suite Breakdown" >> $GITHUB_STEP_SUMMARY
- echo "" >> $GITHUB_STEP_SUMMARY
+ SUMMARY=$(grep '"type":"summary"' "$jsonl_file" 2>/dev/null | head -1 || echo "")
- for stats_file in test-results/test-stats-*.json; do
- if [[ -f "$stats_file" ]]; then
- TEST_NAME=$(jq -r '.name // "Unknown"' "$stats_file")
- TEST_PASSED=$(jq -r '.test_passed // false' "$stats_file")
- TEST_COUNT=$(jq -r '.test_count // 0' "$stats_file")
- FAILURES=$(jq -r '.total_failures // 0' "$stats_file")
+ if [[ -n "$SUMMARY" ]]; then
+ STATUS=$(echo "$SUMMARY" | jq -r '.summary.status // "unknown"')
+ PASSED=$(echo "$SUMMARY" | jq -r '.summary.passed // 0')
+ FAILED=$(echo "$SUMMARY" | jq -r '.summary.failed // 0')
- if [[ "$TEST_PASSED" == "true" ]]; then
- echo "- ✅ **$TEST_NAME**: $TEST_COUNT tests passed" >> $GITHUB_STEP_SUMMARY
+ if [[ "$STATUS" == "passed" ]]; then
+ echo "- ✅ **$TEST_LABEL**: $PASSED tests passed" >> $GITHUB_STEP_SUMMARY
else
- echo "- ❌ **$TEST_NAME**: $FAILURES/$TEST_COUNT tests failed" >> $GITHUB_STEP_SUMMARY
+ echo "- ❌ **$TEST_LABEL**: $FAILED failures" >> $GITHUB_STEP_SUMMARY
fi
fi
- done
+ done < <(find ci-results/ -name "*.jsonl" -print0 2>/dev/null)
fi
- if [[ $FUZZ_SUITE_COUNT -gt 0 ]]; then
+ # Add detailed failure section if there are failures
+ if [[ $TOTAL_FAILED -gt 0 ]]; then
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "### 🚨 Failure Details" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
- echo "### Fuzz Test Breakdown" >> $GITHUB_STEP_SUMMARY
+ echo "_Expand each failure to see full output and stack traces_" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
- for stats_file in test-results/fuzz-stats-*.json; do
- if [[ -f "$stats_file" ]]; then
- FUZZ_NAME=$(jq -r '.name // "Unknown"' "$stats_file")
- FUZZ_PASSED=$(jq -r '.test_passed // false' "$stats_file")
- FUZZ_COUNT=$(jq -r '.fuzz_test_count // 0' "$stats_file")
+ FAILURE_COUNT=0
+ while IFS= read -r -d '' jsonl_file; do
+ while read -r line; do
+ # Limit total failures shown
+ FAILURE_COUNT=$((FAILURE_COUNT + 1))
+ if [[ $FAILURE_COUNT -gt 20 ]]; then
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "_... additional failures truncated_" >> $GITHUB_STEP_SUMMARY
+ break 2
+ fi
- if [[ "$FUZZ_PASSED" == "true" ]]; then
- echo "- ✅ **$FUZZ_NAME**: $FUZZ_COUNT fuzz tests passed" >> $GITHUB_STEP_SUMMARY
- else
- echo "- ❌ **$FUZZ_NAME**: Fuzz tests failed" >> $GITHUB_STEP_SUMMARY
+ TEST=$(echo "$line" | jq -r '.failure.test // "unknown"')
+ PKG=$(echo "$line" | jq -r '.failure.package // "unknown"' | sed 's|.*/||')
+ FAIL_TYPE=$(echo "$line" | jq -r '.failure.type // "test"')
+ ERROR_MSG=$(echo "$line" | jq -r '.failure.error // ""')
+ OUTPUT=$(echo "$line" | jq -r '.failure.output // ""')
+ STACK=$(echo "$line" | jq -r '.failure.stack // ""')
+
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "<details><summary>❌ $TEST ($PKG) - $FAIL_TYPE</summary>" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+
+ if [[ -n "$ERROR_MSG" && "$ERROR_MSG" != "null" ]]; then
+ echo "**Error:** \`$ERROR_MSG\`" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
fi
- fi
- done
+
+ if [[ -n "$OUTPUT" && "$OUTPUT" != "null" && "$OUTPUT" != "" ]]; then
+ echo "**Output:**" >> $GITHUB_STEP_SUMMARY
+ echo '```' >> $GITHUB_STEP_SUMMARY
+ # Truncate output to avoid massive summaries
+ echo "${OUTPUT:0:2000}" >> $GITHUB_STEP_SUMMARY
+ if [[ ${#OUTPUT} -gt 2000 ]]; then
+ echo "... (truncated)" >> $GITHUB_STEP_SUMMARY
+ fi
+ echo '```' >> $GITHUB_STEP_SUMMARY
+ fi
+
+ if [[ -n "$STACK" && "$STACK" != "null" && "$STACK" != "" ]]; then
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "**Stack Trace:**" >> $GITHUB_STEP_SUMMARY
+ echo '```' >> $GITHUB_STEP_SUMMARY
+ echo "${STACK:0:1500}" >> $GITHUB_STEP_SUMMARY
+ if [[ ${#STACK} -gt 1500 ]]; then
+ echo "... (truncated)" >> $GITHUB_STEP_SUMMARY
+ fi
+ echo '```' >> $GITHUB_STEP_SUMMARY
+ fi
+
+ # For fuzz tests, show fuzz-specific info
+ FUZZ_INFO=$(echo "$line" | jq -r '.failure.fuzz_info // null')
+ if [[ "$FUZZ_INFO" != "null" && -n "$FUZZ_INFO" ]]; then
+ CORPUS=$(echo "$FUZZ_INFO" | jq -r '.corpus_path // ""')
+ if [[ -n "$CORPUS" && "$CORPUS" != "null" ]]; then
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "**Fuzz Corpus:** \`$CORPUS\`" >> $GITHUB_STEP_SUMMARY
+ fi
+ fi
+
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "</details>" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ done < <(grep '"type":"failure"' "$jsonl_file" 2>/dev/null)
+ done < <(find ci-results/ -name "*.jsonl" -print0 2>/dev/null)
fi
# --------------------------------------------------------------------
- # Upload validation artifacts for completion report
+ # Upload validation artifacts
# --------------------------------------------------------------------
- name: 📤 Upload validation summary
if: always()
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
with:
name: validation-summary
- path: |
- test-results/
- deduplicated-failures.json
- deduplication-stats.json
+ path: ci-results/
retention-days: 1
if-no-files-found: ignore
diff --git a/.github/workflows/fortress.yml b/.github/workflows/fortress.yml
index c889802..e5fd588 100644
--- a/.github/workflows/fortress.yml
+++ b/.github/workflows/fortress.yml
@@ -1,7 +1,7 @@
# ------------------------------------------------------------------------------------
# 🏰 GoFortress - Enterprise-grade CI/CD fortress for Go applications
#
-# Version: 1.2.0 | Released: 2025-11-20
+# Version: 1.3.0 | Released: 2025-12-14
#
# Built Strong. Tested Harder.
#
@@ -186,50 +186,50 @@ jobs:
ossi-token: ${{ secrets.OSSI_TOKEN }}
ossi-username: ${{ secrets.OSSI_USERNAME }}
# ----------------------------------------------------------------------------------
- # Code Quality Checks (FORK-SAFE: No secrets required)
+ # Pre-commit Checks (FORK-SAFE: No secrets required)
# ----------------------------------------------------------------------------------
- code-quality:
- name: 🔍 Code Quality
+ pre-commit:
+ name: 🪝 Pre-commit Checks
needs: [load-env, setup, test-magex, warm-cache]
if: |
!cancelled() &&
needs.setup.result == 'success' &&
needs.test-magex.result == 'success' &&
- (needs.warm-cache.result == 'success' || needs.warm-cache.result == 'skipped')
+ (needs.warm-cache.result == 'success' || needs.warm-cache.result == 'skipped') &&
+ needs.setup.outputs.pre-commit-enabled == 'true'
permissions:
- contents: read # Read repository content for code quality checks
- uses: ./.github/workflows/fortress-code-quality.yml
+ contents: read # Read repository content for pre-commit checks
+ uses: ./.github/workflows/fortress-pre-commit.yml
with:
env-json: ${{ needs.load-env.outputs.env-json }}
- go-primary-version: ${{ needs.setup.outputs.go-primary-version }}
- go-lint-enabled: ${{ needs.setup.outputs.go-lint-enabled }}
- yaml-lint-enabled: ${{ needs.setup.outputs.yaml-lint-enabled }}
primary-runner: ${{ needs.setup.outputs.primary-runner }}
- static-analysis-enabled: ${{ needs.setup.outputs.static-analysis-enabled }}
+ go-primary-version: ${{ needs.setup.outputs.go-primary-version }}
+ pre-commit-enabled: ${{ needs.setup.outputs.pre-commit-enabled }}
go-sum-file: ${{ needs.setup.outputs.go-sum-file }}
- secrets:
- github-token: ${{ secrets.GH_PAT_TOKEN != '' && secrets.GH_PAT_TOKEN || secrets.GITHUB_TOKEN }}
# ----------------------------------------------------------------------------------
- # Pre-commit Checks (FORK-SAFE: No secrets required)
+ # Code Quality Checks (FORK-SAFE: No secrets required)
# ----------------------------------------------------------------------------------
- pre-commit:
- name: 🪝 Pre-commit Checks
+ code-quality:
+ name: 🔍 Code Quality
needs: [load-env, setup, test-magex, warm-cache]
if: |
!cancelled() &&
needs.setup.result == 'success' &&
needs.test-magex.result == 'success' &&
- (needs.warm-cache.result == 'success' || needs.warm-cache.result == 'skipped') &&
- needs.setup.outputs.pre-commit-enabled == 'true'
+ (needs.warm-cache.result == 'success' || needs.warm-cache.result == 'skipped')
permissions:
- contents: read # Read repository content for pre-commit checks
- uses: ./.github/workflows/fortress-pre-commit.yml
+ contents: read # Read repository content for code quality checks
+ uses: ./.github/workflows/fortress-code-quality.yml
with:
env-json: ${{ needs.load-env.outputs.env-json }}
- primary-runner: ${{ needs.setup.outputs.primary-runner }}
go-primary-version: ${{ needs.setup.outputs.go-primary-version }}
- pre-commit-enabled: ${{ needs.setup.outputs.pre-commit-enabled }}
+ go-lint-enabled: ${{ needs.setup.outputs.go-lint-enabled }}
+ yaml-lint-enabled: ${{ needs.setup.outputs.yaml-lint-enabled }}
+ primary-runner: ${{ needs.setup.outputs.primary-runner }}
+ static-analysis-enabled: ${{ needs.setup.outputs.static-analysis-enabled }}
go-sum-file: ${{ needs.setup.outputs.go-sum-file }}
+ secrets:
+ github-token: ${{ secrets.GH_PAT_TOKEN != '' && secrets.GH_PAT_TOKEN || secrets.GITHUB_TOKEN }}
# ----------------------------------------------------------------------------------
# Test Suite (FORK-UNSAFE: Requires CODECOV_TOKEN for coverage - skipped on fork PRs)
# ----------------------------------------------------------------------------------