diff --git a/.github/scripts/mcdc-analyze.sh b/.github/scripts/mcdc-analyze.sh new file mode 100644 index 000000000..721677d1d --- /dev/null +++ b/.github/scripts/mcdc-analyze.sh @@ -0,0 +1,142 @@ +#!/bin/bash + +# Redirect all echo outputs to mcdc_results.txt and capture gcov output +exec > >(tee -a mcdc_results.txt) 2>&1 + +# Pass the test modules after running unit tests +# Ex. echo "MODULES=$(grep -oP 'Test #\d+: \K[\w\-\_]+' test_results.txt | tr '\n' ' ' | sed 's/ $//')" >> $GITHUB_ENV +if [ -n "$MODULES" ]; then + modules="$MODULES" + echo "Test modules provided: " + for module in $modules; do + echo "$module" + done +else + echo "No test modules provided." + exit 1 +fi + +# Initialize overall counters +overall_total_functions=0 +overall_total_covered_functions=0 +overall_file_count=0 +overall_no_conditions_count=0 +module_count=0 + + +# Show coverage for each file in a module and summary coverage for each module +for module in $modules; do + module_name=$(basename "$module") + + # Skip specific files and directories + if [[ "$module_name" == "core-cpu1" || \ + "$module_name" == "Makefile" || \ + "$module_name" == "CTestTestfile" || \ + "$module_name" == "cmake_install" || \ + "$module_name" == "gmon" || \ + "$module_name" == *"stubs"* ]]; then + continue + fi + + module_name_no_testrunner=$(echo "$module_name" | sed 's/-testrunner$//') + + echo " " + echo "Processing $module_name_no_testrunner module..." + + # Initialize module-level counters + total_functions=0 + total_covered_functions=0 + file_count=0 + no_conditions_count=0 + + module_dirs="" + + if [ -n "$BASE_DIR" ]; then + # If BASE_DIR is provided, search within the BASE_DIR for the module directories. + # FIX, module dirs doesn't always show + module_dirs=$(find "$BASE_DIR" -type d -name "*${module_name}*") + echo "Base directory specified: $BASE_DIR" + echo "Searching for .gcda directory..." + else + # Otherwise, look for the default module directories. 
+ module_dirs=$(find "build/native/default_cpu1" -type d -name "*${module_name}*.dir") + echo "No base directory provided: Searching for .gcda directory..." + fi + + if [ -n "$module_dirs" ]; then + for module_dir in $module_dirs; do + echo "Found module directory: $module_dir" + + parent_dir=$(dirname "$module_dir") + echo "Searching for .gcda files under parent directory: $parent_dir..." + gcda_files=$(find "$parent_dir" -type d -name "*${module_name_no_testrunner}*.dir" -exec find {} -type f -name "*.gcda" \;) + + if [ -n "$gcda_files" ]; then + for gcda_file in $gcda_files; do + c_file=$(echo "$gcda_file" | sed 's/\.gcda$/.c/') + + echo "Processing corresponding .c file: $c_file" + echo "Running gcov on $c_file..." + + # Capture gcov output and remove header files + gcov_output=$(gcov -abcgi "$c_file" | sed "/\.h/,/^$/d") + + # Output the gcov result of each file and save to mcdc_results.txt + echo "$gcov_output" | tee -a mcdc_results.txt + + # Process gcov results for coverage summary + while IFS= read -r line; do + if [[ $line == *"Condition outcomes covered:"* ]]; then + condition_covered=$(echo "$line" | grep -oP 'Condition outcomes covered:\K[0-9.]+') + total_conditions_in_file=$(echo "$line" | grep -oP 'of \K[0-9]+') + + covered_functions_in_file=$(awk -v pct="$condition_covered" -v total="$total_conditions_in_file" 'BEGIN {printf "%.2f", (pct / 100) * total}') + + total_functions=$((total_functions + total_conditions_in_file)) + total_covered_functions=$(awk -v covered="$total_covered_functions" -v new_covered="$covered_functions_in_file" 'BEGIN {printf "%.2f", covered + new_covered}') + + file_count=$((file_count + 1)) + elif [[ $line == *"No conditions"* ]]; then + no_conditions_count=$((no_conditions_count + 1)) + fi + done <<< "$gcov_output" + done + else + echo "No .gcda files found for $module_name under parent directory $parent_dir." + fi + done + else + echo "Directory for module $module_name \(e.g., ${module_name}.dir\) not found." 
+ fi + + if [ "$total_functions" -ne 0 ]; then + average_condition_coverage=$(awk -v covered="$total_covered_functions" -v total="$total_functions" 'BEGIN {printf "%.2f", (covered / total) * 100}') + else + average_condition_coverage=0 + fi + + overall_total_functions=$((overall_total_functions + total_functions)) + overall_total_covered_functions=$(awk -v covered="$overall_total_covered_functions" -v new_covered="$total_covered_functions" 'BEGIN {printf "%.2f", covered + new_covered}') + overall_file_count=$((overall_file_count + file_count)) + overall_no_conditions_count=$((overall_no_conditions_count + no_conditions_count)) + + module_count=$((module_count + 1)) + + echo "Summary for $module_name_no_testrunner module:" + echo " Total files processed: $file_count" + echo " Number of files with no condition data: $no_conditions_count" + echo " Condition outcomes covered: ${average_condition_coverage}% of $total_functions" + echo " " +done + +if [ "$overall_total_functions" -ne 0 ]; then + overall_condition_coverage=$(awk -v covered="$overall_total_covered_functions" -v total="$overall_total_functions" 'BEGIN {printf "%.2f", (covered / total) * 100}') +else + overall_condition_coverage=0 +fi + +echo " " +echo "Overall summary:" +echo " Total files processed: $overall_file_count" +echo " Number of files with no condition data: $overall_no_conditions_count" +echo " Overall condition outcomes covered: ${overall_condition_coverage}% of $overall_total_functions" \ No newline at end of file diff --git a/.github/scripts/mcdc-compare.sh b/.github/scripts/mcdc-compare.sh new file mode 100644 index 000000000..548012b61 --- /dev/null +++ b/.github/scripts/mcdc-compare.sh @@ -0,0 +1,203 @@ +#!/bin/bash + +exec > >(tee -a mcdc_compare.txt) 2>&1 + +# Function to check if a file exists and return an error message for missing files +check_file_exists() { + file=$1 + if [ ! -f "$file" ]; then + echo "Error: File '$file' does not exist." 
+ missing_files=true + fi +} + +# Function to extract the relevant numbers from a module's "Summary for module" section +extract_module_numbers() { + file=$1 + module=$2 + + total_files_processed=$(sed -n "/^Summary for ${module} module:/,/^$/p" "$file" | head -n 4 | grep -Po 'Total files processed:\s*\K\d*') + no_condition_data=$(sed -n "/^Summary for ${module} module:/,/^$/p" "$file" | head -n 4 | grep -Po 'Number of files with no condition data:\s*\K\d+') + condition_outcomes_covered_percent=$(sed -n "/^Summary for ${module} module:/,/^$/p" "$file" | head -n 4 | grep -Po 'Condition outcomes covered:\s*\K[0-9]+(\.[0-9]+)?') + condition_outcomes_out_of=$(sed -n "/^Summary for ${module} module:/,/^$/p" "$file" | head -n 4 | grep -Po 'Condition outcomes covered:.*of\s*\K\d*') + + echo "$total_files_processed $no_condition_data $condition_outcomes_covered_percent $condition_outcomes_out_of" +} + +# Compare results for each module between two files +compare_mcdc_results() { + main_results_file=$1 + pr_results_file=$2 + modules_file=$3 + + # Initialize a flag to track if any files are missing + missing_files=false + + # Check if the files exist before proceeding + check_file_exists "$main_results_file" + check_file_exists "$pr_results_file" + check_file_exists "$modules_file" + + # If any files are missing, exit early + if [ "$missing_files" = true ]; then + echo "Error: One or more input files are missing. Exiting." 
+ exit 1 + fi + + # Read modules from modules.txt (passed as argument) + modules=$(cat "$modules_file") + + # Check if modules are empty or not + if [ -z "$modules" ]; then + echo "Error: No modules found in $modules_file" + exit 1 + fi + + # Initialize variables to store the output for modules with and without changes + modules_with_changes="" + modules_without_changes="" + + # Loop through all modules to compare each one + for module in $modules; do + + # Extract numbers for the main results file and PR results file for the current module + read main_total_files main_no_condition main_condition_covered_percent main_condition_out_of <<< $(extract_module_numbers "$main_results_file" "$module") + read pr_total_files pr_no_condition pr_condition_covered_percent pr_condition_out_of <<< $(extract_module_numbers "$pr_results_file" "$module") + + # Echo numbers extracted from each file for each module + echo -e "\nResults for module: $module" + echo "PR Branch - Total files processed: $pr_total_files, No condition data: $pr_no_condition, Covered condition %: $pr_condition_covered_percent%, Out of value: $pr_condition_out_of" + echo "Main Branch - Total files processed: $main_total_files, No condition data: $main_no_condition, Covered condition %: $main_condition_covered_percent%, Out of value: $main_condition_out_of" + + # Initialize variables to store differences + total_files_diff="" + no_condition_data_diff="" + condition_outcomes_covered_diff_percent="" + condition_outcomes_out_of_diff="" + + # Calculate difference between files + total_files_diff=$((pr_total_files - main_total_files)) + no_condition_data_diff=$((pr_no_condition - main_no_condition)) + condition_outcomes_covered_diff_percent=$(echo "$pr_condition_covered_percent - $main_condition_covered_percent" | bc) + condition_outcomes_out_of_diff=$((pr_condition_out_of - main_condition_out_of)) + + echo "Differences:" + echo " Total files processed difference: $total_files_diff" + echo " No condition data 
difference: $no_condition_data_diff" + echo " Covered condition % difference: $condition_outcomes_covered_diff_percent" + echo " Out of value difference: $condition_outcomes_out_of_diff" + echo " " + + changes="" + + if [ "$total_files_diff" -gt 0 ]; then + changes="${changes} Number of files processed: +$total_files_diff\n" + elif [ "$total_files_diff" -lt 0 ]; then + changes="${changes} Number of files processed: $total_files_diff\n" + fi + + if [ "$no_condition_data_diff" -gt 0 ]; then + changes="${changes} Number of files with no condition data: +$no_condition_data_diff\n" + elif [ "$no_condition_data_diff" -lt 0 ]; then + changes="${changes} Number of files with no condition data: $no_condition_data_diff\n" + fi + + if [ $(echo "$condition_outcomes_covered_diff_percent > 0" | bc) -eq 1 ]; then + changes="${changes} Percentage of covered conditions: +$condition_outcomes_covered_diff_percent%\n" + elif [ $(echo "$condition_outcomes_covered_diff_percent < 0" | bc) -eq 1 ]; then + changes="${changes} Percentage of covered conditions: $condition_outcomes_covered_diff_percent%\n" + fi + + if [ "$condition_outcomes_out_of_diff" -gt 0 ]; then + changes="${changes} Number of conditions: +$condition_outcomes_out_of_diff\n" + elif [ "$condition_outcomes_out_of_diff" -lt 0 ]; then + changes="${changes} Number of conditions: $condition_outcomes_out_of_diff\n" + fi + + if [ -n "$changes" ]; then + modules_with_changes="${modules_with_changes} $module\n$changes\n" + else + modules_without_changes="${modules_without_changes} $module\n" + fi + done + + echo " " + echo "MC/DC results compared to latest dev branch:" + echo " " + echo "Modules with changes:" + echo -e "$modules_with_changes" + echo "Modules without changes:" + echo -e "$modules_without_changes" + + # Write results to mcdc_comment.txt / pull request + if [ -n "$modules_with_changes" ]; then + echo "MC/DC results compared to latest dev branch:" > mcdc_comment.txt + echo "" >> mcdc_comment.txt + echo "Modules with 
changes:" >> mcdc_comment.txt + echo -e "$modules_with_changes" >> mcdc_comment.txt + echo "" >> mcdc_comment.txt + echo "See file uncovered.json for more details" + else + echo "No MC/DC changes were made." > mcdc_comment.txt + fi + +} + +# creates single json file that contains info on all uncovered branches +generate_json_report() { + jq_script=$(find $GITHUB_WORKSPACE -name "uncovered_filter.jq" | tail -n 1) + if [ -z "$jq_script" ]; then + echo "Error: Could not find uncovered_filter.jq" + return 1 + fi + + for zipped_file in *.gcov.json.gz; do + if [ -f "$zipped_file" ]; then + base_name="${zipped_file%.gcov.json.gz}" + gunzip -c "$zipped_file" > "${base_name}.json" + if [ -f "${base_name}.json" ]; then + jq -f "$jq_script" "${base_name}.json" > "${base_name}_filtered.json" + else + echo "Error: Failed to decompress $zipped_file" + return 1 + fi + else + echo "Warning: No .gcov.json.gz files found" + return 0 + fi + done + + if ls *_filtered.json 1> /dev/null 2>&1; then + jq -s '.' *_filtered.json > uncovered.json + echo "Successfully created uncovered.json" + else + echo "No filtered JSON files found to merge" + return 1 + fi + + if jq 'flatten' uncovered.json > temp.json; then + mv temp.json uncovered.json + else + rm -f temp.json + echo "Error processing JSON file" + exit 1 + fi + + if jq '.' 
uncovered.json > temp.json; then + mv temp.json uncovered.json + else + rm -f temp.json + echo "Error processing JSON file" + exit 1 + fi +} + +# Check the script arguments +if [ $# -ne 3 ]; then + echo "Usage: $0 " + exit 1 +fi + +# Run the comparison function with the provided arguments +generate_json_report +compare_mcdc_results "$1" "$2" "$3" \ No newline at end of file diff --git a/.github/workflows/add-to-project-reusable.yml b/.github/workflows/add-to-project-reusable.yml new file mode 100644 index 000000000..692fb13e5 --- /dev/null +++ b/.github/workflows/add-to-project-reusable.yml @@ -0,0 +1,404 @@ +name: Add Issues or PRs to Project Reusable Workflow + +on: + workflow_call: + inputs: + project-url: + description: 'URL of the GitHub project to add items to' + required: false + type: string + # Default project is NASA cFS Development + # https://github.com/orgs/nasa/projects/72 + # For testing purposes use personal project + default: 'https://github.com/users/arielswalker/projects/3' + +jobs: + add-to-project: + name: Add issue or pull request to project + runs-on: ubuntu-latest + steps: + - uses: actions/add-to-project@v1.0.2 + with: + project-url: ${{ inputs.project-url }} + github-token: ${{ secrets.ADD_TO_PROJECT_PAT }} + + - name: Add pull request to current sprint as to do + uses: actions/github-script@v7 + # Testing purposes changed pull_request_target to pull_request + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.draft == false }} + env: + PROJECT_URL: ${{ inputs.project-url }} + with: + github-token: ${{ secrets.ADD_TO_PROJECT_PAT }} + script: | + const projectUrl = process.env.PROJECT_URL; + const pr = context.payload.pull_request; + const prId = pr.node_id; + + // Extract owner type (users/orgs), owner name, and project number + const urlMatch = projectUrl.match(/(orgs|users)\/([^\/]+)\/projects\/(\d+)/); + if (!urlMatch) { + core.setFailed(`Invalid project URL format: ${projectUrl}`); + return; + } + + const 
ownerType = urlMatch[1]; // 'orgs' or 'users' + const ownerName = urlMatch[2]; // 'arielswalker' + const projectNumber = parseInt(urlMatch[3]); // 3 + + // Determine the correct GraphQL entity to query + const queryEntity = ownerType === 'orgs' ? 'organization' : 'user'; + + // Get project ID and field configurations dynamically + const projectData = await github.graphql(` + query($owner: String!, $number: Int!) { + ${queryEntity}(login: $owner) { + projectV2(number: $number) { + id + title + sprintField: field(name: "Sprint") { + ... on ProjectV2IterationField { + id + name + configuration { + iterations { + id + title + startDate + duration + } + completedIterations { + id + title + startDate + duration + } + } + } + } + statusField: field(name: "Status") { + ... on ProjectV2SingleSelectField { + id + name + options { + id + name + } + } + } + } + } + } + `, { + owner: ownerName, + number: projectNumber + }); + + // Dynamically check the queryEntity (user or organization) + const ownerData = projectData[queryEntity]; + + if (!ownerData || !ownerData.projectV2) { + core.setFailed(`Project #${projectNumber} not found for ${queryEntity} ${ownerName}`); + return; + } + + const project = ownerData.projectV2; + const projectId = project.id; + const sprintField = project.sprintField; + const statusField = project.statusField; + + if (!sprintField) { + core.setFailed(`Sprint field not found in project`); + return; + } + + if (!statusField) { + core.setFailed(`Status field not found in project`); + return; + } + + // Find the Todo option in Status field + const todoOption = statusField.options.find(opt => opt.name === 'Todo'); + if (!todoOption) { + core.setFailed(`Todo option not found in Status field`); + return; + } + + // Combine active and completed iterations + const allIterations = [ + ...(sprintField.configuration.iterations || []), + ...(sprintField.configuration.completedIterations || []) + ]; + + if (allIterations.length === 0) { + core.setFailed('No iterations 
found in Sprint field'); + return; + } + + // Find current iteration + const today = new Date(); + const currentIteration = allIterations.find(iter => { + const start = new Date(iter.startDate); + const end = new Date(start); + end.setDate(start.getDate() + iter.duration); + return today >= start && today <= end; + }); + + if (!currentIteration) { + core.setFailed("No current iteration found."); + return; + } + + // Add PR to the project + const addItem = await github.graphql(` + mutation($projectId: ID!, $contentId: ID!) { + addProjectV2ItemById(input: { + projectId: $projectId, + contentId: $contentId + }) { + item { + id + } + } + } + `, { + projectId, + contentId: prId + }); + + const itemId = addItem.addProjectV2ItemById.item.id; + + console.log(`PR added to project with item ID: ${itemId}`); + + // Set iteration field to current iteration + await github.graphql(` + mutation($projectId: ID!, $itemId: ID!, $fieldId: ID!, $iterationId: String!) { + updateProjectV2ItemFieldValue(input: { + projectId: $projectId, + itemId: $itemId, + fieldId: $fieldId, + value: { + iterationId: $iterationId + } + }) { + projectV2Item { + id + } + } + } + `, { + projectId, + itemId, + fieldId: sprintField.id, + iterationId: currentIteration.id + }); + + console.log(`Set PR to iteration: ${currentIteration.title}`); + + // Set status to "Todo" + await github.graphql(` + mutation($projectId: ID!, $itemId: ID!, $fieldId: ID!, $optionId: String!) 
{ + updateProjectV2ItemFieldValue(input: { + projectId: $projectId, + itemId: $itemId, + fieldId: $fieldId, + value: { + singleSelectOptionId: $optionId + } + }) { + projectV2Item { + id + } + } + } + `, { + projectId, + itemId, + fieldId: statusField.id, + optionId: todoOption.id + }); + + console.log("Set PR status to Todo"); + + remove-draft-pr-from-sprint: + # Testing purposes changed pull_request_target to pull_request + if: ${{ github.event_name == 'pull_request' && github.event.action == 'converted_to_draft' }} + name: Remove draft PR from current sprint + runs-on: ubuntu-latest + steps: + - name: Remove draft PR from sprint iteration + uses: actions/github-script@v7 + env: + PROJECT_URL: ${{ inputs.project-url }} + with: + github-token: ${{ secrets.ADD_TO_PROJECT_PAT }} + script: | + const projectUrl = process.env.PROJECT_URL; + const pr = context.payload.pull_request; + const prNodeId = pr.node_id; + + console.log(`PR #${pr.number} is now draft, checking if it needs to be removed from sprint`); + + // Extract owner type (users/orgs), owner name, and project number from URL + const urlMatch = projectUrl.match(/(orgs|users)\/([^\/]+)\/projects\/(\d+)/); + if (!urlMatch) { + core.setFailed(`Invalid project URL format: ${projectUrl}`); + return; + } + + const ownerType = urlMatch[1]; // 'orgs' or 'users' + const ownerName = urlMatch[2]; + const projectNumber = parseInt(urlMatch[3]); + + // Determine the correct GraphQL entity to query + const queryEntity = ownerType === 'orgs' ? 'organization' : 'user'; + + // Get project ID and Sprint field ID dynamically + const projectData = await github.graphql(` + query($owner: String!, $number: Int!) { + ${queryEntity}(login: $owner) { + projectV2(number: $number) { + id + sprintField: field(name: "Sprint") { + ... 
on ProjectV2IterationField { + id + } + } + } + } + } + `, { + owner: ownerName, + number: projectNumber + }); + + // Check if data was found + const ownerData = projectData[queryEntity]; + + if (!ownerData || !ownerData.projectV2) { + core.setFailed(`Project #${projectNumber} not found for ${queryEntity} ${ownerName}`); + return; + } + + const projectId = ownerData.projectV2.id; + + if (!ownerData.projectV2.sprintField) { + core.setFailed(`Sprint field not found in project`); + return; + } + + const iterationFieldId = ownerData.projectV2.sprintField.id; + + try { + // Find the PR in the project + let hasNextPage = true; + let cursor = null; + let foundPrItem = null; + + while (hasNextPage && !foundPrItem) { + const result = await github.graphql(` + query($projectId: ID!, $cursor: String) { + node(id: $projectId) { + ... on ProjectV2 { + items(first: 100, after: $cursor) { + pageInfo { + hasNextPage + endCursor + } + nodes { + id + content { + ... on PullRequest { + id + number + repository { + name + owner { + login + } + } + } + } + fieldValues(first: 20) { + nodes { + ... on ProjectV2ItemFieldIterationValue { + field { + ... 
on ProjectV2IterationField { + id + } + } + iterationId + title + } + } + } + } + } + } + } + } + `, { + projectId, + cursor + }); + + const items = result.node.items.nodes; + console.log(`Checking batch of ${items.length} items for PR #${pr.number}`); + + // Look for the PR + foundPrItem = items.find(item => + item.content && + item.content.id === prNodeId + ); + + if (foundPrItem) { + console.log(`Found PR #${pr.number} in project items`); + break; + } + + hasNextPage = result.node.items.pageInfo.hasNextPage; + cursor = result.node.items.pageInfo.endCursor; + } + + if (!foundPrItem) { + console.log(`PR #${pr.number} not found in project items - nothing to update`); + return; + } + + // Check if PR is assigned to an iteration + const iterationField = foundPrItem.fieldValues.nodes.find( + value => value.field && value.field.id === iterationFieldId + ); + + if (!iterationField || !iterationField.iterationId) { + console.log(`PR #${pr.number} is not assigned to any sprint iteration`); + return; + } + + console.log(`PR #${pr.number} is currently assigned to iteration: ${iterationField.title}`); + + // Clear the iteration field (set to null) + await github.graphql(` + mutation($projectId: ID!, $itemId: ID!, $fieldId: ID!) 
{ + clearProjectV2ItemFieldValue(input: { + projectId: $projectId, + itemId: $itemId, + fieldId: $fieldId + }) { + projectV2Item { + id + } + } + } + `, { + projectId, + itemId: foundPrItem.id, + fieldId: iterationFieldId + }); + + console.log(`Successfully removed PR #${pr.number} from sprint iteration: ${iterationField.title}`); + + } catch (error) { + console.log(`Error removing PR from sprint:`, error.message); + console.log(`Details:`, JSON.stringify(error, null, 2)); + } \ No newline at end of file diff --git a/.github/workflows/add-to-project.yml b/.github/workflows/add-to-project.yml new file mode 100644 index 000000000..84bfa9660 --- /dev/null +++ b/.github/workflows/add-to-project.yml @@ -0,0 +1,17 @@ +name: Add Issue or PR to Project + +on: + issues: + types: [opened] + # Testing purpose changed pull_request_target to pull_request + # pull_request_target trigger always runs the workflow using the code that exists in the target (base) branch + pull_request: + types: [opened, ready_for_review, converted_to_draft] + +jobs: + add-to-project: + name: Add issue or pull request to project + # do not run on integration candidate branches + if: ${{ !startsWith(github.head_ref != '' && github.head_ref || github.ref_name, 'ic-') }} + uses: arielswalker/cFS/.github/workflows/add-to-project-reusable.yml@test-cfs/workflows122 + secrets: inherit \ No newline at end of file diff --git a/.github/workflows/build-cfs-deprecated.yml b/.github/workflows/build-cfs-deprecated.yml deleted file mode 100644 index fbd048f1c..000000000 --- a/.github/workflows/build-cfs-deprecated.yml +++ /dev/null @@ -1,229 +0,0 @@ -name: Build, Test, and Run [OMIT_DEPRECATED = false] - -# Run every time a new commit pushed or for pull requests -on: - push: - branches: - - dev - - main - pull_request: - types: - - opened - - reopened - - synchronize - workflow_dispatch: - -env: - SIMULATION: native - OMIT_DEPRECATED: false - ENABLE_UNIT_TESTS: true - CTEST_OUTPUT_ON_FAILURE: true - REPO_NAME: ${{ 
github.event.repository.name }} - -# Force bash to apply pipefail option so pipeline failures aren't masked -defaults: - run: - shell: bash - -jobs: - #Checks for duplicate actions. Skips push actions if there is a matching or duplicate pull-request action. - check-for-duplicates: - runs-on: ubuntu-latest - # Map a step output to a job output - outputs: - should_skip: ${{ steps.skip_check.outputs.should_skip }} - steps: - - id: skip_check - uses: fkirc/skip-duplicate-actions@master - with: - concurrent_skipping: 'same_content' - skip_after_successful_duplicate: 'true' - do_not_skip: '["pull_request", "workflow_dispatch", "schedule"]' - - build-cfs-omit-deprecated-false: - name: "[Deprecated] Build" - needs: check-for-duplicates - if: ${{ needs.check-for-duplicates.outputs.should_skip != 'true' }} - runs-on: ubuntu-22.04 - - strategy: - fail-fast: false - matrix: - buildtype: [debug, release] - - # Set the type of machine to run on - env: - BUILDTYPE: ${{ matrix.buildtype }} - - steps: - - name: Cache Source and Build - id: cache-src-bld - uses: actions/cache@v4 - with: - path: /home/runner/work/${{ env.REPO_NAME }}/${{ env.REPO_NAME }}/* - key: deprecated-build-${{ github.run_id }}-${{ github.run_attempt }}-${{ matrix.buildtype }} - - - name: Checkout cFS - if: steps.cache-src-bld.outputs.cache-hit != 'true' - uses: actions/checkout@v4 - with: - submodules: true - - - name: Check versions - run: git submodule - - - name: Copy Makefile - run: | - cp ./cfe/cmake/Makefile.sample Makefile - cp -r ./cfe/cmake/sample_defs sample_defs - - - name: Prep Build - run: make prep - - - name: Make - run: make install - - tests-and-coverage-omit-deprecated-false: - name: "[Deprecated] Run Unit Tests and Check Coverage" - needs: build-cfs-omit-deprecated-false - runs-on: ubuntu-22.04 - - strategy: - fail-fast: false - matrix: - buildtype: [debug, release] - - # Set the type of machine to run on - env: - BUILDTYPE: ${{ matrix.buildtype }} - ENABLE_UNIT_TESTS: true - - steps: - - 
name: Install Dependencies - run: sudo apt-get install lcov -y - - - name: Cache Source and Deprecated Build - id: cache-src-bld - uses: actions/cache@v4 - with: - path: /home/runner/work/${{ env.REPO_NAME }}/${{ env.REPO_NAME }}/* - key: deprecated-build-${{ github.run_id }}-${{ github.run_attempt }}-${{ matrix.buildtype }} - - - name: Run Tests - run: make test - - - name: Check Coverage - run: make lcov - - run-cfs-omit-deprecated-false: - name: "[Deprecated] Run cFS" - needs: build-cfs-omit-deprecated-false - runs-on: ubuntu-22.04 - - strategy: - fail-fast: false - matrix: - buildtype: [debug, release] - - # Set the type of machine to run on - env: - BUILDTYPE: ${{ matrix.buildtype }} - - steps: - - name: Cache Source and Deprecated Build - id: cache-src-bld - uses: actions/cache@v4 - with: - path: /home/runner/work/${{ env.REPO_NAME }}/${{ env.REPO_NAME }}/* - key: deprecated-build-${{ github.run_id }}-${{ github.run_attempt }}-${{ matrix.buildtype }} - - - - name: List cpu1 - run: ls build/exe/cpu1/ - - - name: Run cFS - run: | - ./core-cpu1 > cFS_startup_cpu1.txt & - sleep 30 - ../host/cmdUtil --endian=LE --pktid=0x1806 --cmdcode=2 --half=0x0002 - working-directory: ./build/exe/cpu1/ - - - name: Archive cFS Startup Artifacts - uses: actions/upload-artifact@v4 - with: - name: cFS-startup-log-omit-deprecate-false${{ matrix.buildtype }} - path: ./build/exe/cpu1/cFS_startup_cpu1.txt - - - name: Check for cFS Warnings - run: | - if [[ -n $(grep -i "warn\|err\|fail" cFS_startup_cpu1.txt) ]]; then - echo "Must resolve warn|err|fail in cFS startup before submitting a pull request" - echo "" - grep -i 'warn\|err\|fail' cFS_startup_cpu1.txt - exit -1 - fi - working-directory: ./build/exe/cpu1/ - - run-functional-test-app-omit-deprecated-false: - #Continue if check-for-duplicates found no duplicates. Always runs for pull-requests. 
- needs: run-cfs-omit-deprecated-false - name: "[DEPRECATED] cFS Functional Tests" - runs-on: ubuntu-22.04 - timeout-minutes: 15 - - strategy: - fail-fast: false - matrix: - buildtype: [debug, release] - - steps: - - name: Cache Source and Deprecated Build - id: cache-src-bld - uses: actions/cache@v4 - with: - path: /home/runner/work/${{ env.REPO_NAME }}/${{ env.REPO_NAME }}/* - key: deprecated-build-${{ github.run_id }}-${{ github.run_attempt }}-${{ matrix.buildtype }} - - - name: List cpu1 - run: ls build/exe/cpu1/ - - # Run cFS, send commands to set perf trigger and start perf data, and run functional tests - - name: Run cFS Functional Tests - run: | - ./core-cpu1 & - sleep 10 - ../host/cmdUtil --pktid=0x1806 --cmdcode=17 --endian=LE --uint32=3 --uint32=0x40000000 - ../host/cmdUtil --pktid=0x1806 --cmdcode=14 --endian=LE --uint32=2 - ../host/cmdUtil --pktid=0x1806 --cmdcode=4 --endian=LE --string="20:CFE_TEST" --string="20:CFE_TestMain" --string="64:cfe_testcase" --uint64=16384 --uint8=0 --uint8=0 --uint16=100 --uint32=0 - sleep 30 - counter=0 - while [[ ! -f cf/cfe_test.log ]]; do - temp=$(grep -c "BEGIN" cf/cfe_test.tmp) - - if [ $temp -eq $counter ]; then - echo "Test is frozen. 
Quiting" - break - fi - counter=$(grep -c "BEGIN" cf/cfe_test.tmp) - echo "Waiting for CFE Tests" - sleep 120 - done - - ../host/cmdUtil --endian=LE --pktid=0x1806 --cmdcode=2 --half=0x0002 - working-directory: ./build/exe/cpu1/ - - - name: Archive Functional Test Artifacts - uses: actions/upload-artifact@v4 - with: - name: cFS-functional-test-log-omit-deprecate-false-${{ matrix.buildtype }} - path: ./build/exe/cpu1/cf/cfe_test.log - - - name: Check for cFS Warnings - run: | - if [[ -z $(grep -i "SUMMARY.*FAIL::0.*TSF::0.*TTF::0" cf/cfe_test.log) ]]; then - echo "Must resolve Test Failures in cFS Test App before submitting a pull request" - echo "" - grep -i '\[ FAIL]\|\[ TSF]\|\[ TTF]' cf/cfe_test.log - exit -1 - fi - working-directory: ./build/exe/cpu1/ diff --git a/.github/workflows/build-cfs-multitarget.yml b/.github/workflows/build-cfs-multitarget.yml new file mode 100644 index 000000000..7d0f67794 --- /dev/null +++ b/.github/workflows/build-cfs-multitarget.yml @@ -0,0 +1,125 @@ +name: Build and Test using multitarget makefile + +# Run every time a new commit pushed or for pull requests +on: + workflow_call: + inputs: + config-name: + description: 'configuration to build' + type: string + required: false + default: 'native_eds' + run-local-tests: + description: 'whether to execute test procedures locally' + type: boolean + required: false + default: false + omit-deprecated: + description: 'whether to use OMIT_DEPRECATED flag' + type: boolean + required: false + default: false + container-image: + description: 'container image to use for build' + type: string + required: false + default: 'aetd-dockerlab.gsfc.nasa.gov/gsfc-cfs/github-actions-ci-cd/cfsbuildenv-ubuntu22' + check-coverage: + description: 'whether to execute gcov locally' + type: boolean + required: false + default: false + compression-type: + description: 'which compression type to use, xz or gz is supported' + type: string + required: false + default: xz + +env: + OMIT_DEPRECATED: ${{ 
inputs.omit-deprecated }} + +# Force bash to apply pipefail option so pipeline failures aren't masked +defaults: + run: + shell: bash + +jobs: + build-cfs: + runs-on: ubuntu-22.04 + container: + image: ${{ inputs.container-image }} + # fakeroot runs extremely slow without this limit + # See https://github.com/moby/moby/issues/38814 + options: --ulimit "nofile=1024:1048576" + + steps: + - name: Check Environment + run: | + env + echo "WORK_PATH=$GITHUB_WORKSPACE" >> $GITHUB_ENV + + - name: Verify Container Contents + run: | + which git || echo "Git is MISSING!" + cat /etc/os-release + + - name: Checkout cFS with submodules + uses: actions/checkout@v4 + with: + submodules: true + token: ${{ secrets.GH_PAT || github.token }} + + - name: Configure CFS + run: make ${{ inputs.config-name }}.prep + + - name: Build CFS + run: make ${{ inputs.config-name }}.install + + - name: Configure xz compression + if: inputs.compression-type == 'xz' + run: | + echo "SUFFIX=xz" >> $GITHUB_ENV + echo "COMPRESSOR=xz -z -c" >> $GITHUB_ENV + + - name: Configure gz compression + if: inputs.compression-type == 'gz' + run: | + echo "SUFFIX=gz" >> $GITHUB_ENV + echo "COMPRESSOR=gzip -c" >> $GITHUB_ENV + + - name: Archive binaries + run: | + cd $GITHUB_WORKSPACE/build-${{ inputs.config-name }}/exe + find -maxdepth 1 -mindepth 1 -type d | while read dir + do + inst=$(basename ${dir}) + tar cvf - -C ${inst} . | ${COMPRESSOR} > ${GITHUB_WORKSPACE}/${inst}-bin.tar.${SUFFIX} + done + + - name: Build target images + run: make IMAGE_TYPE=ext4 ${{ inputs.config-name }}.image + + - name: Archive target images + run: | + if cd $GITHUB_WORKSPACE/build-${{ inputs.config-name }}/deploy + then + find -maxdepth 1 -mindepth 1 -type d | while read dir + do + inst=$(basename ${dir}) + tar cvf - -C ${inst} . 
| ${COMPRESSOR} > ${GITHUB_WORKSPACE}/${inst}-target-img.tar.${SUFFIX} + done + fi + + - name: Upload all artifacts + uses: actions/upload-artifact@v4 + with: + name: ${{ inputs.config-name }}-bin + path: ./*.tar.${{ inputs.compression-type }} + + - name: Run Local Tests + if: ${{ inputs.run-local-tests }} + run: make ${{ inputs.config-name }}.runtest + + - name: Generate Coverage Report + if: ${{ inputs.check-coverage }} + run: make ${{ inputs.config-name }}.lcov \ No newline at end of file diff --git a/.github/workflows/build-cfs-rtems5.yml b/.github/workflows/build-cfs-rtems5.yml deleted file mode 100644 index 33eba69ca..000000000 --- a/.github/workflows/build-cfs-rtems5.yml +++ /dev/null @@ -1,140 +0,0 @@ -name: Build and Test rtems 5 [OMIT_DEPRECATED=true] - -# Run every time a new commit pushed or for pull requests -on: - push: - branches: - - dev - - main - pull_request: - types: - - opened - - reopened - - synchronize - workflow_dispatch: - -env: - OMIT_DEPRECATED: true - CTEST_OUTPUT_ON_FAILURE: true - -# Force bash to apply pipefail option so pipeline failures aren't masked -defaults: - run: - shell: bash - -jobs: - #Checks for duplicate actions. Skips push actions if there is a matching or duplicate pull-request action. - check-for-duplicates: - runs-on: ubuntu-latest - # Map a step output to a job output - outputs: - should_skip: ${{ steps.skip_check.outputs.should_skip }} - steps: - - id: skip_check - uses: fkirc/skip-duplicate-actions@master - with: - concurrent_skipping: 'same_content' - skip_after_successful_duplicate: 'true' - do_not_skip: '["pull_request", "workflow_dispatch", "schedule"]' - - build-cfs: - #Continue if check-for-duplicates found no duplicates. Always runs for pull-requests. 
- needs: check-for-duplicates - if: ${{ needs.check-for-duplicates.outputs.should_skip != 'true' }} - name: Build - runs-on: ubuntu-22.04 - container: ghcr.io/core-flight-system/qemu-rtems-5:latest - - strategy: - fail-fast: false - matrix: - buildtype: [debug, release] - - # Set the type of machine to run on - env: - BUILDTYPE: ${{ matrix.buildtype }} - # Set home to where rtems is located - HOME: /root - - steps: - # Check out the cfs bundle - - name: Checkout code - uses: actions/checkout@v4 - with: - submodules: true - - # Setup the build system - - name: Copy Files - run: | - cp ./cfe/cmake/Makefile.sample Makefile - cp -r ./cfe/cmake/sample_defs sample_defs - ln -s /root/rtems-5 /opt/rtems-5 - - # Setup the build system - - name: Make Prep - run: make SIMULATION=i686-rtems5 prep - - - name: Make - run: make - - test-cfs: - name: Test - runs-on: ubuntu-22.04 - container: ghcr.io/core-flight-system/qemu-rtems-5:latest - - needs: build-cfs - - strategy: - fail-fast: false - matrix: - buildtype: [debug, release] - - # Set the type of machine to run on - env: - BUILDTYPE: ${{ matrix.buildtype }} - ENABLE_UNIT_TESTS: true - # Set home to where rtems is located - HOME: /root - # Disable mcopy check otherwise disk image build fails - MTOOLS_SKIP_CHECK: 1 - - steps: - # Checks out a copy of your repository on the ubuntu-latest machine - - name: Checkout code - uses: actions/checkout@v4 - with: - submodules: true - - # Setup the build system - - name: Copy Files - run: | - cp ./cfe/cmake/Makefile.sample Makefile - cp -r ./cfe/cmake/sample_defs sample_defs - ln -s /root/rtems-5 /opt/rtems-5 - - # Setup the build system - - name: Make - run: | - make SIMULATION=i686-rtems5 prep - make install - - - name: Test - #run: .github/scripts/qemu_test.sh && .github/scripts/log_failed_tests.sh - run: make O=build SKIP_NET_TESTS=true RTEMS_VERSION=i686-rtems5 -f .github/scripts/rtems-test.mk all_tests -k - - - name: Output Failed Tests - run: | - # Check if failed-tests is empty or 
not - if [ -s ./build/exe/cpu1/failed-tests.log ]; then - echo "Failing tests found:" - cat ./build/exe/cpu1/failed-tests.log - fi - - # Always archive test logs - - name: Archive cFS Test Artifacts - uses: actions/upload-artifact@v4 - # Runs even if previous steps have failed - if: always() - with: - name: cFS-rtems-log-summary-${{ matrix.buildtype }} - path: ./build/exe/cpu1/*.log diff --git a/.github/workflows/build-cfs.yml b/.github/workflows/build-cfs.yml deleted file mode 100644 index 5024b02c5..000000000 --- a/.github/workflows/build-cfs.yml +++ /dev/null @@ -1,227 +0,0 @@ -name: Build, Test, and Run [OMIT_DEPRECATED = true] - -# Run every time a new commit pushed or for pull requests -on: - push: - branches: - - dev - - main - pull_request: - types: - - opened - - reopened - - synchronize - workflow_dispatch: - -env: - SIMULATION: native - OMIT_DEPRECATED: true - ENABLE_UNIT_TESTS: true - CTEST_OUTPUT_ON_FAILURE: true - REPO_NAME: ${{ github.event.repository.name }} - -# Force bash to apply pipefail option so pipeline failures aren't masked -defaults: - run: - shell: bash - -jobs: - #Checks for duplicate actions. Skips push actions if there is a matching or duplicate pull-request action. 
- check-for-duplicates: - runs-on: ubuntu-latest - # Map a step output to a job output - outputs: - should_skip: ${{ steps.skip_check.outputs.should_skip }} - steps: - - id: skip_check - uses: fkirc/skip-duplicate-actions@master - with: - concurrent_skipping: 'same_content' - skip_after_successful_duplicate: 'true' - do_not_skip: '["pull_request", "workflow_dispatch", "schedule"]' - - build-cfs-omit-deprecated-true: - name: Build - needs: check-for-duplicates - if: ${{ needs.check-for-duplicates.outputs.should_skip != 'true' }} - runs-on: ubuntu-22.04 - - strategy: - fail-fast: false - matrix: - buildtype: [debug, release] - - # Set the type of machine to run on - env: - BUILDTYPE: ${{ matrix.buildtype }} - - steps: - - name: Cache Source and Build - id: cache-src-bld - uses: actions/cache@v4 - with: - path: /home/runner/work/${{ env.REPO_NAME }}/${{ env.REPO_NAME }}/* - key: build-${{ github.run_id }}-${{ github.run_attempt }}-${{ matrix.buildtype }} - - - name: Checkout cFS - if: steps.cache-src-bld.outputs.cache-hit != 'true' - uses: actions/checkout@v4 - with: - submodules: true - - - name: Check versions - run: git submodule - - - name: Copy Makefile - run: | - cp ./cfe/cmake/Makefile.sample Makefile - cp -r ./cfe/cmake/sample_defs sample_defs - - name: Prep Build - run: make prep - - - name: Make - run: make install - - tests-and-coverage-omit-deprecated-true: - name: Run Unit Tests and Check Coverage - needs: build-cfs-omit-deprecated-true - runs-on: ubuntu-22.04 - - strategy: - fail-fast: false - matrix: - buildtype: [debug, release] - - # Set the type of machine to run on - env: - BUILDTYPE: ${{ matrix.buildtype }} - ENABLE_UNIT_TESTS: true - - steps: - - name: Install Dependencies - run: sudo apt-get install lcov -y - - - name: Cache Source and Build - id: cache-src-bld - uses: actions/cache@v4 - with: - path: /home/runner/work/${{ env.REPO_NAME }}/${{ env.REPO_NAME }}/* - key: build-${{ github.run_id }}-${{ github.run_attempt }}-${{ matrix.buildtype }} - 
- - name: Run Tests - run: make test - - - name: Check Coverage - run: make lcov - - run-cfs-omit-deprecated-true: - name: Run - needs: build-cfs-omit-deprecated-true - runs-on: ubuntu-22.04 - - strategy: - fail-fast: false - matrix: - buildtype: [debug, release] - - # Set the type of machine to run on - env: - BUILDTYPE: ${{ matrix.buildtype }} - - steps: - - name: Cache Source and Build - id: cache-src-bld - uses: actions/cache@v4 - with: - path: /home/runner/work/${{ env.REPO_NAME }}/${{ env.REPO_NAME }}/* - key: build-${{ github.run_id }}-${{ github.run_attempt }}-${{ matrix.buildtype }} - - - - name: List cpu1 - run: ls build/exe/cpu1/ - - - name: Run cFS - run: | - ./core-cpu1 > cFS_startup_cpu1.txt & - sleep 30 - ../host/cmdUtil --endian=LE --pktid=0x1806 --cmdcode=2 --half=0x0002 - working-directory: ./build/exe/cpu1/ - - - name: Archive cFS Startup Artifacts - uses: actions/upload-artifact@v4 - with: - name: cFS-startup-log-omit-deprecate-true-${{ matrix.buildtype }} - path: ./build/exe/cpu1/cFS_startup_cpu1.txt - - - name: Check for cFS Warnings - run: | - if [[ -n $(grep -i "warn\|err\|fail" cFS_startup_cpu1.txt) ]]; then - echo "Must resolve warn|err|fail in cFS startup before submitting a pull request" - echo "" - grep -i 'warn\|err\|fail' cFS_startup_cpu1.txt - exit -1 - fi - working-directory: ./build/exe/cpu1/ - - run-functional-test-app-omit-deprecated-true: - #Continue if check-for-duplicates found no duplicates. Always runs for pull-requests. 
- needs: run-cfs-omit-deprecated-true - runs-on: ubuntu-22.04 - timeout-minutes: 15 - - strategy: - fail-fast: false - matrix: - buildtype: [debug, release] - - steps: - - name: Cache Source and Build - id: cache-src-bld - uses: actions/cache@v4 - with: - path: /home/runner/work/${{ env.REPO_NAME }}/${{ env.REPO_NAME }}/* - key: build-${{ github.run_id }}-${{ github.run_attempt }}-${{ matrix.buildtype }} - - - name: List cpu1 - run: ls build/exe/cpu1/ - - # Run cFS, send commands to set perf trigger and start perf data, and run functional tests - - name: Run cFS Functional Tests - run: | - ./core-cpu1 & - sleep 10 - ../host/cmdUtil --pktid=0x1806 --cmdcode=17 --endian=LE --uint32=3 --uint32=0x40000000 - ../host/cmdUtil --pktid=0x1806 --cmdcode=14 --endian=LE --uint32=2 - ../host/cmdUtil --pktid=0x1806 --cmdcode=4 --endian=LE --string="20:CFE_TEST" --string="20:CFE_TestMain" --string="64:cfe_testcase" --uint64=16384 --uint8=0 --uint8=0 --uint16=100 --uint32=0 - sleep 30 - counter=0 - while [[ ! -f cf/cfe_test.log ]]; do - temp=$(grep -c "BEGIN" cf/cfe_test.tmp) - - if [ $temp -eq $counter ]; then - echo "Test is frozen. 
Quiting" - break - fi - counter=$(grep -c "BEGIN" cf/cfe_test.tmp) - echo "Waiting for CFE Tests" - sleep 120 - done - - ../host/cmdUtil --endian=LE --pktid=0x1806 --cmdcode=2 --half=0x0002 - working-directory: ./build/exe/cpu1/ - - - name: Archive cFS Startup Artifacts - uses: actions/upload-artifact@v4 - with: - name: cFS-functional-test-log-omit-deprecate-true-${{ matrix.buildtype }} - path: ./build/exe/cpu1/cf/cfe_test.log - - - name: Check for cFS Warnings - run: | - if [[ -z $(grep -i "SUMMARY.*FAIL::0.*TSF::0.*TTF::0" cf/cfe_test.log) ]]; then - echo "Must resolve Test Failures in cFS Test App before submitting a pull request" - echo "" - grep -i '\[ FAIL]\|\[ TSF]\|\[ TTF]' cf/cfe_test.log - exit -1 - fi - working-directory: ./build/exe/cpu1/ diff --git a/.github/workflows/build-documentation.yml b/.github/workflows/build-documentation.yml deleted file mode 100644 index ab68bda86..000000000 --- a/.github/workflows/build-documentation.yml +++ /dev/null @@ -1,87 +0,0 @@ -name: cFS Documentation and Guides - -on: - push: - branches: - - dev - - main - pull_request: - types: - - opened - - reopened - - synchronize - workflow_dispatch: - -# Force bash to apply pipefail option so pipeline failures aren't masked -defaults: - run: - shell: bash - -jobs: - # Checks for duplicate actions. Skips push actions if there is a matching or - # duplicate pull-request action. 
- checks-for-duplicates: - runs-on: ubuntu-latest - # Map a step output to a job output - outputs: - should_skip: ${{ steps.skip_check.outputs.should_skip }} - steps: - - id: skip_check - uses: fkirc/skip-duplicate-actions@master - with: - concurrent_skipping: 'same_content' - skip_after_successful_duplicate: 'true' - do_not_skip: '["pull_request", "workflow_dispatch", "schedule"]' - - checkout-and-cache: - name: Custom checkout and cache for cFS documents - needs: checks-for-duplicates - if: ${{ needs.checks-for-duplicates.outputs.should_skip != 'true' || contains(github.ref, 'main') }} - runs-on: ubuntu-latest - - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - submodules: true - - - name: Cache Source and Build - id: cache-src-bld - uses: actions/cache@v4 - with: - path: /home/runner/work/${{ github.event.repository.name }}/${{ github.event.repository.name }}/* - key: cfs-doc-${{ github.run_id }}-${{ github.run_attempt }} - - build-cfs-documentation: - needs: checkout-and-cache - name: Build cFS documents - uses: nasa/cFS/.github/workflows/build-deploy-doc.yml@main - with: - target: "[\"cfe-usersguide\", \"osal-apiguide\"]" - cache-key: cfs-doc-${{ github.run_id }}-${{ github.run_attempt }} - deploy: false - - deploy-documentation: - needs: build-cfs-documentation - if: ${{ github.event_name == 'push' && contains(github.ref, 'main') }} - name: Deploy documentation to gh-pages - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v4 - - - uses: actions/download-artifact@v4 - - - name: Display structure of downloaded files - run: ls -R - - - name: Move pdfs to deployment directory - run: mkdir deploy; mv */*.pdf deploy - - - name: Deploy to GitHub - uses: JamesIves/github-pages-deploy-action@v4 - with: - token: ${{ secrets.GITHUB_TOKEN }} - branch: gh-pages - folder: deploy - single-commit: true diff --git a/.github/workflows/format-check.yml b/.github/workflows/format-check.yml index 8eada3daf..a101092fe 100644 --- 
a/.github/workflows/format-check.yml +++ b/.github/workflows/format-check.yml @@ -67,7 +67,6 @@ jobs: check-commit-message: name: Check Commit Message - needs: check-for-duplicates # Only run for pull-requests. if: ${{ github.event_name == 'pull_request' && !startsWith(github.head_ref, 'ic-') }} runs-on: ubuntu-22.04 diff --git a/.github/workflows/mcdc-reusable.yml b/.github/workflows/mcdc-reusable.yml new file mode 100644 index 000000000..809f01f43 --- /dev/null +++ b/.github/workflows/mcdc-reusable.yml @@ -0,0 +1,208 @@ +name: MCDC Reusable Workflow + +on: + workflow_call: + inputs: + # Optional inputs + app-name: + description: Application name, if different from repo name + type: string + required: false + default: '' + +env: + SIMULATION: native + ENABLE_UNIT_TESTS: true + OMIT_DEPRECATED: false + BUILDTYPE: debug + TESTS_RAN: false + LOCAL_BRANCH: dev + +# Force bash to apply pipefail option so pipeline failures aren't masked +defaults: + run: + shell: bash + +jobs: + # Checks for duplicate actions. Skips push actions if there is a matching or + # duplicate pull-request action. 
+ checks-for-duplicates: + runs-on: ubuntu-latest + # Map a step output to a job output + outputs: + should_skip: ${{ steps.skip_check.outputs.should_skip }} + steps: + - id: skip_check + uses: fkirc/skip-duplicate-actions@master + with: + concurrent_skipping: 'same_content' + skip_after_successful_duplicate: 'false' + do_not_skip: '["pull_request", "workflow_dispatch", "schedule"]' + + mcdc: + needs: checks-for-duplicates + if: needs.checks-for-duplicates.outputs.should_skip != 'true' || contains(github.ref, 'dev') || github.event_name == 'pull_request' + name: Build and Run MCDC + runs-on: ubuntu-22.04 + container: ghcr.io/core-flight-system/cfsbuildenv-mcdc:latest + + steps: + # Note this also sets up the environment variables + - name: Set up app source + uses: nasa/cFS/actions/setup-app@dev + + # This builds the whole bundle for cpu1 + - name: Set up local environment (bundle) + if: ${{ inputs.app-name == '' }} + run: | + echo "BUILD_SUBDIR=build/native/default_cpu1" >> $GITHUB_ENV + + # This isolates the build to a single app subdirectory (faster) + - name: Set up local environment (app) + if: ${{ inputs.app-name != '' }} + run: | + echo "BUILD_SUBDIR=build/native/default_cpu1/apps/$APP_LOWER" >> $GITHUB_ENV + + - name: Fetch MCDC Check Script + run: wget -nv -O mcdc-analyze.sh ${{ github.server_url }}/arielswalker/cFS/raw/refs/heads/test-cfs%2Fworkflows122/.github/scripts/mcdc-analyze.sh + + - name: Include conditional coverage flags + run: | + mkdir -p ./sample_defs/cpu1 + echo "target_compile_options(ut_coverage_compile INTERFACE -fcondition-coverage -fprofile-abs-path)" >> ./sample_defs/cpu1/install_custom.cmake + + - name: Reconfigure CMake with condition coverage flags + run: cmake build + + - name: Build dependencies + run: make -C build mission-prebuild + + - name: Build unit under test + run: make -C $BUILD_SUBDIR all + + - name: Generate test list as JSON + run: | + (cd $BUILD_SUBDIR && ctest --show-only=json-v1) | tee test_list.json + + - name: 
Make test + run: | + (cd $BUILD_SUBDIR && ctest --verbose || true) | tee test_results.txt + echo "TESTS_RAN=true" >> $GITHUB_ENV + + - name: Grab test modules + # Grab test modules if tests ran, even if step failed + if: ${{ env.TESTS_RAN == 'true' }} + run: | + cat test_list.json | jq -rc '.tests[] | .name | @sh' | xargs echo | tee modules.txt + echo "Got modules.txt" + echo "MODULES=$(cat modules.txt)" >> $GITHUB_ENV + + - name: Run MCDC analysis + # Run MCDC analysis if tests ran, even if step failed + if: ${{ env.TESTS_RAN == 'true' }} + run: bash ./mcdc-analyze.sh + + - name: Save PR number + if: always() && (github.event_name == 'pull_request' || github.event_name == 'pull_request_target') + env: + PR_NUMBER: ${{ github.event.number }} + run: echo $PR_NUMBER > pr_number + + - name: Archive unit test results + # Archive unit test results if tests ran, even if step failed + if: ${{ env.TESTS_RAN == 'true' }} + uses: actions/upload-artifact@v4 + with: + name: Unit test results + path: | + test_results.txt + + - name: Archive MCDC results + # Archive MCDC results if tests ran, even if step failed + if: success() || failure() + uses: actions/upload-artifact@v4 + with: + name: MCDC results + path: | + **/*.gcov.json.gz + mcdc_results.txt + pr_number + modules.txt + + summary-mcdc: + needs: mcdc + if: always() && (github.event_name == 'pull_request' ) && needs.mcdc.result != 'skipped' + name: Generate MCDC Comparison Summary + runs-on: ubuntu-22.04 + + steps: + - name: Checkout MCDC Script + uses: actions/checkout@v4 + with: + repository: arielswalker/cFS + path: workflows + + - name: Download latest main branch artifact + continue-on-error: true + uses: dawidd6/action-download-artifact@v2 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + workflow: mcdc-internal.yml + search_artifacts: true + branch: dev + name: MCDC results + path: ./main-branch-results + + - name: Downloads PR artifacts + uses: actions/download-artifact@v4 + with: + name: MCDC results + + - 
name: Compare main and PR artifacts + run: | + if [ -f "main-branch-results/mcdc_results.txt" ]; then + echo "Main branch artifact found. Running comparison." + bash workflows/.github/scripts/mcdc-compare.sh main-branch-results/mcdc_results.txt mcdc_results.txt main-branch-results/modules.txt + else + echo "Main branch artifact not found. Skipping comparison step." + fi + + - name: Output summary to workflow + run: | + if [ -s "mcdc_comment.txt" ]; then + echo "### MC/DC Results (Comparison with dev branch)" >> $GITHUB_STEP_SUMMARY + echo '```plaintext' >> $GITHUB_STEP_SUMMARY + cat mcdc_comment.txt >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + elif [ -s "mcdc_results.txt" ]; then + echo "### MC/DC Results (Current PR)" >> $GITHUB_STEP_SUMMARY + echo '```plaintext' >> $GITHUB_STEP_SUMMARY + cat mcdc_results.txt >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + else + echo "No MCDC results found." >> $GITHUB_STEP_SUMMARY + fi + + # Output uncovered branches if the file exists and is not empty + if [ -s "uncovered.json" ]; then + echo "" >> $GITHUB_STEP_SUMMARY + echo "
" >> $GITHUB_STEP_SUMMARY + echo "Click to view uncovered branches (uncovered.json)" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo '```json' >> $GITHUB_STEP_SUMMARY + cat uncovered.json >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "
" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + fi + + - name: Archive mcdc comparison + # Upload if success or failure which supports skipping, unlike always() + if: success() || failure() + uses: actions/upload-artifact@v4 + with: + name: MCDC main branch comparison + path: | + mcdc_comment.txt + mcdc_compare.txt \ No newline at end of file diff --git a/.github/workflows/mcdc.yml b/.github/workflows/mcdc.yml new file mode 100644 index 000000000..4b9d05045 --- /dev/null +++ b/.github/workflows/mcdc.yml @@ -0,0 +1,18 @@ +name: MCDC Analysis + +on: + push: + branches: + - dev + - main + workflow_dispatch: + pull_request: + types: + - opened + - reopened + - synchronize + +jobs: + mcdc: + name: Run MCDC Analysis + uses: arielswalker/cFS/.github/workflows/mcdc-reusable.yml@test-cfs/workflows122 \ No newline at end of file diff --git a/.github/workflows/static-analysis-misra.yml b/.github/workflows/static-analysis-misra.yml deleted file mode 100644 index 928d33a6d..000000000 --- a/.github/workflows/static-analysis-misra.yml +++ /dev/null @@ -1,131 +0,0 @@ -name: Static Analysis with MISRA - -# Run this workflow manually from the Actions tab -on: - workflow_dispatch: - -# Force bash to apply pipefail option so pipeline failures aren't masked -defaults: - run: - shell: bash - -jobs: - #Checks for duplicate actions. Skips push actions if there is a matching or duplicate pull-request action. - check-for-duplicates: - runs-on: ubuntu-latest - # Map a step output to a job output - outputs: - should_skip: ${{ steps.skip_check.outputs.should_skip }} - steps: - - id: skip_check - uses: fkirc/skip-duplicate-actions@master - with: - concurrent_skipping: 'same_content' - skip_after_successful_duplicate: 'true' - do_not_skip: '["pull_request", "workflow_dispatch", "schedule"]' - - misra-analysis: - #Continue if check-for-duplicates found no duplicates. Always runs for pull-requests. 
- needs: check-for-duplicates - if: ${{ needs.check-for-duplicates.outputs.should_skip != 'true' }} - name: Run cppcheck with misra - runs-on: ubuntu-latest - - strategy: - fail-fast: false - matrix: - cppcheck: [bundle, cfe, osal, psp] - - steps: - - name: Install cppcheck - run: sudo apt-get install cppcheck -y - - # Checks out a copy of the cfs bundle - - name: Checkout code - uses: actions/checkout@v4 - with: - submodules: true - - - name: Get MISRA addon - run: | - sudo apt-get install git -y - git clone https://github.com/danmar/cppcheck.git - cp cppcheck/addons/misra.py misra.py - cp cppcheck/addons/cppcheckdata.py cppcheckdata.py - cp cppcheck/addons/misra_9.py misra_9.py - - - name: Run bundle cppcheck - if: ${{matrix.cppcheck =='bundle'}} - run: | - cppcheck --addon=misra --force --inline-suppr --quiet . --xml 2> ${{matrix.cppcheck}}_cppcheck_err.xml - cppcheck --addon=misra --force --inline-suppr --quiet . 2> ${{matrix.cppcheck}}_cppcheck_err.txt - - # Run strict static analysis for embedded portions of cfe, osal, and psp - - name: cfe strict cppcheck - if: ${{matrix.cppcheck =='cfe'}} - run: | - cd ${{matrix.cppcheck}} - cppcheck --addon=misra --force --inline-suppr --std=c99 --language=c --enable=warning,performance,portability,style --suppress=variableScope --inconclusive ./modules/core_api/fsw ./modules/core_private/fsw ./modules/es/fsw ./modules/evs/fsw ./modules/fs/fsw ./modules/msg/fsw ./modules/resourceid/fsw ./modules/sb/fsw ./modules/sbr/fsw ./modules/tbl/fsw ./modules/time/fsw -UCFE_PLATFORM_TIME_CFG_CLIENT -DCFE_PLATFORM_TIME_CFG_SERVER --xml 2> ${{matrix.cppcheck}}_cppcheck_err.xml - cppcheck --addon=misra --force --inline-suppr --std=c99 --language=c --enable=warning,performance,portability,style --suppress=variableScope --inconclusive ./modules/core_api/fsw ./modules/core_private/fsw ./modules/es/fsw ./modules/evs/fsw ./modules/fs/fsw ./modules/msg/fsw ./modules/resourceid/fsw ./modules/sb/fsw ./modules/sbr/fsw ./modules/tbl/fsw 
./modules/time/fsw -UCFE_PLATFORM_TIME_CFG_CLIENT -DCFE_PLATFORM_TIME_CFG_SERVER 2> ${{matrix.cppcheck}}_cppcheck_err.txt - - - name: osal strict cppcheck - if: ${{matrix.cppcheck =='osal'}} - run: | - cd ${{matrix.cppcheck}} - cppcheck --addon=misra --force --inline-suppr --std=c99 --language=c --enable=warning,performance,portability,style --suppress=variableScope --inconclusive ./src/bsp ./src/os --xml 2> ${{matrix.cppcheck}}_cppcheck_err.xml - cppcheck --addon=misra --force --inline-suppr --std=c99 --language=c --enable=warning,performance,portability,style --suppress=variableScope --inconclusive ./src/bsp ./src/os 2> ${{matrix.cppcheck}}_cppcheck_err.txt - - - name: psp strict cppcheck - if: ${{matrix.cppcheck =='psp'}} - run: | - cd ${{matrix.cppcheck}} - cppcheck --addon=misra --force --inline-suppr --std=c99 --language=c --enable=warning,performance,portability,style --suppress=variableScope --inconclusive ./fsw --xml 2> ${{matrix.cppcheck}}_cppcheck_err.xml - cppcheck --addon=misra --force --inline-suppr --std=c99 --language=c --enable=warning,performance,portability,style --suppress=variableScope --inconclusive ./fsw 2> ${{matrix.cppcheck}}_cppcheck_err.txt - - - name: Convert bundle cppcheck to sarif - uses: airtower-luna/convert-to-sarif@v0.2.0 - if: ${{matrix.cppcheck =='bundle'}} - with: - tool: 'CppCheck' - input_file: '${{matrix.cppcheck}}_cppcheck_err.xml' - sarif_file: '${{matrix.cppcheck}}_cppcheck_err.sarif' - - - name: Convert cfe, osal, psp cppcheck to sarif - uses: airtower-luna/convert-to-sarif@v0.2.0 - if: ${{matrix.cppcheck !='bundle'}} - with: - tool: 'CppCheck' - input_file: '${{matrix.cppcheck}}/${{matrix.cppcheck}}_cppcheck_err.xml' - sarif_file: '${{matrix.cppcheck}}_cppcheck_err.sarif' - - - name: Define workspace - run: | - echo "CONTAINER_WORKSPACE=${PWD}" >> ${GITHUB_ENV} - - - name: Archive bundle static analysis artifacts - uses: actions/upload-artifact@v4 - if: ${{matrix.cppcheck =='bundle'}} - with: - name: 
${{matrix.cppcheck}}-cppcheck-err - path: ./*cppcheck_err.* - - - name: Archive osal, cfe, and psp static analysis artifacts - uses: actions/upload-artifact@v4 - if: ${{matrix.cppcheck !='bundle'}} - with: - name: ${{matrix.cppcheck}}-cppcheck-err - path: ./${{matrix.cppcheck}}/*cppcheck_err.* - - - name: Upload sarif results - uses: github/codeql-action/upload-sarif@v3 - with: - sarif_file: '${{matrix.cppcheck}}_cppcheck_err.sarif' - checkout_path: ${{ env.CONTAINER_WORKSPACE }} - - - name: Check for errors - run: | - if [[ -s ${{matrix.cppcheck}}_cppcheck_err.txt ]]; - then - cat ${{matrix.cppcheck}}_cppcheck_err.txt - exit -1 - fi diff --git a/.github/workflows/static-analysis-reuse.yml b/.github/workflows/static-analysis-reuse.yml deleted file mode 100644 index 225b4a249..000000000 --- a/.github/workflows/static-analysis-reuse.yml +++ /dev/null @@ -1,9 +0,0 @@ -name: Bundle Static Analysis - -on: - workflow_dispatch: - -jobs: - static-analysis: - name: Static Analysis - uses: nasa/cFS/.github/workflows/static-analysis.yml@main diff --git a/.github/workflows/static-analysis.yml b/.github/workflows/static-analysis.yml deleted file mode 100644 index ec2a8c90e..000000000 --- a/.github/workflows/static-analysis.yml +++ /dev/null @@ -1,122 +0,0 @@ -name: Static Analysis - -on: - workflow_call: - inputs: - strict-dir-list: - description: 'Directory List' - type: string - default: '' - cmake-project-options: - description: 'Command line options to pass to CMake' - type: string - default: '' - cppcheck-xslt-path: - description: 'Path to XSLT file for translating cppcheck XML output' - type: string - default: 'nasa/cFS/main/.github/scripts' - -# Force bash to apply pipefail option so pipeline failures aren't masked -defaults: - run: - shell: bash - -jobs: - #Checks for duplicate actions. Skips push actions if there is a matching or duplicate pull-request action. 
- check-for-duplicates: - name: Check for Duplicates - runs-on: ubuntu-latest - # Map a step output to a job output - outputs: - should_skip: ${{ steps.skip_check.outputs.should_skip }} - steps: - - id: skip_check - uses: fkirc/skip-duplicate-actions@master - with: - concurrent_skipping: 'same_content' - skip_after_successful_duplicate: 'true' - do_not_skip: '["pull_request", "workflow_dispatch", "schedule"]' - - static-analysis: - #Continue if check-for-duplicates found no duplicates. Always runs for pull-requests. - needs: check-for-duplicates - if: ${{ needs.check-for-duplicates.outputs.should_skip != 'true' }} - name: Run cppcheck - runs-on: ubuntu-22.04 - - strategy: - fail-fast: false - - steps: - - name: Install cppcheck - run: | - sudo apt-get update - sudo apt-get install cppcheck xsltproc -y - - - name: Install sarif tool - run: npm install @microsoft/sarif-multitool - - - name: Fetch conversion XSLT - run: | - wget -O cppcheck-xml2text.xslt https://raw.githubusercontent.com/${{ inputs.cppcheck-xslt-path }}/cppcheck-xml2text.xslt - wget -O cppcheck-merge.xslt https://raw.githubusercontent.com/${{ inputs.cppcheck-xslt-path }}/cppcheck-merge.xslt - - # Checks out a copy of the reference repository - - name: Checkout subject repository - uses: actions/checkout@v4 - with: - path: source - submodules: true - - # For a CMake-based project, get the list of files by setting up a build with CMAKE_EXPORT_COMPILE_COMMANDS=ON and - # referencing the compile_commands.json file produced by the tool. This will capture the correct include paths and - # compile definitions based on how the source is actually compiled. 
- - name: CMake Setup - if: ${{ inputs.cmake-project-options != '' }} - run: | - cmake -DCMAKE_INSTALL_PREFIX=$GITHUB_WORKSPACE/staging -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_BUILD_TYPE=debug ${{ inputs.cmake-project-options }} -S source -B build - echo CPPCHECK_OPTS=--project="$GITHUB_WORKSPACE/build/compile_commands.json" >> $GITHUB_ENV - - # For a Non-CMake project, just pass the base source directory of the repo. This will examine all .c files in the repo, - # but it will not see the macro definitions, and thus may not correctly interpret macro usage. - - name: Non-CMake Setup - if: ${{ inputs.cmake-project-options == '' }} - run: | - echo CPPCHECK_OPTS="$GITHUB_WORKSPACE/source" >> $GITHUB_ENV - - - name: Run general cppcheck - run: cppcheck --force --inline-suppr --xml $CPPCHECK_OPTS 2> cppcheck_err.xml - - # Run strict static analysis for selected portions of source code - - name: Run Strict cppcheck - if: ${{ inputs.strict-dir-list !='' }} - working-directory: ${{ github.workspace }}/source - run: cppcheck --force --inline-suppr --std=c99 --language=c --enable=warning,performance,portability,style --suppress=variableScope --inconclusive --xml ${{ inputs.strict-dir-list }} 2> ../strict_cppcheck_err.xml - - - name: Merge cppcheck results - if: ${{ inputs.strict-dir-list !='' }} - run: | - mv cppcheck_err.xml general_cppcheck_err.xml - xsltproc --stringparam merge_file strict_cppcheck_err.xml cppcheck-merge.xslt general_cppcheck_err.xml > cppcheck_err.xml - - - name: Convert cppcheck results to SARIF - run: npx "@microsoft/sarif-multitool" convert "cppcheck_err.xml" --tool "CppCheck" --output "cppcheck_err.sarif" - - - name: Convert cppcheck results to Markdown - run: xsltproc cppcheck-xml2text.xslt cppcheck_err.xml | tee $GITHUB_STEP_SUMMARY cppcheck_err.txt - - - name: Upload SARIF results - uses: github/codeql-action/upload-sarif@v3 - with: - sarif_file: ${{ github.workspace }}/cppcheck_err.sarif - checkout_path: ${{ github.workspace }}/source - 
category: 'cppcheck' - - - name: Archive static analysis artifacts - uses: actions/upload-artifact@v4 - with: - name: cppcheck-errors - path: ./*cppcheck_err.* - - - name: Check for reported errors - run: tail -n 1 cppcheck_err.txt | grep -q '^\*\*0 error(s) reported\*\*$' diff --git a/.github/workflows/test-cfs-qemu.yml b/.github/workflows/test-cfs-qemu.yml new file mode 100644 index 000000000..c92197114 --- /dev/null +++ b/.github/workflows/test-cfs-qemu.yml @@ -0,0 +1,206 @@ +name: Build and execute CFS with multiple configurations + +# Run on all pull requests, and pushes in dev and main branches +on: + push: + branches: + - dev + - main + pull_request: + types: + - opened + - reopened + - synchronize + workflow_dispatch: + +# Force bash to apply pipefail option so pipeline failures aren't masked +defaults: + run: + shell: bash + +jobs: + build-docs: + strategy: + fail-fast: false + matrix: + config: [ native_std ] + doctype: [ detaildesign, usersguide ] + include: + - doctype: detaildesign + path: mission-doc + - doctype: usersguide + path: cfe-usersguide + + name: Build ${{ matrix.config }}.${{ matrix.doctype }} Documentation + runs-on: ubuntu-22.04 + container: ghcr.io/arielswalker/cfsbuildenv-doxygen:ci + steps: + - name: Checkout Bundle + uses: actions/checkout@v4 + with: + submodules: true + + - name: Prepare ${{ matrix.config }} Build + run: make ${{ matrix.config }}.prep + + - name: Build ${{ matrix.doctype }} documentation + run: make ${{ matrix.config }}.${{ matrix.doctype }} + + - name: Set environment + run: | + echo "FULL_WARNING_LOG=build-${{ matrix.config }}/docs/${{ matrix.path }}/${{ matrix.path }}-warnings.log" >> $GITHUB_ENV + echo "SCRUBBED_WARNING_LOG=build-${{ matrix.config }}/${{ matrix.path }}-internal-warnings.log" >> $GITHUB_ENV + echo "OTHER_WARNING_LOG=build-${{ matrix.config }}/${{ matrix.path }}-other-warnings.log" >> $GITHUB_ENV + + + # the intent of this is to separate the warnings into those caused by submodule problems vs those 
caused + # by problems in the configuration files within this bundle repo + - name: Scrub warnings + run: | + grep -Ev "^$GITHUB_WORKSPACE/(apps|libs|cfe|osal|psp|tools)" $FULL_WARNING_LOG > $SCRUBBED_WARNING_LOG || /bin/true + grep -E "^$GITHUB_WORKSPACE/(apps|libs|cfe|osal|psp|tools)" $FULL_WARNING_LOG > $OTHER_WARNING_LOG || /bin/true + + # warnings reported in submodules are likely a problem in that submodule. + # These can be reported for information but do not fail here. + - name: Check for submodule warnings + run: | + if [ -s "${OTHER_WARNING_LOG}" ] + then + echo '
<details><summary>Doxygen Warnings in other submodules</summary>' >> $GITHUB_STEP_SUMMARY
+            echo '<pre>' >> $GITHUB_STEP_SUMMARY
+            cat "${OTHER_WARNING_LOG}" >> $GITHUB_STEP_SUMMARY
+            echo '</pre></details>
' >> $GITHUB_STEP_SUMMARY + fi + + - name: Check for bundle errors + run: | + if [ -s "${SCRUBBED_WARNING_LOG}" ] + then + echo '
<details><summary>Doxygen Errors</summary>' >> $GITHUB_STEP_SUMMARY
+            echo '<pre>' >> $GITHUB_STEP_SUMMARY
+            cat "${SCRUBBED_WARNING_LOG}" >> $GITHUB_STEP_SUMMARY
+            echo '</pre></details>
' >> $GITHUB_STEP_SUMMARY + /bin/false + fi + + build-cfs: + strategy: + matrix: + include: + # - config: qemu_yocto_linux + # build-image: 'ghcr.io/core-flight-system/cfsbuildenv-yocto:ci' + - config: native_std + build-image: 'ghcr.io/arielswalker/cfsbuildenv-linux:ci' + - config: native_eds + build-image: 'ghcr.io/arielswalker/cfsbuildenv-linux:ci' + - config: pc686_rtems5 + build-image: 'ghcr.io/arielswalker/cfsbuildenv-rtems5:ci' + - config: rpi_linux + build-image: 'ghcr.io/arielswalker/cfsbuildenv-arm-linux:ci' + - config: gr712_rtems5 + build-image: 'ghcr.io/arielswalker/cfsbuildenv-gaisler-sparc-rcc:ci' + name: Build CFS with ${{ matrix.config }} configuration + uses: arielswalker/cFS/.github/workflows/build-cfs-multitarget.yml@test-cfs/workflows122 + with: + config-name: ${{ matrix.config }} + container-image: ${{ matrix.build-image }} + compression-type: 'gz' + secrets: inherit +# continue-on-error: ${{ matrix.omit-deprecated }} + + execute-containers: + name: Execute CFS containers + permissions: + contents: read + actions: read + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GH_HOST: github.com + GH_REPO: nasa/cFS + needs: build-cfs + runs-on: ubuntu-22.04 + strategy: + matrix: + include: + - config: native_std + healthcheck-regex: 'CFE_ES_Main entering OPERATIONAL state$' + exec-image: ghcr.io/arielswalker/cfsbuildenv-linux:ci + cpu1-test-procedure: trad_le + cpu2-test-procedure: trad_le + - config: native_eds + healthcheck-regex: 'CFE_ES_Main entering OPERATIONAL state$' + exec-image: ghcr.io/arielswalker/cfsbuildenv-linux:ci + cpu1-test-procedure: eds + cpu2-test-procedure: eds + # - config: qemu_yocto_linux + # healthcheck-regex: '^Poky (Yocto Project Reference Distro)' + # exec-image: ghcr.io/core-flight-system/cfsexec-qemu:ci + # cpu1-test-procedure: trad_le + # cpu2-test-procedure: trad_be + - config: pc686_rtems5 + healthcheck-regex: 'CFE_ES_Main entering OPERATIONAL state' + exec-image: ghcr.io/arielswalker/cfsexec-qemu:ci + 
cpu1-test-procedure: trad_le + cpu2-test-procedure: trad_le + + steps: + - name: Download artifact from build +# This alternate download fetches the artifacts from a different run +# It is useful to skip the build step and go straight to execution- saving lots of time +# run: | +# echo "${{ secrets.GITHUB_TOKEN }}" | gh auth login --with-token +# gh run download 58922 -p ${{ matrix.config }}-bin +# ls -lR . + uses: actions/download-artifact@v4 + with: + name: ${{ matrix.config }}-bin + path: ${{ matrix.config }}-bin + + - name: Unpack target runtime images + run: | + ls -lR + for tarball in $GITHUB_WORKSPACE/${{ matrix.config }}-bin/*.tar.gz + do + inst=$(basename ${tarball}) + echo tarball=${tarball} inst=${inst} + inst=${inst%%-*} + echo inst=${inst} + mkdir -p $GITHUB_WORKSPACE/${inst} + gunzip -c ${tarball} | tar xvf - -C $GITHUB_WORKSPACE/${inst} + done + ls -lR + + - name: Start CPU1 container + id: start-cpu1 + uses: nasa/cFS/actions/start-cfs-container@dev + with: + binary-dir: ${{ github.workspace }}/cpu1 + exec-image: ${{ matrix.exec-image }} + + - name: Check CPU1 container + id: check-cpu1 + uses: nasa/cFS/actions/healthcheck-logs@dev + with: + container-id: ${{ steps.start-cpu1.outputs.container-id }} + healthcheck-regex: ${{ matrix.healthcheck-regex }} + + - name: Execute cpu1 test (traditional little endian) + if: matrix.cpu1-test-procedure == 'trad_le' + run: | + ./host/cmd_send --host=${{ steps.check-cpu1.outputs.ip-addr }} --endian=LE --pktid=0x1806 --cmdcode=0 + + - name: Execute cpu1 test (traditional big endian) + if: matrix.cpu1-test-procedure == 'trad_be' + run: | + ./host/cmd_send --host=${{ steps.check-cpu1.outputs.ip-addr }} --endian=BE --pktid=0x1806 --cmdcode=0 + + - name: Execute test procedure (EDS) + if: matrix.cpu1-test-procedure == 'eds' + run: | + ./host/cmd_send --host=${{ steps.check-cpu1.outputs.ip-addr }} -D CFE_ES/Application/CMD.NoopCmd + + - name: Stop CPU1 Container + if: always() + uses: 
nasa/cFS/actions/stop-cfs-container@dev + with: + container-id: ${{ steps.start-cpu1.outputs.container-id }} \ No newline at end of file