diff --git a/.github/workflows/ci-core.yml b/.github/workflows/ci-core.yml index 48f8baeba77..5f08c32f812 100644 --- a/.github/workflows/ci-core.yml +++ b/.github/workflows/ci-core.yml @@ -24,15 +24,6 @@ jobs: pull-requests: read outputs: affected-modules: ${{ steps.changed-modules.outputs.modules-json }} - # Runs on workflow changes, any deployment change, or any (non-ignored) core change - should-run-deployment-tests: >- - ${{ - steps.match-some.outputs.workflow == 'true' || - steps.match-some.outputs.deployment == 'true' || - steps.match-every.outputs.core-non-ignored == 'true' || - github.event_name == 'schedule' || - github.event_name == 'workflow_dispatch' - }} # Runs on workflow changes, and any (non-ignored) core changes should-run-core-tests: >- ${{ @@ -53,12 +44,9 @@ jobs: with: # "if any changed file matches one or more of the conditions" (https://github.com/dorny/paths-filter/issues/225) predicate-quantifier: some - # deployment - any changes in the deployment module # workflow - any changes that could affect this workflow definition # - Assume any repository action changes affect this workflow filters: | - deployment: - - 'deployment/**' workflow: - '.github/workflows/ci-core.yml' - '.github/actions/**' @@ -123,8 +111,6 @@ jobs: workflow-files: - ".github/workflows/ci-core.yml" - ".github/actions/**" - deployment-files: - - "deployment/**" ignored-files: - "core/scripts/cre/environment/examples/workflows/**" # Note: @@ -154,11 +140,6 @@ jobs: inclusion-sets: [ go-files, core-files, all-test-files, workflow-files ] paths: - "tools/bin/go_core_tests_integration" - deployment-tests: - exclusion-sets: [ e2e-tests-files, core-test-files, ignored-files ] - inclusion-sets: [ go-files, core-files, deployment-files, workflow-files ] - paths: - - "tools/bin/go_core_ccip_deployment_tests" - name: Changed modules id: changed-modules @@ -176,16 +157,18 @@ jobs: golangci: name: GolangCI Lint - needs: [filter, run-frequency] + needs: [ filter, run-frequency ] # We 
don't directly merge dependabot PRs to not waste the resources. - if: ${{ needs.filter.outputs.affected-modules != '[]' && github.event_name != 'merge_group' && github.actor != 'dependabot[bot]' }} + if: ${{ needs.filter.outputs.affected-modules != '[]' && github.event_name != + 'merge_group' && github.actor != 'dependabot[bot]' }} permissions: # To annotate code in the PR. checks: write contents: read # For golangci-lint-action's `only-new-issues` option. pull-requests: read - runs-on: runs-on=${{ github.run_id }}-${{ strategy.job-index }}/cpu=16/ram=32/family=c6gd/spot=false/image=ubuntu24-full-arm64/extras=s3-cache + runs-on: runs-on=${{ github.run_id }}-${{ strategy.job-index + }}/cpu=16/ram=32/family=c6gd/spot=false/image=ubuntu24-full-arm64/extras=s3-cache strategy: fail-fast: false matrix: @@ -226,13 +209,14 @@ jobs: golangci-matrix-results-validation: name: lint if: ${{ always() }} - needs: [filter, golangci] + needs: [ filter, golangci ] runs-on: ubuntu-latest steps: - name: Check Golangci-lint Matrix Results env: GOLANGCI_RESULT: ${{ needs.golangci.result }} - ALLOW_FAILURE: ${{ contains(join(github.event.pull_request.labels.*.name, ' '), 'allow-lint-issues') }} + ALLOW_FAILURE: ${{ contains(join(github.event.pull_request.labels.*.name, ' '), + 'allow-lint-issues') }} SKIPPED: ${{ needs.filter.outputs.affected-modules == '[]' }} run: | if [[ "${GOLANGCI_RESULT}" == "success" ]]; then @@ -270,42 +254,57 @@ jobs: matrix: type: - cmd: go_core_tests - os: runs-on=${{ github.run_id }}-unit/cpu=48/ram=96/family=c6id+c5ad/spot=false/image=ubuntu24-full-x64/extras=s3-cache + os: runs-on=${{ github.run_id + }}-unit/cpu=48/ram=96/family=c6id+c5ad/spot=false/image=ubuntu24-full-x64/extras=s3-cache should-run: ${{ needs.filter.outputs.should-run-core-tests }} trunk-auto-quarantine: "true" + go-mod-directory: "" + setup-solana: "false" + setup-aptos: "false" + setup-sui: "false" + install-loopps: "false" - cmd: go_core_tests_integration - os: runs-on=${{ github.run_id 
}}-integ/cpu=48/ram=96/family=c6i/spot=false/image=ubuntu24-full-x64/extras=s3-cache+tmpfs + os: runs-on=${{ github.run_id + }}-integ/cpu=48/ram=96/family=c6i/spot=false/image=ubuntu24-full-x64/extras=s3-cache+tmpfs should-run: ${{ needs.filter.outputs.should-run-core-tests }} trunk-auto-quarantine: "true" setup-solana: "true" install-loopps: "true" + go-mod-directory: "" + setup-aptos: "false" + setup-sui: "false" - cmd: go_core_fuzz - os: runs-on=${{ github.run_id}}-fuzz/cpu=8/ram=32/family=m6id+m6idn/spot=false/image=ubuntu24-full-x64/extras=s3-cache + os: runs-on=${{ + github.run_id}}-fuzz/cpu=8/ram=32/family=m6id+m6idn/spot=false/image=ubuntu24-full-x64/extras=s3-cache should-run: ${{ needs.filter.outputs.should-run-core-tests }} + trunk-auto-quarantine: "false" + go-mod-directory: "" + setup-solana: "false" + setup-aptos: "false" + setup-sui: "false" + install-loopps: "false" - cmd: go_core_race_tests - os: runs-on=${{ github.run_id}}-race/cpu=64/ram=128/family=c7i/volume=80gb/spot=false/image=ubuntu24-full-x64/extras=s3-cache + os: runs-on=${{ + github.run_id}}-race/cpu=64/ram=128/family=c7i/volume=80gb/spot=false/image=ubuntu24-full-x64/extras=s3-cache should-run: ${{ needs.filter.outputs.should-run-core-tests }} - - - cmd: go_core_deployment_tests - os: runs-on=${{ github.run_id }}-deployment/cpu=48/ram=96/family=c6id+c5ad/spot=false/image=ubuntu24-full-x64/extras=s3-cache - should-run: ${{ needs.filter.outputs.should-run-deployment-tests }} - trunk-auto-quarantine: "true" - go-mod-directory: "deployment/" - setup-solana: "true" - setup-aptos: "true" - install-loopps: "true" - setup-sui: "true" + trunk-auto-quarantine: "false" + go-mod-directory: "" + setup-solana: "false" + setup-aptos: "false" + setup-sui: "false" + install-loopps: "false" name: Core Tests (${{ matrix.type.cmd }}) # Be careful modifying the job name, as it is used to fetch the job URL # We don't directly merge dependabot PRs, so let's not waste the resources if: ${{ github.actor != 
'dependabot[bot]' }} - needs: [filter, run-frequency] + needs: [ filter, run-frequency ] timeout-minutes: 60 # Use ubuntu-latest for jobs that will be skipped - runs-on: ${{ matrix.type.should-run == 'true' && matrix.type.os || 'ubuntu-latest' }} + runs-on: ${{ matrix.type.should-run == 'true' && matrix.type.os || + 'ubuntu-latest' }} permissions: id-token: write contents: read @@ -327,13 +326,15 @@ jobs: uses: ./.github/actions/setup-go with: # race/fuzz tests don't benefit repeated caching, so restore from develop's build cache - restore-build-cache-only: ${{ matrix.type.cmd == 'go_core_fuzz' || matrix.type.cmd == 'go_core_race_tests' }} + restore-build-cache-only: ${{ matrix.type.cmd == 'go_core_fuzz' || + matrix.type.cmd == 'go_core_race_tests' }} build-cache-version: ${{ matrix.type.cmd }} go-version-file: ${{ matrix.type.go-mod-directory }}go.mod go-module-file: ${{ matrix.type.go-mod-directory }}go.sum - name: Setup Solana - if: ${{ matrix.type.should-run == 'true' && matrix.type.setup-solana == 'true' }} + if: ${{ matrix.type.should-run == 'true' && matrix.type.setup-solana == 'true' + }} uses: ./.github/actions/setup-solana - name: Setup Aptos @@ -375,16 +376,17 @@ jobs: CL_DATABASE_URL: ${{ env.DB_URL }} - name: Install LOOP Plugins - if: ${{ matrix.type.should-run == 'true' && matrix.type.install-loopps == 'true' }} + if: ${{ matrix.type.should-run == 'true' && matrix.type.install-loopps == 'true' + }} run: make install-plugins - name: Increase Timeouts for Fuzz/Race # Increase timeouts for scheduled runs only if: ${{ github.event.schedule != '' && matrix.type.should-run == 'true' }} run: | - echo "TIMEOUT=10m" >> $GITHUB_ENV - echo "COUNT=50" >> $GITHUB_ENV - echo "FUZZ_TIMEOUT_MINUTES=10">> $GITHUB_ENV + echo "TIMEOUT=10m" >> "$GITHUB_ENV" + echo "COUNT=50" >> "$GITHUB_ENV" + echo "FUZZ_TIMEOUT_MINUTES=10" >> "$GITHUB_ENV" - name: Run tests if: ${{ matrix.type.should-run == 'true' }} @@ -404,7 +406,8 @@ jobs: # See: 
https://github.com/golang/go/issues/69179 - name: Analyze and upload test results - if: ${{ matrix.type.should-run == 'true' && matrix.type.trunk-auto-quarantine == 'true' && !cancelled() }} + if: ${{ matrix.type.should-run == 'true' && matrix.type.trunk-auto-quarantine == + 'true' && !cancelled() }} uses: smartcontractkit/.github/actions/branch-out-upload@branch-out-upload/v1 with: junit-file-path: "./junit.xml" @@ -418,7 +421,8 @@ jobs: - name: Print Races id: print-races - if: ${{ failure() && matrix.type.cmd == 'go_core_race_tests' && matrix.type.should-run == 'true' }} + if: ${{ failure() && matrix.type.cmd == 'go_core_race_tests' && + matrix.type.should-run == 'true' }} env: GH_REPO: ${{ github.repository }} GH_RUN_ID: ${{ github.run_id }} @@ -452,7 +456,6 @@ jobs: ./coverage.txt ./postgres_logs.txt ./junit.xml - ./deployment/junit.xml retention-days: 7 - name: Notify Slack on Race Test Failure @@ -472,7 +475,7 @@ jobs: core-scripts-tests: name: test-scripts - needs: [filter] + needs: [ filter ] runs-on: ubuntu-latest if: ${{ needs.filter.outputs.should-run-core-tests == 'true' }} steps: @@ -502,7 +505,7 @@ jobs: scan: name: SonarQube Scan - needs: [golangci, core, core-scripts-tests] + needs: [ golangci, core, core-scripts-tests ] # If core is cancelled, skip this to not delay the cancellation of the workflow. if: ${{ always() && !cancelled() && github.actor != 'dependabot[bot]' }} runs-on: ubuntu-latest @@ -560,13 +563,17 @@ jobs: # Check and assign paths for lint reports # To find reports in the folders named differently (because of the matrix strategy), # We need to loop through the artifacts. It allows usage of RegExp folders (skipped if not found). 
+ sonarqube_lint_report_paths="" + shopt -s nullglob for golang_lint_artifact in golangci-lint-report* do - echo "Found golangci-lint-report artifacts" - sonarqube_lint_report_paths=$(find -type f -name 'golangci-lint-report.xml' -printf "%p,") - echo "Lint report paths: $sonarqube_lint_report_paths" - break + [[ -d "$golang_lint_artifact" ]] || continue + echo "Found golangci-lint-report artifact: $golang_lint_artifact" + part=$(find "$golang_lint_artifact" -type f -name 'golangci-lint-report.xml' -printf '%p,') + sonarqube_lint_report_paths+="$part" done + shopt -u nullglob + echo "Lint report paths: $sonarqube_lint_report_paths" ARGS="" if [[ -z "$sonarqube_tests_report_paths" ]]; then @@ -591,7 +598,7 @@ jobs: fi echo "Final SONARQUBE_ARGS: $ARGS" - echo "SONARQUBE_ARGS=$ARGS" >> $GITHUB_ENV + echo "SONARQUBE_ARGS=$ARGS" >> "$GITHUB_ENV" - name: SonarQube Scan if: ${{ env.SONARQUBE_ARGS != '' }} @@ -619,7 +626,10 @@ jobs: with: only-modules: "true" - name: Install protoc-gen-go-wsrpc - run: curl https://github.com/smartcontractkit/wsrpc/raw/main/cmd/protoc-gen-go-wsrpc/protoc-gen-go-wsrpc --output $HOME/go/bin/protoc-gen-go-wsrpc && chmod +x $HOME/go/bin/protoc-gen-go-wsrpc + run: curl + https://github.com/smartcontractkit/wsrpc/raw/main/cmd/protoc-gen-go-wsrpc/protoc-gen-go-wsrpc + --output "$HOME/go/bin/protoc-gen-go-wsrpc" && chmod +x + "$HOME/go/bin/protoc-gen-go-wsrpc" - name: make generate run: | make rm-mocked @@ -662,7 +672,7 @@ jobs: # Check if the current hour is 00 (one per day) if [ "$current_hour" -eq "00" ]; then - echo "one-per-day-frequency=true" | tee -a $GITHUB_OUTPUT + echo "one-per-day-frequency=true" | tee -a "$GITHUB_OUTPUT" fi misc: diff --git a/.github/workflows/ci-deployments.yml b/.github/workflows/ci-deployments.yml new file mode 100644 index 00000000000..29f9204569b --- /dev/null +++ b/.github/workflows/ci-deployments.yml @@ -0,0 +1,390 @@ +name: Deployment Tests +run-name: Deployment Tests + +concurrency: + group: ${{ 
github.workflow }}-${{ github.ref }}-${{ github.event_name + }}-deployments-tests + cancel-in-progress: true + +env: + DEPLOYMENT_TEST_SHARD_COUNT: "7" + +# Run on key branches to make sure integration is good, otherwise run on all PR's +on: + push: + branches: + - develop + - "release/*" + merge_group: + pull_request: + schedule: + - cron: "0 0,6,12,18 * * *" + workflow_dispatch: + +jobs: + filter: + name: Detect Changes + permissions: + pull-requests: read + outputs: + affected-modules: ${{ steps.changed-modules.outputs.modules-json }} + # Runs on workflow changes, any deployment change, or any (non-ignored) core change + should-run: >- + ${{ + steps.match-some.outputs.workflow == 'true' || + steps.match-some.outputs.deployment == 'true' || + steps.match-every.outputs.core-non-ignored == 'true' || + github.event_name == 'schedule' || + github.event_name == 'workflow_dispatch' + }} + runs-on: ubuntu-latest + steps: + - name: Checkout the repo + uses: actions/checkout@v6 + with: + persist-credentials: false + repository: smartcontractkit/chainlink + - uses: dorny/paths-filter@fbd0ab8f3e69293af611ebaee6363fc25e6d187d # v4.0.1 + id: match-some + with: + # "if any changed file matches one or more of the conditions" (https://github.com/dorny/paths-filter/issues/225) + predicate-quantifier: some + # deployment - any changes in the deployment module + # workflow - any changes that could affect this workflow definition + # - Assume any repository action changes affect this workflow + filters: | + deployment: + - 'deployment/**' + workflow: + - '.github/workflows/ci-deployments.yml' + - '.github/actions/**' + - uses: dorny/paths-filter@fbd0ab8f3e69293af611ebaee6363fc25e6d187d # v4.0.1 + id: match-every + with: + # "if any changed file match all of the conditions" (https://github.com/dorny/paths-filter/issues/225) + # - Enables listing of files matching each filter. + # - Paths to files will be available in `${FILTER_NAME}_files` output variable. 
+ # - Paths will be formatted as JSON array + predicate-quantifier: every + # core-non-ignored - all changes except for paths which do not affect core module + # - This is opt-in on purpose. To be safe, new files are assumed to have an effect on core unless listed here specifically. + # - For example: core module does not depend on deployment or integration-tests module. + # all - changes in any directory + # - This is used to resolve all affected modules based on changed files + list-files: json + filters: | + core-non-ignored: + - '**' + - '!deployment/**' + - '!integration-tests/**' + - '!tools/secrets/**' + - '!tools/docker/**' + - '!tools/benchmark/**' + - '!**/README.md' + - '!**/CHANGELOG.md' + - '!*.nix' + - '!sonar-project.properties' + - '!nix.conf' + - '!nix-darwin-shell-hook.sh' + - '!LICENSE' + - '!.github/**' + - '!core/scripts/cre/environment/examples/workflows/**' + all: + - '**' + + - name: Decide which tests to run (rollout-only) + # To validate that this properly tests, we will run this beside the dorny/paths-filter actions + # before using its output to actually gate any jobs. 
+ id: triggers + uses: smartcontractkit/.github/actions/advanced-triggers@advanced-triggers/v1 + continue-on-error: true + with: + file-sets: | + go-files: + - "**/*.go" + - "**/go.mod" + - "**/go.sum" + core-files: + - "core/**" + core-test-files: + - "testdata/**" + - "core/**/testdata/**" + - "core/**/*_test.go" + e2e-tests-files: + - "system-tests/**" + - "integration-tests/**" + workflow-files: + - ".github/workflows/ci-deployments.yml" + - ".github/actions/**" + deployment-files: + - "deployment/**" + ignored-files: + - "core/scripts/cre/environment/examples/workflows/**" + # Note: + # - for pull_request, merge_group, and push events, a trigger will resolve to true if any changed files match the path/glob patterns + # - exclusion-sets/negations are applied first, and therefore filter all changed files before inclusion sets are applied + # - by default these will resolve to true for schedule, and workflow_dispatch events + triggers: | + deployment-tests: + exclusion-sets: [ e2e-tests-files, core-test-files, ignored-files ] + inclusion-sets: [ go-files, core-files, deployment-files, workflow-files ] + paths: + - "tools/bin/go_deployment_tests" + + - name: Changed modules + id: changed-modules + uses: smartcontractkit/.github/actions/changed-modules-go@changed-modules-go/v1 + with: + # when scheduled/workflow_dispatch, run against all modules + no-change-behaviour: all + file-patterns: | + **/*.go + **/go.mod + **/go.sum + module-patterns: | + ** + !core/scripts/cre/environment/examples/workflows/** + + shard-matrices: + name: Build Shard Matrices + needs: [filter] + runs-on: ubuntu-latest + if: ${{ needs.filter.outputs.should-run == 'true' }} + permissions: {} + outputs: + deployment-test-matrix: ${{ steps.shard-matrices.outputs.deployment-test-matrix }} + steps: + - name: Build shard matrices + id: shard-matrices + shell: bash + run: | + python - <<'PY' >> "$GITHUB_OUTPUT" + import json + import os + + run_id = os.environ["GITHUB_RUN_ID"] + + rows = [] + + def 
add_deployment_rows(): + count = int(os.environ["DEPLOYMENT_TEST_SHARD_COUNT"]) + for i in range(count): + rows.append({ + "job-name": f"go_deployment_tests shard {i}/{count}", + "os": f"runs-on={run_id}-deployment-{i}/cpu=8/ram=32/family=m6id+m6idn/spot=false/image=ubuntu24-full-x64/extras=s3-cache", + "shard-count": str(count), + "shard-index": str(i), + "run-timeout-minutes": "25", + "logs-artifact-name": f"go_deployment_tests_logs_shard_{i}", + }) + + add_deployment_rows() + + print(f"deployment-test-matrix={json.dumps(rows)}") + PY + + deployment-shard-consistency: + name: Check Deployment Test Sharding Consistency + needs: [filter] + if: ${{ github.actor != 'dependabot[bot]' }} + runs-on: ubuntu-latest + permissions: + contents: read + steps: + - name: Checkout the repo + uses: actions/checkout@v6 + with: + persist-credentials: false + + - name: Setup Go + uses: ./.github/actions/setup-go + with: + go-version-file: deployment/go.mod + go-module-file: deployment/go.sum + + - name: Download Go vendor packages + run: go mod download + + - name: Verify deployment test sharding + shell: bash + run: | + ( + cd deployment || exit 1 + go list -f '{{if or .TestGoFiles .XTestGoFiles}}{{.ImportPath}}{{end}}' ./... 
+ ) | go run ./tools/ci-testshard verify --shard-count "${DEPLOYMENT_TEST_SHARD_COUNT}" + + deployment-tests: + env: + # We explicitly have this env var not be "CL_DATABASE_URL" to avoid having it be used by core related tests + # when they should not be using it, while still allowing us to DRY up the setup + DB_URL: postgresql://postgres:postgres@localhost:5432/chainlink_test?sslmode=disable + strategy: + fail-fast: false + matrix: + include: ${{ fromJson(needs.shard-matrices.outputs.deployment-test-matrix) }} + name: Deployment Tests (${{ matrix.job-name }}) + if: ${{ github.actor != 'dependabot[bot]' }} + needs: [filter, shard-matrices, run-frequency] + timeout-minutes: 35 + runs-on: ${{ matrix.os }} + permissions: + id-token: write + contents: read + actions: read + steps: + - name: Enable S3 Cache for Self-Hosted Runners + uses: runs-on/action@742bf56072eb4845a0f94b3394673e4903c90ff0 # v2.1.0 + with: + metrics: cpu,network,memory,disk + - name: Checkout the repo + uses: actions/checkout@v6 + with: + persist-credentials: false + - name: Setup Go + uses: ./.github/actions/setup-go + with: + restore-build-cache-only: false + build-cache-version: go_deployment_tests + go-version-file: deployment/go.mod + go-module-file: deployment/go.sum + - name: Setup Solana + uses: ./.github/actions/setup-solana + - name: Setup Aptos + uses: aptos-labs/actions/install-aptos-cli@63740b290d839b87ecfafbcf75ed03a36a54a29f # jan 15, 2025 + with: + CLI_VERSION: 8.1.0 + - name: Setup Sui CLI v1.69.2 + uses: ./.github/actions/setup-sui + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + version: mainnet-v1.69.2 + - name: Setup wasmd + uses: ./.github/actions/setup-wasmd + - name: Setup Postgres + uses: smartcontractkit/.github/actions/setup-postgres@setup-postgres/v1 + with: + tmpfs: "true" + image-tag: "16-alpine" + - name: Download Go vendor packages + run: go mod download + - name: Setup DB + run: go run ./core/store/cmd/preparetest + env: + CL_DATABASE_URL: ${{ env.DB_URL }} + - 
name: Install LOOP Plugins + run: make install-plugins + - name: Run tests + timeout-minutes: ${{ fromJson(matrix.run-timeout-minutes) }} + continue-on-error: true + id: run-tests + shell: bash + env: + CL_DATABASE_URL: ${{ env.DB_URL }} + OUTPUT_FILE: ./output.txt + PRODUCE_JUNIT_XML: "true" + TRUNK_AUTO_QUARANTINE: "true" + RUN_QUARANTINED_TESTS: "true" + JUNIT_FILE: ${{ github.workspace }}/junit.xml + GO_TEST_SHARD_COUNT: ${{ matrix.shard-count }} + GO_TEST_SHARD_INDEX: ${{ matrix.shard-index }} + run: | + GODEBUG=goindex=0 "./tools/bin/go_deployment_tests" ./... + # See: https://github.com/golang/go/issues/69179 + - name: Analyze and upload test results + if: ${{ !cancelled() }} + uses: smartcontractkit/.github/actions/branch-out-upload@branch-out-upload/v1 + with: + junit-file-path: "./junit.xml" + trunk-org-slug: chainlink + trunk-token: ${{ secrets.TRUNK_API_KEY }} + trunk-previous-step-outcome: ${{ steps.run-tests.outcome }} + trunk-upload-only: false + artifact-name: ${{ matrix.logs-artifact-name }} + - name: Print postgres logs + if: ${{ always() }} + uses: smartcontractkit/.github/actions/setup-postgres@setup-postgres/v1 + with: + print-logs: "true" + - name: Store logs artifacts + if: ${{ always() }} + uses: actions/upload-artifact@v7 + with: + name: ${{ matrix.logs-artifact-name }} + path: | + ./deployment/coverage.txt + ./postgres_logs.txt + ./junit.xml + ./deployment/output.txt + ./deployment/output-short.txt + retention-days: 7 + + deployment-tests-result: + # Do not modify this job name, it is used by branch protection rules + name: Core Tests (go_core_deployment_tests) + if: ${{ always() && github.actor != 'dependabot[bot]' && needs.filter.outputs.should-run == 'true' }} + needs: [filter, deployment-tests, deployment-shard-consistency] + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + steps: + - name: Check deployment shard results + env: + EXPECTED_ROWS: ${{ env.DEPLOYMENT_TEST_SHARD_COUNT }} + GH_TOKEN: ${{ 
secrets.GITHUB_TOKEN }} + JOB_NAME_PREFIX: "Deployment Tests (go_deployment_tests shard " + run: | + gh api \ + --paginate \ + --jq '.jobs[] | {name, conclusion} | @json' \ + "/repos/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}/jobs?per_page=100" \ + > jobs.jsonl + + python - <<'PY' + import json + import os + import sys + + prefix = os.environ["JOB_NAME_PREFIX"] + expected = int(os.environ["EXPECTED_ROWS"]) + rows = [] + with open("jobs.jsonl", encoding="utf-8") as f: + for line in f: + row = json.loads(line) + if row["name"].startswith(prefix): + rows.append(row) + + if len(rows) != expected: + print(f"Expected {expected} rows for {prefix}, got {len(rows)}") + sys.exit(1) + + conclusions = [row["conclusion"] for row in rows] + if any(conclusion not in {"success", "skipped"} for conclusion in conclusions): + print(f"Deployment shard group failed with conclusions: {conclusions}") + sys.exit(1) + print("All deployment shards succeeded.") + PY + + run-frequency: + name: Run frequency + outputs: + one-per-day-frequency: ${{ steps.check-time.outputs.one-per-day-frequency || 'false' }} + runs-on: ubuntu-latest + permissions: {} + steps: + - name: Check time and set frequencies + id: check-time + shell: bash + run: | + if [ "$GITHUB_EVENT_NAME" != "schedule" ]; then + # Not a scheduled event - no frequencies to set. They default to false. 
+ exit 0 + fi + + # Scheduled event, check current time for frequencies + current_hour=$(date +"%H") + + # Check if the current hour is 00 (one per day) + if [ "$current_hour" -eq "00" ]; then + echo "one-per-day-frequency=true" | tee -a "$GITHUB_OUTPUT" + fi diff --git a/tools/bin/go_core_deployment_tests b/tools/bin/go_deployment_tests similarity index 68% rename from tools/bin/go_core_deployment_tests rename to tools/bin/go_deployment_tests index 46c420f7f59..5a299db9e2e 100755 --- a/tools/bin/go_core_deployment_tests +++ b/tools/bin/go_deployment_tests @@ -6,6 +6,8 @@ SCRIPT_PATH=`dirname "$0"`; SCRIPT_PATH=`eval "cd \"$SCRIPT_PATH\" && pwd"` OUTPUT_FILE=${OUTPUT_FILE:-"./output.txt"} JUNIT_FILE=${JUNIT_FILE:-"./junit.xml"} GO_TEST_TIMEOUT=${GO_TEST_TIMEOUT:-"15m"} +GO_TEST_SHARD_COUNT=${GO_TEST_SHARD_COUNT:?GO_TEST_SHARD_COUNT must be set} +GO_TEST_SHARD_INDEX=${GO_TEST_SHARD_INDEX:?GO_TEST_SHARD_INDEX must be set} # Adjusting freeport port block size for tests. export CL_RESERVE_PORTS=128 @@ -40,6 +42,30 @@ echo "Using GO_TEST_FLAGS: $GO_TEST_FLAGS" echo "Using RERUN_FLAGS: $RERUN_FLAGS" echo "Using JUNIT_FLAG: $JUNIT_FLAG" +TEST_PACKAGE_OUTPUT=$(go list -f '{{if or .TestGoFiles .XTestGoFiles}}{{.ImportPath}}{{end}}' ./... | sed '/^$/d') +if [[ $? -ne 0 ]]; then + exit 1 +fi +if [[ -z "$TEST_PACKAGE_OUTPUT" ]]; then + echo "No deployment test packages found." + exit 1 +fi +mapfile -t TEST_PACKAGES <<<"$TEST_PACKAGE_OUTPUT" + +SHARD_PACKAGE_OUTPUT=$(printf '%s\n' "${TEST_PACKAGES[@]}" | go run github.com/smartcontractkit/chainlink/v2/tools/ci-testshard list --shard-count "$GO_TEST_SHARD_COUNT" --shard-index "$GO_TEST_SHARD_INDEX") +if [[ $? -ne 0 ]]; then + exit 1 +fi +if [[ -z "$SHARD_PACKAGE_OUTPUT" ]]; then + echo "No packages assigned to shard ${GO_TEST_SHARD_INDEX}/${GO_TEST_SHARD_COUNT}." 
+ exit 0 +fi +mapfile -t SHARD_PACKAGES <<<"$SHARD_PACKAGE_OUTPUT" + +echo "Using GO_TEST_SHARD_COUNT: $GO_TEST_SHARD_COUNT" +echo "Using GO_TEST_SHARD_INDEX: $GO_TEST_SHARD_INDEX" +echo "Shard package count: ${#SHARD_PACKAGES[@]}" + echo "Test execution results: ---------------------" echo "" @@ -49,11 +75,10 @@ gotestsum \ --format='standard-quiet' \ --hide-summary=skipped \ $RERUN_FLAGS \ - --packages='./...' \ --jsonfile "$OUTPUT_FILE" \ --post-run-command "sh -c \"echo; echo 'Slowest tests'; echo '----------------------------------------------'; echo; gotestsum tool slowest --num 20 --jsonfile ${OUTPUT_FILE}\"" \ "$JUNIT_FLAG" \ - -- $GO_TEST_FLAGS + -- $GO_TEST_FLAGS "${SHARD_PACKAGES[@]}" EXITCODE=${PIPESTATUS[0]} diff --git a/tools/ci-testshard/main.go b/tools/ci-testshard/main.go new file mode 100644 index 00000000000..1541e43d700 --- /dev/null +++ b/tools/ci-testshard/main.go @@ -0,0 +1,173 @@ +package main + +import ( + "bufio" + "errors" + "flag" + "fmt" + "hash/fnv" + "io" + "os" + "strings" +) + +func main() { + if err := run(os.Args[1:], os.Stdin, os.Stdout); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func run(args []string, stdin io.Reader, stdout io.Writer) error { + if len(args) == 0 { + return usageError("expected subcommand: list or verify") + } + + switch args[0] { + case "list": + return runList(args[1:], stdin, stdout) + case "verify": + return runVerify(args[1:], stdin, stdout) + case "-h", "--help", "help": + printUsage(stdout) + return nil + default: + return usageError("unknown subcommand %q", args[0]) + } +} + +func runList(args []string, stdin io.Reader, stdout io.Writer) error { + fs := flag.NewFlagSet("list", flag.ContinueOnError) + fs.SetOutput(io.Discard) + + shardCount := fs.Int("shard-count", 1, "total number of shards") + shardIndex := fs.Int("shard-index", 0, "zero-based shard index") + + if err := fs.Parse(args); err != nil { + return usageError("%v", err) + } + if fs.NArg() != 0 { + return 
usageError("list takes no positional arguments") + } + + packages, err := readPackages(stdin) + if err != nil { + return err + } + if err := validateShardArgs(*shardCount, *shardIndex); err != nil { + return err + } + + for _, pkg := range packages { + if shardForPackage(pkg, *shardCount) == *shardIndex { + if _, err := fmt.Fprintln(stdout, pkg); err != nil { + return err + } + } + } + + return nil +} + +func runVerify(args []string, stdin io.Reader, stdout io.Writer) error { + fs := flag.NewFlagSet("verify", flag.ContinueOnError) + fs.SetOutput(io.Discard) + + shardCount := fs.Int("shard-count", 1, "total number of shards") + + if err := fs.Parse(args); err != nil { + return usageError("%v", err) + } + if fs.NArg() != 0 { + return usageError("verify takes no positional arguments") + } + + packages, err := readPackages(stdin) + if err != nil { + return err + } + if *shardCount < 1 { + return fmt.Errorf("invalid --shard-count %d: must be >= 1", *shardCount) + } + + shardSizes := make([]int, *shardCount) + seen := make(map[string]int, len(packages)) + for _, pkg := range packages { + shardIndex := shardForPackage(pkg, *shardCount) + shardSizes[shardIndex]++ + seen[pkg]++ + } + + for _, pkg := range packages { + if seen[pkg] != 1 { + return fmt.Errorf("package %q assigned %d times", pkg, seen[pkg]) + } + } + + if _, err := fmt.Fprintf(stdout, "verified %d packages across %d shards\n", len(packages), *shardCount); err != nil { + return err + } + for shardIndex, size := range shardSizes { + if _, err := fmt.Fprintf(stdout, "shard %d: %d packages\n", shardIndex, size); err != nil { + return err + } + } + + return nil +} + +func readPackages(r io.Reader) ([]string, error) { + scanner := bufio.NewScanner(r) + packages := make([]string, 0) + seen := make(map[string]struct{}) + + for scanner.Scan() { + pkg := strings.TrimSpace(scanner.Text()) + if pkg == "" { + continue + } + + if _, exists := seen[pkg]; exists { + return nil, fmt.Errorf("duplicate package path %q", pkg) + } 
+ seen[pkg] = struct{}{} + packages = append(packages, pkg) + } + + if err := scanner.Err(); err != nil { + return nil, err + } + if len(packages) == 0 { + return nil, errors.New("no package paths provided on stdin") + } + + return packages, nil +} + +func validateShardArgs(shardCount, shardIndex int) error { + if shardCount < 1 { + return fmt.Errorf("invalid --shard-count %d: must be >= 1", shardCount) + } + if shardIndex < 0 || shardIndex >= shardCount { + return fmt.Errorf("invalid --shard-index %d: must be in [0,%d)", shardIndex, shardCount) + } + return nil +} + +func shardForPackage(pkg string, shardCount int) int { + hasher := fnv.New32a() + _, _ = hasher.Write([]byte(pkg)) // hash.Hash.Write on fnv (Fowler-Noll-Vo) never returns an error + return int(int64(hasher.Sum32()) % int64(shardCount)) +} + +func printUsage(w io.Writer) { + fmt.Fprintln(w, "usage: ci-testshard [flags]") + fmt.Fprintln(w, "") + fmt.Fprintln(w, "Commands:") + fmt.Fprintln(w, " list read newline-delimited package paths from stdin and emit one shard") + fmt.Fprintln(w, " verify read newline-delimited package paths from stdin and verify shard coverage") +} + +func usageError(format string, args ...any) error { + return fmt.Errorf(format, args...) 
+} diff --git a/tools/ci-testshard/main_test.go b/tools/ci-testshard/main_test.go new file mode 100644 index 00000000000..5f820d85ddb --- /dev/null +++ b/tools/ci-testshard/main_test.go @@ -0,0 +1,271 @@ +package main + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "testing" +) + +func TestReadPackagesRejectsDuplicatePaths(t *testing.T) { + _, err := readPackages(strings.NewReader("pkg/a\npkg/a\n")) + if err == nil || !strings.Contains(err.Error(), `duplicate package path "pkg/a"`) { + t.Fatalf("expected duplicate package error, got %v", err) + } +} + +func TestReadPackagesRejectsEmptyInput(t *testing.T) { + _, err := readPackages(strings.NewReader("\n\n")) + if err == nil || !strings.Contains(err.Error(), "no package paths provided on stdin") { + t.Fatalf("expected empty input error, got %v", err) + } +} + +func TestReadPackagesTrimsWhitespace(t *testing.T) { + packages, err := readPackages(strings.NewReader(" pkg/a \n\tpkg/b\t\n")) + if err != nil { + t.Fatalf("readPackages failed: %v", err) + } + if len(packages) != 2 || packages[0] != "pkg/a" || packages[1] != "pkg/b" { + t.Fatalf("unexpected packages: %#v", packages) + } +} + +func TestReadPackagesIgnoresBlankLinesBetweenPackages(t *testing.T) { + packages, err := readPackages(strings.NewReader("pkg/a\n\n \n\t\npkg/b\n")) + if err != nil { + t.Fatalf("readPackages failed: %v", err) + } + if len(packages) != 2 || packages[0] != "pkg/a" || packages[1] != "pkg/b" { + t.Fatalf("unexpected packages: %#v", packages) + } +} + +func TestReadPackagesRejectsDuplicatePathsAfterTrimming(t *testing.T) { + _, err := readPackages(strings.NewReader("pkg/a\n pkg/a \n")) + if err == nil || !strings.Contains(err.Error(), `duplicate package path "pkg/a"`) { + t.Fatalf("expected duplicate package error after trimming, got %v", err) + } +} + +func TestListReturnsPartitionWithoutOverlap(t *testing.T) { + input := "pkg/a\npkg/b\npkg/c\npkg/d\n" + + seen := make(map[string]struct{}) + for shardIndex := 0; shardIndex < 4; 
shardIndex++ { + packages := runListForTest(t, input, 4, shardIndex) + for _, pkg := range packages { + if _, exists := seen[pkg]; exists { + t.Fatalf("package %s appeared in multiple shards", pkg) + } + seen[pkg] = struct{}{} + } + } + + for _, pkg := range []string{"pkg/a", "pkg/b", "pkg/c", "pkg/d"} { + if _, exists := seen[pkg]; !exists { + t.Fatalf("package %s missing from shard union", pkg) + } + } +} + +func TestListWithSingleShardReturnsEntireInputInOrder(t *testing.T) { + input := "pkg/a\npkg/b\npkg/c\n" + packages := runListForTest(t, input, 1, 0) + want := []string{"pkg/a", "pkg/b", "pkg/c"} + if len(packages) != len(want) { + t.Fatalf("unexpected package count: got %d want %d (%v)", len(packages), len(want), packages) + } + for i := range want { + if packages[i] != want[i] { + t.Fatalf("unexpected package at %d: got %q want %q", i, packages[i], want[i]) + } + } +} + +func TestListProducesDeterministicOutput(t *testing.T) { + input := "pkg/a\npkg/b\npkg/c\npkg/d\npkg/e\n" + first := runListOutputForTest(t, input, 4, 2) + second := runListOutputForTest(t, input, 4, 2) + if first != second { + t.Fatalf("list output changed between runs:\nfirst:\n%s\nsecond:\n%s", first, second) + } +} + +func TestListCanProduceEmptyShard(t *testing.T) { + input := "pkg/a\npkg/b\n" + foundEmpty := false + for shardIndex := 0; shardIndex < 10; shardIndex++ { + if output := runListOutputForTest(t, input, 10, shardIndex); output == "" { + foundEmpty = true + break + } + } + if !foundEmpty { + t.Fatal("expected at least one empty shard for 2 packages across 10 shards") + } +} + +func TestListAndVerifyAgreeOnPartition(t *testing.T) { + inputPackages := []string{ + "pkg/a", + "pkg/b", + "pkg/c", + "pkg/d", + "pkg/e", + "pkg/f", + } + input := strings.Join(inputPackages, "\n") + "\n" + seen := make(map[string]struct{}, len(inputPackages)) + + for shardIndex := 0; shardIndex < 4; shardIndex++ { + for _, pkg := range runListForTest(t, input, 4, shardIndex) { + if _, exists := 
seen[pkg]; exists { + t.Fatalf("package %s appeared in multiple shards", pkg) + } + seen[pkg] = struct{}{} + } + } + + for _, pkg := range inputPackages { + if _, exists := seen[pkg]; !exists { + t.Fatalf("package %s missing from shard union", pkg) + } + } + + var stdout bytes.Buffer + if err := run([]string{"verify", "--shard-count", "4"}, strings.NewReader(input), &stdout); err != nil { + t.Fatalf("verify failed: %v", err) + } +} + +func TestVerifyAllowsEmptyShard(t *testing.T) { + var stdout bytes.Buffer + err := run([]string{"verify", "--shard-count", "10"}, strings.NewReader("pkg/a\npkg/b\n"), &stdout) + if err != nil { + t.Fatalf("verify failed: %v", err) + } + if !strings.Contains(stdout.String(), "verified 2 packages across 10 shards") { + t.Fatalf("unexpected verify output: %q", stdout.String()) + } +} + +func TestVerifyWithSingleShardCoversEntireInput(t *testing.T) { + var stdout bytes.Buffer + err := run([]string{"verify", "--shard-count", "1"}, strings.NewReader("pkg/a\npkg/b\npkg/c\n"), &stdout) + if err != nil { + t.Fatalf("verify failed: %v", err) + } + + output := stdout.String() + if !strings.Contains(output, "verified 3 packages across 1 shards") { + t.Fatalf("unexpected verify summary: %q", output) + } + if !strings.Contains(output, "shard 0: 3 packages") { + t.Fatalf("unexpected shard coverage: %q", output) + } +} + +func TestVerifyRejectsDuplicatePaths(t *testing.T) { + var stdout bytes.Buffer + err := run([]string{"verify", "--shard-count", "2"}, strings.NewReader("pkg/a\npkg/a\n"), &stdout) + if err == nil || !strings.Contains(err.Error(), `duplicate package path "pkg/a"`) { + t.Fatalf("expected duplicate package failure, got %v", err) + } +} + +func TestVerifyRejectsDuplicatePathsAmongOthers(t *testing.T) { + var stdout bytes.Buffer + err := run([]string{"verify", "--shard-count", "2"}, strings.NewReader("pkg/a\npkg/b\npkg/c\npkg/d\npkg/e\npkg/a\n"), &stdout) + if err == nil || !strings.Contains(err.Error(), `duplicate package path "pkg/a"`) 
{ + t.Fatalf("expected duplicate package failure, got %v", err) + } +} + +func TestInvalidShardParamsFail(t *testing.T) { + tests := []struct { + name string + args []string + }{ + {name: "zero-count", args: []string{"list", "--shard-count", "0", "--shard-index", "0"}}, + {name: "negative-index", args: []string{"list", "--shard-count", "2", "--shard-index", "-1"}}, + {name: "index-out-of-range", args: []string{"list", "--shard-count", "2", "--shard-index", "2"}}, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + err := run(tc.args, strings.NewReader("pkg/a\n"), &bytes.Buffer{}) + if err == nil { + t.Fatal("expected error") + } + }) + } +} + +func TestUnknownSubcommandFails(t *testing.T) { + err := run([]string{"wat"}, strings.NewReader("pkg/a\n"), &bytes.Buffer{}) + if err == nil || !strings.Contains(err.Error(), `unknown subcommand "wat"`) { + t.Fatalf("expected unknown subcommand error, got %v", err) + } +} + +func TestExtraPositionalArgsFail(t *testing.T) { + tests := []struct { + name string + args []string + want string + }{ + {name: "list", args: []string{"list", "--shard-count", "2", "--shard-index", "0", "extra"}, want: "list takes no positional arguments"}, + {name: "verify", args: []string{"verify", "--shard-count", "2", "extra"}, want: "verify takes no positional arguments"}, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + err := run(tc.args, strings.NewReader("pkg/a\n"), &bytes.Buffer{}) + if err == nil || !strings.Contains(err.Error(), tc.want) { + t.Fatalf("expected %q, got %v", tc.want, err) + } + }) + } +} + +func TestLargePackageListParses(t *testing.T) { + var builder strings.Builder + for i := 0; i < 500; i++ { + fmt.Fprintf(&builder, "pkg/%03d\n", i) + } + + packages, err := readPackages(strings.NewReader(builder.String())) + if err != nil { + t.Fatalf("readPackages failed: %v", err) + } + if len(packages) != 500 { + t.Fatalf("unexpected package count: got %d want 500", len(packages)) + } + if 
packages[0] != "pkg/000" || packages[499] != "pkg/499" { + t.Fatalf("unexpected package boundaries: first=%q last=%q", packages[0], packages[499]) + } +} + +func runListForTest(t *testing.T, input string, shardCount, shardIndex int) []string { + t.Helper() + output := runListOutputForTest(t, input, shardCount, shardIndex) + if output == "" { + return nil + } + return strings.Fields(output) +} + +func runListOutputForTest(t *testing.T, input string, shardCount, shardIndex int) string { + t.Helper() + var stdout bytes.Buffer + if err := run( + []string{"list", "--shard-count", strconv.Itoa(shardCount), "--shard-index", strconv.Itoa(shardIndex)}, + strings.NewReader(input), + &stdout, + ); err != nil { + t.Fatalf("list failed for shard %d/%d: %v", shardIndex, shardCount, err) + } + return stdout.String() +}