From b6d64f531036882a5b4099fff83ee6fc1ba3bbd3 Mon Sep 17 00:00:00 2001
From: Erik Schierboom
Date: Thu, 8 Aug 2024 14:26:56 +0200
Subject: [PATCH 1/4] Improve CI docs

---
 building/config.json                    |  6 ++
 building/tracks/ci/README.md            |  4 ++
 building/tracks/ci/workflows.md         | 41 +++++++++++++
 .../new/setup-continuous-integration.md | 59 +++++++++----------
 4 files changed, 79 insertions(+), 31 deletions(-)
 create mode 100644 building/tracks/ci/workflows.md

diff --git a/building/config.json b/building/config.json
index 9cee04c6..2a7de427 100644
--- a/building/config.json
+++ b/building/config.json
@@ -661,6 +661,12 @@
       "path": "building/tracks/stories/tuples.santas-helper.md",
       "title": "Santa's Helper"
     },
+    {
+      "uuid": "b99fb54b-a9ce-4a50-bca4-6a928cc77ec6",
+      "slug": "tracks/ci/workflows",
+      "path": "building/tracks/ci/workflows.md",
+      "title": "Workflows"
+    },
     {
       "uuid": "191b0fa1-96e2-48a6-ad2e-c34f57443799",
       "slug": "tracks/ci/migrating-from-travis",
diff --git a/building/tracks/ci/README.md b/building/tracks/ci/README.md
index d50c46aa..639d6379 100644
--- a/building/tracks/ci/README.md
+++ b/building/tracks/ci/README.md
@@ -2,3 +2,7 @@
 At Exercism, we use [GitHub Actions](https://github.com/features/actions) to handle our [continuous integration](https://en.wikipedia.org/wiki/Continuous_integration) (CI) and [continuous deployment](https://en.wikipedia.org/wiki/Continuous_deployment) (CD) needs.
 This includes running tests, formatting things, and deploying things.
+
+For more information, check:
+
+- [Workflows](/docs/building/tracks/ci/workflows)
diff --git a/building/tracks/ci/workflows.md b/building/tracks/ci/workflows.md
new file mode 100644
index 00000000..93c4d68e
--- /dev/null
+++ b/building/tracks/ci/workflows.md
@@ -0,0 +1,41 @@
+# Workflows
+
+GitHub Actions uses the concept of _workflows_, which are scripts that run automatically whenever a specific event occurs (e.g. pushing a commit).
+
+Each GitHub Actions workflow is defined in a `.yml` file in the `.github/workflows` directory.
+For information on workflows, check the following docs:
+
+- [Workflow syntax](https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions)
+- [Choosing when your workflow runs](https://docs.github.com/en/actions/writing-workflows/choosing-when-your-workflow-runs/triggering-a-workflow)
+- [Choosing where your workflow runs](https://docs.github.com/en/actions/writing-workflows/choosing-where-your-workflow-runs)
+- [Choose what your workflow does](https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does)
+- [Writing workflows](https://docs.github.com/en/actions/writing-workflows)
+- [Best practices](/docs/building/github/gha-best-practices)
+
+## Shared workflows
+
+Some workflows are shared across repositories.
+These workflows _should not be changed_.
+
+### General workflows
+
+- `sync-labels.yml`: automatically syncs the repository's labels from a `labels.yml` file
+
+### Track-specific workflows
+
+- `configlet.yml`: runs the [configlet tool](/docs/building/configlet), which checks if a track's (configuration) files are properly structured - both syntactically and semantically
+- `no-important-files-changed.yml`: checks if pull requests would cause all existing solutions of one or more changed exercises to be re-run
+- `test.yml`: verifies the track's exercises
+
+### Tooling-specific workflows
+
+- `deploy.yml`: deploys the tooling Docker image to Docker Hub and ECR
+
+## Custom workflows
+
+Maintainers are free to add custom workflows to their repos.
+Examples of such workflows could be:
+
+- Linting of shell scripts ([example](https://github.com/exercism/configlet/blob/3baa09608c8ac327315c887608c13a68ae8ac359/.github/workflows/shellcheck.yml))
+- Auto-commenting on pull requests ([example](https://github.com/exercism/elixir/blob/b737f80cc93fcfdec6c53acb7361819834782470/.github/workflows/pr-comment.yml))
+- Etc.
diff --git a/building/tracks/new/setup-continuous-integration.md b/building/tracks/new/setup-continuous-integration.md
index 2feb2f15..a2df7279 100644
--- a/building/tracks/new/setup-continuous-integration.md
+++ b/building/tracks/new/setup-continuous-integration.md
@@ -6,39 +6,21 @@ Setting up Continuous Integration (CI) for your track is very important, as it h
 Our tracks (and other repositories) use [GitHub Actions](https://docs.github.com/en/actions) to run their CI.
 GitHub Actions uses the concept of _workflows_, which are scripts that run automatically whenever a specific event occurs (e.g. pushing a commit).
-
-Each GitHub Actions workflow is defined in a `.yml` file in the `.github/workflows` directory.
-For information on workflows, check the following docs:
-
-- [Workflow syntax](https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions)
-- [Choosing when your workflow runs](https://docs.github.com/en/actions/writing-workflows/choosing-when-your-workflow-runs/triggering-a-workflow)
-- [Choosing where your workflow runs](https://docs.github.com/en/actions/writing-workflows/choosing-where-your-workflow-runs)
-- [Choose what your workflow does](https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does)
-- [Writing workflows](https://docs.github.com/en/actions/writing-workflows)
-- [Best practices](/docs/building/github/gha-best-practices)
-
-## Pre-defined workflows
-
-A track repository contains several pre-defined workflows:
-
-- `configlet.yml`: runs the [configlet tool](/docs/building/configlet), which checks if a track's (configuration) files are properly structured - both syntactically and semantically
-- `no-important-files-changed.yml`: checks if pull requests would cause all existing solutions of one or more changes exercises to be re-run
-- `sync-labels.yml`: automatically syncs the repository's labels from a `labels.yml` file
-- `test.yml`: verify the track's exercises
-
-Of these workflows, _only_ the `test.yml` workflow requires manual work.
-The other workflows should not be changed (we keep them up-to-date automatically).
+For more information on workflow, check the [workflows docs](/docs/building/tracks/ci/workflows).
 
 ## Test workflow
 
-The test workflow should verify the track's exercises.
+Each track comes with a `test.yml` workflow.
+This workflow should verify that the track's exercises are in proper shape.
+The workflow is set up to run automatically (in GitHub Actions terminology: is _triggered_) when a push is made to the `main` branch or to a pull request's branch.
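+
+In practice, the trigger section of such a `test.yml` workflow looks something like this (a sketch based on the workflow examples later in this document; the exact `on:` block may differ per track):
+
+```yml
+on:
+  push:
+    branches: [main]
+  pull_request:
+  workflow_dispatch:
+```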
+
 The workflow itself should not do much, except for:
 
 - Checking out the code (already implemented)
 - Installing dependencies (e.g. installing an SDK, optional)
-- Running the script to verify the exercises (already implemented)
+- Running the verify exercises script (already implemented)
 
-### Verify exercises script
+## Implement the verify exercises script
 
 As mentioned, the exercises are verified via a script, namely the `bin/verify-exercises` (bash) script.
 This script is _almost_ done, and does the following:
@@ -51,7 +33,7 @@ This script is _almost_ done, and does the following:
 
 The `run_tests` and `unskip_tests` functions are the only things that you need to implement.
 
-### Unskipping tests
+### Unskip tests
 
 If your track supports skipping tests, we must ensure that no tests are skipped when verifying an exercise's example/exemplar solution.
 In general, there are two ways in which tracks support "unskipping" tests:
@@ -69,7 +51,7 @@
 The `unskip_tests` function runs on a copy of an exercise directory, so feel free to modify the files as you see fit.
 
 If unskipping tests requires an environment variable to be set, make sure that it is set in the `run_tests` function.
 
-### Running tests
+### Run tests
 
 The `run_tests` function is responsible for running the tests of an exercise.
 When the function is called, the example/exemplar files will already have been copied to (stub) solution files, so you only need to call the right command to run the tests.
@@ -80,7 +62,10 @@ The function must return a zero as the exit code if all tests pass, otherwise re
 The `run_tests` function runs on a copy of an exercise directory, so feel free to modify the files as you see fit.
 ```
 
-### Example: Arturo track
+### Option 1: use language tooling
+
+The default option for the verify exercises script is to use the language's tooling (SDK/binary/etc.).
+It assumes (and possibly checks) that the language tooling is installed, a
 
 This is what the [`bin/verify-exercises` file](https://github.com/exercism/arturo/blob/79560f853f5cb8e2f3f0a07cbb8fcce8438ee996/bin/verify-exercises) looks like for the Arturo track:
 
 ```bash
 #!/usr/bin/env bash
 
 # Synopsis:
 # Test the track's exercises.
 
 # Example: verify all exercises
 # ./bin/verify-exercises
 
 # Example: verify single exercise
 # ./bin/verify-exercises two-fer
 
 set -eo pipefail
 
 required_tool() {
   command -v "${1}" >/dev/null 2>&1 ||
     die "${1} is required but not installed. Please install it and make sure it's in your PATH."
 }
 
 required_tool jq
 
 copy_example_or_examplar_to_solution() {
   jq -c '[.files.solution, .files.exemplar // .files.example] | transpose | map({src: .[1], dst: .[0]}) | .[]' .meta/config.json | while read -r src_and_dst; do
     cp "$(echo "${src_and_dst}" | jq -r '.src')" "$(echo "${src_and_dst}" | jq -r '.dst')"
   done
 }
 
 unskip_tests() {
   jq -r '.files.test[]' .meta/config.json | while read -r test_file; do
     sed -i 's/test.skip/test/g' "${test_file}"
   done
 }
 
 run_tests() {
   arturo tester.art
 }
 
 verify_exercise() {
   local dir
   local slug
   local tmp_dir
 
   dir=$(realpath "${1}")
   slug=$(basename "${dir}")
   tmp_dir=$(mktemp -d -t "exercism-verify-${slug}-XXXXX")
 
   echo "Verifying ${slug} exercise..."
 
   (
     cp -r "${dir}/." "${tmp_dir}"
     cd "${tmp_dir}"
 
     copy_example_or_examplar_to_solution
     unskip_tests
     run_tests
   )
 }
 
 exercise_slug="${1:-*}"
 
 shopt -s nullglob
 for exercise_dir in ./exercises/{concept,practice}/${exercise_slug}/; do
   if [ -d "${exercise_dir}" ]; then
     verify_exercise "${exercise_dir}"
   fi
 done
 ```
 
 It uses `sed` to unskip tests:
 
 ```bash
 sed -i 's/test.skip/test/g' "${test_file}"
 ```
 
 and runs the tests via the `arturo` command:
 
 ```bash
 arturo tester.art
 ```
 
+### Option 2: use the test runner Docker image
+
+In this option, we're using the fact that each track must have a test runner which already knows how to verify exercises.
+To enable this option, we first need to download (pull) the track's test runner Docker image and then run the `bin/verify-exercises` script, which is modified to use the test runner Docker image to run the tests.
+
+```exercism/note
+The main benefit of this approach is that it best mimics how tests are being run in production (on the website).
+With the approach, it is less likely that things will fail in production that passed in CI.
+The downside of this approach is that it likely is slower, due to having to pull the Docker image and the overhead of Docker.
+```
+
+````
+
 ## Implement the test workflow
 
-The goal of the test workflow (defined in `.github/workflows/test.yml`) is to automatically verify that the track's exercises are in proper shape.
-The workflow is set up to run automatically (in GitHub Actions terminology: is _triggered_) when a push is made to the `main` branch or to a pull request's branch.
+Now that the `verify-exercises` script is
 
 There are three options when implementing this workflow:
 
 ### Option 1: install track-specific tooling (e.g. an SDK) in the GitHub Actions runner instance
 
 In this approach, any track-specific tooling (e.g. an SDK) is installed directly in the GitHub Actions runner instance.
 Once done, you then run the `bin/verify-exercises` script (which assumes the track tooling is installed).
 
 For an example, see the [Arturo track's `test.yml` workflow](https://github.com/exercism/arturo/blob/79560f853f5cb8e2f3f0a07cbb8fcce8438ee996/.github/workflows/test.yml):
 
 ```yml
 name: Verify Exercises
 
 on:
   push:
     branches: [main]
   pull_request:
   workflow_dispatch:
 
 jobs:
   ci:
     runs-on: ubuntu-24.04
 
     steps:
       - name: Checkout repository
         uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332
 
       - name: Verify all exercises
         run: bin/verify-exercises
-```
+````
 
 #### Option 2: running the verify exercises script within test runner Docker image

From 5f88cb94e05946419949c5c20480d1a9c665820f Mon Sep 17 00:00:00 2001
From: Erik Schierboom
Date: Fri, 9 Aug 2024 11:14:25 +0200
Subject: [PATCH 2/4] More docs

---
 .../new/setup-continuous-integration.md | 226 ++++++++++++++----
 1 file changed, 175 insertions(+), 51 deletions(-)

diff --git a/building/tracks/new/setup-continuous-integration.md b/building/tracks/new/setup-continuous-integration.md
index a2df7279..583db598 100644
--- a/building/tracks/new/setup-continuous-integration.md
+++ b/building/tracks/new/setup-continuous-integration.md
@@ -1,23 +1,24 @@
 # Set up Continuous Integration
 
-Setting up Continuous Integration (CI) for your track is very important, as it helps automatically catch mistakes.
+Setting up Continuous Integration (CI) for your track is very important, as it helps catch mistakes.
 
 ## GitHub Actions
 
-Our tracks (and other repositories) use [GitHub Actions](https://docs.github.com/en/actions) to run their CI.
-GitHub Actions uses the concept of _workflows_, which are scripts that run automatically whenever a specific event occurs (e.g. pushing a commit).
-For more information on workflow, check the [workflows docs](/docs/building/tracks/ci/workflows).
+Exercism repos (including track repos) use [GitHub Actions](https://docs.github.com/en/actions) to run their CI.
+GitHub Actions are based on _workflows_, which define scripts to run automatically whenever a specific event occurs (e.g. pushing a commit).
+For more information on GitHub Actions workflows, check the [workflows docs](/docs/building/tracks/ci/workflows).
 
 ## Test workflow
 
 Each track comes with a `test.yml` workflow.
-This workflow should verify that the track's exercises are in proper shape.
+The goal of this workflow is to verify that the track's exercises are in proper shape.
 The workflow is set up to run automatically (in GitHub Actions terminology: is _triggered_) when a push is made to the `main` branch or to a pull request's branch.
 
 The workflow itself should not do much, except for:
 
 - Checking out the code (already implemented)
-- Installing dependencies (e.g. installing an SDK, optional)
+- Installing dependencies (e.g. installing packages, optional)
+- Installing tooling (e.g. installing an SDK, optional)
 - Running the verify exercises script (already implemented)
 
 ## Implement the verify exercises script
@@ -45,11 +46,23 @@ In general, there are two ways in which tracks support "unskipping" tests:
 2. Providing an environment variable.
    For example, setting `SKIP_TESTS=false`.
 
 If skipping tests is file-based (the first option mentioned above), edit the `unskip_tests` function to modify the test files (the existing code already handles the looping over the test files).
 
+As an example, the [Arturo track's `bin/verify-exercises` file](https://github.com/exercism/arturo/blob/2393d62933058f011baea3631e9295b7884925e0/bin/verify-exercises) uses `sed` to unskip the tests within the test files:
+
+```bash
+unskip_tests() {
+  jq -r '.files.test[]' .meta/config.json | while read -r test_file; do
+    sed -i 's/test.skip/test/g' "${test_file}"
+  done
+}
+```
+
 ```exercism/note
 The `unskip_tests` function runs on a copy of an exercise directory, so feel free to modify the files as you see fit.
 ```
 
+```exercism/caution
 If unskipping tests requires an environment variable to be set, make sure that it is set in the `run_tests` function.
+```
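+
+For instance, a track whose test tooling honours such a variable could set it when running the tests - a sketch, in which both the `SKIP_TESTS` variable and the `run-track-tests` command are illustrative placeholders rather than real conventions:
+
+```bash
+run_tests() {
+  # Hypothetical: force-enable all tests via an environment variable
+  SKIP_TESTS=false run-track-tests
+}
+```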
 
 ### Run tests
 
 The `run_tests` function is responsible for running the tests of an exercise.
 When the function is called, the example/exemplar files will already have been copied to (stub) solution files, so you only need to call the right command to run the tests.
 The function must return a zero as the exit code if all tests pass, otherwise return a non-zero exit code.
 
 ```exercism/note
 The `run_tests` function runs on a copy of an exercise directory, so feel free to modify the files as you see fit.
 ```
 
-### Option 1: use language tooling
+#### Option 1: use language tooling
 
-The default option for the verify exercises script is to use the language's tooling (SDK/binary/etc.).
-It assumes (and possibly checks) that the language tooling is installed, a
+The default option for the verify exercises script is to use the language's tooling (SDK/binary/etc.), which is what most tracks use.
+Each track will have its own way of running the tests, but usually it is just a single command.
 
+As an example, the [Arturo track's `bin/verify-exercises` file](https://github.com/exercism/arturo/blob/2393d62933058f011baea3631e9295b7884925e0/bin/verify-exercises) simply calls the `arturo` command on the test file:
+
+```bash
+run_tests() {
+  arturo tester.art
+}
+```
+
+```exercism/caution
+Please make sure that the
+```
 
-This is what the [`bin/verify-exercises` file](https://github.com/exercism/arturo/blob/79560f853f5cb8e2f3f0a07cbb8fcce8438ee996/bin/verify-exercises) looks like for the Arturo track:
+This is what the [`bin/verify-exercises` file](https://github.com/exercism/arturo/blob/2393d62933058f011baea3631e9295b7884925e0/bin/verify-exercises) looks like for the Arturo track:
 
 ```bash
 #!/usr/bin/env bash
@@ -164,10 +149,22 @@ and runs the tests via the `arturo` command:
 arturo tester.art
 ```
 
+#### Option 2: use the test runner Docker image
+
+The second option is to verify the exercises by running the track's [test runner](/docs/building/tracks/new/build-test-runner).
+This of course depends on the track having a working [test runner](/docs/building/tracks/new/build-test-runner).
+
+If your track does not yet have a test runner, you can either:
+
+- build a working test runner, or
+- use option 1 and directly use the language tooling
+
+Assuming there _is_ a working test runner, the `bin/verify-exercises` script should
+
 In this option, we're using the fact that each track must have a test runner which already knows how to verify exercises.
 To enable this option, we first need to download (pull) the track's test runner Docker image and then run the `bin/verify-exercises` script, which is modified to use the test runner Docker image to run the tests.
 
 ```exercism/note
 The main benefit of this approach is that it best mimics how tests are being run in production (on the website).
 With the approach, it is less likely that things will fail in production that passed in CI.
 The downside of this approach is that it likely is slower, due to having to pull the Docker image and the overhead of Docker.
 ```
 
-````
+```bash
+#!/usr/bin/env bash
+
+# Synopsis:
+# Verify that each exercise's example/exemplar solution passes the tests.
+# You can either verify all exercises or a single exercise.
+
+# Example: verify all exercises
+# bin/test
+
+# Example: verify single exercise
+# bin/test two-fer
+
+set -eo pipefail
+
+die() { echo "$*" >&2; exit 1; }
+
+required_tool() {
+  command -v "${1}" >/dev/null 2>&1 ||
+    die "${1} is required but not installed. Please install it and make sure it's in your PATH."
+}
+
+required_tool jq
+
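+# Pair each solution file from .meta/config.json with its example (or exemplar)
+# counterpart, then copy the example/exemplar over the solution stub.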
+copy_example_or_examplar_to_solution() {
+  jq -c '[.files.solution, .files.exemplar // .files.example] | transpose | map({src: .[1], dst: .[0]}) | .[]' .meta/config.json \
+    | while read -r src_and_dst; do
+    cp "$(jq -r '.src' <<< "${src_and_dst}")" "$(jq -r '.dst' <<< "${src_and_dst}")"
+  done
+}
+
+docker pull exercism/unison-test-runner
+
+run_tests() {
+  local slug
+
+  slug="${1}"
+
+  docker run \
+    --rm \
+    --network none \
+    --mount type=bind,src="${PWD}",dst=/solution \
+    --mount type=bind,src="${PWD}",dst=/output \
+    --tmpfs /tmp:rw \
+    exercism/unison-test-runner "${slug}" "/solution" "/output"
+  jq -e '.status == "pass"' "${PWD}/results.json" >/dev/null 2>&1
+}
+
+verify_exercise() {
+  local dir
+  local slug
+  local tmp_dir
+
+  dir=$(realpath "${1}")
+  slug=$(basename "${dir}")
+  tmp_dir=$(mktemp -d -t "exercism-verify-${slug}-XXXXX")
+
+  echo "Verifying ${slug} exercise..."
+
+  (
+    trap 'rm -rf "$tmp_dir"' EXIT # remove tempdir when subshell ends
+    cp -r "${dir}/." "${tmp_dir}"
+    cd "${tmp_dir}"
+
+    copy_example_or_examplar_to_solution
+    run_tests "${slug}"
+  )
+}
+
+exercise_slug="${1:-*}"
+
+shopt -s nullglob
+count=0
+for exercise_dir in ./exercises/{concept,practice}/${exercise_slug}/; do
+  if [[ -d "${exercise_dir}" ]]; then
+    verify_exercise "${exercise_dir}"
+    ((++count))
+  fi
+done
+((count > 0)) || die 'no matching exercises found!'
+```
 
 ## Implement the test workflow
 
-Now that the `verify-exercises` script is
+Now that the `verify-exercises` script is finished, it's time to finalize the `test.yml` workflow.
+How to do so depends on what option was chosen for the `verify-exercises` script implementation.
 
-There are three options when implementing this workflow:
+### Option 1: use language tooling
 
-### Option 1: install track-specific tooling (e.g. an SDK) in the GitHub Actions runner instance
+If the `verify-exercises` script directly uses the language's tooling, the test workflow will need to install:
 
-In this approach, any track-specific tooling (e.g. an SDK) is installed directly in the GitHub Actions runner instance.
-Once done, you then run the `bin/verify-exercises` script (which assumes the track tooling is installed).
+- Language tooling dependencies, such as openssh or a C/C++ compiler.
+- Language tooling, such as an SDK or binary.
+  If the language tooling installation does _not_ add the installed binary/binaries to the path, make sure to [add it to GitHub Actions' system path](https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/workflow-commands-for-github-actions#adding-a-system-path).
+
+Once that is done, the `verify-exercises` script should work as expected, and you've successfully set up CI!
 
 For an example, see the [Arturo track's `test.yml` workflow](https://github.com/exercism/arturo/blob/79560f853f5cb8e2f3f0a07cbb8fcce8438ee996/.github/workflows/test.yml):
 
 ```yml
 name: Verify Exercises
 
 on:
   push:
     branches: [main]
   pull_request:
   workflow_dispatch:
 
 jobs:
   ci:
     runs-on: ubuntu-24.04
 
     steps:
       - name: Checkout repository
         uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332
 
       - name: Verify all exercises
         run: bin/verify-exercises
-````
+```
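+
+As a sketch of the "add it to the system path" step mentioned above: if an installer drops binaries in, say, `$HOME/.local/bin` (an illustrative location), a workflow step can append that directory to the `$GITHUB_PATH` file:
+
+```yml
+- name: Add tooling to PATH
+  run: echo "$HOME/.local/bin" >> "$GITHUB_PATH"
+```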
 
-#### Option 2: running the verify exercises script within test runner Docker image
+### Option 2: use the test runner Docker image
 
-In this option, we're using the fact that each track must have a test runner which has all dependencies already installed
-To enable this option, we need to set the workflow's container to the test runner:
+The second option is to verify the exercises by running the track's [test runner](/docs/building/tracks/new/build-test-runner).
+This option requires two things to be true:
 
-```yml
-container:
-  image: exercism/vimscript-test-runner
-```
+1. The track has a working [test runner](/docs/building/tracks/new/build-test-runner)
+2. The `verify-exercises` script uses the test runner Docker image to run an exercise's tests
+
+If your track does not yet have a test runner, you can either:
 
-This will then automatically pull the test runner Docker image when the workflow executes, and run the `verify-exercises` script within that Docker container.
+- build a working test runner, or
+- use option 1 and directly use the language tooling
+
+If the above mentioned two things _are_ true, the test workflow will need to download (pull) the track's test runner Docker image and then run the `bin/verify-exercises` script.
 
 ```exercism/note
-The main benefit of this approach is that it better mimics how tests are being run in production (on the website).
+The main benefit of this approach is that it best mimics how tests are being run in production (on the website).
 With the approach, it is less likely that things will fail in production that passed in CI.
 The downside of this approach is that it likely is slower, due to having to pull the Docker image and the overhead of Docker.
 ```
 
-For an example, see the [vimscript track's `test.yml` workflow](https://github.com/exercism/vimscript/blob/e599cd6e02cbcab2c38c5112caed8bef6cdb3c38/.github/workflows/test.yml).
+For an example, see the [Standard ML track's `test.yml` workflow](https://github.com/exercism/sml/blob/e63e93ee50d8d7f0944ff4b7ad385819b86e1693/.github/workflows/ci.yml).
 
 ```yml
-name: Verify Exercises
+name: sml / ci
 
 on:
+  pull_request:
   push:
     branches: [main]
-  pull_request:
   workflow_dispatch:
 
 jobs:
   ci:
-    runs-on: ubuntu-24.04
-    container:
-      image: exercism/vimscript-test-runner
+    runs-on: ubuntu-22.04
 
     steps:
-      - name: Checkout repository
+      - name: Checkout code
        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332
 
-      - name: Verify all exercises
-        run: bin/verify-exercises
+      - run: docker pull exercism/sml-test-runner
+
+      - name: Run tests for all exercises
+        run: sh ./bin/test
 ```
 
-### Option 3: download the test runner Docker image and change verify exercises script
+### Option 3: running the verify exercises script within test runner Docker image
 
-In this option, we're using the fact that each track must have a test runner which already knows how to verify exercises.
-To enable this option, we first need to download (pull) the track's test runner Docker image and then run the `bin/verify-exercises` script, which is modified to use the test runner Docker image to run the tests.
+A third, alternative option is a hybrid of the previous two options.
+Here, we're also using the test runner Docker image, only this time we run the `verify-exercises` script _within that Docker image_.
+To enable this option, we need to set the workflow's container to the test runner:
 
-```exercism/note
-The main benefit of this approach is that it best mimics how tests are being run in production (on the website).
-With the approach, it is less likely that things will fail in production that passed in CI.
-The downside of this approach is that it likely is slower, due to having to pull the Docker image and the overhead of Docker.
-```
+```yml
+container:
+  image: exercism/vimscript-test-runner
+```
 
-For an example, see the [Standard ML track's `test.yml` workflow](https://github.com/exercism/sml/blob/e63e93ee50d8d7f0944ff4b7ad385819b86e1693/.github/workflows/ci.yml).
+
+We can then skip the dependencies and tooling installation steps (as those will have been installed within the test runner Docker image) and proceed with running the `bin/verify-exercises` script.
+
+For an example, see the [vimscript track's `test.yml` workflow](https://github.com/exercism/vimscript/blob/e599cd6e02cbcab2c38c5112caed8bef6cdb3c38/.github/workflows/test.yml).
 
 ```yml
-name: sml / ci
+name: Verify Exercises
 
 on:
-  pull_request:
   push:
     branches: [main]
+  pull_request:
   workflow_dispatch:
 
 jobs:
   ci:
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-24.04
+    container:
+      image: exercism/vimscript-test-runner
 
     steps:
-      - name: Checkout code
+      - name: Checkout repository
        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332
 
-      - run: docker pull exercism/sml-test-runner
-
-      - name: Run tests for all exercises
-        run: sh ./bin/test
+      - name: Verify all exercises
+        run: bin/verify-exercises
 ```

From 32f54589aafee30f24671ac7d6905ca774c9df3b Mon Sep 17 00:00:00 2001
From: Erik Schierboom
Date: Fri, 9 Aug 2024 11:39:14 +0200
Subject: [PATCH 3/4] More docs

---
 .../new/setup-continuous-integration.md | 236 ++++--------------
 1 file changed, 54 insertions(+), 182 deletions(-)

diff --git a/building/tracks/new/setup-continuous-integration.md b/building/tracks/new/setup-continuous-integration.md
index 583db598..469dbbcf 100644
--- a/building/tracks/new/setup-continuous-integration.md
+++ b/building/tracks/new/setup-continuous-integration.md
@@ -44,9 +44,17 @@ In general, there are two ways in which tracks support "unskipping" tests:
 2. Providing an environment variable.
    For example, setting `SKIP_TESTS=false`.
 
+#### Removing annotations/code/text from the test files
+
 If skipping tests is file-based (the first option mentioned above), edit the `unskip_tests` function to modify the test files (the existing code already handles the looping over the test files).
 
-As an example, the [Arturo track's `bin/verify-exercises` file](https://github.com/exercism/arturo/blob/2393d62933058f011baea3631e9295b7884925e0/bin/verify-exercises) uses `sed` to unskip the tests within the test files:
+```exercism/note
+The `unskip_tests` function runs on a copy of an exercise directory, so feel free to modify the files as you see fit.
+```
+
+##### Example
+
+The [Arturo track's `bin/verify-exercises` file](https://github.com/exercism/arturo/blob/2393d62933058f011baea3631e9295b7884925e0/bin/verify-exercises) uses `sed` to unskip the tests within the test files:
 
 ```bash
 unskip_tests() {
   jq -r '.files.test[]' .meta/config.json | while read -r test_file; do
     sed -i 's/test.skip/test/g' "${test_file}"
   done
 }
 ```
 
-```exercism/note
-The `unskip_tests` function runs on a copy of an exercise directory, so feel free to modify the files as you see fit.
-```
+#### Providing an environment variable
 
 ```exercism/caution
 If unskipping tests requires an environment variable to be set, make sure that it is set in the `run_tests` function.
 ```
 
@@ -69,7 +75,7 @@ If unskipping tests requires an environment variable to be set, make sure that i
 ### Run tests
 
 The `run_tests` function is responsible for running the tests of an exercise.
 When the function is called, the example/exemplar files will already have been copied to (stub) solution files, so you only need to call the right command to run the tests.
-The function must return a zero as the exit code if all tests pass, otherwise return a non-zero exit code.
+The function must return zero as the exit code if all tests pass, otherwise return a non-zero exit code.
 
 ```exercism/note
 The `run_tests` function runs on a copy of an exercise directory, so feel free to modify the files as you see fit.
 ```
@@ -80,98 +86,14 @@ The `run_tests` function runs on a copy of an exercise directory, so feel free t The default option for the verify exercises script is to use the language's tooling (SDK/binary/etc.), which is what most tracks use. Each track will have its own way of running the tests, but usually it is just a single command. -As an example, the [Arturo track's `bin/verify-exercises file`](https://github.com/exercism/arturo/blob/2393d62933058f011baea3631e9295b7884925e0/bin/verify-exercises) simply calls the `arturo` command on the test file: - -```bash -run_tests() { - arturo tester.art -} -``` - -```exercism/caution -Please make sure that the -``` +#### Example -This is what the [`bin/verify-exercises` file](https://github.com/exercism/arturo/blob/2393d62933058f011baea3631e9295b7884925e0/bin/verify-exercises) looks file for the Arturo track: +The [Arturo track's `bin/verify-exercises file`](https://github.com/exercism/arturo/blob/2393d62933058f011baea3631e9295b7884925e0/bin/verify-exercises) modifies the `run_tests` function to simply call the `arturo` command on the test file: ```bash -#!/usr/bin/env bash - -# Synopsis: -# Test the track's exercises. - -# Example: verify all exercises -# ./bin/verify-exercises - -# Example: verify single exercise -# ./bin/verify-exercises two-fer - -set -eo pipefail - -required_tool() { - command -v "${1}" >/dev/null 2>&1 || - die "${1} is required but not installed. Please install it and make sure it's in your PATH." -} - -required_tool jq - -copy_example_or_examplar_to_solution() { - jq -c '[.files.solution, .files.exemplar // .files.example] | transpose | map({src: .[1], dst: .[0]}) | .[]' .meta/config.json | while read -r src_and_dst; do - cp "$(echo "${src_and_dst}" | jq -r '.src')" "$(echo "${src_and_dst}" | jq -r '.dst')" - done -} - -unskip_tests() { - jq -r '.files.test[]' .meta/config.json | while read -r test_file; do - sed -i 's/test.skip/test/g' "${test_file}" - done -} - run_tests() { arturo tester.art } - -verify_exercise() { - local dir - local slug - local tmp_dir - - dir=$(realpath "${1}") - slug=$(basename "${dir}") - tmp_dir=$(mktemp -d -t "exercism-verify-${slug}-XXXXX") - - echo "Verifying ${slug} exercise..." - - ( - cp -r "${dir}/." "${tmp_dir}" - cd "${tmp_dir}" - - copy_example_or_examplar_to_solution - unskip_tests - run_tests - ) -} - -exercise_slug="${1:-*}" - -shopt -s nullglob -for exercise_dir in ./exercises/{concept,practice}/${exercise_slug}/; do - if [ -d "${exercise_dir}" ]; then - verify_exercise "${exercise_dir}" - fi -done -``` - -It uses `sed` to unskip tests: - -```bash -sed -i 's/test.skip/test/g' "${test_file}" -``` - -and runs the tests via the `arturo` command: - -```bash -arturo tester.art ``` ### Option 2: use the test runner Docker image @@ -184,50 +106,37 @@ If your track does not yet have a test runner, you can either: - build a working test runner, or - use option 1 and directly use the language tooling -Assuming there _is_ a working test runner, the `bin/verify-exercises` script should +The following modifications need to be made to the default `bin/verify-exercises` script: -In this option, we're using the fact that each track must have a test runner which already knows how to verify exercises. -To enable this option, we first need to download (pull) the track's test runner Docker image and then run the `bin/verify-exercises` script, which is modified to use the test runner Docker image to run the tests. +1. Verify that the `docker` command is available +2. 
+3. Use `docker run` to run the test runner Docker image on each exercise
+4. Use `jq` to verify that the `results.json` file returned by the Docker container indicates all tests passed
+5. Remove the `unskip_tests` function and the call to that function
 
 ```exercism/note
 The main benefit of this approach is that it best mimics how tests are being run in production (on the website).
-With the approach, it is less likely that things will fail in production that passed in CI.
-The downside of this approach is that it likely is slower, due to having to pull the Docker image and the overhead of Docker.
+With this approach, it is less likely that things that passed in CI will fail in production.
+The downside of this approach is that it usually is slower, due to having to pull the Docker image and the overhead of Docker.
 ```
 
+##### Example
+
+The [Unison track's `bin/verify-exercises` file](https://github.com/exercism/unison/blob/f39ab0e6bd0d6ac538f343474a01bf9755d4a93c/bin/test) adds the check to verify that the `docker` command is also installed:
+
+```bash
+required_tool docker
+```
+
+Then, it pulls the track's test runner image:
+
+```bash
 docker pull exercism/unison-test-runner
+```
+
+It then modifies the `run_tests` function to use `docker run` to run the test runner on the current exercise (which is in the working directory), followed by a `jq` command to check for the right status:
+
+```bash
 run_tests() {
   local slug
 
   slug="${1}"
 
   docker run \
     --rm \
     --network none \
     --mount type=bind,src="${PWD}",dst=/solution \
     --mount type=bind,src="${PWD}",dst=/output \
     --tmpfs /tmp:rw \
     exercism/unison-test-runner "${slug}" "/solution" "/output"
   jq -e '.status == "pass"' "${PWD}/results.json" >/dev/null 2>&1
 }
+```
 
-verify_exercise() {
-  local dir
-  local slug
-  local tmp_dir
-
-  dir=$(realpath "${1}")
-  slug=$(basename "${dir}")
-  tmp_dir=$(mktemp -d -t "exercism-verify-${slug}-XXXXX")
-
-  echo "Verifying ${slug} exercise..."
-
-  (
-    trap 'rm -rf "$tmp_dir"' EXIT # remove tempdir when subshell ends
-    cp -r "${dir}/." "${tmp_dir}"
-    cd "${tmp_dir}"
-
-    copy_example_or_examplar_to_solution
-    run_tests "${slug}"
-  )
-}
-
-exercise_slug="${1:-*}"
-
-shopt -s nullglob
-count=0
-for exercise_dir in ./exercises/{concept,practice}/${exercise_slug}/; do
-  if [[ -d "${exercise_dir}" ]]; then
-    verify_exercise "${exercise_dir}"
-    ((++count))
-  fi
-done
-((count > 0)) || die 'no matching exercises found!'
-```
+
+Finally, we need to modify the call to `run_tests`, as it now requires the slug:
+
+```bash
+run_tests "${slug}"
+```
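+
+With these modifications in place, the script can still be run locally before pushing, per the synopsis at the top of the script (note that the script's name varies per track; the Unison track, for instance, calls it `bin/test`):
+
+```bash
+# verify all exercises
+bin/verify-exercises
+
+# verify a single exercise
+bin/verify-exercises two-fer
+```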
+```bash +run_tests "${slug}" ``` ## Implement the test workflow @@ -339,38 +221,26 @@ If your track does not yet have a test runner, you can either: - build a working test runner, or - use option 1 and directly use the language tooling -If the above mentioned two things _are_ true, the test workflow will need to download (pull) the track's test runner Docker image and then run the `bin/verify-exercises` script. +This approach has a couple of advantages: -```exercism/note -The main benefit of this approach is that it best mimics how tests are being run in production (on the website). -With the approach, it is less likely that things will fail in production that passed in CI. -The downside of this approach is that it likely is slower, due to having to pull the Docker image and the overhead of Docker. -``` +1. You don't need to install any dependencies/tooling within the test workflow (as those will have been installed within the Docker image) +2. The approach best mimics how tests are being run in production (on the website), reducing the likelihood of production issues. -For an example, see the [Standard ML track's `test.yml` workflow](https://github.com/exercism/sml/blob/e63e93ee50d8d7f0944ff4b7ad385819b86e1693/.github/workflows/ci.yml). +The main downside is that it likely is slower, due to having to pull the Docker image and the overhead of Docker. -```yml -name: sml / ci +There a couple of ways in which could pull the test runner Docker image: -on: - pull_request: - push: - branches: [main] - workflow_dispatch: +1. Download the image within the `verify-exercises` file. + This is the approach taken by the [Unison track](https://github.com/exercism/unison/blob/f39ab0e6bd0d6ac538f343474a01bf9755d4a93c/bin/test#L32). +2. Download the image within the workflow. + This is the approach taken by the [Standard ML track](https://github.com/exercism/sml/blob/e63e93ee50d8d7f0944ff4b7ad385819b86e1693/.github/workflows/ci.yml#L16). +3. Build the image within the workflow. + This is the approach taken by the [8th track](https://github.com/exercism/8th/blob/9034bcb6aa38540e1a67ba2fa6b76001f50c094b/.github/workflows/test.yml#L18-L40). -jobs: - ci: - runs-on: ubuntu-22.04 - - steps: - - name: Checkout code - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 - - - run: docker pull exercism/sml-test-runner - - - name: Run tests for all exercises - run: sh ./bin/test -``` +So which approach to use? +We recommend _at least_ implementing option number 1, to make the `verify-exercises` script be _standalone_. +If your image is particularly large, it might be beneficial to also implement option 3, which will store the built Docker image into the GitHub Actions cache. +Subsequent runs can then just read the Docker image from cache, instead of downloading it, which might be better for performance (please measure to be sure). ### Option 3: running the verify exercises script within test runner Docker image @@ -385,7 +255,9 @@ container: We can then skip the dependencies and tooling installation steps (as those will have been installed within the test runner Docker image) and proceed with running the `bin/verify-exercises` script. -For an example, see the [vimscript track's `test.yml` workflow](https://github.com/exercism/vimscript/blob/e599cd6e02cbcab2c38c5112caed8bef6cdb3c38/.github/workflows/test.yml). 
+#### Example + +The [vimscript track's `test.yml` workflow](https://github.com/exercism/vimscript/blob/e599cd6e02cbcab2c38c5112caed8bef6cdb3c38/.github/workflows/test.yml) uses this option: ```yml name: Verify Exercises From 9ace5de967ac9f8c93cd0bd9f96efe410b079a7f Mon Sep 17 00:00:00 2001 From: Erik Schierboom Date: Fri, 9 Aug 2024 11:40:32 +0200 Subject: [PATCH 4/4] Link --- building/tracks/ci/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/building/tracks/ci/README.md b/building/tracks/ci/README.md index 639d6379..5fe63e58 100644 --- a/building/tracks/ci/README.md +++ b/building/tracks/ci/README.md @@ -6,3 +6,4 @@ This includes running tests, formatting things, and deploying things. For more information, check: - [Workflows](/docs/building/tracks/ci/workflows) +- [Setting up CI for new tracks](/docs/building/tracks/new/setup-continuous-integration)