Update run.sh #91
Changes from all commits
```diff
@@ -1,23 +1,31 @@
 #!/usr/bin/env bash

 # Synopsis:
 # Run the test runner on a solution.

 # Arguments:
 # $1: exercise slug
 # $2: absolute path to solution folder
 # $3: absolute path to output directory

 # Output:
 # Writes the test results to a results.json file in the passed-in output directory.
 # The test results are formatted according to the specifications at https://github.com/exercism/docs/blob/main/building/tooling/test-runners/interface.md

 # Example:
 # ./bin/run.sh two-fer /absolute/path/to/two-fer/solution/folder/ /absolute/path/to/output/directory/

-# If any required arguments is missing, print the usage and exit
+# Read the .meta/config.json to get test name to task ID mapping
+read_config() {
+    local config_file="$1"
+    if [[ -f "$config_file" ]]; then
+        cat "$config_file"
+    else
+        echo "{}"
+    fi
+}
+
+# Extract test name from test output line
+extract_test_name() {
+    local line="$1"
+    echo "$line" | sed 's/^── Failure.*── //' | sed 's/ ──$//' | head -1
```
> **Member:** Suggested change
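For what it's worth, a quick way to exercise the `extract_test_name` helper from a shell, using a made-up header; the exact shape of real testthat failure headers is an assumption here, so the result may differ on real output:

```bash
# Hypothetical failure header; real testthat output may be formatted differently.
line='── Failure ── says one for you, one for me ──'
extract_test_name "$line"
# prints: says one for you, one for me
```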
```diff
+}
+
+# Get task_id for a test name from config
+get_task_id() {
+    local test_name="$1"
+    local config="$2"
+    echo "$config" | jq -r ".tests[] | select(.name==\"$test_name\") | .task_id" 2>/dev/null
```
> **Member:** Suggested change
>
> What STDERR do you expect here? Why? Can you use …
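For context, the `jq` filter in `get_task_id` assumes `.meta/config.json` contains a `tests` array whose entries carry `name` and `task_id` fields. The config below is a hypothetical illustration of that shape, not taken from a real exercise:

```bash
# Hypothetical .meta/config.json shape implied by get_task_id's jq filter.
config='{
  "tests": [
    { "name": "says one for you, one for me", "task_id": 1 },
    { "name": "says a name", "task_id": 2 }
  ]
}'
echo "$config" | jq -r '.tests[] | select(.name=="says a name") | .task_id'
# prints: 2
```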
```diff
+}
+
+# Main script
 if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]; then
-    echo "usage: ./bin/run.sh exercise-slug /absolute/path/to/two-fer/solution/folder/ /absolute/path/to/output/directory/"
+    echo "usage: ./bin/run.sh exercise-slug /absolute/path/to/solution/folder/ /absolute/path/to/output/directory/"
     exit 1
 fi

@@ -26,39 +34,40 @@ input_dir="${2%/}"
 output_dir="${3%/}"
 tests_file="test_${slug}.R"
 results_file="${output_dir}/results.json"
+config_file="${input_dir}/.meta/config.json"

 # Create the output directory if it doesn't exist
 mkdir -p "${output_dir}"

 echo "${slug}: testing..."

 pushd "${input_dir}" > /dev/null

-# Run the tests for the provided implementation file and redirect stdout and
-# stderr to capture it
+# Run tests and capture output
 test_output=$(Rscript "${tests_file}" 2>&1)
 exit_code=$?
-failed=$(echo "${test_output}" | grep -c -E '── (Failure|Error)')
-
-if [[ $exit_code -eq 0 ]] && [[ ! $failed -eq 0 ]]; then
+failed=$(echo "${test_output}" | grep -c -E '── (Failure|Error)')
```
> **Member:** Suggested change
```diff
+
+if [[ $exit_code -eq 0 ]] && [[ ! $failed -eq 0 ]]; then
```
> **Member:** Suggested change
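As a note on the failure detection above: `grep -c` counts the lines matching the `── (Failure|Error)` markers, and the `if` then forces a failing exit code whenever `Rscript` exited 0 but failures were printed. A small illustration with made-up output:

```bash
# Made-up test output; real testthat output will differ.
sample_output=$'Test passed 🎉\n── Failure (test_two_fer.R:3:1): example ──\n── Error (test_two_fer.R:9:1): other ──'

failed=$(echo "${sample_output}" | grep -c -E '── (Failure|Error)')
echo "${failed}"   # prints 2, so a zero exit code from Rscript would be overridden to 1
```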
```diff
     exit_code=1
 fi

 popd > /dev/null

-# Write the results.json file based on the exit code of the command that was
-# just executed that tested the implementation file
+# Read config
+config=$(read_config "${config_file}")
+
+# Generate results.json based on exit code
 if [[ $exit_code -eq 0 ]] && [ $failed -eq 0 ]; then
-    jq -n '{version: 1, status: "pass"}' > ${results_file}
+    jq -n '{version: 3, status: "pass", tests: []}' > ${results_file}
```
> **Member:** Suggested change
```diff
 else
-    # Sanitize the test output
+    # Parse test output and extract individual test results
+    # For version 3, you need to create test objects with task_id

     sanitized_test_output=$(echo "${test_output}" | sed -E 's/🥇|🌈|🥳|🎊|😸|😀|🎉/🥇/g')

     # Manually add colors to the output to help scanning the output for errors
-    colorized_test_output=$(echo "${sanitized_test_output}" | \
-        GREP_COLOR='01;31' grep --color=always -E -e '^── (Error|Failure).*|$')
-
-    jq -n --arg output "${colorized_test_output}" '{version: 1, status: "fail", message: $output}' > ${results_file}
+    colorized_test_output=$(echo "${sanitized_test_output}" | GREP_COLOR='01;31' grep --color=always -E -e '^── (Error|Failure).*|$')
```
> **Member:** Suggested change
>
> The …
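On the colorization one-liner itself: the pattern `'^── (Error|Failure).*|$'` is an alternation whose second branch (`$`) matches the empty string at the end of every line, so all output passes through while only failure/error headers get the red `01;31` highlight. A minimal check with made-up input:

```bash
# Every line is printed; only the header line gets a visible red highlight.
printf '%s\n' 'Test passed' '── Failure (test_two_fer.R:3:1): example ──' |
    GREP_COLOR='01;31' grep --color=always -E -e '^── (Error|Failure).*|$'
```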
```diff
+    jq -n --arg output "${colorized_test_output}" --argjson config "$config" \
+        '{version: 3, status: "fail", message: $output, tests: []}' > ${results_file}
```
> **Member** commented on lines +68 to +69: Suggested change
```diff
 fi

 echo "${slug}: done"
```
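Since the `version` field moves from 1 to 3 but the `tests` array is still written empty, here is a hedged sketch of how the new helpers could populate per-test entries. The field set (`name`, `status`, `task_id`) follows my reading of the linked interface document, and the loop itself is an illustration, not part of this PR:

```bash
# Sketch only: build version 3 test entries from failure headers (assumed format)
# and the task_id mapping read from .meta/config.json.
tests_json='[]'
while IFS= read -r header; do
    name=$(extract_test_name "$header")
    task_id=$(get_task_id "$name" "$config")
    tests_json=$(echo "$tests_json" | jq \
        --arg name "$name" --arg task_id "$task_id" \
        '. + [{name: $name, status: "fail", task_id: ($task_id | tonumber? // null)}]')
done < <(echo "${test_output}" | grep -E '^── (Failure|Error)')

jq -n --arg output "${colorized_test_output}" --argjson tests "$tests_json" \
    '{version: 3, status: "fail", message: $output, tests: $tests}' > "${results_file}"
```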
> These docs here are helpful and probably worth keeping around. This is very helpful for anyone not sure how the test runner works.