diff --git a/NEWS.md b/NEWS.md
index 9268171e8..1cfccdc45 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -47,6 +47,7 @@ learnr (development version)
 * `exercise_result()` no longer combines the code output and feedback; this now happens just before presenting the exercise result to the user ([#522](https://github.com/rstudio/learnr/pull/522)).
 * Correct/incorrect question markers are now configurable via CSS. You can change or style these markers using the `.tutorial-question .question-final .correct::before` and `.tutorial-question .question-final .incorrect::before` selectors. A new helper function, `finalize_question()`, can be used to apply the `.question-final` class to custom learnr questions. ([#531](https://github.com/rstudio/learnr/pull/531))
 * `options()` and environment variables are now reset after rendering exercises so changes made by user input or checking code cannot affect other exercises. ([#542](https://github.com/rstudio/learnr/pull/542))
+* Exercise checking is now conducted in the same temporary directory where exercises are evaluated. ([#544](https://github.com/rstudio/learnr/pull/544/))

 ## Bug fixes

diff --git a/R/exercise.R b/R/exercise.R
index 54760d97b..901b1c423 100644
--- a/R/exercise.R
+++ b/R/exercise.R
@@ -264,7 +264,7 @@ upgrade_exercise <- function(exercise, require_items = NULL) {
 # returns NULL if everything is okay, otherwise a character string describing
 # the reason the validation check failed.
 validate_exercise <- function(exercise, require_items = NULL) {
-required_names <- c("code", "label", "options", "chunks", require_items)
+  required_names <- c("code", "label", "options", "chunks", require_items)
   missing_names <- setdiff(required_names, names(exercise))
   if (length(missing_names)) {
     return(paste("Missing exercise items:", paste(missing_names, collapse = ", ")))
@@ -273,6 +273,22 @@ required_names <- c("code", "label", "options", "chunks", require_items)
   NULL
 }

+standardize_code <- function(code) {
+  if (inherits(code, "AsIs")) {
+    return(code)
+  }
+  if (is.null(code) || !length(code)) {
+    return("")
+  }
+  str_trim(paste0(code, collapse = "\n"))
+}
+
+standardize_exercise_code <- function(exercise) {
+  ex_code_items <- c("error_check", "code_check", "check", "code", "global_setup")
+  exercise[ex_code_items] <- lapply(exercise[ex_code_items], standardize_code)
+  exercise
+}
+
 # evaluate an exercise and return a list containing output and dependencies
 # @param evaluate_global_setup - If `FALSE`, will not evaluate the global setup
 #   code. Instead, it just concatenates the exercise-specific setup code and
@@ -295,11 +311,14 @@ evaluate_exercise <- function(
     require_items = if (evaluate_global_setup) "global_setup"
   )

+  # standardize exercise code to single string (code, *check, global_setup)
+  exercise <- standardize_exercise_code(exercise)
+
   i18n_set_language_option(exercise$tutorial$language)

   # return immediately and clear visible results
   # do not consider this an exercise submission
-  if (!nzchar(str_trim(paste0(exercise$code, collapse = "\n")))) {
+  if (!nzchar(exercise$code)) {
     # " " since html_output needs to pass a req()
     return(exercise_result(html_output = " "))
   }
@@ -308,17 +327,9 @@ evaluate_exercise <- function(
     eval(parse(text = exercise$global_setup), envir = envir)
   }

-  # Setup a temporary directory for rendering the exercise
-  exercise_dir <- tempfile(pattern = "lnr-ex")
-  dir.create(exercise_dir)
-  on.exit(unlink(exercise_dir), add = TRUE)
-
-  # Copy files from data directory into exercise
-  copy_data_dir(data_dir, exercise_dir)
-
   checker_feedback <- NULL
-  # Run the checker pre-evaluation _if_ there is code checking to do
-  if (length(exercise$code_check)) {
+  # Check the code pre-evaluation, if code_check is provided
+  if (nzchar(exercise$code_check)) {
     checker_feedback <- try_checker(
       exercise, "exercise.checker",
       check_code = exercise$code_check,
@@ -333,10 +344,37 @@ evaluate_exercise <- function(
     }
   }

-  # Render exercise in temporary exercise directory
-  rmd_results <- withr::with_dir(
-    exercise_dir,
-    render_exercise(exercise, envir)
+  # Setup a temporary directory for rendering the exercise
+  exercise_dir <- withr::local_tempdir(pattern = "lrn-ex")
+
+  # Copy files from data directory into exercise
+  copy_data_dir(data_dir, exercise_dir)
+
+  # Move into the temp exercise directory for evaluation and checking
+  withr::local_dir(exercise_dir)
+
+  # Evaluate the submitted code by rendering the exercise in a special .Rmd
+  rmd_results <- tryCatch(
+    render_exercise(exercise, envir),
+    error = function(err_render) {
+      if (nzchar(exercise$error_check)) {
+        # Check the error thrown by the submitted code when there's error
+        # checking: the exercise could be to throw an error!
+        checker_feedback <- try_checker(
+          exercise, "exercise.checker",
+          check_code = exercise$error_check,
+          envir_result = err_render$envir_result,
+          evaluate_result = err_render$evaluate_result,
+          envir_prep = err_render$envir_prep,
+          last_value = err_render,
+          engine = exercise$engine
+        )
+        if (is_exercise_result(checker_feedback)) {
+          return(checker_feedback)
+        }
+      }
+      exercise_result_error(err_render$error_message)
+    }
   )

   if (is_exercise_result(rmd_results)) {
@@ -344,7 +382,7 @@ evaluate_exercise <- function(
   }

   # Run the checker post-evaluation (for checking code results)
-  if (length(exercise$check)) {
+  if (nzchar(exercise$check)) {
     checker_feedback <- try_checker(
       exercise, "exercise.checker",
       check_code = exercise$check,
@@ -356,7 +394,7 @@ evaluate_exercise <- function(
     )
   }

-  # include any checker feedback with the exercise results
+  # Return checker feedback (if any) with the exercise results
   exercise_result(
     feedback = checker_feedback$feedback,
     html_output = rmd_results$html_output
@@ -565,25 +603,18 @@ render_exercise <- function(exercise, envir) {
     if (grepl(pattern, msg, fixed = TRUE)) {
       return(exercise_result_timeout())
     }
-    if (length(exercise$error_check)) {
-      # Run the condition through an error checker (the exercise could be to throw an error!)
-      checker_feedback <- try_checker(
-        exercise, "exercise.checker",
-        check_code = exercise$error_check,
-        envir_result = envir_result,
-        evaluate_result = evaluate_result,
-        envir_prep = envir_prep,
-        last_value = e,
-        engine = exercise$engine
-      )
-      if (is_exercise_result(checker_feedback)) {
-        return(checker_feedback)
-      }
-    }
-    exercise_result_error(msg)
+    rlang::abort(
+      class = "learnr_render_exercise_error",
+      envir_result = envir_result,
+      evaluate_result = evaluate_result,
+      envir_prep = envir_prep,
+      last_value = e,
+      error_message = msg
+    )
   })

   if (is_exercise_result(output_file)) {
+    # this only happens when the render result is a timeout error
     return(output_file)
   }

diff --git a/docs/examples.html b/docs/examples.html
index 16f6fc61b..bfc81bf36 100644
--- a/docs/examples.html
+++ b/docs/examples.html
@@ -13,7 +13,6 @@ Examples
-
diff --git a/docs/exercises.html b/docs/exercises.html
index 458449eb7..96c6da82e 100644
--- a/docs/exercises.html
+++ b/docs/exercises.html
@@ -13,7 +13,6 @@ Exercises
-
diff --git a/docs/formats.html b/docs/formats.html
index ae228d1c4..6f0c838d5 100644
--- a/docs/formats.html
+++ b/docs/formats.html
@@ -13,7 +13,6 @@ Formats
-
diff --git a/docs/index.html b/docs/index.html
index 3694d276c..34d538b24 100644
--- a/docs/index.html
+++ b/docs/index.html
@@ -13,7 +13,6 @@ Interactive Tutorials for R
-
diff --git a/docs/publishing.html b/docs/publishing.html
index 22a84bb23..9e98abc7a 100644
--- a/docs/publishing.html
+++ b/docs/publishing.html
@@ -13,7 +13,6 @@ Publishing
-
@@ -388,7 +387,7 @@

 R Package

 install.packages("learnr")
 learnr::run_tutorial("introduction", package = "mypackage")

-Exercise Checkers
+Exercise Checkers
 Note that if your tutorial performs exercise checking via an external package, you should add the package you use for checking as an Imports dependency of your package so that it is installed automatically along with your package (see the sketch below).

 Note that the learnr package will likely eventually include, or depend on, another package that provides checking functions. For the time being, though, explicit installation of external checking packages is a requirement.
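A rough sketch of the packaging step described in the first note above; the use of usethis, and gradethis as the example checking package, are illustrative assumptions and not part of the learnr documentation:

    # Run once from your tutorial package's project root: this adds the
    # checking package (here, gradethis) to the Imports field of DESCRIPTION,
    # so it is installed automatically along with your package.
    usethis::use_package("gradethis", type = "Imports")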

diff --git a/docs/questions.html b/docs/questions.html
index b7e1498bb..5f5c1c672 100644
--- a/docs/questions.html
+++ b/docs/questions.html
@@ -13,7 +13,6 @@ Questions
-
diff --git a/docs/site_libs/header-attrs-2.9/header-attrs.js b/docs/site_libs/header-attrs-2.9/header-attrs.js
deleted file mode 100644
index dd57d92e0..000000000
--- a/docs/site_libs/header-attrs-2.9/header-attrs.js
+++ /dev/null
@@ -1,12 +0,0 @@
-// Pandoc 2.9 adds attributes on both header and div. We remove the former (to
-// be compatible with the behavior of Pandoc < 2.8).
-document.addEventListener('DOMContentLoaded', function(e) {
-  var hs = document.querySelectorAll("div.section[class*='level'] > :first-child");
-  var i, h, a;
-  for (i = 0; i < hs.length; i++) {
-    h = hs[i];
-    if (!/^h[1-6]$/i.test(h.tagName)) continue; // it should be a header h1-h6
-    a = h.attributes;
-    while (a.length > 0) h.removeAttribute(a[0].name);
-  }
-});
diff --git a/tests/testthat/test-exercise.R b/tests/testthat/test-exercise.R
index 7c3399de3..d11dd0988 100644
--- a/tests/testthat/test-exercise.R
+++ b/tests/testthat/test-exercise.R
@@ -140,16 +140,15 @@ test_that("render_exercise() returns identical envir_prep and envir_result if an
     error_check = "unevaluated, triggers error_check in render_exercise()"
   )

-  exercise_result <- withr::with_tempdir(render_exercise(exercise, new.env()))
-
-  # the error during render causes a checker evaluation, so we can recover
-  # the environments from the checker_args returned by the debug checker
-  exercise_result <- exercise_result$feedback$checker_args
-
-  expect_s3_class(exercise_result$last_value, "simpleError")
-  expect_equal(conditionMessage(exercise_result$last_value), "boom")
-
-  expect_identical(exercise_result$envir_prep, exercise_result$envir_result)
+  render_result <- withr::with_tempdir(
+    rlang::catch_cnd(
+      render_exercise(exercise, new.env()), "learnr_render_exercise_error"
+    )
+  )
+  expect_s3_class(render_result$last_value, "simpleError")
+  expect_equal(conditionMessage(render_result$last_value), "boom")
+  expect_identical(render_result$envir_prep, render_result$envir_result)
+  expect_equal(get("x", render_result$envir_prep), 1)
 })

 test_that("render_exercise() returns envir_result up to error", {
@@ -162,37 +161,36 @@ test_that("render_exercise() returns envir_result up to error", {
     error_check = "unevaluated, triggers error_check in render_exercise()"
   )

-  exercise_result <- withr::with_tempdir(render_exercise(exercise, new.env()))
-
-  # the error during render causes a checker evaluation, so we can recover
-  # the environments from the checker_args returned by the debug checker
-  exercise_result <- exercise_result$feedback$checker_args
+  exercise_result <- withr::with_tempdir(
+    rlang::catch_cnd(
+      render_exercise(exercise, new.env()), "learnr_render_exercise_error"
+    )
+  )

   expect_s3_class(exercise_result$last_value, "simpleError")
   expect_equal(conditionMessage(exercise_result$last_value), "boom")

-  expect_false(identical(exercise_result$envir_prep, exercise_result$envir_result))
+  expect_false(
+    identical(exercise_result$envir_prep, exercise_result$envir_result)
+  )
   expect_setequal(ls(exercise_result$envir_prep), "x")
   expect_setequal(ls(exercise_result$envir_result), c("x", "y"))
   expect_identical(get("y", exercise_result$envir_result), 2)
 })

-test_that("render_exercise() with errors and no checker returns exercise result error", {
+test_that("evaluate_exercise() with errors and no checker includes exercise result error", {
   exercise <- mock_exercise(
     user_code = "stop('user')",
     chunks = list(mock_chunk("setup-1", "stop('setup')")),
     setup_label = "setup-1"
   )
-
-  exercise_result <- withr::with_tempdir(render_exercise(exercise, new.env()))
-  expect_s3_class(exercise_result, "learnr_exercise_result")
-  expect_identical(exercise_result$error_message, "setup")
+  exercise_result <- evaluate_exercise(exercise, new.env())
+  expect_equal(exercise_result$error_message, "setup")
   expect_null(exercise_result$feedback)

-  exercise <- mock_exercise(user_code = "stop('user')")
-  exercise_result <- withr::with_tempdir(render_exercise(exercise, new.env()))
-  expect_s3_class(exercise_result, "learnr_exercise_result")
-  expect_identical(exercise_result$error_message, "user")
+  exercise <- mock_exercise(user_code = "stop('user')")
+  exercise_result <- evaluate_exercise(exercise, new.env())
+  expect_equal(exercise_result$error_message, "user")
   expect_null(exercise_result$feedback)
 })

@@ -228,13 +226,15 @@ test_that("render_exercise() cleans up exercise_prep files even when setup fails
   files <- expect_message(
     withr::with_tempdir({
-      before <- dir()
-      res <- render_exercise(exercise, new.env())
+      before <- dir()
+      e <- rlang::catch_cnd(
+        render_exercise(exercise, new.env()), "learnr_render_exercise_error"
+      )
+
       list(
         before = before,
-        before_error = get("dir_setup", res$feedback$checker_args$envir_prep),
-        during = res$feedback$checker_result,
-        after = dir()
+        during = get("dir_setup", e$envir_prep),
+        after = dir()
       )
     }),
     "exercise_prep.Rmd"
@@ -243,9 +243,7 @@ test_that("render_exercise() cleans up exercise_prep files even when setup fails
   # start with nothing
   expect_identical(files$before, character(0))
   # prep file is present while evaluating prep
-  expect_identical(files$before_error, "exercise_prep.Rmd")
-  # prep files are cleaned up after error
-  expect_identical(files$during, character(0))
+  expect_identical(files$during, "exercise_prep.Rmd")
   # nothing in directory after render_exercise() because user code didn't evaluate
   expect_identical(files$after, character(0))
 })

@@ -323,6 +321,26 @@ test_that("serialized exercises produce equivalent evaluate_exercise() results",
   )
 })

+test_that("standardize_exercise_code() ensures top-level code is a length-1 string", {
+  ex <- standardize_exercise_code(
+    list(
+      code = c("a", "b"),
+      check = character(),
+      code_check = c(" ", " ", "\t\t\t"),
+      global_setup = c(
+        "",
+        "def return_one():",
+        "\treturn 1",
+        ""
+      )
+    )
+  )
+
+  expect_equal(ex$code, "a\nb")
+  expect_equal(ex$check, "")
+  expect_equal(ex$code_check, "")
+  expect_equal(ex$global_setup, "def return_one():\n\treturn 1")
+})

 # exercise_result() -------------------------------------------------------

@@ -766,3 +784,30 @@ test_that("env vars are protected from both user and author modification", {
   # evaluate_exercise() restores the TEST option after checking too
   expect_equal(res$after_eval, "APP")
 })
+
+
+# Timelimit ---------------------------------------------------------------
+
+test_that("Exercise timelimit error is returned when exercise takes too long", {
+  skip_on_cran()
+  skip_on_os("windows")
+
+  ex <- mock_exercise(user_code = "Sys.sleep(3)", exercise.timelimit = 1)
+
+  make_evaluator <- setup_forked_evaluator_factory(max_forked_procs = 1)
+  evaluator <- make_evaluator(
+    evaluate_exercise(ex, new.env()),
+    timelimit = ex$options$exercise.timelimit
+  )
+
+  evaluator$start()
+  while (!evaluator$completed()) {
+    Sys.sleep(1)
+  }
+  res <- evaluator$result()
+
+  expect_s3_class(res, "learnr_exercise_result")
+  expect_true(res$timeout_exceeded)
+  expect_match(res$error_message, "permitted timelimit")
+  expect_match(as.character(res$html_output), "alert-danger")
+})
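The tests above rely on the classed condition that render_exercise() now signals instead of running the error check itself. Below is a minimal, self-contained sketch of that pattern; raise_render_error() is a hypothetical stand-in for render_exercise(), and only the condition class and field names mirror the diff:

    library(rlang)

    # Hypothetical stand-in for render_exercise(): signal a classed condition
    # carrying the evaluation state as extra condition fields.
    raise_render_error <- function() {
      envir_prep <- new.env()
      assign("x", 1, envir = envir_prep)
      abort(
        class = "learnr_render_exercise_error",
        envir_prep = envir_prep,
        envir_result = envir_prep,
        error_message = "boom"
      )
    }

    # Callers catch the condition by class and read its fields, e.g. to run an
    # error check (as evaluate_exercise() does) or to assert on it in tests.
    cnd <- catch_cnd(raise_render_error(), "learnr_render_exercise_error")
    cnd$error_message         # "boom"
    get("x", cnd$envir_prep)  # 1

    # The same condition can also be handled with tryCatch(), keyed by class:
    tryCatch(
      raise_render_error(),
      learnr_render_exercise_error = function(err) err$error_message
    )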