Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions .github/workflows/test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -27,8 +27,8 @@ jobs:
dotnet-version: "5.0.x"
- name: Install tmc-r-tester
run: |
Rscript -e 'install.packages(c("devtools","testthat", "httr", "curl"),repos="https://ftp.eenet.ee/pub/cran/")'
Rscript -e 'devtools::install_github("RTMC/tmc-r-tester/tmcRtestrunner")'
Rscript -e 'install.packages(c("testthat", "jsonlite", "R.utils"))'
Rscript -e 'install.packages("plugins/r/tests/tmcRtestrunner", repos=NULL, type="source")'
- name: Build test binary
run: cargo test --no-run --verbose
- name: Run tests
Expand Down
2 changes: 2 additions & 0 deletions plugins/r/tests/tmcRtestrunner/.Rbuildignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
^.*\.Rproj$
^\.Rproj\.user$
9 changes: 9 additions & 0 deletions plugins/r/tests/tmcRtestrunner/.lintr
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
linters: with_defaults(
absolute_paths_linter = NULL, # 21
closed_curly_linter = NULL, # 2
open_curly_linter = NULL, # 2
single_quotes_linter = NULL, # 1
NULL,
line_length_linter(120),
object_length_linter(80)
)
27 changes: 27 additions & 0 deletions plugins/r/tests/tmcRtestrunner/DESCRIPTION
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
Package: tmcRtestrunner
Type: Package
Title: Runs TMC R projects
Version: 0.1.0
Author: RTMC
Maintainer: The package maintainer <yourself@somewhere.net>
Description: Test runner for TMC (TestMyCode) R exercises: runs a project's
    testthat tests, collects results and awarded points, and writes them as
    JSON for consumption by the TMC framework.
License: What license is it under?
Encoding: UTF-8
LazyData: true
Depends:
testthat,
jsonlite,
R.utils
Imports:
testthat,
jsonlite,
R.utils
Suggests:
lintr
Collate:
'ResultsJsonParser.R'
'ResultsCreator.R'
'RunTests.R'
'GetAvailablePoints.R'
'TestEnvironment.R'
4 changes: 4 additions & 0 deletions plugins/r/tests/tmcRtestrunner/NAMESPACE
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
exportPattern("^[[:alpha:]]+")
import(testthat)
import(jsonlite)
import(R.utils)
59 changes: 59 additions & 0 deletions plugins/r/tests/tmcRtestrunner/R/GetAvailablePoints.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@

# Collects the available points for every test file in the project.
#
# Relies on the .GlobalEnv bookkeeping set up by .init_global_vars and
# filled in by the stubbed helpers from .create_counter_env: each test file
# is executed so the stubs record points instead of running assertions.
#
# Args:
#   project_path: Absolute path to the root of the project.
#
# Returns:
#   Named list mapping test description -> points available from that test.
.get_available_points <- function(project_path) {
  .init_global_vars()
  # file.path() and TRUE (not T) for consistency with RunTests.R; tests
  # live in <project>/tests/testthat and are named test*.R.
  test_files <- list.files(path = file.path(project_path, "tests", "testthat"),
                           pattern = "test.*\\.R",
                           full.names = TRUE, recursive = FALSE)
  for (test_file in test_files) {
    .GlobalEnv$map_to_desc[[.GlobalEnv$counter]] <- list()
    .GlobalEnv$file_points[[.GlobalEnv$counter]] <- list()
    # "silent" reporter: only the side effects of the stubbed helpers matter.
    test_file(test_file, reporter = "silent", env = .create_counter_env(project_path))
    .GlobalEnv$counter <- .GlobalEnv$counter + 1
  }
  return(.add_points(.GlobalEnv$test_available_points, .GlobalEnv$file_points,
                     .GlobalEnv$map_to_desc))
}

# Resets the .GlobalEnv bookkeeping used while scanning for available points.
.init_global_vars <- function() {
  assign("test_available_points", list(), envir = .GlobalEnv)
  assign("file_points", list(), envir = .GlobalEnv)
  assign("map_to_desc", list(), envir = .GlobalEnv)
  assign("counter", 1, envir = .GlobalEnv)
}

# Merges file-wide points into every test's own available points.
#
# Args:
#   test_available_points: named list: test description -> test points.
#   file_points: list indexed by file number -> points granted to every
#                test in that file (via points_for_all_tests).
#   map_to_desc: list indexed by file number -> descriptions of the tests
#                defined in that file.
#
# Returns:
#   Named list: test description -> all points available from that test.
.add_points <- function(test_available_points, file_points, map_to_desc) {
  all_available_points <- list()
  # seq_along() instead of 1:(.GlobalEnv$counter - 1): safe when there are
  # no test files (1:0 would iterate c(1, 0)), and uses the function's own
  # arguments instead of reaching back into .GlobalEnv.
  for (file_index in seq_along(map_to_desc)) {
    for (desc in map_to_desc[[file_index]]) {
      all_available_points[[desc]] <- c(file_points[[file_index]],
                                        test_available_points[[desc]])
    }
  }
  return(all_available_points)
}

# Builds the environment in which a test file is evaluated while scanning
# for available points: project sources plus the stubbed counter helpers.
.create_counter_env <- function(project_path) {
  counter_env <- new.env()
  .define_counter_functions(counter_env, project_path)
  counter_env
}

# Installs stubbed `test` and `points_for_all_tests` into test_env so that
# evaluating a test file records (into .GlobalEnv) which points it can
# award, instead of executing any real testthat assertions.
.define_counter_functions <- function(test_env, project_path) {
  # Source the project files into the environment first (helper defined in
  # TestEnvironment.R, not visible here).
  .source_files(test_env, project_path)
  test_env$test <- function(desc, point, code){
    # NOTE(review): this membership check looks at the list's *values*, not
    # names(), and the assignment below overwrites rather than appends, so
    # the guard appears to have no effect -- confirm intended semantics.
    if (!(desc %in% .GlobalEnv$test_available_points)) {
      .GlobalEnv$test_available_points[[desc]] <- list()
    }
    .GlobalEnv$test_available_points[[desc]] <- c(point)
    # Remember which file (counter) this description came from.
    .GlobalEnv$map_to_desc[[.GlobalEnv$counter]] <- c(.GlobalEnv$map_to_desc[[.GlobalEnv$counter]], desc)
  }
  test_env$points_for_all_tests <- function(points){
    # Points granted to every test in the current file.
    .GlobalEnv$file_points[[.GlobalEnv$counter]] <- c(points)
  }
}

# Determines the available points for every test in the project without
# running the tests themselves, and writes them to .available_points.json
# in the project root.
run_available_points <- function(project_path = getwd()) {
  available_points <- .get_available_points(project_path)
  json_results <- .create_available_points_json_results(available_points)
  .write_json(json_results, file.path(project_path, ".available_points.json"))
}
109 changes: 109 additions & 0 deletions plugins/r/tests/tmcRtestrunner/R/ResultsCreator.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,109 @@
# Converts the testthat output of one test file into this package's list of
# test results (name, status, points, message, backtrace per test).
#
# Args:
#   testthat_file_output: list of testthat test results for one file.
#   tests_points: named list: test description -> test-specific points.
#   file_points: points granted to every test in the file.
.create_file_results <- function(testthat_file_output,
                                 tests_points,
                                 file_points) {
  lapply(testthat_file_output, function(single_test) {
    test_name <- single_test$test
    test_status <- .get_status_for_test(single_test)
    list(
      "name" = test_name,
      "status" = test_status,
      "points" = .get_points_for_test(test_name, tests_points, file_points),
      "message" = .create_message_for_test(single_test, test_status),
      "backtrace" = .create_backtrace_for_test(single_test, test_status)
    )
  })
}

# Combines the file-wide points with a test's own points; tests without
# their own entry get just the file-wide points.
.get_points_for_test <- function(test_name, tests_points, file_points) {
  own_points <- tests_points[[test_name]]
  if (is.null(own_points)) {
    own_points <- vector()
  }
  c(file_points, own_points)
}

# Maps a single testthat test result to a "pass"/"fail" status string.
.get_status_for_test <- function(test) {
  if (.check_if_test_passed(test)) "pass" else "fail"
}

# A test passed only if every one of its expectation results passed
# (a test with no results counts as passed, matching all() on empty input).
.check_if_test_passed <- function(test) {
  all(vapply(test$results, .check_if_result_passed, logical(1)))
}

# An expectation passed when testthat formats it as "As expected".
.check_if_result_passed <- function(result) {
  format(result) == "As expected"
}

# Normalizes a failed expectation's message: splitting on newlines and
# re-joining strips any trailing newline.
.message_from_failed_result <- function(result) {
  lines <- unlist(strsplit(result$message, "\n", fixed = TRUE))
  paste(lines, collapse = "\n")
}

# Builds the failure message for a test: the message of its first failed
# expectation, or "" when the test passed (or no failed result is found).
#
# Args:
#   test: a testthat test result.
#   status: "pass" or "fail", as computed by .get_status_for_test.
.create_message_for_test <- function(test, status) {
  if (status == "pass") return("")

  for (result in test$results) {
    # Reuse the shared pass check instead of duplicating the
    # format(result) != "As expected" comparison inline.
    if (!.check_if_result_passed(result)) {
      return(.message_from_failed_result(result))
    }
  }
  return("")
}

# Builds a backtrace (list of "N: <call> in <file>#<line>" strings) from
# the first failed expectation of a test; empty list for passing tests.
#
# Args:
#   testthat_test_result: a testthat test result.
#   status: "pass" or "fail", as computed by .get_status_for_test.
.create_backtrace_for_test <- function(testthat_test_result, status) {
  if (status == "pass") return(list())

  for (result in testthat_test_result$results) {
    # Reuse the shared pass check (was an inline format() comparison);
    # also dropped a stray trailing semicolon.
    if (!.check_if_result_passed(result)) {
      backtrace <- list()
      call_number <- 1
      for (call in result$call) {
        backtrace <- append(backtrace,
                            paste0(call_number, ": ", .create_call_message(call)))
        call_number <- call_number + 1
      }
      return(backtrace)
    }
  }
  return(list())
}

# Formats a single call for the backtrace, appending " in <file>#<line>"
# when source-reference information is attached to the call.
.create_call_message <- function(call) {
  call_text <- format(call)
  call_srcref <- attr(call, "srcref", exact = TRUE)

  # No srcref: just the deparsed call.
  if (is.null(call_srcref)) {
    return(call_text)
  }

  source_name <- attr(call_srcref, "srcfile", exact = TRUE)$filename
  first_line <- c(call_srcref)[[1]]
  paste0(call_text, " in ", source_name, "#", first_line)
}
47 changes: 47 additions & 0 deletions plugins/r/tests/tmcRtestrunner/R/ResultsJsonParser.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
# Creates the JSON-ready structure of test names and the points available
# from them, preserving the order of names(available_points).
.create_available_points_json_results <- function(available_points) {
  descs <- names(available_points)
  results <- lapply(descs, function(desc) available_points[[desc]])
  names(results) <- descs
  results
}

# Converts internal run results into their JSON (jsonlite) representation;
# scalar fields are unboxed so they serialize as values, not arrays.
.create_json_run_results <- function(run_results) {
  json_test_results <- lapply(run_results$test_results, .create_json_test_result)
  list("runStatus" = unbox(run_results$run_status),
       "backtrace" = lapply(run_results$backtrace, unbox),
       "testResults" = json_test_results)
}

# Converts one internal test result into its JSON (jsonlite)
# representation; the points vector is deliberately left boxed.
.create_json_test_result <- function(test_result) {
  json_test_result <- list(
    status = unbox(test_result$status),
    name = unbox(format(test_result$name)),
    message = unbox(test_result$message),
    backtrace = lapply(test_result$backtrace, unbox),
    points = test_result$points
  )
  json_test_result
}

# Serializes `results` to pretty-printed UTF-8 JSON and writes it to `file`.
.write_json <- function(results, file) {
  compact_json <- enc2utf8(toJSON(results, pretty = FALSE))
  write(prettify(compact_json), file)
}

# Prints one "name: status" line per test, followed by the failure message
# (surrounded by blank lines) when there is one.
.print_results_from_json <- function(json_result) {
  for (test_result in json_result$testResults) {
    cat(test_result$name, ": ", test_result$status, "\n", sep = "")
    if (test_result$message != "") {
      cat("\n", test_result$message, "\n", sep = "")
    }
  }
}
75 changes: 75 additions & 0 deletions plugins/r/tests/tmcRtestrunner/R/RunTests.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,75 @@
# Runs the tests from the project directory and writes the results JSON to
# the root of the project as .results.json.
#
# Args:
#   project_path: The absolute path to the root of the project being tested.
#   print: If TRUE, prints results to the console. DEFAULT is FALSE.
#
# Returns:
#   (invisibly) run results list containing: run_status (string),
#   backtrace (list), test_results (list).
run_tests <- function(project_path = getwd(), print = FALSE) {
  # A signalled sourcing_error or run_error is converted into a failure
  # result instead of aborting the run.
  run_results <- tryCatch(
    .run_tests_project(project_path),
    sourcing_error = .sourcing_error_run_result,
    run_error = .run_error_run_result
  )

  json_results <- .create_json_run_results(run_results)
  .write_json(json_results, file.path(project_path, ".results.json"))
  if (print) {
    .print_results_from_json(json_results)
  }
  invisible(run_results)
}

# Runs every test file under <project>/tests/testthat and concatenates the
# per-file results into one successful run-results list.
.run_tests_project <- function(project_path) {
  # All files named test*.R directly inside tests/testthat.
  test_files <- list.files(path = file.path(project_path, "tests", "testthat"),
                           pattern = "test.*\\.R",
                           full.names = TRUE, recursive = FALSE)

  per_file_results <- lapply(test_files, .run_tests_file, project_path = project_path)
  test_results <- Reduce(c, per_file_results, list())

  list("run_status" = "success", "backtrace" = list(), "test_results" = test_results)
}

# Runs a single test file inside a fresh test environment and converts the
# testthat output into this package's per-file result list.
#
# Args:
#   file_path: Absolute path of the test file to run.
#   project_path: Absolute path to the root of the project.
.run_tests_file <- function(file_path, project_path) {
  # Reset the per-file point bookkeeping; the helpers installed by
  # .create_test_env fill these in while the file runs.
  .GlobalEnv$points <- list()
  .GlobalEnv$points_for_all_tests <- list()

  test_env <- .create_test_env(project_path)
  test_file_output <- tryCatch({test_file(file_path, reporter = "silent", env = test_env)},
                               error = .signal_run_error)

  # Explicit .GlobalEnv$points: the bare name `points` relied on a fragile
  # search-path lookup and could collide with graphics::points; this also
  # matches the explicit .GlobalEnv$points_for_all_tests alongside it.
  test_file_results <- .create_file_results(test_file_output, .GlobalEnv$points,
                                            .GlobalEnv$points_for_all_tests)

  return(test_file_results)
}

# Re-signals `error` as a classed "sourcing_error" condition so run_tests
# can translate it into a sourcing-failure result.
.signal_sourcing_error <- function(error) {
  condition <- structure(
    simpleError(message = error$message, call = error$call),
    class = c("sourcing_error", "simpleError", "error", "condition")
  )
  signalCondition(condition)
}

# Converts a sourcing_error condition into a failed run-results list whose
# backtrace is the error message split into lines.
.sourcing_error_run_result <- function(sourcing_error) {
  message_lines <- strsplit(sourcing_error$message, split = "\n")[[1]]
  list("run_status" = "sourcing_failed",
       "backtrace" = lapply(message_lines, unbox),
       "test_results" = list())
}

# Re-signals `error` as a classed "run_error" condition so run_tests can
# translate it into a run-failure result.
.signal_run_error <- function(error) {
  condition <- structure(
    simpleError(message = error$message, call = error$call),
    class = c("run_error", "simpleError", "error", "condition")
  )
  signalCondition(condition)
}

# Converts a run_error condition into a failed run-results list whose
# backtrace is the error message split into lines.
.run_error_run_result <- function(run_error) {
  message_lines <- strsplit(run_error$message, split = "\n")[[1]]
  list("run_status" = "run_failed",
       "backtrace" = lapply(message_lines, unbox),
       "test_results" = list())
}
Loading