Merged

54 commits
0b67a2c
Added a new test
Sep 21, 2017
55a137b
Added new dummy functions to check if coverage works as intended
Sep 21, 2017
2eece83
Changed the third dummy method to pass.
Sep 21, 2017
36e2d77
Added new test file.
aleksisv Sep 21, 2017
94ec7f0
Changed project structure
aleksisv Sep 21, 2017
bdc17e0
Changed .travis.yml to include new tests.
aleksisv Sep 21, 2017
8558880
Made the branch compatible with other branches
aleksisv Sep 22, 2017
cfcf32e
Made branch fully compatible with owntestmethod
aleksisv Sep 22, 2017
04960a8
Fixed comment on RunTests.R
aleksisv Sep 22, 2017
1c5c37b
Fixed naming of the functions. Test that checks if you can find hidd…
aleksisv Sep 22, 2017
ec424e1
Started writing tests to correct functions.
aleksisv Sep 22, 2017
3e8f011
Refactored runTests-class so that it would be easier to test.
aleksisv Sep 22, 2017
84c6ea0
Fixed travis build fail in runTests
aleksisv Sep 22, 2017
61263b0
Tried to fix travis issue.
aleksisv Sep 22, 2017
725357a
Refactored old runTests code and added tests
aleksisv Sep 22, 2017
82cf7a7
Changed test names.
aleksisv Sep 22, 2017
64c2b63
Fixing travis issues.
aleksisv Sep 22, 2017
0e56f73
Figuring out what causes the tests to fail on travis.
aleksisv Sep 22, 2017
97810ed
Figuring out what causes the tests to fail on travis.
aleksisv Sep 22, 2017
aa6546c
Figuring out what causes the tests to fail on travis.
aleksisv Sep 22, 2017
c240a35
Trying to fix travis
aleksisv Sep 22, 2017
0027098
Testing travis.
anttihaap Sep 22, 2017
186f8cf
Trying to fix travis again.
anttihaap Sep 22, 2017
d14c08e
Fixed travis (once more.)
aleksisv Sep 22, 2017
17524fd
Refactored code. Fixed an issue with JSON
aleksisv Sep 23, 2017
a81e631
Added test to test printing
aleksisv Sep 23, 2017
d718d91
Changed the .travis.yml file
aleksisv Sep 23, 2017
2b0aa80
Changed the .travis.yml file
aleksisv Sep 23, 2017
a3c60f3
Trying to wake up coveralls
aleksisv Sep 23, 2017
d823de7
Changed .travis.yml files to include coveralls
aleksisv Sep 23, 2017
f4ccb74
Added support for parallel builds.
aleksisv Sep 23, 2017
4d01620
Added support for parallel builds.
aleksisv Sep 23, 2017
3d7e505
Configuring travis
aleksisv Sep 23, 2017
3450a3d
Fixing travis.
aleksisv Sep 23, 2017
0757db0
Fixing travis.
aleksisv Sep 23, 2017
bb09f1f
Trying to fix travis.
aleksisv Sep 23, 2017
b8e7419
Added resources for testrunner, some refactoring and test modifications.
anttihaap Sep 23, 2017
840edc0
Fixed typos...
anttihaap Sep 23, 2017
7098298
A lot of refactoring.
anttihaap Sep 23, 2017
415ef74
Added comments to the newly refactored source-files
aleksisv Sep 24, 2017
aad55b6
Added tests that test tmcRtestrunner
aleksisv Sep 25, 2017
4f39b6a
Added tests to test that points are added correctly to the result dat…
aleksisv Sep 25, 2017
3b0e46d
Testing that file is created correctly when runTests is called.
aleksisv Sep 25, 2017
d8e08b2
Added a second small resource project that includes files with failin…
aleksisv Sep 25, 2017
afc4e74
Added more tests.
aleksisv Sep 25, 2017
eb0a250
Added a printing test.
aleksisv Sep 25, 2017
70cfac8
Modified README
aleksisv Sep 25, 2017
9f858ff
Modified example project description files
aleksisv Sep 25, 2017
a79dffe
Merge branch 'tmcRtestrunner' into metatests
aleksisv Sep 25, 2017
4338285
Merge pull request #5 from RTMC/metatests
samuvait Sep 25, 2017
cd7ea13
Merge branch 'owntestmethod' into tmcRtestrunner
aleksisv Sep 25, 2017
169776c
Modified example project description file
aleksisv Sep 25, 2017
880c814
Added files to collate
aleksisv Sep 25, 2017
95c10c3
Merge pull request #7 from RTMC/metatests
tmoo Sep 25, 2017
1 change: 1 addition & 0 deletions .gitignore
@@ -3,3 +3,4 @@
.RData
.Ruserdata
.DS_Store
tmcRtestrunner.Rcheck/
@@ -4,7 +4,7 @@ source("../../R/arithmetics.R")

pointsForAllTests(c("r1"))

test("Addition works", c("r1.1","r1.2"), {
test("Addition works", c("r1.1", "r1.2"), {
expect_equal(add(1, 2), 3)
expect_equal(add(1, 2), 3.0)
expect_equal(add(1, 4), 5)
3 changes: 2 additions & 1 deletion example_projects/example_project1/tmc_run_test_example.sh
@@ -1,3 +1,4 @@
#!/bin/sh
#Currently this script needs to be run from the project root!
Rscript tmc/result.R

Rscript -e "library(tmcRtestrunner);runTests(\"$PWD\", print=TRUE)"
5 changes: 5 additions & 0 deletions tmcRtestrunner/DESCRIPTION
@@ -12,3 +12,8 @@ LazyData: true
Depends:
testthat,
jsonlite
Collate:
'ResultsJsonParser.R'
'TestthatResultReader.R'
'RunTests.R'
'getAvailablePoints.R'
72 changes: 72 additions & 0 deletions tmcRtestrunner/R/ResultsJsonParser.R
@@ -0,0 +1,72 @@
#Creates JSON results from the testthat output.
.CreateJsonResults <- function(testthatOutput) {
results <- list()
for (test in testthatOutput) {
testName <- test$test
testPoints <- test$points
testMessage <- ""

if (.CheckIfTestPassed(test)) {
testStatus <- "pass"
} else {
testStatus <- "fail"
testMessage <- .CreateMessageForTestWithFailures(test)
}

testResult <- .CreateJsonTestResult(testStatus, testName, testMessage, testPoints, "")
#Add test result to results
results[[length(results)+1]] <- testResult
}
return (results)
}

#Creates the JSON result for a single test case.
.CreateJsonTestResult <- function(testStatus, testName, testMessage,
testPoints, backtrace) {
testResult <- list(status=unbox(testStatus),
name=unbox(format(testName)),
message=unbox(testMessage),
backtrace=unbox(backtrace),
points=testPoints)
return(testResult)
}

#Returns the message from a failed result.
#Currently supports only results that include a call.
.MessageFromFailedResult <- function(result) {
if (is.null(result$call)) {
return("")
}
#The call that failed the test, for example expect_equal(1, 2)
language <- toString(result$call[[1]])
return (paste(sep="", "Failed with call: ", language,"\n", result$message))
}

.CreateMessageForTestWithFailures <- function(test) {
testMessage <- ""
for (result in test$results) {
if (format(result) != "As expected") {
testMessage <- paste(sep = "", testMessage, .MessageFromFailedResult(result))
}
}
return(testMessage)
}

#Writes JSON based on the whole test result.
.WriteJson <- function(results) {
#Encode the JSON as UTF-8 and prettify it:
json <- enc2utf8(toJSON(results, pretty = FALSE))
json <- prettify(json)
#Write the results file
write(json, ".results.json")
}

#Prints results.
.PrintResultsFromJson <- function(jsonResult) {
for (test in jsonResult) {
cat(sep = "", test$name, ": ", test$status, "\n")
if (test$message != "") {
cat(sep = "", "\n", test$message, "\n")
}
}
}
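A note on the unbox() calls above: by default jsonlite serializes every R vector as a JSON array, including length-one vectors, so the scalar fields (status, name, message, backtrace) are unboxed while points is deliberately left as an array. A minimal sketch of the difference, not part of the diff:

library(jsonlite)

# Without unbox(), a length-one vector serializes as a JSON array.
toJSON(list(status = "pass"))
#> {"status":["pass"]}

# With unbox(), scalars serialize as plain JSON values; points stays an array.
toJSON(list(status = unbox("pass"), points = c("r1", "r1.1")))
#> {"status":"pass","points":["r1","r1.1"]}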
160 changes: 40 additions & 120 deletions tmcRtestrunner/R/RunTests.R
@@ -1,144 +1,64 @@
runTests <- function(project_path, print = FALSE) {
# Runs the tests from project directory and writes results JSON to the root of the project
# as .tmc_results.json.
#
# Args:
# project_path: The path to the root of the project being tested.
# print: If TRUE, prints results; if not, not. DEFAULT is FALSE.
#
library('testthat')
library('jsonlite')
# Runs the tests from the project directory and writes the results JSON
# to the root of the project as .results.json.
#
# Args:
#   projectPath: The absolute path to the root of the project being tested.
#   print: If TRUE, prints the results. Default is FALSE.
#
runTests <- function(projectPath, print=FALSE) {
tmcrTestRunnerProjectPath <- getwd()

#Runs the tests for the project; returns testthatOutput with points added.
testthatOutput <- .RunTestsProject(projectPath)

jsonResults <- .CreateJsonResults(testthatOutput)
.WriteJson(jsonResults)

if (print) {
.PrintResultsFromJson(jsonResults)
}

tmcrtestrunnet_project_path <- getwd()
setwd(project_path)
setwd(tmcrTestRunnerProjectPath)
}

#declaring variables to global environment that for example helperTMC.R can use
points <- list()
points_for_all_tests <- list()
.RunTestsProject <- function(projectPath) {
setwd(projectPath)

testthatOutput <- list()

#Lists all the files in the path beginning with "test" and ending in ".R"
testFiles <- list.files(path="tests/testthat", pattern = "test.*\\.R", full.names = T, recursive = FALSE)

for (testFile in testFiles) {
testFileOutput <- test_file(testFile, reporter = "silent")
#Modifies the points because they were added to all the tests.
points <- .AddPointsToAllTests(testFileOutput)
#Adds the output from the tests in the file to the list
testFileOutput <- .RunTestsFile(testFile)
testthatOutput <- c(testthatOutput, testFileOutput)
}

.CreateResults <- function(testthatOutput) {
results = list()
for (test in testthatOutput) {
testName <- test$test
testPoints <- .GetTestPoints(testName)
testFailed <- FALSE
testStatus <- "passed"
testMessage <- ""
for (result in test$results) {
if (format(result) != "As expected") {
testFailed <- TRUE
testStatus <- "failed"
testMessage <- paste(sep = "", testMessage, .MessageFromFailedResult(result))
}
}
.PrintResult(testName, testMessage, testFailed)
testResult <- .CreateTestResult(testStatus, testName, testMessage,testPoints, "")
#Add test result to results
results[[length(results)+1]] <- testResult
}
return (results)
}


results <- .CreateResults(testthatOutput)

#json utf-8 coded:
json <- enc2utf8(toJSON(results, pretty = FALSE))
json <- prettify(json)

#encode json to utf-8 and write file
write(json, ".results.json")

#restore project path:
setwd(tmcrtestrunnet_project_path)
return(testthatOutput)
}

#Checks if a single test passed
.CheckIfResultCorrect <- function(test) {
ret <- TRUE
for (result in test$results) {
if (format(result) != "As expected") {
ret <- FALSE
break
}
}
return (ret)
}
.RunTestsFile <- function(filePath) {
.GlobalEnv$points <- list()
.GlobalEnv$points_for_all_tests <- list()

#Checks whether all the tests in a single file passed
.CheckThatAllPassed <- function(test_output) {
ret <- TRUE
for (test in test_output) {
if (!.CheckIfResultCorrect(test)) {
ret <- FALSE
break
}
}
return (ret)
}
testFileOutput <- test_file(filePath, reporter = "silent")
testFileOutput <- .AddPointsToTestOutput(testFileOutput)

#Adds the points from a single test file to all the tests in the file
#returns points list, so that the modified points list is updated
.AddPointsToAllTests <- function(test_output) {
for (test in test_output) {
if (!(points_for_all_tests %in% points[[test$test]])) {
points[[test$test]] <- c(points[[test$test]], points_for_all_tests)
}
}
return (points)
return(testFileOutput)
}

.PrintResult <- function(name, message, failed) {
if (failed) {
print(paste(name, ": FAIL", sep = ""))
print(paste(" ", message, sep = ""))
} else {
print(paste(name, ": PASS", sep = ""))
.AddPointsToTestOutput <- function(testOutput) {
for (i in 1 : length(testOutput)) {
testOutput[[i]]$points <- .GetTestPoints(testOutput[[i]]$test)
}
return(testOutput)
}

#Returns message from failed results
#Currently supports only results that used calls
.MessageFromFailedResult <- function(result) {
if (is.null(result$call)) {
return("")
}
#language that failed the test. for example call expect_equal(1,2)
language <- toString(result$call[[1]])
return (paste(sep="", "Failed with call: ", language,"\n", result$message))
}

#Returns the points of a test or an empty vector if null
.GetTestPoints <- function(testName) {
if (is.null(points[[testName]])) {
return(vector())
testPoints <- vector()
} else {
return(points[[testName]])
testPoints <- points[[testName]]
}
}

.CreateTestResult <- function(testStatus, testName, testMessage,
testPoints, backtrace) {
testResult <- list(status=unbox(testStatus),
name=unbox(format(testName)),
message=unbox(testMessage),
backtrace=unbox(backtrace),
points=testPoints)
return(testResult)
}

DummyFunction <- function() {
return(TRUE)
testPoints <- c(.GlobalEnv$points_for_all_tests, testPoints)
return(testPoints)
}
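With this refactor, runTests() is the only public entry point; the dot-prefixed functions are internal helpers. A hedged usage sketch (the project path is hypothetical):

library(tmcRtestrunner)

# Runs every tests/testthat/test*.R file under the project, writes
# .results.json to the project root, and prints a per-test summary.
runTests("/home/user/example_project1", print = TRUE)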
28 changes: 28 additions & 0 deletions tmcRtestrunner/R/TestthatResultReader.R
@@ -0,0 +1,28 @@
#Checks if all tests pass in testOutput
.CheckAllTestPassed <- function(testOutput) {
ret <- TRUE
for (test in testOutput) {
if (!.CheckIfTestPassed(test)) {
ret <- FALSE
break
}
}
return (ret)
}

#Checks if a single test passed
.CheckIfTestPassed <- function(test) {
ret <- TRUE
for (result in test$results) {
if (!.CheckIfResultPassed(result)) {
ret <- FALSE
break
}
}
return (ret)
}

#Checks if a single result passed
.CheckIfResultPassed <- function(result) {
return(format(result) == "As expected")
}
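These helpers hinge on format() of a testthat expectation returning "As expected" for a passing result, which holds for the testthat versions this runner targets; newer versions may format expectations differently. A sketch of how they compose with the silent-reporter output, assuming a hypothetical test file:

library(testthat)

output <- test_file("tests/testthat/testArithmetics.R", reporter = "silent")
for (test in output) {
  status <- if (.CheckIfTestPassed(test)) "pass" else "fail"
  cat(test$test, "->", status, "\n")
}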
@@ -0,0 +1,34 @@
[
{
"status": "pass",
"name": "RetTrue works.",
"message": "",
"backtrace": "",
"points": [
"r1",
"r1.1"
]
},
{
"status": "pass",
"name": "RetOne works.",
"message": "",
"backtrace": "",
"points": [
"r1",
"r1.2"
]
},
{
"status": "pass",
"name": "Add works.",
"message": "",
"backtrace": "",
"points": [
"r1",
"r1.3",
"r1.4"
]
}
]

@@ -0,0 +1,11 @@
RetTrue <- function() {
return(TRUE)
}

RetOne <- function() {
return(1)
}

Add <- function(a, b) {
return(a + b)
}
@@ -0,0 +1,13 @@
Version: 1.0

RestoreWorkspace: Default
SaveWorkspace: Default
AlwaysSaveHistory: Default

EnableCodeIndexing: Yes
UseSpacesForTab: Yes
NumSpacesForTab: 2
Encoding: UTF-8

RnwWeave: Sweave
LaTeX: pdfLaTeX
@@ -0,0 +1,13 @@

#Stores the points for all tests in the global environment, from which
#they can be retrieved.
pointsForAllTests <- function(points) {
.GlobalEnv$points_for_all_tests <- points
}

#Wrapper around the test_that() method that stores the points
#in the global environment.
test <- function(desc, points, code) {
.GlobalEnv$points[[desc]] <- points
test_that(desc, code)
}
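Combined with .GetTestPoints() in RunTests.R, the points travel end to end like this (a minimal sketch; add() comes from the example project's arithmetics.R):

pointsForAllTests(c("r1"))           # stored in .GlobalEnv$points_for_all_tests

test("Addition works", c("r1.1"), {  # stored in .GlobalEnv$points under the description
  expect_equal(add(1, 2), 3)
})

# .GetTestPoints("Addition works") then merges the two sources and returns
# c("r1", "r1.1"), which becomes the "points" array in .results.json.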