Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 4 additions & 3 deletions R/pkg/R/SQLContext.R
Original file line number Diff line number Diff line change
Expand Up @@ -634,7 +634,7 @@ tableNames <- function(x, ...) {
# Cache the named table in memory through the Spark session's JVM catalog.
#
# tableName: name of the table to cache (passed through to the JVM catalog).
# Returns the JVM call result invisibly, so interactive use does not print
# the opaque Java object reference.
cacheTable.default <- function(tableName) {
  sparkSession <- getSparkSession()
  catalog <- callJMethod(sparkSession, "catalog")
  invisible(callJMethod(catalog, "cacheTable", tableName))
}

cacheTable <- function(x, ...) {
Expand Down Expand Up @@ -663,7 +663,7 @@ cacheTable <- function(x, ...) {
# Remove the named table from the in-memory cache via the JVM catalog.
#
# tableName: name of the table to uncache.
# Returns the JVM call result invisibly (side-effect function; nothing
# useful to print), matching cacheTable.default.
uncacheTable.default <- function(tableName) {
  sparkSession <- getSparkSession()
  catalog <- callJMethod(sparkSession, "catalog")
  invisible(callJMethod(catalog, "uncacheTable", tableName))
}

uncacheTable <- function(x, ...) {
Expand All @@ -686,7 +686,7 @@ uncacheTable <- function(x, ...) {
# Drop every cached table from the in-memory cache via the JVM catalog.
#
# Takes no arguments; returns the JVM call result invisibly, consistent
# with the other cache-management helpers in this file.
clearCache.default <- function() {
  sparkSession <- getSparkSession()
  catalog <- callJMethod(sparkSession, "catalog")
  invisible(callJMethod(catalog, "clearCache"))
}

clearCache <- function() {
Expand Down Expand Up @@ -730,6 +730,7 @@ dropTempTable <- function(x, ...) {
#' If the view has been cached before, then it will also be uncached.
#'
#' @param viewName the name of the view to be dropped.
#' @return TRUE if the view is dropped successfully, FALSE otherwise.
#' @rdname dropTempView
#' @name dropTempView
#' @export
Expand Down
6 changes: 3 additions & 3 deletions R/pkg/R/context.R
Original file line number Diff line number Diff line change
Expand Up @@ -87,8 +87,8 @@ objectFile <- function(sc, path, minPartitions = NULL) {
#' in the list are split into \code{numSlices} slices and distributed to nodes
#' in the cluster.
#'
#' If size of serialized slices is larger than spark.r.maxAllocationLimit or (200MB), the function
#' will write it to disk and send the file name to JVM. Also to make sure each slice is not
#' If size of serialized slices is larger than spark.r.maxAllocationLimit or (200MB), the function
#' will write it to disk and send the file name to JVM. Also to make sure each slice is not
#' larger than that limit, number of slices may be increased.
#'
#' @param sc SparkContext to use
Expand Down Expand Up @@ -379,5 +379,5 @@ spark.lapply <- function(list, func) {
#' @note setLogLevel since 2.0.0
# Set the log level on the active SparkContext.
#
# level: log level string forwarded to the JVM SparkContext
#        (e.g. "INFO", "WARN" — validated JVM-side, not here).
# Returns the JVM call result invisibly since this is a pure
# side-effect function.
setLogLevel <- function(level) {
  sc <- getSparkContext()
  invisible(callJMethod(sc, "setLogLevel", level))
}
6 changes: 3 additions & 3 deletions R/pkg/R/sparkR.R
Original file line number Diff line number Diff line change
Expand Up @@ -427,7 +427,7 @@ sparkR.session <- function(
#' @method setJobGroup default
# Assign a group ID and description to all jobs started by this thread.
#
# groupId: identifier shared by the jobs in the group.
# description: human-readable description of the group.
# interruptOnCancel: whether cancelling the group interrupts running tasks.
# Returns the JVM call result invisibly (side-effect only).
setJobGroup.default <- function(groupId, description, interruptOnCancel) {
  sc <- getSparkContext()
  invisible(callJMethod(sc, "setJobGroup", groupId, description, interruptOnCancel))
}

setJobGroup <- function(sc, groupId, description, interruptOnCancel) {
Expand Down Expand Up @@ -457,7 +457,7 @@ setJobGroup <- function(sc, groupId, description, interruptOnCancel) {
#' @method clearJobGroup default
# Clear the current thread's job group so subsequent jobs are ungrouped.
#
# Takes no arguments; returns the JVM call result invisibly, matching
# the other job-group helpers.
clearJobGroup.default <- function() {
  sc <- getSparkContext()
  invisible(callJMethod(sc, "clearJobGroup"))
}

clearJobGroup <- function(sc) {
Expand All @@ -484,7 +484,7 @@ clearJobGroup <- function(sc) {
#' @method cancelJobGroup default
# Cancel all active jobs belonging to the given job group.
#
# groupId: identifier of the job group to cancel.
# Returns the JVM call result invisibly (side-effect only).
cancelJobGroup.default <- function(groupId) {
  sc <- getSparkContext()
  invisible(callJMethod(sc, "cancelJobGroup", groupId))
}

cancelJobGroup <- function(sc, groupId) {
Expand Down
14 changes: 7 additions & 7 deletions R/pkg/inst/tests/testthat/test_sparkSQL.R
Original file line number Diff line number Diff line change
Expand Up @@ -576,7 +576,7 @@ test_that("test tableNames and tables", {
tables <- tables()
expect_equal(count(tables), 2)
suppressWarnings(dropTempTable("table1"))
dropTempView("table2")
expect_true(dropTempView("table2"))
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We call dropTempView in a number of other places — it might be good to add this assertion to the ones that are specifically testing temp views.


tables <- tables()
expect_equal(count(tables), 0)
Expand All @@ -589,7 +589,7 @@ test_that(
newdf <- sql("SELECT * FROM table1 where name = 'Michael'")
expect_is(newdf, "SparkDataFrame")
expect_equal(count(newdf), 1)
dropTempView("table1")
expect_true(dropTempView("table1"))

createOrReplaceTempView(df, "dfView")
sqlCast <- collect(sql("select cast('2' as decimal) as x from dfView limit 1"))
Expand All @@ -600,7 +600,7 @@ test_that(
expect_equal(ncol(sqlCast), 1)
expect_equal(out[1], " x")
expect_equal(out[2], "1 2")
dropTempView("dfView")
expect_true(dropTempView("dfView"))
})

test_that("test cache, uncache and clearCache", {
Expand All @@ -609,7 +609,7 @@ test_that("test cache, uncache and clearCache", {
cacheTable("table1")
uncacheTable("table1")
clearCache()
dropTempView("table1")
expect_true(dropTempView("table1"))
})

test_that("insertInto() on a registered table", {
Expand All @@ -630,13 +630,13 @@ test_that("insertInto() on a registered table", {
insertInto(dfParquet2, "table1")
expect_equal(count(sql("select * from table1")), 5)
expect_equal(first(sql("select * from table1 order by age"))$name, "Michael")
dropTempView("table1")
expect_true(dropTempView("table1"))

createOrReplaceTempView(dfParquet, "table1")
insertInto(dfParquet2, "table1", overwrite = TRUE)
expect_equal(count(sql("select * from table1")), 2)
expect_equal(first(sql("select * from table1 order by age"))$name, "Bob")
dropTempView("table1")
expect_true(dropTempView("table1"))

unlink(jsonPath2)
unlink(parquetPath2)
Expand All @@ -650,7 +650,7 @@ test_that("tableToDF() returns a new DataFrame", {
expect_equal(count(tabledf), 3)
tabledf2 <- tableToDF("table1")
expect_equal(count(tabledf2), 3)
dropTempView("table1")
expect_true(dropTempView("table1"))
})

test_that("toRDD() returns an RRDD", {
Expand Down