[SPARK-9053] [SPARKR] Fix spaces around parens, infix operators etc.
### JIRA
[[SPARK-9053] Fix spaces around parens, infix operators etc. - ASF JIRA](https://issues.apache.org/jira/browse/SPARK-9053)

### The Result of `lint-r`
[The result of lint-r at the revision a4c83cb1e4b066cd60264b6572fd3e51d160d26a](https://gist.github.com/yu-iskw/d253d7f8ef351f86443d)

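For reference, a report like the one in the gist can be regenerated with the `lintr` package, which Spark drives through its `dev/lint-r` wrapper script; a minimal sketch, assuming a Spark source checkout as the working directory:

```r
# Lint the SparkR package sources with lintr (what dev/lint-r drives).
# Assumes the working directory is the root of a Spark checkout.
# install.packages("lintr")
library(lintr)
lint_package("R/pkg")
```
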
Author: Yu ISHIKAWA <yuu.ishikawa@gmail.com>

Closes apache#7584 from yu-iskw/SPARK-9053 and squashes the following commits:

613170f [Yu ISHIKAWA] Ignore a warning about a space before a left parenthesis
ede61e1 [Yu ISHIKAWA] Ignores two warnings about a space before a left parenthesis. TODO: After updating `lintr`, we will remove the ignores
de3e0db [Yu ISHIKAWA] Add '## nolint start' & '## nolint end' statements to ignore infix space warnings
e233ea8 [Yu ISHIKAWA] [SPARK-9053][SparkR] Fix spaces around parens, infix operators etc.
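
For context on the `nolint` markers added throughout this diff: lintr skips everything between a start/end comment pair, and a trailing `# nolint` comment excludes a single line. A minimal sketch with toy variables (not from the Spark sources):

```r
# lintr exclusion markers: the region between "# nolint start" and
# "# nolint end" is never linted; a trailing "# nolint" excludes one line.
n <- 3
# nolint start
x<-1:(n + 1)  # deliberately violates the infix-space rule; lintr stays quiet
# nolint end
y <- x[1] # nolint
```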
yu-iskw authored and shivaram committed Jul 31, 2015
1 parent 6bba750 commit fc0e57e
Showing 9 changed files with 21 additions and 12 deletions.
### R/pkg/R/DataFrame.R (4 additions, 0 deletions)

```diff
@@ -1322,9 +1322,11 @@ setMethod("write.df",
                      "org.apache.spark.sql.parquet")
   }
   allModes <- c("append", "overwrite", "error", "ignore")
+  # nolint start
   if (!(mode %in% allModes)) {
     stop('mode should be one of "append", "overwrite", "error", "ignore"')
   }
+  # nolint end
   jmode <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "saveMode", mode)
   options <- varargsToEnv(...)
   if (!is.null(path)) {
@@ -1384,9 +1386,11 @@ setMethod("saveAsTable",
                      "org.apache.spark.sql.parquet")
   }
   allModes <- c("append", "overwrite", "error", "ignore")
+  # nolint start
   if (!(mode %in% allModes)) {
     stop('mode should be one of "append", "overwrite", "error", "ignore"')
   }
+  # nolint end
   jmode <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "saveMode", mode)
   options <- varargsToEnv(...)
   callJMethod(df@sdf, "saveAsTable", tableName, source, jmode, options)
```

### R/pkg/R/RDD.R (5 additions, 2 deletions)

```diff
@@ -85,7 +85,9 @@ setMethod("initialize", "PipelinedRDD", function(.Object, prev, func, jrdd_val)
 
   isPipelinable <- function(rdd) {
     e <- rdd@env
+    # nolint start
     !(e$isCached || e$isCheckpointed)
+    # nolint end
   }
 
   if (!inherits(prev, "PipelinedRDD") || !isPipelinable(prev)) {
@@ -97,7 +99,8 @@ setMethod("initialize", "PipelinedRDD", function(.Object, prev, func, jrdd_val)
     # prev_serializedMode is used during the delayed computation of JRDD in getJRDD
   } else {
     pipelinedFunc <- function(partIndex, part) {
-      func(partIndex, prev@func(partIndex, part))
+      f <- prev@func
+      func(partIndex, f(partIndex, part))
     }
     .Object@func <- cleanClosure(pipelinedFunc)
     .Object@prev_jrdd <- prev@prev_jrdd # maintain the pipeline
@@ -841,7 +844,7 @@ setMethod("sampleRDD",
       if (withReplacement) {
         count <- rpois(1, fraction)
         if (count > 0) {
-          res[(len + 1):(len + count)] <- rep(list(elem), count)
+          res[ (len + 1) : (len + count) ] <- rep(list(elem), count)
           len <- len + count
         }
       } else {
```

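The `sampleRDD` hunk above is part of SparkR's sample-with-replacement loop, in which each element is emitted a Poisson-distributed number of times; the same pattern recurs in `sampleByKey` in pairRDD.R below. A standalone sketch with toy inputs:

```r
# Sampling with replacement via Poisson draws, as in the hunk above: each
# element is copied rpois(1, fraction) times, so the expected output length
# is fraction * length(input). (Toy inputs, not SparkR's actual loop.)
set.seed(42)
elems <- as.list(1:1000)
fraction <- 0.5
res <- list()
len <- 0
for (elem in elems) {
  count <- rpois(1, fraction)
  if (count > 0) {
    res[(len + 1):(len + count)] <- rep(list(elem), count)
    len <- len + count
  }
}
length(res) / length(elems)  # close to `fraction` on average
```
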
### R/pkg/R/column.R (1 addition, 1 deletion)

```diff
@@ -65,7 +65,7 @@ functions <- c("min", "max", "sum", "avg", "mean", "count", "abs", "sqrt",
                "acos", "asin", "atan", "cbrt", "ceiling", "cos", "cosh", "exp",
                "expm1", "floor", "log", "log10", "log1p", "rint", "sign",
                "sin", "sinh", "tan", "tanh", "toDegrees", "toRadians")
-binary_mathfunctions<- c("atan2", "hypot")
+binary_mathfunctions <- c("atan2", "hypot")
 
 createOperator <- function(op) {
   setMethod(op,
```

### R/pkg/R/context.R (1 addition, 1 deletion)

```diff
@@ -121,7 +121,7 @@ parallelize <- function(sc, coll, numSlices = 1) {
     numSlices <- length(coll)
 
   sliceLen <- ceiling(length(coll) / numSlices)
-  slices <- split(coll, rep(1:(numSlices + 1), each = sliceLen)[1:length(coll)])
+  slices <- split(coll, rep(1: (numSlices + 1), each = sliceLen)[1:length(coll)])
 
   # Serialize each slice: obtain a list of raws, or a list of lists (slices) of
   # 2-tuples of raws
```

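The `parallelize` line touched here is what slices a local collection into `numSlices` partitions: each element gets a slice id via `rep`, then `split` groups by id. A worked example with toy inputs:

```r
# The split/rep slicing idiom from the parallelize hunk above, on toy data.
coll <- 1:10
numSlices <- 3
sliceLen <- ceiling(length(coll) / numSlices)  # 4 elements per slice
ids <- rep(1:(numSlices + 1), each = sliceLen)[1:length(coll)]
ids               # 1 1 1 1 2 2 2 2 3 3
split(coll, ids)  # $`1` = 1:4, $`2` = 5:8, $`3` = 9:10
```
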
### R/pkg/R/pairRDD.R (1 addition, 1 deletion)

```diff
@@ -879,7 +879,7 @@ setMethod("sampleByKey",
       if (withReplacement) {
         count <- rpois(1, frac)
         if (count > 0) {
-          res[(len + 1):(len + count)] <- rep(list(elem), count)
+          res[ (len + 1) : (len + count) ] <- rep(list(elem), count)
           len <- len + count
         }
       } else {
```

### R/pkg/R/utils.R (2 additions, 2 deletions)

```diff
@@ -32,7 +32,7 @@ convertJListToRList <- function(jList, flatten, logicalUpperBound = NULL,
   }
 
   results <- if (arrSize > 0) {
-    lapply(0:(arrSize - 1),
+    lapply(0 : (arrSize - 1),
            function(index) {
              obj <- callJMethod(jList, "get", as.integer(index))
 
@@ -572,7 +572,7 @@ mergePartitions <- function(rdd, zip) {
         keys <- list()
       }
       if (lengthOfValues > 1) {
-        values <- part[(lengthOfKeys + 1) : (len - 1)]
+        values <- part[ (lengthOfKeys + 1) : (len - 1) ]
       } else {
         values <- list()
       }
```

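A side note on the `0 : (arrSize - 1)` pattern in the first utils.R hunk: the surrounding guard `if (arrSize > 0)` matters because R's colon operator counts downward when the right end is smaller, as this sketch shows:

```r
# Why the lapply over 0:(arrSize - 1) is guarded by `if (arrSize > 0)`:
# the colon operator counts down, so an empty JVM list would otherwise
# yield indices 0 and -1 instead of an empty sequence.
arrSize <- 0
0:(arrSize - 1)       # c(0, -1) -- not empty!
seq_len(arrSize) - 1  # integer(0) -- a safe zero-based alternative
```
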
### R/pkg/inst/tests/test_binary_function.R (1 addition, 1 deletion)

```diff
@@ -40,7 +40,7 @@ test_that("union on two RDDs", {
   expect_equal(actual, c(as.list(nums), mockFile))
   expect_equal(getSerializedMode(union.rdd), "byte")
 
-  rdd<- map(text.rdd, function(x) {x})
+  rdd <- map(text.rdd, function(x) {x})
   union.rdd <- unionRDD(rdd, text.rdd)
   actual <- collect(union.rdd)
   expect_equal(actual, as.list(c(mockFile, mockFile)))
```

### R/pkg/inst/tests/test_rdd.R (3 additions, 3 deletions)

```diff
@@ -250,7 +250,7 @@ test_that("flatMapValues() on pairwise RDDs", {
   expect_equal(actual, list(list(1,1), list(1,2), list(2,3), list(2,4)))
 
   # Generate x to x+1 for every value
-  actual <- collect(flatMapValues(intRdd, function(x) { x:(x + 1) }))
+  actual <- collect(flatMapValues(intRdd, function(x) { x: (x + 1) }))
   expect_equal(actual,
                list(list(1L, -1), list(1L, 0), list(2L, 100), list(2L, 101),
                     list(2L, 1), list(2L, 2), list(1L, 200), list(1L, 201)))
@@ -293,7 +293,7 @@ test_that("sumRDD() on RDDs", {
 })
 
 test_that("keyBy on RDDs", {
-  func <- function(x) { x*x }
+  func <- function(x) { x * x }
   keys <- keyBy(rdd, func)
   actual <- collect(keys)
   expect_equal(actual, lapply(nums, function(x) { list(func(x), x) }))
@@ -311,7 +311,7 @@ test_that("repartition/coalesce on RDDs", {
   r2 <- repartition(rdd, 6)
   expect_equal(numPartitions(r2), 6L)
   count <- length(collectPartition(r2, 0L))
-  expect_true(count >=0 && count <= 4)
+  expect_true(count >= 0 && count <= 4)
 
   # coalesce
   r3 <- coalesce(rdd, 1)
```

### R/pkg/inst/tests/test_sparkSQL.R (3 additions, 1 deletion)

```diff
@@ -666,10 +666,12 @@ test_that("column binary mathfunctions", {
   expect_equal(collect(select(df, atan2(df$a, df$b)))[2, "ATAN2(a, b)"], atan2(2, 6))
   expect_equal(collect(select(df, atan2(df$a, df$b)))[3, "ATAN2(a, b)"], atan2(3, 7))
   expect_equal(collect(select(df, atan2(df$a, df$b)))[4, "ATAN2(a, b)"], atan2(4, 8))
+  ## nolint start
   expect_equal(collect(select(df, hypot(df$a, df$b)))[1, "HYPOT(a, b)"], sqrt(1^2 + 5^2))
   expect_equal(collect(select(df, hypot(df$a, df$b)))[2, "HYPOT(a, b)"], sqrt(2^2 + 6^2))
   expect_equal(collect(select(df, hypot(df$a, df$b)))[3, "HYPOT(a, b)"], sqrt(3^2 + 7^2))
   expect_equal(collect(select(df, hypot(df$a, df$b)))[4, "HYPOT(a, b)"], sqrt(4^2 + 8^2))
+  ## nolint end
 })
 
 test_that("string operators", {
@@ -876,7 +878,7 @@ test_that("parquetFile works with multiple input paths", {
   write.df(df, parquetPath2, "parquet", mode="overwrite")
   parquetDF <- parquetFile(sqlContext, parquetPath, parquetPath2)
   expect_is(parquetDF, "DataFrame")
-  expect_equal(count(parquetDF), count(df)*2)
+  expect_equal(count(parquetDF), count(df) * 2)
 })
 
 test_that("describe() on a DataFrame", {
```
