Commit

Merge remote-tracking branch 'upstream/master'
gweidner committed May 6, 2015
2 parents 9790cbe + 845d1d4 commit 4a35c20
Showing 244 changed files with 37,711 additions and 17,897 deletions.
7 changes: 0 additions & 7 deletions .rat-excludes
@@ -74,12 +74,5 @@ logs
.*scalastyle-output.xml
.*dependency-reduced-pom.xml
known_translations
json_expectation
local-1422981759269/*
local-1422981780767/*
local-1425081759269/*
local-1426533911241/*
local-1426633911242/*
local-1427397477963/*
DESCRIPTION
NAMESPACE
30 changes: 30 additions & 0 deletions LICENSE
@@ -643,6 +643,36 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

========================================================================
For d3 (core/src/main/resources/org/apache/spark/ui/static/d3.min.js):
========================================================================

Copyright (c) 2010-2015, Michael Bostock
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

* The name Michael Bostock may not be used to endorse or promote products
derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL MICHAEL BOSTOCK BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

========================================================================
For Scala Interpreter classes (all .scala files in repl/src/main/scala
107 changes: 14 additions & 93 deletions R/pkg/NAMESPACE
@@ -1,117 +1,36 @@
#exportPattern("^[[:alpha:]]+")
exportClasses("RDD")
exportClasses("Broadcast")
exportMethods(
"aggregateByKey",
"aggregateRDD",
"cache",
"cartesian",
"checkpoint",
"coalesce",
"cogroup",
"collect",
"collectAsMap",
"collectPartition",
"combineByKey",
"count",
"countByKey",
"countByValue",
"distinct",
"Filter",
"filterRDD",
"first",
"flatMap",
"flatMapValues",
"fold",
"foldByKey",
"foreach",
"foreachPartition",
"fullOuterJoin",
"glom",
"groupByKey",
"intersection",
"join",
"keyBy",
"keys",
"length",
"lapply",
"lapplyPartition",
"lapplyPartitionsWithIndex",
"leftOuterJoin",
"lookup",
"map",
"mapPartitions",
"mapPartitionsWithIndex",
"mapValues",
"maximum",
"minimum",
"numPartitions",
"partitionBy",
"persist",
"pipeRDD",
"reduce",
"reduceByKey",
"reduceByKeyLocally",
"repartition",
"rightOuterJoin",
"sampleByKey",
"sampleRDD",
"saveAsTextFile",
"saveAsObjectFile",
"sortBy",
"sortByKey",
"subtract",
"subtractByKey",
"sumRDD",
"take",
"takeOrdered",
"takeSample",
"top",
"unionRDD",
"unpersist",
"value",
"values",
"zipPartitions",
"zipRDD",
"zipWithIndex",
"zipWithUniqueId"
)
# Imports from base R
importFrom(methods, setGeneric, setMethod, setOldClass)
useDynLib(SparkR, stringHashCode)

# S3 methods exported
export(
"textFile",
"objectFile",
"parallelize",
"hashCode",
"includePackage",
"broadcast",
"setBroadcastValue",
"setCheckpointDir"
)
export("sparkR.init")
export("sparkR.stop")
export("print.jobj")
useDynLib(SparkR, stringHashCode)
importFrom(methods, setGeneric, setMethod, setOldClass)

# SparkRSQL

exportClasses("DataFrame")

exportMethods("columns",
exportMethods("cache",
"collect",
"columns",
"count",
"describe",
"distinct",
"dtypes",
"except",
"explain",
"filter",
"first",
"groupBy",
"head",
"insertInto",
"intersect",
"isLocal",
"join",
"length",
"limit",
"orderBy",
"names",
"persist",
"printSchema",
"registerTempTable",
"repartition",
@@ -125,9 +44,11 @@ exportMethods("columns",
"show",
"showDF",
"sortDF",
"take",
"toJSON",
"toRDD",
"unionAll",
"unpersist",
"where",
"withColumn",
"withColumnRenamed")
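The NAMESPACE rewrite above drops the RDD-level exports and keeps the base-R imports, the sparkR.* session helpers, and the DataFrame methods. A minimal sketch of what this means for callers, assuming a working SparkR installation (the JSON path is a placeholder):

library(SparkR)

sc <- sparkR.init()
sqlCtx <- sparkRSQL.init(sc)

# DataFrame methods such as describe() stay exported and are called as before.
df <- jsonFile(sqlCtx, "path/to/file.json")
describe(df)

# RDD-level helpers (numPartitions, value, setBroadcastValue, ...) are no longer
# exported, so the tests and internal defaults in the later hunks of this commit
# reach them through the ':::' operator instead:
rdd <- SparkR:::parallelize(sc, 1:10)
SparkR:::numPartitions(rdd)

sparkR.stop()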
37 changes: 37 additions & 0 deletions R/pkg/R/DataFrame.R
@@ -1276,3 +1276,40 @@ setMethod("saveAsTable",
callJMethod(df@sdf, "saveAsTable", tableName, source, jmode, options)
})

#' describe
#'
#' Computes statistics for numeric columns.
#' If no columns are given, this function computes statistics for all numerical columns.
#'
#' @param x A DataFrame to compute statistics on.
#' @param col A string giving the name of a column to summarize.
#' @param ... Additional column names.
#' @return A DataFrame
#' @rdname describe
#' @export
#' @examples
#'\dontrun{
#' sc <- sparkR.init()
#' sqlCtx <- sparkRSQL.init(sc)
#' path <- "path/to/file.json"
#' df <- jsonFile(sqlCtx, path)
#' describe(df)
#' describe(df, "col1")
#' describe(df, "col1", "col2")
#' }
setMethod("describe",
signature(x = "DataFrame", col = "character"),
function(x, col, ...) {
colList <- list(col, ...)
sdf <- callJMethod(x@sdf, "describe", listToSeq(colList))
dataFrame(sdf)
})

#' @rdname describe
setMethod("describe",
signature(x = "DataFrame"),
function(x) {
colList <- as.list(c(columns(x)))
sdf <- callJMethod(x@sdf, "describe", listToSeq(colList))
dataFrame(sdf)
})
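
For quick reference, a usage sketch of the new describe() method defined above, assuming the same sqlCtx and JSON input (with "name" and "age" fields) that the test_sparkSQL.R test further down uses:

df <- jsonFile(sqlCtx, jsonPath)

# No column names: statistics are computed over all numeric columns.
collect(describe(df))

# One or more column names: dispatches to the (DataFrame, character) method.
stats <- describe(df, "age")
collect(stats)
# The result carries a "summary" column whose first rows are "count", "mean"
# and "stddev", which is what the new test asserts on.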
10 changes: 5 additions & 5 deletions R/pkg/R/RDD.R
@@ -797,7 +797,7 @@ setMethod("first",
#' @aliases distinct,RDD-method
setMethod("distinct",
signature(x = "RDD"),
function(x, numPartitions = SparkR::numPartitions(x)) {
function(x, numPartitions = SparkR:::numPartitions(x)) {
identical.mapped <- lapply(x, function(x) { list(x, NULL) })
reduced <- reduceByKey(identical.mapped,
function(x, y) { x },
@@ -993,7 +993,7 @@ setMethod("coalesce",
signature(x = "RDD", numPartitions = "numeric"),
function(x, numPartitions, shuffle = FALSE) {
numPartitions <- numToInt(numPartitions)
if (shuffle || numPartitions > SparkR::numPartitions(x)) {
if (shuffle || numPartitions > SparkR:::numPartitions(x)) {
func <- function(partIndex, part) {
set.seed(partIndex) # partIndex as seed
start <- as.integer(sample(numPartitions, 1) - 1)
@@ -1078,7 +1078,7 @@ setMethod("saveAsTextFile",
#' @aliases sortBy,RDD,RDD-method
setMethod("sortBy",
signature(x = "RDD", func = "function"),
function(x, func, ascending = TRUE, numPartitions = SparkR::numPartitions(x)) {
function(x, func, ascending = TRUE, numPartitions = SparkR:::numPartitions(x)) {
values(sortByKey(keyBy(x, func), ascending, numPartitions))
})

@@ -1552,7 +1552,7 @@ setMethod("cartesian",
#' @aliases subtract,RDD
setMethod("subtract",
signature(x = "RDD", other = "RDD"),
function(x, other, numPartitions = SparkR::numPartitions(x)) {
function(x, other, numPartitions = SparkR:::numPartitions(x)) {
mapFunction <- function(e) { list(e, NA) }
rdd1 <- map(x, mapFunction)
rdd2 <- map(other, mapFunction)
@@ -1583,7 +1583,7 @@ setMethod("subtract",
#' @aliases intersection,RDD
setMethod("intersection",
signature(x = "RDD", other = "RDD"),
function(x, other, numPartitions = SparkR::numPartitions(x)) {
function(x, other, numPartitions = SparkR:::numPartitions(x)) {
rdd1 <- map(x, function(v) { list(v, NA) })
rdd2 <- map(other, function(v) { list(v, NA) })

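The repeated switch from SparkR:: to SparkR::: in the default arguments above follows from the NAMESPACE change: '::' resolves only exported objects, while ':::' also reaches unexported (internal) ones. A small illustration, assuming SparkR is installed with the trimmed NAMESPACE:

library(SparkR)

# numPartitions was removed from exportMethods(), so '::' no longer finds it:
try(SparkR::numPartitions)
# Error: 'numPartitions' is not an exported object from 'namespace:SparkR'

# ':::' looks inside the package namespace itself, which is why defaults such as
# numPartitions = SparkR:::numPartitions(x) keep working:
SparkR:::numPartitions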
4 changes: 4 additions & 0 deletions R/pkg/R/generics.R
@@ -384,6 +384,10 @@ setGeneric("value", function(bcast) { standardGeneric("value") })
#' @export
setGeneric("columns", function(x) {standardGeneric("columns") })

#' @rdname describe
#' @export
setGeneric("describe", function(x, col, ...) { standardGeneric("describe") })

#' @rdname schema
#' @export
setGeneric("dtypes", function(x) { standardGeneric("dtypes") })
4 changes: 2 additions & 2 deletions R/pkg/R/pairRDD.R
@@ -739,7 +739,7 @@ setMethod("cogroup",
#' @aliases sortByKey,RDD,RDD-method
setMethod("sortByKey",
signature(x = "RDD"),
function(x, ascending = TRUE, numPartitions = SparkR::numPartitions(x)) {
function(x, ascending = TRUE, numPartitions = SparkR:::numPartitions(x)) {
rangeBounds <- list()

if (numPartitions > 1) {
@@ -806,7 +806,7 @@ setMethod("sortByKey",
#' @aliases subtractByKey,RDD
setMethod("subtractByKey",
signature(x = "RDD", other = "RDD"),
function(x, other, numPartitions = SparkR::numPartitions(x)) {
function(x, other, numPartitions = SparkR:::numPartitions(x)) {
filterFunction <- function(elem) {
iters <- elem[[2]]
(length(iters[[1]]) > 0) && (length(iters[[2]]) == 0)
2 changes: 1 addition & 1 deletion R/pkg/inst/tests/test_broadcast.R
@@ -29,7 +29,7 @@ test_that("using broadcast variable", {
randomMatBr <- broadcast(sc, randomMat)

useBroadcast <- function(x) {
sum(value(randomMatBr) * x)
sum(SparkR:::value(randomMatBr) * x)
}
actual <- collect(lapply(rrdd, useBroadcast))
expected <- list(sum(randomMat) * 1, sum(randomMat) * 2)
11 changes: 11 additions & 0 deletions R/pkg/inst/tests/test_sparkSQL.R
@@ -705,5 +705,16 @@ test_that("parquetFile works with multiple input paths", {
expect_true(count(parquetDF) == count(df)*2)
})

test_that("describe() on a DataFrame", {
df <- jsonFile(sqlCtx, jsonPath)
stats <- describe(df, "age")
expect_true(collect(stats)[1, "summary"] == "count")
expect_true(collect(stats)[2, "age"] == 24.5)
expect_true(collect(stats)[3, "age"] == 5.5)
stats <- describe(df)
expect_true(collect(stats)[4, "name"] == "Andy")
expect_true(collect(stats)[5, "age"] == 30.0)
})

unlink(parquetPath)
unlink(jsonPath)
5 changes: 4 additions & 1 deletion R/pkg/inst/tests/test_utils.R
@@ -92,7 +92,10 @@ test_that("cleanClosure on R functions", {
}
newF <- cleanClosure(f)
env <- environment(newF)
expect_equal(length(ls(env)), 3) # Only "g", "l" and "f". No "base", "field" or "defUse".
# TODO(shivaram): length(ls(env)) is 4 here for some reason and `lapply` is included in `env`.
# Disabling this test till we debug this.
#
# expect_equal(length(ls(env)), 3) # Only "g", "l" and "f". No "base", "field" or "defUse".
expect_true("g" %in% ls(env))
expect_true("l" %in% ls(env))
expect_true("f" %in% ls(env))
2 changes: 1 addition & 1 deletion R/pkg/inst/worker/worker.R
@@ -72,7 +72,7 @@ if (numBroadcastVars > 0) {
for (bcast in seq(1:numBroadcastVars)) {
bcastId <- SparkR:::readInt(inputCon)
value <- unserialize(SparkR:::readRaw(inputCon))
setBroadcastValue(bcastId, value)
SparkR:::setBroadcastValue(bcastId, value)
}
}

8 changes: 0 additions & 8 deletions core/pom.xml
@@ -228,14 +228,6 @@
<artifactId>json4s-jackson_${scala.binary.version}</artifactId>
<version>3.2.10</version>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-server</artifactId>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-core</artifactId>
</dependency>
<dependency>
<groupId>org.apache.mesos</groupId>
<artifactId>mesos</artifactId>
8 changes: 1 addition & 7 deletions core/src/main/java/org/apache/spark/JobExecutionStatus.java
@@ -17,15 +17,9 @@

package org.apache.spark;

import org.apache.spark.util.EnumUtil;

public enum JobExecutionStatus {
RUNNING,
SUCCEEDED,
FAILED,
UNKNOWN;

public static JobExecutionStatus fromString(String str) {
return EnumUtil.parseIgnoreCase(JobExecutionStatus.class, str);
}
UNKNOWN
}
