Merge remote-tracking branch 'origin/master' into limit-pushdown-2
JoshRosen committed Feb 13, 2016
2 parents c28343f + 2228f07 commit ac3e978
Showing 133 changed files with 5,162 additions and 1,072 deletions.
2 changes: 1 addition & 1 deletion R/pkg/R/serialize.R
@@ -54,7 +54,7 @@ writeObject <- function(con, object, writeType = TRUE) {
   # passing in vectors as arrays and instead require arrays to be passed
   # as lists.
   type <- class(object)[[1]] # class of POSIXlt is c("POSIXlt", "POSIXt")
-  # Checking types is needed here, since is.na only handles atomic vectors,
+  # Checking types is needed here, since 'is.na' only handles atomic vectors,
   # lists and pairlists
   if (type %in% c("integer", "character", "logical", "double", "numeric")) {
     if (is.na(object)) {
2 changes: 1 addition & 1 deletion conf/spark-env.sh.template
@@ -41,7 +41,7 @@
 # - SPARK_EXECUTOR_MEMORY, Memory per Executor (e.g. 1000M, 2G) (Default: 1G)
 # - SPARK_DRIVER_MEMORY, Memory for Driver (e.g. 1000M, 2G) (Default: 1G)
 # - SPARK_YARN_APP_NAME, The name of your application (Default: Spark)
-# - SPARK_YARN_QUEUE, The hadoop queue to use for allocation requests (Default: default)
+# - SPARK_YARN_QUEUE, The hadoop queue to use for allocation requests (Default: 'default')
 # - SPARK_YARN_DIST_FILES, Comma separated list of files to be distributed with the job.
 # - SPARK_YARN_DIST_ARCHIVES, Comma separated list of archives to be distributed with the job.

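Aside: the SPARK_YARN_QUEUE variable documented above can also be set programmatically. The following is a minimal sketch, not code from this commit; it assumes you would rather use the equivalent spark.yarn.queue configuration key, and the application name and queue name ("production") are placeholders.

    import org.apache.spark.SparkConf

    // Hedged sketch: spark.yarn.queue is the configuration counterpart of the
    // SPARK_YARN_QUEUE environment variable documented in spark-env.sh.template.
    val queueConf = new SparkConf()
      .setAppName("queue-demo")              // placeholder application name
      .set("spark.yarn.queue", "production") // placeholder queue name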
@@ -118,6 +118,7 @@ $(document).ready(function() {
     {name: 'seventh'},
     {name: 'eighth'},
   ],
+  "autoWidth": false
 };

 var rowGroupConf = {


2 changes: 1 addition & 1 deletion core/src/main/scala/org/apache/spark/SecurityManager.scala
@@ -69,7 +69,7 @@ import org.apache.spark.util.Utils
  *
  *  - HTTP for broadcast and file server (via HttpServer) -> Spark currently uses Jetty
  *    for the HttpServer. Jetty supports multiple authentication mechanisms -
- *    Basic, Digest, Form, Spengo, etc. It also supports multiple different login
+ *    Basic, Digest, Form, Spnego, etc. It also supports multiple different login
  *    services - Hash, JAAS, Spnego, JDBC, etc. Spark currently uses the HashLoginService
  *    to authenticate using DIGEST-MD5 via a single user and the shared secret.
  *    Since we are using DIGEST-MD5, the shared secret is not passed on the wire
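Aside: the SecurityManager comment above describes DIGEST-MD5 authentication with a shared secret. A minimal sketch of enabling that from application code, using the standard spark.authenticate settings, is shown below; it is not part of this commit, and the secret value is a placeholder.

    import org.apache.spark.SparkConf

    // Hedged sketch: enable Spark's shared-secret authentication, which the
    // SecurityManager described above uses for DIGEST-MD5. The secret would
    // normally come from a secure source, not a hard-coded string.
    val secureConf = new SparkConf()
      .setAppName("secured-app")                     // placeholder name
      .set("spark.authenticate", "true")
      .set("spark.authenticate.secret", "change-me") // placeholder secret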
5 changes: 2 additions & 3 deletions core/src/main/scala/org/apache/spark/SparkEnv.scala
@@ -35,7 +35,7 @@ import org.apache.spark.network.netty.NettyBlockTransferService
 import org.apache.spark.rpc.{RpcEndpoint, RpcEndpointRef, RpcEnv}
 import org.apache.spark.scheduler.{LiveListenerBus, OutputCommitCoordinator}
 import org.apache.spark.scheduler.OutputCommitCoordinator.OutputCommitCoordinatorEndpoint
-import org.apache.spark.serializer.Serializer
+import org.apache.spark.serializer.{JavaSerializer, Serializer}
 import org.apache.spark.shuffle.ShuffleManager
 import org.apache.spark.storage._
 import org.apache.spark.util.{RpcUtils, Utils}
@@ -277,8 +277,7 @@ object SparkEnv extends Logging {
       "spark.serializer", "org.apache.spark.serializer.JavaSerializer")
     logDebug(s"Using serializer: ${serializer.getClass}")

-    val closureSerializer = instantiateClassFromConf[Serializer](
-      "spark.closure.serializer", "org.apache.spark.serializer.JavaSerializer")
+    val closureSerializer = new JavaSerializer(conf)

     def registerOrLookupEndpoint(
         name: String, endpointCreator: => RpcEndpoint):
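Aside: after this hunk the closure serializer is always JavaSerializer, while the data serializer remains user-configurable through spark.serializer. A small sketch of choosing Kryo for data serialization follows; it is illustrative only and not part of this commit, with a placeholder application name.

    import org.apache.spark.SparkConf

    // Hedged sketch: only spark.serializer is configurable here; closures are
    // always serialized with JavaSerializer. Kryo is shown as a common choice.
    val serializerConf = new SparkConf()
      .setAppName("serializer-demo") // placeholder name
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")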
@@ -774,6 +774,16 @@ class JavaSparkContext(val sc: SparkContext)

   /** Cancel all jobs that have been scheduled or are running. */
   def cancelAllJobs(): Unit = sc.cancelAllJobs()
+
+  /**
+   * Returns a Java map of JavaRDDs that have marked themselves as persistent via cache() call.
+   * Note that this does not necessarily mean the caching or computation was successful.
+   */
+  def getPersistentRDDs: JMap[java.lang.Integer, JavaRDD[_]] = {
+    sc.getPersistentRDDs.mapValues(s => JavaRDD.fromRDD(s))
+      .asJava.asInstanceOf[JMap[java.lang.Integer, JavaRDD[_]]]
+  }
+
 }

 object JavaSparkContext {
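Aside: a rough usage sketch for the new getPersistentRDDs method, written against the Java API from Scala. It is not from this commit; the application name, master URL, and sample data are placeholders.

    import java.util.Arrays

    import scala.collection.JavaConverters._

    import org.apache.spark.SparkConf
    import org.apache.spark.api.java.JavaSparkContext

    // Hedged sketch: cache one RDD, then list everything currently marked persistent.
    val jsc = new JavaSparkContext(
      new SparkConf().setAppName("persistent-rdds-demo").setMaster("local[*]")) // placeholder config
    val numbers = jsc.parallelize(Arrays.asList(1, 2, 3)).cache()
    numbers.count() // force evaluation so the cached blocks actually exist
    for ((id, rdd) <- jsc.getPersistentRDDs.asScala) {
      println(s"RDD $id has storage level ${rdd.getStorageLevel}")
    }
    jsc.stop()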