
Merge remote-tracking branch 'upstream/master' into security-branch-0.9-with-client-rebase

Conflicts:
	core/src/main/scala/org/apache/spark/broadcast/HttpBroadcast.scala
	core/src/main/scala/org/apache/spark/deploy/client/TestClient.scala
	core/src/main/scala/org/apache/spark/deploy/master/Master.scala
	core/src/main/scala/org/apache/spark/deploy/master/ui/MasterWebUI.scala
	core/src/main/scala/org/apache/spark/deploy/worker/ui/WorkerWebUI.scala
	core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
	core/src/main/scala/org/apache/spark/metrics/MetricsSystem.scala
	core/src/main/scala/org/apache/spark/metrics/sink/ConsoleSink.scala
	core/src/main/scala/org/apache/spark/metrics/sink/CsvSink.scala
	core/src/main/scala/org/apache/spark/metrics/sink/JmxSink.scala
	core/src/main/scala/org/apache/spark/metrics/sink/MetricsServlet.scala
	core/src/main/scala/org/apache/spark/network/Connection.scala
	core/src/main/scala/org/apache/spark/network/ConnectionManager.scala
	core/src/main/scala/org/apache/spark/network/ReceiverTest.scala
	core/src/main/scala/org/apache/spark/network/SenderTest.scala
	core/src/main/scala/org/apache/spark/storage/BlockManager.scala
	core/src/main/scala/org/apache/spark/storage/ThreadingTest.scala
	core/src/main/scala/org/apache/spark/ui/JettyUtils.scala
	core/src/main/scala/org/apache/spark/ui/SparkUI.scala
	core/src/main/scala/org/apache/spark/ui/jobs/JobProgressUI.scala
	core/src/main/scala/org/apache/spark/util/Utils.scala
	core/src/test/scala/org/apache/spark/metrics/MetricsSystemSuite.scala
	core/src/test/scala/org/apache/spark/storage/BlockManagerSuite.scala
	core/src/test/scala/org/apache/spark/ui/UISuite.scala
	project/SparkBuild.scala
	yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
	yarn/stable/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
2 parents 13733e1 + cda381f, commit ac046b30f41400ae0cb9de1af31996e3255cd1f1, committed by @tgravescs on Mar 6, 2014
Showing 370 changed files with 4,455 additions and 2,284 deletions.
@@ -396,3 +396,35 @@ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
+
+
+========================================================================
+For sbt and sbt-launch-lib.bash in sbt/:
+========================================================================
+
+// Generated from http://www.opensource.org/licenses/bsd-license.php
+Copyright (c) 2011, Paul Phillips.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of the author nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -1,12 +1,12 @@
# Apache Spark
-Lightning-Fast Cluster Computing - <http://spark.incubator.apache.org/>
+Lightning-Fast Cluster Computing - <http://spark.apache.org/>
## Online Documentation
You can find the latest Spark documentation, including a programming
-guide, on the project webpage at <http://spark.incubator.apache.org/documentation.html>.
+guide, on the project webpage at <http://spark.apache.org/documentation.html>.
This README file only contains basic setup instructions.
@@ -92,21 +92,10 @@ If your project is built with Maven, add this to your POM file's `<dependencies>
## Configuration
-Please refer to the [Configuration guide](http://spark.incubator.apache.org/docs/latest/configuration.html)
+Please refer to the [Configuration guide](http://spark.apache.org/docs/latest/configuration.html)
in the online documentation for an overview on how to configure Spark.
-## Apache Incubator Notice
-
-Apache Spark is an effort undergoing incubation at The Apache Software
-Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of
-all newly accepted projects until a further review indicates that the
-infrastructure, communications, and decision making process have stabilized in
-a manner consistent with other successful ASF projects. While incubation status
-is not necessarily a reflection of the completeness or stability of the code,
-it does indicate that the project has yet to be fully endorsed by the ASF.
-
-
## Contributing to Spark
Contributions via GitHub pull requests are gladly accepted from their original
@@ -21,17 +21,20 @@
<parent>
<groupId>org.apache.spark</groupId>
<artifactId>spark-parent</artifactId>
- <version>1.0.0-incubating-SNAPSHOT</version>
+ <version>1.0.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<groupId>org.apache.spark</groupId>
<artifactId>spark-assembly_2.10</artifactId>
<name>Spark Project Assembly</name>
- <url>http://spark.incubator.apache.org/</url>
+ <url>http://spark.apache.org/</url>
+ <packaging>pom</packaging>
<properties>
- <spark.jar>${project.build.directory}/scala-${scala.binary.version}/${project.artifactId}-${project.version}-hadoop${hadoop.version}.jar</spark.jar>
+ <spark.jar.dir>scala-${scala.binary.version}</spark.jar.dir>
+ <spark.jar.basename>${project.artifactId}-${project.version}-hadoop${hadoop.version}.jar</spark.jar.basename>
+ <spark.jar>${project.build.directory}/${spark.jar.dir}/${spark.jar.basename}</spark.jar>
<deb.pkg.name>spark</deb.pkg.name>
<deb.install.path>/usr/share/spark</deb.install.path>
<deb.user>root</deb.user>
@@ -55,6 +55,15 @@
<include>**/*</include>
</includes>
</fileSet>
+ <fileSet>
+ <directory>
+ ${project.parent.basedir}/assembly/target/${spark.jar.dir}
+ </directory>
+ <outputDirectory>/</outputDirectory>
+ <includes>
+ <include>${spark.jar.basename}</include>
+ </includes>
+ </fileSet>
</fileSets>
<dependencySets>
@@ -75,6 +84,8 @@
<excludes>
<exclude>org.apache.hadoop:*:jar</exclude>
<exclude>org.apache.spark:*:jar</exclude>
+ <exclude>org.apache.zookeeper:*:jar</exclude>
+ <exclude>org.apache.avro:*:jar</exclude>
</excludes>
</dependencySet>
</dependencySets>
@@ -21,15 +21,29 @@
<parent>
<groupId>org.apache.spark</groupId>
<artifactId>spark-parent</artifactId>
- <version>1.0.0-incubating-SNAPSHOT</version>
+ <version>1.0.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<groupId>org.apache.spark</groupId>
<artifactId>spark-bagel_2.10</artifactId>
<packaging>jar</packaging>
<name>Spark Project Bagel</name>
- <url>http://spark.incubator.apache.org/</url>
+ <url>http://spark.apache.org/</url>
+
+ <profiles>
+ <profile>
+ <!-- SPARK-1121: Adds an explicit dependency on Avro to work around
+ a Hadoop 0.23.X issue -->
+ <id>yarn-alpha</id>
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.avro</groupId>
+ <artifactId>avro</artifactId>
+ </dependency>
+ </dependencies>
+ </profile>
+ </profiles>
<dependencies>
<dependency>
@@ -27,7 +27,7 @@ object Bagel extends Logging {
/**
* Runs a Bagel program.
- * @param sc [[org.apache.spark.SparkContext]] to use for the program.
+ * @param sc org.apache.spark.SparkContext to use for the program.
* @param vertices vertices of the graph represented as an RDD of (Key, Vertex) pairs. Often the
* Key will be the vertex id.
* @param messages initial set of messages represented as an RDD of (Key, Message) pairs. Often
@@ -38,10 +38,10 @@ object Bagel extends Logging {
* @param aggregator [[org.apache.spark.bagel.Aggregator]] performs a reduce across all vertices
* after each superstep and provides the result to each vertex in the next
* superstep.
- * @param partitioner [[org.apache.spark.Partitioner]] partitions values by key
+ * @param partitioner org.apache.spark.Partitioner partitions values by key
* @param numPartitions number of partitions across which to split the graph.
* Default is the default parallelism of the SparkContext
- * @param storageLevel [[org.apache.spark.storage.StorageLevel]] to use for caching of
+ * @param storageLevel org.apache.spark.storage.StorageLevel to use for caching of
* intermediate RDDs in each superstep. Defaults to caching in memory.
* @param compute function that takes a Vertex, optional set of (possibly combined) messages to
* the Vertex, optional Aggregator and the current superstep,
@@ -131,7 +131,7 @@ object Bagel extends Logging {
/**
* Runs a Bagel program with no [[org.apache.spark.bagel.Aggregator]], default
- * [[org.apache.spark.HashPartitioner]] and default storage level
+ * org.apache.spark.HashPartitioner and default storage level
*/
def run[K: Manifest, V <: Vertex : Manifest, M <: Message[K] : Manifest, C: Manifest](
sc: SparkContext,
@@ -146,7 +146,7 @@ object Bagel extends Logging {
/**
* Runs a Bagel program with no [[org.apache.spark.bagel.Aggregator]] and the
- * default [[org.apache.spark.HashPartitioner]]
+ * default org.apache.spark.HashPartitioner
*/
def run[K: Manifest, V <: Vertex : Manifest, M <: Message[K] : Manifest, C: Manifest](
sc: SparkContext,
@@ -166,7 +166,7 @@ object Bagel extends Logging {
/**
* Runs a Bagel program with no [[org.apache.spark.bagel.Aggregator]],
- * default [[org.apache.spark.HashPartitioner]],
+ * default org.apache.spark.HashPartitioner,
* [[org.apache.spark.bagel.DefaultCombiner]] and the default storage level
*/
def run[K: Manifest, V <: Vertex : Manifest, M <: Message[K] : Manifest](
@@ -180,7 +180,7 @@ object Bagel extends Logging {
/**
* Runs a Bagel program with no [[org.apache.spark.bagel.Aggregator]],
- * the default [[org.apache.spark.HashPartitioner]]
+ * the default org.apache.spark.HashPartitioner
* and [[org.apache.spark.bagel.DefaultCombiner]]
*/
def run[K: Manifest, V <: Vertex : Manifest, M <: Message[K] : Manifest](
@@ -19,3 +19,4 @@
# - SPARK_WORKER_PORT / SPARK_WORKER_WEBUI_PORT
# - SPARK_WORKER_INSTANCES, to set the number of worker processes per node
# - SPARK_WORKER_DIR, to set the working directory of worker processes
+# - SPARK_PUBLIC_DNS, to set the public dns name of the master
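
The new SPARK_PUBLIC_DNS entry documents an override for the hostname a daemon advertises (for example in web UI links). As a hedged sketch of how such a variable is typically consumed, not Spark's actual resolution code, a daemon would prefer the environment variable and fall back to the local hostname:

```scala
object PublicDns {
  // Prefer the operator-supplied public DNS name; otherwise fall back to
  // the machine's own hostname.
  def publicHostName(): String =
    sys.env.getOrElse("SPARK_PUBLIC_DNS", java.net.InetAddress.getLocalHost.getHostName)
}
```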
@@ -21,15 +21,29 @@
<parent>
<groupId>org.apache.spark</groupId>
<artifactId>spark-parent</artifactId>
- <version>1.0.0-incubating-SNAPSHOT</version>
+ <version>1.0.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<groupId>org.apache.spark</groupId>
<artifactId>spark-core_2.10</artifactId>
<packaging>jar</packaging>
<name>Spark Project Core</name>
- <url>http://spark.incubator.apache.org/</url>
+ <url>http://spark.apache.org/</url>
+
+ <!-- SPARK-1121: Adds an explicit dependency on Avro to work around
+ a Hadoop 0.23.X issue -->
+ <profiles>
+ <profile>
+ <id>yarn-alpha</id>
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.avro</groupId>
+ <artifactId>avro</artifactId>
+ </dependency>
+ </dependencies>
+ </profile>
+ </profiles>
<dependencies>
<dependency>
@@ -39,18 +53,16 @@
<dependency>
<groupId>net.java.dev.jets3t</groupId>
<artifactId>jets3t</artifactId>
+ <exclusions>
+ <exclusion>
+ <groupId>commons-logging</groupId>
+ <artifactId>commons-logging</artifactId>
+ </exclusion>
+ </exclusions>
</dependency>
<dependency>
- <groupId>org.apache.avro</groupId>
- <artifactId>avro</artifactId>
- </dependency>
- <dependency>
- <groupId>org.apache.avro</groupId>
- <artifactId>avro-ipc</artifactId>
- </dependency>
- <dependency>
- <groupId>org.apache.zookeeper</groupId>
- <artifactId>zookeeper</artifactId>
+ <groupId>org.apache.curator</groupId>
+ <artifactId>curator-recipes</artifactId>
</dependency>
<dependency>
<groupId>org.eclipse.jetty</groupId>
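
The hunk above replaces the raw ZooKeeper client dependency with Apache Curator's recipes module. As an illustration of what the recipes artifact provides (the connection string, znode path, and retry settings below are placeholders, not values taken from Spark), a leader-election latch of the kind a standby master needs looks roughly like this:

```scala
import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.framework.recipes.leader.LeaderLatch
import org.apache.curator.retry.ExponentialBackoffRetry

object LeaderLatchSketch {
  def main(args: Array[String]): Unit = {
    // Placeholder ZooKeeper address and election path; the retry policy is illustrative.
    val client = CuratorFrameworkFactory.newClient(
      "localhost:2181", new ExponentialBackoffRetry(1000, 3))
    client.start()

    val latch = new LeaderLatch(client, "/spark/leader-election")
    latch.start()
    latch.await()               // blocks until this process is elected leader
    println("elected leader")

    latch.close()
    client.close()
  }
}
```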
@@ -81,6 +93,22 @@
<artifactId>slf4j-api</artifactId>
</dependency>
<dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>jul-to-slf4j</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>jcl-over-slf4j</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>log4j</groupId>
+ <artifactId>log4j</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-log4j12</artifactId>
+ </dependency>
+ <dependency>
<groupId>com.ning</groupId>
<artifactId>compress-lzf</artifactId>
</dependency>
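
Together with the commons-logging exclusion on jets3t earlier in this file, the new jul-to-slf4j and jcl-over-slf4j bridges route java.util.logging and Commons Logging output through SLF4J, while log4j and slf4j-log4j12 move from test-only to compile scope. A hedged sketch of how the JUL bridge is typically installed at startup (where Spark itself would call this is an assumption, not shown in the diff):

```scala
import org.slf4j.bridge.SLF4JBridgeHandler

object LoggingBootstrap {
  def install(): Unit = {
    // Drop java.util.logging's default console handlers, then forward JUL
    // records to SLF4J, which slf4j-log4j12 in turn delivers to log4j.
    SLF4JBridgeHandler.removeHandlersForRootLogger()
    SLF4JBridgeHandler.install()
  }
}
```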
@@ -124,8 +152,18 @@
<artifactId>scala-library</artifactId>
</dependency>
<dependency>
- <groupId>net.liftweb</groupId>
- <artifactId>lift-json_${scala.binary.version}</artifactId>
+ <groupId>org.json4s</groupId>
+ <artifactId>json4s-jackson_${scala.binary.version}</artifactId>
+ <version>3.2.6</version>
+ <!-- see also exclusion for lift-json; this is necessary since it depends on
+ scala-library and scalap 2.10.0, but we use 2.10.3, and only override
+ scala-library -->
+ <exclusions>
+ <exclusion>
+ <groupId>org.scala-lang</groupId>
+ <artifactId>scalap</artifactId>
+ </exclusion>
+ </exclusions>
</dependency>
<dependency>
<groupId>it.unimi.dsi</groupId>
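
With lift-json replaced by json4s-jackson above (and scalap excluded for the 2.10.0-vs-2.10.3 mismatch the comment explains), JSON code moves to the json4s DSL. A minimal round-trip sketch with made-up field names, assuming the json4s-jackson 3.2.x API:

```scala
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._

object JsonSketch {
  def main(args: Array[String]): Unit = {
    // Build a small JSON object with the DSL, render it to a string,
    // then parse it back and extract a field.
    val json: JValue = ("name" -> "spark-core") ~ ("version" -> "1.0.0-SNAPSHOT")
    val text = compact(render(json))
    val parsed = parse(text)
    println(text)                     // {"name":"spark-core","version":"1.0.0-SNAPSHOT"}
    println((parsed \ "name").values) // spark-core
  }
}
```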
@@ -144,10 +182,6 @@
<artifactId>netty-all</artifactId>
</dependency>
<dependency>
- <groupId>log4j</groupId>
- <artifactId>log4j</artifactId>
- </dependency>
- <dependency>
<groupId>com.clearspring.analytics</groupId>
<artifactId>stream</artifactId>
</dependency>
@@ -206,11 +240,6 @@
<artifactId>junit-interface</artifactId>
<scope>test</scope>
</dependency>
- <dependency>
- <groupId>org.slf4j</groupId>
- <artifactId>slf4j-log4j12</artifactId>
- <scope>test</scope>
- </dependency>
</dependencies>
<build>
<outputDirectory>target/scala-${scala.binary.version}/classes</outputDirectory>
@@ -227,7 +256,7 @@
</goals>
<configuration>
<exportAntProperties>true</exportAntProperties>
- <tasks>
+ <target>
<property name="spark.classpath" refid="maven.test.classpath" />
<property environment="env" />
<fail message="Please set the SCALA_HOME (or SCALA_LIBRARY_PATH if scala is on the path) environment variables and retry.">
@@ -240,7 +269,7 @@
</not>
</condition>
</fail>
- </tasks>
+ </target>
</configuration>
</execution>
</executions>
@@ -23,17 +23,17 @@
* Expose some commonly useful storage level constants.
*/
public class StorageLevels {
- public static final StorageLevel NONE = new StorageLevel(false, false, false, 1);
- public static final StorageLevel DISK_ONLY = new StorageLevel(true, false, false, 1);
- public static final StorageLevel DISK_ONLY_2 = new StorageLevel(true, false, false, 2);
- public static final StorageLevel MEMORY_ONLY = new StorageLevel(false, true, true, 1);
- public static final StorageLevel MEMORY_ONLY_2 = new StorageLevel(false, true, true, 2);
- public static final StorageLevel MEMORY_ONLY_SER = new StorageLevel(false, true, false, 1);
- public static final StorageLevel MEMORY_ONLY_SER_2 = new StorageLevel(false, true, false, 2);
- public static final StorageLevel MEMORY_AND_DISK = new StorageLevel(true, true, true, 1);
- public static final StorageLevel MEMORY_AND_DISK_2 = new StorageLevel(true, true, true, 2);
- public static final StorageLevel MEMORY_AND_DISK_SER = new StorageLevel(true, true, false, 1);
- public static final StorageLevel MEMORY_AND_DISK_SER_2 = new StorageLevel(true, true, false, 2);
+ public static final StorageLevel NONE = create(false, false, false, 1);
+ public static final StorageLevel DISK_ONLY = create(true, false, false, 1);
+ public static final StorageLevel DISK_ONLY_2 = create(true, false, false, 2);
+ public static final StorageLevel MEMORY_ONLY = create(false, true, true, 1);
+ public static final StorageLevel MEMORY_ONLY_2 = create(false, true, true, 2);
+ public static final StorageLevel MEMORY_ONLY_SER = create(false, true, false, 1);
+ public static final StorageLevel MEMORY_ONLY_SER_2 = create(false, true, false, 2);
+ public static final StorageLevel MEMORY_AND_DISK = create(true, true, true, 1);
+ public static final StorageLevel MEMORY_AND_DISK_2 = create(true, true, true, 2);
+ public static final StorageLevel MEMORY_AND_DISK_SER = create(true, true, false, 1);
+ public static final StorageLevel MEMORY_AND_DISK_SER_2 = create(true, true, false, 2);
/**
* Create a new StorageLevel object.
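
The StorageLevels hunk above swaps direct constructor calls for a create(...) factory. One plausible motivation, sketched here with a hypothetical cache rather than Spark's actual implementation, is that a single factory can intern equivalent levels so repeated requests share one instance:

```scala
// Hypothetical sketch, not Spark source: intern storage levels behind one factory.
final case class Level(useDisk: Boolean, useMemory: Boolean,
                       deserialized: Boolean, replication: Int)

object Levels {
  private val cache = scala.collection.mutable.Map.empty[Level, Level]

  def create(useDisk: Boolean, useMemory: Boolean,
             deserialized: Boolean, replication: Int): Level = synchronized {
    val level = Level(useDisk, useMemory, deserialized, replication)
    cache.getOrElseUpdate(level, level)
  }
}
```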
