Fixed all non-deprecation warnings.
rxin committed Jul 22, 2015
1 parent 78660ac · commit 87c354a
Showing 5 changed files with 14 additions and 9 deletions.
core/src/main/scala/org/apache/spark/rdd/CoGroupedRDD.scala (7 changes: 4 additions & 3 deletions)

@@ -94,13 +94,14 @@ class CoGroupedRDD[K](@transient var rdds: Seq[RDD[_ <: Product2[K, _]]], part:
   }
 
   override def getDependencies: Seq[Dependency[_]] = {
-    rdds.map { rdd: RDD[_ <: Product2[K, _]]@unchecked =>
+    rdds.map { rdd: RDD[_] =>
       if (rdd.partitioner == Some(part)) {
         logDebug("Adding one-to-one dependency with " + rdd)
         new OneToOneDependency(rdd)
       } else {
         logDebug("Adding shuffle dependency with " + rdd)
-        new ShuffleDependency[K, Any, CoGroupCombiner](rdd, part, serializer)
+        new ShuffleDependency[K, Any, CoGroupCombiner](
+          rdd.asInstanceOf[RDD[_ <: Product2[K, _]]], part, serializer)
       }
     }
   }
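
Why the rewrite quiets scalac: RDD's type argument is erased at runtime, so the old ascription claimed a type the compiler cannot verify. Taking the erased view RDD[_] and casting back only where the shuffle dependency needs the stronger type localizes the unchecked step in one explicit asInstanceOf. A minimal sketch of the same pattern, with List standing in for RDD (not part of the commit):

    object ErasureSketch {
      // Ascribing an erased type argument cannot be checked at runtime and
      // draws an unchecked warning; an explicit, localized cast compiles
      // cleanly and makes the assumption visible.
      def sumAll(xs: Seq[List[_]]): Int =
        xs.map { x: List[_] =>
          x.asInstanceOf[List[Int]].sum // unchecked by design, stated explicitly
        }.sum
    }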
@@ -133,7 +134,7 @@ class CoGroupedRDD[K](@transient var rdds: Seq[RDD[_ <: Product2[K, _]]], part:
     // A list of (rdd iterator, dependency number) pairs
     val rddIterators = new ArrayBuffer[(Iterator[Product2[K, Any]], Int)]
     for ((dep, depNum) <- dependencies.zipWithIndex) dep match {
-      case oneToOneDependency: OneToOneDependency[Product2[K, Any]] =>
+      case oneToOneDependency: OneToOneDependency[Product2[K, Any]] @unchecked =>
        val dependencyPartition = split.narrowDeps(depNum).get.split
        // Read them from the parent
        val it = oneToOneDependency.rdd.iterator(dependencyPartition, context)
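
In the second hunk the fix goes the other way: the pattern's Product2[K, Any] argument is erased at runtime, so the commit annotates the type pattern with @unchecked to tell scalac the unverified type arguments are intentional. A minimal sketch of the annotation (not from the commit):

    object UncheckedPatternSketch {
      // Without @unchecked, scalac warns that List[Int]'s type argument is
      // unchecked, since only the bare List class is visible at runtime.
      def firstInt(value: Any): Option[Int] = value match {
        case xs: List[Int] @unchecked => xs.headOption
        case _ => None
      }
    }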
mllib/src/main/scala/org/apache/spark/mllib/stat/test/KolmogorovSmirnovTest.scala (4 changes: 2 additions & 2 deletions)
@@ -20,7 +20,7 @@ package org.apache.spark.mllib.stat.test
 import scala.annotation.varargs
 
 import org.apache.commons.math3.distribution.{NormalDistribution, RealDistribution}
-import org.apache.commons.math3.stat.inference.KolmogorovSmirnovTest
+import org.apache.commons.math3.stat.inference.{KolmogorovSmirnovTest => CommonMathKolmogorovSmirnovTest}
 
 import org.apache.spark.Logging
 import org.apache.spark.rdd.RDD
@@ -187,7 +187,7 @@ private[stat] object KolmogorovSmirnovTest extends Logging {
   }
 
   private def evalOneSampleP(ksStat: Double, n: Long): KolmogorovSmirnovTestResult = {
-    val pval = 1 - new KolmogorovSmirnovTest().cdf(ksStat, n.toInt)
+    val pval = 1 - new CommonMathKolmogorovSmirnovTest().cdf(ksStat, n.toInt)
     new KolmogorovSmirnovTestResult(pval, ksStat, NullHypothesis.OneSampleTwoSided.toString)
   }
 }
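
Spark's own KolmogorovSmirnovTest object and the Commons Math class share a name, the kind of clash scalac warns about (an imported name hidden by a same-named local definition). A rename import resolves it by giving the library class a distinct local alias. A minimal sketch of the mechanism (not from the commit):

    // A rename import gives java.util.List a local alias so it cannot be
    // confused with Scala's List in the same scope.
    import java.util.{Arrays, List => JList}

    object RenameImportSketch {
      val scalaList: List[Int] = List(1, 2, 3)
      val javaList: JList[String] = Arrays.asList("a", "b")
    }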
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/CatalystTypeConverters.scala (3 changes: 2 additions & 1 deletion)
@@ -24,6 +24,7 @@ import java.util.{Map => JavaMap}
 import javax.annotation.Nullable
 
 import scala.collection.mutable.HashMap
+import scala.language.existentials
 
 import org.apache.spark.sql.Row
 import org.apache.spark.sql.catalyst.expressions._
@@ -401,7 +402,7 @@ object CatalystTypeConverters {
     case seq: Seq[Any] => seq.map(convertToCatalyst)
     case r: Row => InternalRow(r.toSeq.map(convertToCatalyst): _*)
     case arr: Array[Any] => arr.toSeq.map(convertToCatalyst).toArray
-    case m: Map[Any, Any] =>
+    case m: Map[_, _] =>
       m.map { case (k, v) => (convertToCatalyst(k), convertToCatalyst(v)) }.toMap
     case other => other
   }
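
Two related fixes here: Map[Any, Any] in a type pattern asserts erased key/value types that scalac flags as unchecked, while the wildcard Map[_, _] matches the same values without the unverifiable claim; the scala.language.existentials import silences the feature warning that existential types elsewhere in the file would otherwise trigger. A minimal sketch of the pattern change (not from the commit):

    object WildcardPatternSketch {
      // Map[Any, Any] in a pattern draws an unchecked warning because the
      // key/value types are erased; Map[_, _] makes no unverifiable claim.
      def deepConvert(value: Any): Any = value match {
        case m: Map[_, _] => m.map { case (k, v) => (deepConvert(k), deepConvert(v)) }
        case other => other
      }
    }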
sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala (3 changes: 3 additions & 0 deletions)
@@ -197,6 +197,9 @@ final class DataFrameWriter private[sql](df: DataFrame) {
         // the table. But, insertInto with Overwrite requires the schema of data be the same
         // the schema of the table.
         insertInto(tableName)
+
+      case SaveMode.Overwrite =>
+        throw new UnsupportedOperationException("overwrite mode unsupported.")
     }
   } else {
     val cmd =
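
The added case evidently fixes a non-exhaustive match warning: the match over the save mode handled the other modes but not Overwrite on this path, so scalac could not prove every input was covered. Throwing makes the unsupported path explicit. A minimal sketch with a sealed trait standing in for SaveMode (not from the commit):

    sealed trait Mode
    case object Append extends Mode
    case object Overwrite extends Mode

    object ExhaustiveMatchSketch {
      // Omitting Overwrite below would draw "match may not be exhaustive".
      def dispatch(mode: Mode): String = mode match {
        case Append => "appending"
        case Overwrite =>
          throw new UnsupportedOperationException("overwrite mode unsupported.")
      }
    }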
sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFilters.scala (6 changes: 3 additions & 3 deletions)
@@ -41,10 +41,10 @@ private[orc] object OrcFilters extends Logging {
   private def buildSearchArgument(expression: Filter, builder: Builder): Option[Builder] = {
     def newBuilder = SearchArgument.FACTORY.newBuilder()
 
-    def isSearchableLiteral(value: Any) = value match {
+    def isSearchableLiteral(value: Any): Boolean = value match {
       // These are types recognized by the `SearchArgumentImpl.BuilderImpl.boxLiteral()` method.
-      case _: String | _: Long | _: Double | _: DateWritable | _: HiveDecimal | _: HiveChar |
-        _: HiveVarchar | _: Byte | _: Short | _: Integer | _: Float => true
+      case _: String | _: Long | _: Double | _: Byte | _: Short | _: Integer | _: Float => true
+      case _: DateWritable | _: HiveDecimal | _: HiveChar | _: HiveVarchar => true
       case _ => false
     }
 
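
Two small changes: the helper gains an explicit Boolean result type, and one very long run of type-test alternatives is split across two cases. Splitting large alternative patterns is a common way to keep scalac's match analysis from warning that it cannot complete, and the explicit result type removes any reliance on inference for the helper's signature; both readings are inferred from the diff rather than stated in the commit. A minimal sketch of the resulting shape (placeholder types, not from the commit):

    object SplitAlternativesSketch {
      // Type-test alternatives split across two cases, with an explicit
      // result type on the helper.
      def isSmallValue(value: Any): Boolean = value match {
        case _: String | _: Long | _: Double => true
        case _: Byte | _: Short | _: Integer | _: Float => true
        case _ => false
      }
    }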
