From ba0879bb8d9949146ca72401d195fcfb04edc3ba Mon Sep 17 00:00:00 2001
From: a1singh
Date: Tue, 3 Nov 2015 17:22:33 -0800
Subject: [PATCH] Update LDAOptimizer.scala

line 441: since idx was never used, replaced the unneeded
zipWithIndex.foreach with foreach
---
 .../scala/org/apache/spark/mllib/clustering/LDAOptimizer.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mllib/src/main/scala/org/apache/spark/mllib/clustering/LDAOptimizer.scala b/mllib/src/main/scala/org/apache/spark/mllib/clustering/LDAOptimizer.scala
index 38486e949bbcf..17c0609800e90 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/clustering/LDAOptimizer.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/clustering/LDAOptimizer.scala
@@ -438,7 +438,7 @@ final class OnlineLDAOptimizer extends LDAOptimizer {

       val stat = BDM.zeros[Double](k, vocabSize)
       var gammaPart = List[BDV[Double]]()
-      nonEmptyDocs.zipWithIndex.foreach { case ((_, termCounts: Vector), idx: Int) =>
+      nonEmptyDocs.foreach { case (_, termCounts: Vector) =>
         val ids: List[Int] = termCounts match {
           case v: DenseVector => (0 until v.size).toList
           case v: SparseVector => v.indices.toList
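
The rationale is straightforward: zipWithIndex pairs every element with a running index, and when that index is never read the extra tuple and pattern match buy nothing, so iterating with a plain foreach is equivalent. Below is a minimal standalone sketch of the same simplification, using a hypothetical iterator of (docId, termCounts) pairs in place of Spark's nonEmptyDocs partition iterator; it is an illustration, not part of the patch.

object DropUnusedIndexSketch {
  // Hypothetical stand-in for nonEmptyDocs: a fresh iterator of (docId, termCounts) pairs.
  def docs: Iterator[(Long, Array[Double])] =
    Iterator((0L, Array(1.0, 2.0)), (1L, Array(0.0, 3.0)))

  def main(args: Array[String]): Unit = {
    // Before: zipWithIndex wraps each element as ((docId, termCounts), idx), but idx is ignored.
    docs.zipWithIndex.foreach { case ((_, termCounts), idx) =>
      println(termCounts.sum)
    }

    // After: iterate directly; same output, no unused index or wrapper tuple.
    docs.foreach { case (_, termCounts) =>
      println(termCounts.sum)
    }
  }
}

Both loops print the same totals; the only difference is that the second form skips building the (element, index) pairs, which is exactly what the one-line change to OnlineLDAOptimizer does.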