Skip to content

Commit

Permalink
Scala style fix
Browse files Browse the repository at this point in the history
  • Loading branch information
avulanov committed Jul 16, 2014
1 parent 79e8476 commit 1843f73
Show file tree
Hide file tree
Showing 2 changed files with 35 additions and 58 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -17,94 +17,83 @@

package org.apache.spark.mllib.evaluation

import org.apache.spark.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.SparkContext._

/**
 * Evaluator for multilabel classification.
 *
 * NB: type Double is used both for predictions and labels for compatibility
 * with model.predict (which returns Double) and MLUtils.loadLibSVMFile
 * (which loads class labels as Double).
 *
 * @param predictionAndLabels an RDD of (predictions, labels) pairs, both are non-null sets.
 */
class MultilabelMetrics(predictionAndLabels: RDD[(Set[Double], Set[Double])]) {

// Total number of (predictions, labels) pairs, i.e. the number of documents.
// Lazily computed; triggers a Spark action on first access.
private lazy val numDocs: Long = predictionAndLabels.count

// Number of distinct labels observed across all ground-truth label sets.
// NOTE(review): labels that appear only in predictions (never in the truth)
// are not counted here, yet numLabels normalizes hammingLoss - confirm intended.
private lazy val numLabels: Long = predictionAndLabels.flatMap { case (_, labels) =>
  labels
}.distinct.count

/**
 * Returns strict (subset) accuracy: the fraction of documents whose predicted
 * label set equals the true label set exactly.
 */
lazy val strictAccuracy: Double = predictionAndLabels.filter { case (predictions, labels) =>
  predictions == labels
}.count.toDouble / numDocs

/**
 * Returns accuracy: the Jaccard similarity between predictions and labels,
 * |labels intersect predictions| / |labels union predictions|, averaged over documents.
 */
lazy val accuracy: Double = predictionAndLabels.map { case (predictions, labels) =>
  labels.intersect(predictions).size.toDouble / labels.union(predictions).size
}.sum / numDocs

/**
 * Returns Hamming loss: the number of wrong (document, label) assignments -
 * the symmetric difference of predictions and labels, summed over documents -
 * normalized by numDocs * numLabels.
 */
lazy val hammingLoss: Double = predictionAndLabels.map { case (predictions, labels) =>
  labels.diff(predictions).size + predictions.diff(labels).size
}.sum.toDouble / (numDocs * numLabels)

/**
 * Returns document-based precision averaged by the number of documents.
 * A document with an empty prediction set contributes 0.0 (avoids division by zero).
 */
lazy val macroPrecisionDoc: Double = predictionAndLabels.map { case (predictions, labels) =>
  if (predictions.size > 0) {
    predictions.intersect(labels).size.toDouble / predictions.size
  } else {
    0.0
  }
}.sum / numDocs

/**
 * Returns document-based recall averaged by the number of documents.
 * NOTE(review): assumes every true label set is non-empty (per the class doc);
 * an empty label set would make this document's term NaN - confirm upstream guarantee.
 */
lazy val macroRecallDoc: Double = predictionAndLabels.map { case (predictions, labels) =>
  labels.intersect(predictions).size.toDouble / labels.size
}.sum / numDocs

/**
 * Returns document-based F1-measure averaged by the number of documents:
 * 2 * |predictions intersect labels| / (|predictions| + |labels|).
 * (Scaladoc previously mislabeled the result as macroRecallDoc.)
 */
lazy val macroF1MeasureDoc: Double = predictionAndLabels.map { case (predictions, labels) =>
  2.0 * predictions.intersect(labels).size / (predictions.size + labels.size)
}.sum / numDocs

/**
 * Returns micro-averaged document-based precision
 * (identical to the label-based microPrecisionClass).
 */
lazy val microPrecisionDoc: Double = microPrecisionClass

/**
 * Returns micro-averaged document-based recall
 * (identical to the label-based microRecallClass).
 */
lazy val microRecallDoc: Double = microRecallClass

/**
 * Returns micro-averaged document-based F1-measure
 * (identical to the label-based microF1MeasureClass).
 */
lazy val microF1MeasureDoc: Double = microF1MeasureClass

// True positives per label: count of documents where the label is both predicted
// and true. Collected to the driver as a Map(label -> count).
private lazy val tpPerClass = predictionAndLabels.flatMap { case (predictions, labels) =>
  predictions.intersect(labels).map(category => (category, 1))
}.reduceByKey(_ + _).collectAsMap()

// False positives per label: count of documents where the label is predicted
// but not true. Collected to the driver as a Map(label -> count).
// Style: "case (" spacing matches the convention used by the sibling members.
private lazy val fpPerClass = predictionAndLabels.flatMap { case (predictions, labels) =>
  predictions.diff(labels).map(category => (category, 1))
}.reduceByKey(_ + _).collectAsMap()

private lazy val fnPerClass = predictionAndLabels.flatMap{ case(predictions, labels) =>
Expand All @@ -113,38 +102,39 @@ class MultilabelMetrics(predictionAndLabels:RDD[(Set[Double], Set[Double])]) ext
/**
 * Returns precision for a given label (category): tp / (tp + fp).
 * Returns 0.0 when the label was never predicted and never correctly matched.
 * @param label the label.
 * @return precision for the label.
 */
def precisionClass(label: Double): Double = {
  // getOrElse on both maps: a label that never produced a true positive would
  // otherwise throw NoSuchElementException from tpPerClass(label).
  val tp = tpPerClass.getOrElse(label, 0)
  val fp = fpPerClass.getOrElse(label, 0)
  if (tp + fp == 0) 0.0 else tp.toDouble / (tp + fp)
}

/**
 * Returns recall for a given label (category): tp / (tp + fn).
 * Returns 0.0 when the label never occurs in the ground truth.
 * @param label the label.
 * @return recall for the label.
 */
def recallClass(label: Double): Double = {
  // getOrElse on both maps: a label with no true positives would otherwise
  // throw NoSuchElementException from tpPerClass(label).
  val tp = tpPerClass.getOrElse(label, 0)
  val fn = fnPerClass.getOrElse(label, 0)
  if (tp + fn == 0) 0.0 else tp.toDouble / (tp + fn)
}

/**
 * Returns F1-measure for a given label (category): the harmonic mean of
 * precision and recall, or 0.0 when both are zero.
 * @param label the label.
 * @return F1-measure for the label.
 */
def f1MeasureClass(label: Double): Double = {
  val precision = precisionClass(label)
  val recall = recallClass(label)
  if (precision + recall == 0.0) 0.0 else 2 * precision * recall / (precision + recall)
}

// Aggregate true-positive / false-positive / false-negative counts over all labels.
// Accumulated into a Long (0L seed) so large corpora cannot overflow Int.
private lazy val sumTp = tpPerClass.foldLeft(0L) { case (sum, (_, tp)) => sum + tp }
private lazy val sumFpClass = fpPerClass.foldLeft(0L) { case (sum, (_, fp)) => sum + fp }
private lazy val sumFnClass = fnPerClass.foldLeft(0L) { case (sum, (_, fn)) => sum + fn }

/**
* Returns micro-averaged label-based Precision
* @return microPrecisionClass.
*/
lazy val microPrecisionClass = {
val sumFp = fpPerClass.foldLeft(0L){ case(sumFp, (_, fp)) => sumFp + fp}
Expand All @@ -153,7 +143,6 @@ class MultilabelMetrics(predictionAndLabels:RDD[(Set[Double], Set[Double])]) ext

/**
* Returns micro-averaged label-based Recall
* @return microRecallClass.
*/
lazy val microRecallClass = {
val sumFn = fnPerClass.foldLeft(0.0){ case(sumFn, (_, fn)) => sumFn + fn}
Expand All @@ -162,8 +151,6 @@ class MultilabelMetrics(predictionAndLabels:RDD[(Set[Double], Set[Double])]) ext

/**
 * Returns micro-averaged label-based F1-measure:
 * 2 * sumTp / (2 * sumTp + sumFn + sumFp).
 * (Scaladoc previously mislabeled the result as microRecallClass.)
 */
lazy val microF1MeasureClass: Double = 2.0 * sumTp / (2 * sumTp + sumFnClass + sumFpClass)

}
Original file line number Diff line number Diff line change
Expand Up @@ -17,10 +17,10 @@

package org.apache.spark.mllib.evaluation

import org.apache.spark.mllib.util.LocalSparkContext
import org.apache.spark.rdd.RDD
import org.scalatest.FunSuite

import org.apache.spark.mllib.util.LocalSparkContext
import org.apache.spark.rdd.RDD

class MultilabelMetricsSuite extends FunSuite with LocalSparkContext {
test("Multilabel evaluation metrics") {
Expand All @@ -45,7 +45,7 @@ class MultilabelMetricsSuite extends FunSuite with LocalSparkContext {
* class 2 - doc 0, 3, 4, 6 (total 4)
*
*/
val scoreAndLabels:RDD[(Set[Double], Set[Double])] = sc.parallelize(
val scoreAndLabels: RDD[(Set[Double], Set[Double])] = sc.parallelize(
Seq((Set(0.0, 1.0), Set(0.0, 2.0)),
(Set(0.0, 2.0), Set(0.0, 1.0)),
(Set(), Set(0.0)),
Expand All @@ -70,20 +70,16 @@ class MultilabelMetricsSuite extends FunSuite with LocalSparkContext {
val microRecallClass = sumTp.toDouble / (4 + 1 + 2 + 1 + 2 + 2)
val microF1MeasureClass = 2.0 * sumTp.toDouble /
(2 * sumTp.toDouble + (1 + 1 + 2) + (0 + 1 + 2))

val macroPrecisionDoc = 1.0 / 7 *
(1.0 / 2 + 1.0 / 2 + 0 + 1.0 / 1 + 2.0 / 2 + 2.0 / 3 + 1.0 / 1.0)
val macroRecallDoc = 1.0 / 7 *
(1.0 / 2 + 1.0 / 2 + 0 / 1 + 1.0 / 1 + 2.0 / 2 + 2.0 / 2 + 1.0 / 2)
val macroF1MeasureDoc = (1.0 / 7) *
2 * ( 1.0 / (2 + 2) + 1.0 / (2 + 2) + 0 + 1.0 / (1 + 1) +
2.0 / (2 + 2) + 2.0 / (3 + 2) + 1.0 / (1 + 2) )

val hammingLoss = (1.0 / (7 * 3)) * (2 + 2 + 1 + 0 + 0 + 1 + 1)

val strictAccuracy = 2.0 / 7
val accuracy = 1.0 / 7 * (1.0 / 3 + 1.0 /3 + 0 + 1.0 / 1 + 2.0 / 2 + 2.0 / 3 + 1.0 / 2)

assert(math.abs(metrics.precisionClass(0.0) - precision0) < delta)
assert(math.abs(metrics.precisionClass(1.0) - precision1) < delta)
assert(math.abs(metrics.precisionClass(2.0) - precision2) < delta)
Expand All @@ -93,20 +89,14 @@ class MultilabelMetricsSuite extends FunSuite with LocalSparkContext {
assert(math.abs(metrics.f1MeasureClass(0.0) - f1measure0) < delta)
assert(math.abs(metrics.f1MeasureClass(1.0) - f1measure1) < delta)
assert(math.abs(metrics.f1MeasureClass(2.0) - f1measure2) < delta)

assert(math.abs(metrics.microPrecisionClass - microPrecisionClass) < delta)
assert(math.abs(metrics.microRecallClass - microRecallClass) < delta)
assert(math.abs(metrics.microF1MeasureClass - microF1MeasureClass) < delta)

assert(math.abs(metrics.macroPrecisionDoc - macroPrecisionDoc) < delta)
assert(math.abs(metrics.macroRecallDoc - macroRecallDoc) < delta)
assert(math.abs(metrics.macroF1MeasureDoc - macroF1MeasureDoc) < delta)

assert(math.abs(metrics.hammingLoss - hammingLoss) < delta)
assert(math.abs(metrics.strictAccuracy - strictAccuracy) < delta)
assert(math.abs(metrics.accuracy - accuracy) < delta)


}

}

0 comments on commit 1843f73

Please sign in to comment.