-
Notifications
You must be signed in to change notification settings - Fork 820
/
MultiNGram.scala
71 lines (59 loc) · 2.58 KB
/
MultiNGram.scala
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
// Copyright (C) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See LICENSE in project root for information.
package com.microsoft.azure.synapse.ml.featurize.text
import com.microsoft.azure.synapse.ml.codegen.Wrappable
import com.microsoft.azure.synapse.ml.core.contracts.{HasInputCol, HasOutputCol}
import com.microsoft.azure.synapse.ml.core.schema.DatasetExtensions
import com.microsoft.azure.synapse.ml.logging.{FeatureNames, SynapseMLLogging}
import com.microsoft.azure.synapse.ml.param.TypedIntArrayParam
import org.apache.spark.ml._
import org.apache.spark.ml.feature._
import org.apache.spark.ml.param._
import org.apache.spark.ml.util._
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Dataset, Row}
/** Companion object providing `read`/`load` support for persisted
  * [[MultiNGram]] instances (counterpart of `DefaultParamsWritable` on the class).
  */
object MultiNGram extends DefaultParamsReadable[MultiNGram]
/** Extracts several ngrams
  *
  * Runs one Spark [[NGram]] stage per requested length over the input token
  * column and concatenates the resulting ngram arrays into a single output
  * column of strings.
  *
  * @param uid The id of the module
  */
class MultiNGram(override val uid: String)
  extends Transformer with HasInputCol with HasOutputCol
    with Wrappable with DefaultParamsWritable with SynapseMLLogging {
  logClass(FeatureNames.Featurize)

  def this() = this(Identifiable.randomUID("MultiNGram"))

  setDefault(outputCol, uid + "_output")

  // Ngram sizes to extract, e.g. Seq(1, 2, 3) for unigrams through trigrams.
  val lengths = new TypedIntArrayParam(
    this,
    "lengths",
    "the collection of lengths to use for ngram extraction"
  )

  def getLengths: Seq[Int] = $(lengths)

  def setLengths(v: Seq[Int]): this.type = set(lengths, v)

  override def transform(dataset: Dataset[_]): DataFrame = {
    logTransform[DataFrame]({
      val df = dataset.toDF()
      // One scratch column per ngram length; names are chosen to avoid
      // collisions with columns already present in the dataset.
      val intermediateOutputCols = getLengths.map(n =>
        DatasetExtensions.findUnusedColumnName(s"ngram_$n")(dataset.columns.toSet)
      )
      val models = getLengths.zip(intermediateOutputCols).map { case (n, out) =>
        new NGram().setN(n).setInputCol(getInputCol).setOutputCol(out)
      }
      val intermediateDF = NamespaceInjections.pipelineModel(models.toArray).transform(df)
      // Merge the per-length ngram arrays into one output column, then drop
      // the scratch columns. foldLeft (not reduce) so an empty `lengths`
      // yields an empty ngram list instead of UnsupportedOperationException.
      intermediateDF.map { row =>
        val mergedNGrams = intermediateOutputCols
          .map(col => row.getAs[Seq[String]](col))
          .foldLeft(Seq.empty[String])(_ ++ _)
        Row.fromSeq(row.toSeq :+ mergedNGrams)
      }(RowEncoder(intermediateDF.schema.add(getOutputCol, ArrayType(StringType))))
        .drop(intermediateOutputCols: _*)
    }, dataset.columns.length)
  }

  override def copy(extra: ParamMap): MultiNGram =
    defaultCopy(extra)

  override def transformSchema(schema: StructType): StructType = {
    // require (IllegalArgumentException) rather than assert, so the schema
    // check still fires when assertions are compiled out (-Xdisable-assertions)
    // and callers get Spark's conventional validation exception type.
    require(schema(getInputCol).dataType == ArrayType(StringType),
      s"Input column $getInputCol must be of type ArrayType(StringType)")
    schema.add(getOutputCol, ArrayType(StringType))
  }
}