-
Notifications
You must be signed in to change notification settings - Fork 0
/
ExampleOfWrappedArrayUDF.java
114 lines (92 loc) · 4.87 KB
/
ExampleOfWrappedArrayUDF.java
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
package tw.howie.example.spark;
import org.apache.log4j.PropertyConfigurator;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.api.java.UDF3;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import scala.collection.JavaConversions;
import scala.collection.mutable.WrappedArray;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import static org.apache.spark.sql.functions.callUDF;
import static org.apache.spark.sql.functions.col;
import static org.apache.spark.sql.types.DataTypes.createStructField;
/**
 * Example showing how a Java Spark SQL UDF receives array columns as Scala
 * {@code WrappedArray} instances and how to convert them back to Java lists.
 *
 * @author howie
 * @since 2019-03-27
 * <p>
 * Written in answer to the Stack Overflow question:
 * https://stackoverflow.com/questions/55357655/how-to-cast-datasetrow-columns-to-non-primitive-data-type
 */
public class ExampleOfWrappedArrayUDF {

    /** Spark master URL: local mode with two worker threads. */
    private final String master = "local[2]";

    /** Local SparkSession shared by the test methods; stopped in {@link #after()}. */
    private SparkSession spark = SparkSession.builder()
                                             .appName("ExampleOfWrappedArrayUDF")
                                             .master(master)
                                             .config("spark.submit.deployMode", "client")
                                             .config("spark.io.compression.codec", "snappy")
                                             .config("spark.rdd.compress", "true")
                                             .getOrCreate();

    /** Configures log4j from the test classpath before each test. */
    @Before
    public void before() {
        PropertyConfigurator.configure(ExampleOfWrappedArrayUDF.class.getClassLoader()
                                                                     .getResource("log4j-test.properties"));
    }

    /** Releases the shared SparkSession after each test. */
    @After
    public void after() {
        spark.stop();
    }

    /**
     * Builds a one-row DataFrame with two array columns, registers a UDF3 that
     * receives those columns as Scala {@code WrappedArray}s, and sums the
     * {@code value} column when {@code aggregateType} is {@code "sum"}.
     */
    @Test
    public void testExampleOfWrappsArrayUDF() {
        /*
         Input data set (aggregateType is lower-case "sum", matching the UDF check):
         +------+---------------+---------------------------------------------+---------------+
         | Id   | value         | time                                        | aggregateType |
         +------+---------------+---------------------------------------------+---------------+
         | 0001 | [1.5,3.4,4.5] | [1551502200000,1551502200000,1551502200000] | sum           |
         +------+---------------+---------------------------------------------+---------------+
        */
        StructType dataSchema = new StructType(new StructField[] {
                createStructField("Id", DataTypes.StringType, true),
                createStructField("value",
                                  DataTypes.createArrayType(DataTypes.DoubleType, false),
                                  false),
                createStructField("time",
                                  DataTypes.createArrayType(DataTypes.LongType, false),
                                  false),
                createStructField("aggregateType", DataTypes.StringType, true),});

        List<Row> data = new ArrayList<>();
        data.add(RowFactory.create("0001",
                                   Arrays.asList(1.5, 3.4, 4.5),
                                   Arrays.asList(1551502200000L, 1551502200000L, 1551502200000L),
                                   "sum"));

        Dataset<Row> example = spark.createDataFrame(data, dataSchema);
        example.show(false);

        // Invoked below as myUDF(aggregateType, time, value). Spark hands array
        // columns to a Java UDF as scala.collection.mutable.WrappedArray, so each
        // must be converted before Java collection APIs can be used.
        // FIX: the original locals were named param1AsList/param2AsList and bound
        // to the wrong parameters (param1AsList actually held the time column),
        // which made the example misleading; names now reflect the real columns.
        UDF3<String, WrappedArray<Long>, WrappedArray<Double>, Double> myUDF =
                (aggregateType, timeArray, valueArray) -> {
                    List<Long> times = JavaConversions.seqAsJavaList(timeArray); // epoch millis; unused by "sum"
                    List<Double> values = JavaConversions.seqAsJavaList(valueArray);

                    double result = 0;
                    if ("sum".equals(aggregateType)) {
                        result = values.stream()
                                       .mapToDouble(Double::doubleValue)
                                       .sum();
                    }
                    return result;
                };

        spark.udf()
             .register("myUDF", myUDF, DataTypes.DoubleType);

        example = example.withColumn("new", callUDF("myUDF", col("aggregateType"), col("time"), col("value")));
        example.show(false);
    }
}