data_stream.py

import logging

from pyspark.sql import SparkSession
from pyspark.sql.types import (
    StructType, StructField, StringType, DateType, TimestampType, IntegerType
)
import pyspark.sql.functions as psf

from CONSTANTS import BOOTSTRAP_SERVER
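
# Schema of the JSON payload carried in each Kafka message's value.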
schema = StructType(
    [
        StructField("crime_id", StringType(), True),
        StructField("original_crime_type_name", StringType(), True),
        StructField("report_date", DateType(), True),
        StructField("call_date", DateType(), True),
        StructField("offense_date", DateType(), True),
        StructField("call_time", TimestampType(), True),
        StructField("call_date_time", TimestampType(), True),
        StructField("disposition", StringType(), True),
        StructField("address", StringType(), True),
        StructField("city", StringType(), True),
        StructField("state", StringType(), True),
        StructField("agency_id", IntegerType(), True),
        StructField("address_type", StringType(), True),
        StructField("common_location", StringType(), True),
    ]
)


def run_spark_job(spark):
    # Read the service-call events from the Kafka topic as a streaming DataFrame.
    df = (
        spark.readStream.format("kafka")
        .option("kafka.bootstrap.servers", BOOTSTRAP_SERVER)
        .option("subscribe", "police.calls.service")
        .load()
    )

    # Show the schema of the incoming records as a sanity check.
    df.printSchema()

    # Kafka delivers the payload as bytes; keep only the value, cast to a string.
    kafka_df = df.selectExpr("CAST(value AS STRING) AS value")

    # Parse the JSON payload into typed columns using the schema above.
    service_table = kafka_df.select(
        psf.from_json(psf.col("value"), schema).alias("DF")
    ).select("DF.*")
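
    # Keep only the columns needed for the windowed count.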
    distinct_table = service_table.select(
        "call_date_time", "original_crime_type_name", "disposition"
    )

    # Count occurrences of each original crime type over a sliding 10-second
    # window that advances every 2 seconds; the 20-second watermark bounds
    # how late an event may arrive and still be counted.
    agg_df = (
        distinct_table.withWatermark("call_date_time", "20 seconds")
        .groupBy(
            psf.window(psf.col("call_date_time"), "10 seconds", "2 seconds"),
            psf.col("original_crime_type_name"),
        )
        .count()
    )
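
    # Write the running counts to the console; "complete" output mode
    # re-emits the full result table on every trigger.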
    query = (
        agg_df.writeStream
        .queryName("crime counter")
        .trigger(processingTime="1 seconds")
        .outputMode("complete")
        .format("console")
        .start()
    )
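
    # Load the static radio-code lookup table (keyed by disposition code).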
    radio_code_json_filepath = "data/radio_code.json"
    radio_code_df = spark.read.json(radio_code_json_filepath, multiLine=True)
    radio_code_df.printSchema()

    # Rename the join key so it matches the streaming table's column.
    radio_code_df = radio_code_df.withColumnRenamed("disposition_code", "disposition")
    radio_code_df.printSchema()
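
    # Stream-static join: enrich each streaming call record with its
    # matching radio-code row, written to the console in append mode.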
    join_query = (
        service_table.join(radio_code_df, on="disposition")
        .writeStream
        .queryName("join disposition")
        .outputMode("append")
        .format("console")
        .start()
    )
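
    # With two queries running (the counts and the join), block until any
    # one of them terminates.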
    spark.streams.awaitAnyTermination()


if __name__ == "__main__":
    logger = logging.getLogger(__name__)

    spark = (
        SparkSession.builder.master("local[*]")
        .appName("KafkaSparkStructuredStreaming")
        .config("spark.driver.memory", "1g")
        .config("spark.executor.memory", "1g")
        .getOrCreate()
    )
    # spark.sparkContext.setLogLevel("WARN")

    logger.info("Spark started")
    run_spark_job(spark)
    spark.stop()
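
# Note: the Kafka source requires the spark-sql-kafka connector on the
# classpath. A typical invocation (the package version below is only an
# example and must match the local Spark/Scala build):
#   spark-submit --packages org.apache.spark:spark-sql-kafka-0-10_2.12:3.0.1 data_stream.py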