From 3fb7e36ff9036a1affbed715267495f5050d298e Mon Sep 17 00:00:00 2001 From: poorna Date: Fri, 30 Mar 2018 14:07:17 -0700 Subject: [PATCH 1/5] CDAP-13280 Add Spark 2 streaming Kafka source --- .../docs/KAFKABATCHSOURCE.md | 2 +- kafka-plugins-0.10/docs/KAFKASOURCE.md | 85 ++++ .../docs/KAFKAWRITER-SINK.md | 4 +- .../docs/Kafka-alert-publisher.md | 63 +++ .../docs/Kafka-batchsink.md | 4 +- .../docs/Kafka-batchsource.md | 3 +- .../docs/Kafka-streamingsource.md | 118 +++++ .../docs/KafkaAlerts-alertpublisher.md | 39 ++ .../kafka-batch-source-plugins-config.png | Bin .../docs/kafka-sink-plugin-config.png | Bin .../docs/kafka-source-plugin-config.png | Bin 0 -> 76013 bytes .../icons/Kafka-batchsink.png | Bin .../icons/Kafka-batchsource.png | Bin .../icons/KafkaAlerts-alertpublisher.png | Bin 0 -> 2066 bytes kafka-plugins-0.10/pom.xml | 172 +++++++ .../alertpublisher/KafkaAlertPublisher.java | 174 +++++++ .../plugin/batchSource/KafkaBatchSource.java | 15 +- .../plugin/batchSource/KafkaInputFormat.java | 52 +-- .../hydrator/plugin/batchSource/KafkaKey.java | 0 .../plugin/batchSource/KafkaMessage.java | 0 .../plugin/batchSource/KafkaReader.java | 0 .../plugin/batchSource/KafkaRecordReader.java | 0 .../plugin/batchSource/KafkaRequest.java | 0 .../plugin/batchSource/KafkaSplit.java | 0 .../hydrator/plugin/common/KafkaHelpers.java | 105 +++++ .../co/cask/hydrator/plugin/sink/Kafka.java | 22 +- .../plugin/sink/KafkaOutputFormat.java | 16 +- .../plugin/sink/KafkaRecordWriter.java | 0 .../plugin/sink/StringPartitioner.java | 0 .../hydrator/plugin/source/KafkaConfig.java | 430 ++++++++++++++++++ .../plugin/source/KafkaStreamingSource.java | 285 ++++++++++++ .../source/ReferenceStreamingSource.java | 46 ++ .../co/cask/hydrator/EmbeddedKafkaServer.java | 16 +- .../cask/hydrator/KafkaBatchSourceTest.java | 11 +- .../KafkaSinkAndAlertsPublisherTest.java | 278 +++++++++++ .../hydrator/KafkaStreamingSourceTest.java | 267 +++++++++++ .../widgets/Kafka-batchsink.json | 0 .../widgets/Kafka-batchsource.json | 0 .../widgets/Kafka-streamingsource.json | 151 ++++++ .../widgets/KafkaAlerts-alertpublisher.json | 52 +++ kafka-plugins-0.8/docs/KAFKASOURCE.md | 2 +- .../docs/Kafka-alert-publisher.md | 4 +- kafka-plugins-0.8/docs/Kafka-batchsink.md | 4 +- kafka-plugins-0.8/docs/Kafka-batchsource.md | 3 +- .../docs/Kafka-streamingsource.md | 2 +- .../docs/KafkaAlerts-alertpublisher.md | 5 +- .../icons/KafkaAlerts-alertpublisher.png | Bin 0 -> 2066 bytes kafka-plugins-0.8/pom.xml | 103 ++++- .../alertpublisher/KafkaAlertPublisher.java | 18 +- .../co/cask/hydrator/plugin/sink/Kafka.java | 13 +- .../plugin/sink/KafkaOutputFormat.java | 14 +- ...rceTest.java => KafkaBatchSourceTest.java} | 11 +- .../KafkaSinkAndAlertsPublisherTest.java | 277 +++++++++++ .../hydrator/KafkaStreamingSourceTest.java | 268 +++++++++++ kafka-plugins-0.9/pom.xml | 78 ---- pom.xml | 129 ++---- 56 files changed, 3049 insertions(+), 292 deletions(-) rename {kafka-plugins-0.9 => kafka-plugins-0.10}/docs/KAFKABATCHSOURCE.md (92%) create mode 100644 kafka-plugins-0.10/docs/KAFKASOURCE.md rename {kafka-plugins-0.9 => kafka-plugins-0.10}/docs/KAFKAWRITER-SINK.md (93%) create mode 100644 kafka-plugins-0.10/docs/Kafka-alert-publisher.md rename {kafka-plugins-0.9 => kafka-plugins-0.10}/docs/Kafka-batchsink.md (96%) rename {kafka-plugins-0.9 => kafka-plugins-0.10}/docs/Kafka-batchsource.md (98%) create mode 100644 kafka-plugins-0.10/docs/Kafka-streamingsource.md create mode 100644 kafka-plugins-0.10/docs/KafkaAlerts-alertpublisher.md rename {kafka-plugins-0.9 => 
kafka-plugins-0.10}/docs/kafka-batch-source-plugins-config.png (100%) rename {kafka-plugins-0.9 => kafka-plugins-0.10}/docs/kafka-sink-plugin-config.png (100%) create mode 100644 kafka-plugins-0.10/docs/kafka-source-plugin-config.png rename {kafka-plugins-0.9 => kafka-plugins-0.10}/icons/Kafka-batchsink.png (100%) rename {kafka-plugins-0.9 => kafka-plugins-0.10}/icons/Kafka-batchsource.png (100%) create mode 100644 kafka-plugins-0.10/icons/KafkaAlerts-alertpublisher.png create mode 100644 kafka-plugins-0.10/pom.xml create mode 100644 kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/alertpublisher/KafkaAlertPublisher.java rename {kafka-plugins-0.9 => kafka-plugins-0.10}/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaBatchSource.java (96%) rename {kafka-plugins-0.9 => kafka-plugins-0.10}/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaInputFormat.java (76%) rename {kafka-plugins-0.9 => kafka-plugins-0.10}/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaKey.java (100%) rename {kafka-plugins-0.9 => kafka-plugins-0.10}/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaMessage.java (100%) rename {kafka-plugins-0.9 => kafka-plugins-0.10}/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaReader.java (100%) rename {kafka-plugins-0.9 => kafka-plugins-0.10}/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaRecordReader.java (100%) rename {kafka-plugins-0.9 => kafka-plugins-0.10}/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaRequest.java (100%) rename {kafka-plugins-0.9 => kafka-plugins-0.10}/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaSplit.java (100%) create mode 100644 kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/common/KafkaHelpers.java rename {kafka-plugins-0.9 => kafka-plugins-0.10}/src/main/java/co/cask/hydrator/plugin/sink/Kafka.java (89%) rename {kafka-plugins-0.9 => kafka-plugins-0.10}/src/main/java/co/cask/hydrator/plugin/sink/KafkaOutputFormat.java (88%) rename {kafka-plugins-0.9 => kafka-plugins-0.10}/src/main/java/co/cask/hydrator/plugin/sink/KafkaRecordWriter.java (100%) rename {kafka-plugins-0.9 => kafka-plugins-0.10}/src/main/java/co/cask/hydrator/plugin/sink/StringPartitioner.java (100%) create mode 100644 kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/source/KafkaConfig.java create mode 100644 kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/source/KafkaStreamingSource.java create mode 100644 kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/source/ReferenceStreamingSource.java rename {kafka-plugins-0.9 => kafka-plugins-0.10}/src/test/java/co/cask/hydrator/EmbeddedKafkaServer.java (89%) rename kafka-plugins-0.9/src/test/java/co/cask/hydrator/Kafka9BatchSourceTest.java => kafka-plugins-0.10/src/test/java/co/cask/hydrator/KafkaBatchSourceTest.java (97%) create mode 100644 kafka-plugins-0.10/src/test/java/co/cask/hydrator/KafkaSinkAndAlertsPublisherTest.java create mode 100644 kafka-plugins-0.10/src/test/java/co/cask/hydrator/KafkaStreamingSourceTest.java rename {kafka-plugins-0.9 => kafka-plugins-0.10}/widgets/Kafka-batchsink.json (100%) rename {kafka-plugins-0.9 => kafka-plugins-0.10}/widgets/Kafka-batchsource.json (100%) create mode 100644 kafka-plugins-0.10/widgets/Kafka-streamingsource.json create mode 100644 kafka-plugins-0.10/widgets/KafkaAlerts-alertpublisher.json create mode 100644 kafka-plugins-0.8/icons/KafkaAlerts-alertpublisher.png rename kafka-plugins-0.8/src/test/java/co/cask/hydrator/{Kafka8BatchSourceTest.java => KafkaBatchSourceTest.java} (97%) create mode 100644 
kafka-plugins-0.8/src/test/java/co/cask/hydrator/KafkaSinkAndAlertsPublisherTest.java
 create mode 100644 kafka-plugins-0.8/src/test/java/co/cask/hydrator/KafkaStreamingSourceTest.java
 delete mode 100644 kafka-plugins-0.9/pom.xml

diff --git a/kafka-plugins-0.9/docs/KAFKABATCHSOURCE.md b/kafka-plugins-0.10/docs/KAFKABATCHSOURCE.md
similarity index 92%
rename from kafka-plugins-0.9/docs/KAFKABATCHSOURCE.md
rename to kafka-plugins-0.10/docs/KAFKABATCHSOURCE.md
index eafab36..5d0683a 100644
--- a/kafka-plugins-0.9/docs/KAFKABATCHSOURCE.md
+++ b/kafka-plugins-0.10/docs/KAFKABATCHSOURCE.md
@@ -10,7 +10,7 @@ Kafka batch source that emits records with a user-specified schema.

Usage Notes
-----------
-Kafka Batch Source can be used to read events from a kafka topic. It uses kafka consumer [0.9.1 apis](https://kafka.apache.org/090/documentation.html) to read events from a kafka topic. The Kafka Batch Source supports providing additional kafka properties for the kafka consumer, reading from kerberos-enabled kafka and limiting the number of records read. Kafka Batch Source converts incoming kafka events into cdap structured records which then can be used for further transformations.
+Kafka Batch Source can be used to read events from a kafka topic. It uses kafka consumer [0.10.2 apis](https://kafka.apache.org/0100/documentation.html) to read events from a kafka topic. The Kafka Batch Source supports providing additional kafka properties for the kafka consumer, reading from kerberos-enabled kafka, and limiting the number of records read. Kafka Batch Source converts incoming kafka events into cdap structured records which can then be used for further transformations.

The source will read from the earliest available offset, or from the initial offset specified in the config, on the first run; it remembers the last offset it read in a run and continues from that offset on the next run.

diff --git a/kafka-plugins-0.10/docs/KAFKASOURCE.md b/kafka-plugins-0.10/docs/KAFKASOURCE.md
new file mode 100644
index 0000000..912d358
--- /dev/null
+++ b/kafka-plugins-0.10/docs/KAFKASOURCE.md
@@ -0,0 +1,85 @@
+[![Build Status](https://travis-ci.org/hydrator/kafka-plugins.svg?branch=master)](https://travis-ci.org/hydrator/kafka-plugins) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
+
+Kafka Source
+===========
+
+Kafka streaming source that emits records with a user-specified schema.
+
+plugin configuration
+
+Usage Notes
+-----------
+
+Kafka Streaming Source can be used to read events from a kafka topic. It uses kafka consumer [0.10.2 apis](https://kafka.apache.org/0100/documentation.html) to read events from a kafka topic. Kafka Source converts incoming kafka events into cdap structured records which can then be used for further transformations.
+
+The source can read from the latest offset, from the beginning, or from a provided kafka offset. The plugin relies on Spark Streaming offset [storage capabilities](https://spark.apache.org/docs/latest/streaming-kafka-0-10-integration.html) to manage offsets and checkpoints.
+
+Plugin Configuration
+---------------------
+
+| Configuration | Required | Default | Description |
+| :------------ | :------: | :----- | :---------- |
+| **Kafka Brokers** | **Y** | N/A | List of Kafka brokers specified in host1:port1,host2:port2 form. |
+| **Kafka Topic** | **Y** | N/A | The Kafka topic to read from. |
+| **Topic Partition** | **N** | N/A | List of topic partitions to read from. If not specified, all partitions will be read. |
+| **Default Initial Offset** | **N** | N/A | The default initial offset for all topic partitions. An offset of -2 means the smallest offset. An offset of -1 means the latest offset. Defaults to -1. Offsets are inclusive. If an offset of 5 is used, the message at offset 5 will be read. If you wish to set different initial offsets for different partitions, use the initialPartitionOffsets property. |
+| **Initial Partition Offsets** | **N** | N/A | The initial offset for each topic partition. If this is not specified, all partitions will use the same initial offset, which is determined by the defaultInitialOffset property. Any partitions specified in the partitions property, but not in this property, will use the defaultInitialOffset. An offset of -2 means the smallest offset. An offset of -1 means the latest offset. Offsets are inclusive. If an offset of 5 is used, the message at offset 5 will be read. |
+| **Time Field** | **N** | N/A | Optional name of the field containing the read time of the batch. If this is not set, no time field will be added to output records. If set, this field must be present in the schema property and must be a long. |
+| **Key Field** | **N** | N/A | Optional name of the field containing the message key. If this is not set, no key field will be added to output records. If set, this field must be present in the schema property and must be bytes. |
+| **Partition Field** | **N** | N/A | Optional name of the field containing the partition the message was read from. If this is not set, no partition field will be added to output records. If set, this field must be present in the schema property and must be an int. |
+| **Offset Field** | **N** | N/A | Optional name of the field containing the partition offset the message was read from. If this is not set, no offset field will be added to output records. If set, this field must be present in the schema property and must be a long. |
+| **Format** | **N** | N/A | Optional format of the Kafka event message. Any format supported by CDAP is supported. For example, a value of 'csv' will attempt to parse Kafka payloads as comma-separated values. If no format is given, Kafka message payloads will be treated as bytes. |
+| **Kerberos Principal** | **N** | N/A | The kerberos principal used for the source when kerberos security is enabled for kafka. |
+| **Keytab Location** | **N** | N/A | The keytab location for the kerberos principal when kerberos security is enabled for kafka. |
+
+
+Build
+-----
+To build this plugin:
+
+```
+   mvn clean package
+```
+
+The build will create a .jar and .json file under the ``target`` directory.
+These files can be used to deploy your plugins.
+
+Deployment
+----------
+You can deploy your plugins using the CDAP CLI:
+
+    > load artifact <target/kafka-plugins-<version>.jar> config-file <target/kafka-plugins-<version>.json>
+
+For example, if your artifact is named 'kafka-plugins-<version>':
+
+    > load artifact target/kafka-plugins-<version>.jar config-file target/kafka-plugins-<version>.json
+
+## Mailing Lists
+
+CDAP User Group and Development Discussions:
+
+* `cdap-user@googlegroups.com`
+
+The *cdap-user* mailing list is primarily for users using the product to develop
+applications or building plugins for applications. You can expect questions from
+users, release announcements, and any other discussions that we think will be helpful
+to the users.
+
+## License and Trademarks
+
+Copyright © 2017 Cask Data, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
+in compliance with the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed under the
+License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+either express or implied. See the License for the specific language governing permissions
+and limitations under the License.
+
+Cask is a trademark of Cask Data, Inc. All rights reserved.
+
+Apache, Apache HBase, and HBase are trademarks of The Apache Software Foundation. Used with
+permission. No endorsement by The Apache Software Foundation is implied by the use of these marks.
diff --git a/kafka-plugins-0.9/docs/KAFKAWRITER-SINK.md b/kafka-plugins-0.10/docs/KAFKAWRITER-SINK.md
similarity index 93%
rename from kafka-plugins-0.9/docs/KAFKAWRITER-SINK.md
rename to kafka-plugins-0.10/docs/KAFKAWRITER-SINK.md
index 54374a2..99273c4 100644
--- a/kafka-plugins-0.9/docs/KAFKAWRITER-SINK.md
+++ b/kafka-plugins-0.10/docs/KAFKAWRITER-SINK.md
@@ -12,14 +12,14 @@ The sink also allows you to write events into kerberos-enabled kafka.

Usage Notes
-----------
-Kafka sink emits events in realtime to configured kafka topic and partition. It uses kafka producer [0.8.2 apis](https://kafka.apache.org/082/javadoc/index.html?org/apache/kafka/clients/producer/KafkaProducer.html) to write events into kafka.
+Kafka sink emits events in real time to the configured kafka topic and partition. It uses kafka producer [0.10.2 apis](https://kafka.apache.org/0100/documentation.html) to write events into kafka.

This sink can be configured to operate in synchronous or asynchronous mode. In synchronous mode, each event will be sent to the broker synchronously on the thread that calls it. This is not sufficient for most high-volume environments. In async mode, the kafka producer will batch together all the kafka events for greater throughput, but that opens up the possibility of dropping unsent events if the client machine fails. Since the kafka producer uses synchronous mode by default, this sink also uses a synchronous producer by default.

It uses a String partitioner and String serializers for key and value to write events to kafka. Optionally, if a kafka key is provided, the producer will use that key to partition events across multiple partitions in a given topic. This sink also allows compression configuration. By default compression is none.

-Kafka producer can be tuned using many properties as shown [here](https://kafka.apache.org/082/javadoc/org/apache/kafka/clients/producer/ProducerConfig.html). This sink allows user to configure any property supported by kafka 0.8.2 Producer.
+The Kafka producer can be tuned using many properties as shown [here](https://kafka.apache.org/0100/javadoc/org/apache/kafka/clients/producer/ProducerConfig.html). This sink allows the user to configure any property supported by the kafka 0.10.2.0 Producer.
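For reference, here is a minimal sketch of how such tuning properties map onto the 0.10.2 producer API when it is used directly. The broker address, topic name, and tuning values are illustrative assumptions, not plugin defaults or plugin code:

```java
import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class ProducerTuningSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    // Broker list in the same host:port form the sink configuration uses (address is illustrative).
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    // String serializers for both key and value, matching the behavior described above.
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    // Pass-through tuning properties of the kind the docs mention, e.g. acks and client.id.
    props.put(ProducerConfig.ACKS_CONFIG, "all");
    props.put(ProducerConfig.CLIENT_ID_CONFIG, "myclient");
    // Batching knobs that matter in async mode: wait up to 50 ms to fill 16 KB batches.
    props.put(ProducerConfig.LINGER_MS_CONFIG, 50);
    props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);

    try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
      // A keyed record; the key determines the partition within the topic.
      producer.send(new ProducerRecord<>("events", "some-key", "some,csv,payload"));
    }
  }
}
```

In the plugin itself, equivalent settings would be supplied through the sink's additional kafka properties configuration (for example `acks:2,client.id:myclient`) rather than in code.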
Plugin Configuration
diff --git a/kafka-plugins-0.10/docs/Kafka-alert-publisher.md b/kafka-plugins-0.10/docs/Kafka-alert-publisher.md
new file mode 100644
index 0000000..f358fa5
--- /dev/null
+++ b/kafka-plugins-0.10/docs/Kafka-alert-publisher.md
@@ -0,0 +1,63 @@
+# kafka-alert-plugin
+
+Join CDAP community [![Build Status](https://travis-ci.org/hydrator/kafka-alert-plugin.svg?branch=master)](https://travis-ci.org/hydrator/kafka-alert-plugin) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
+
+Kafka Alert Publisher that allows you to publish alerts to kafka as json objects. The plugin internally uses kafka producer apis to publish alerts.
+The plugin allows you to specify the kafka topic to use for publishing as well as additional kafka producer properties.
+This plugin uses kafka 0.10.2 java apis.
+
+Build
+-----
+To build this plugin:
+
+```
+   mvn clean package
+```
+
+The build will create a .jar and .json file under the ``target`` directory.
+These files can be used to deploy your plugins.
+
+Deployment
+----------
+You can deploy your plugins using the CDAP CLI:
+
+    > load artifact <target/kafka-alert-plugin-<version>.jar> config-file <target/kafka-alert-plugin-<version>.json>
+
+For example, if your artifact is named 'kafka-alert-plugin-<version>':
+
+    > load artifact target/kafka-alert-plugin-<version>.jar config-file target/kafka-alert-plugin-<version>.json
+
+## Mailing Lists
+
+CDAP User Group and Development Discussions:
+
+* `cdap-user@googlegroups.com`
+
+The *cdap-user* mailing list is primarily for users using the product to develop
+applications or building plugins for applications. You can expect questions from
+users, release announcements, and any other discussions that we think will be helpful
+to the users.
+
+## IRC Channel
+
+CDAP IRC Channel: #cdap on irc.freenode.net
+
+
+## License and Trademarks
+
+Copyright © 2017 Cask Data, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
+in compliance with the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed under the
+License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+either express or implied. See the License for the specific language governing permissions
+and limitations under the License.
+
+Cask is a trademark of Cask Data, Inc. All rights reserved.
+
+Apache, Apache HBase, and HBase are trademarks of The Apache Software Foundation. Used with
+permission. No endorsement by The Apache Software Foundation is implied by the use of these marks.
diff --git a/kafka-plugins-0.9/docs/Kafka-batchsink.md b/kafka-plugins-0.10/docs/Kafka-batchsink.md
similarity index 96%
rename from kafka-plugins-0.9/docs/Kafka-batchsink.md
rename to kafka-plugins-0.10/docs/Kafka-batchsink.md
index 18ec604..d966a3e 100644
--- a/kafka-plugins-0.9/docs/Kafka-batchsink.md
+++ b/kafka-plugins-0.10/docs/Kafka-batchsink.md
@@ -7,7 +7,7 @@ Kafka sink that allows you to write events as CSV or JSON to kafka.
The plugin has the capability to push data to a Kafka topic. It can also be
configured to partition events being written to kafka based on a configurable key.
The sink can also be configured to operate in sync or async mode and apply different
-compression types to events. Kafka sink is compatible with Kafka 0.9 and 0.10
+compression types to events. This plugin uses kafka 0.10.2 java apis.
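To make the key-based partitioning concrete, here is a sketch of a kafka 0.10.2 `Partitioner` that routes records by the hash of their string key, in the spirit of the plugin's `StringPartitioner` class. This is an illustrative reimplementation under assumed behavior, not the plugin's actual code:

```java
import java.util.Map;

import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;

// Illustrative key-based partitioner: records with the same string key
// always land in the same partition of the target topic.
public class StringKeyPartitioner implements Partitioner {

  @Override
  public void configure(Map<String, ?> configs) {
    // No configuration needed for this sketch.
  }

  @Override
  public int partition(String topic, Object key, byte[] keyBytes,
                       Object value, byte[] valueBytes, Cluster cluster) {
    int numPartitions = cluster.partitionsForTopic(topic).size();
    if (key == null) {
      return 0; // unkeyed records all go to partition 0 in this sketch
    }
    // Mask the sign bit so the modulo result is always non-negative.
    return (key.toString().hashCode() & Integer.MAX_VALUE) % numPartitions;
  }

  @Override
  public void close() {
    // Nothing to release.
  }
}
```

A producer selects such a partitioner through the standard `partitioner.class` producer property.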
Configuration @@ -55,4 +55,4 @@ Additional properties like number of acknowledgements and client id can also be "kafkaProperties": "acks:2,client.id:myclient", "key": "message" } - } \ No newline at end of file + } diff --git a/kafka-plugins-0.9/docs/Kafka-batchsource.md b/kafka-plugins-0.10/docs/Kafka-batchsource.md similarity index 98% rename from kafka-plugins-0.9/docs/Kafka-batchsource.md rename to kafka-plugins-0.10/docs/Kafka-batchsource.md index 48714f4..8242579 100644 --- a/kafka-plugins-0.9/docs/Kafka-batchsource.md +++ b/kafka-plugins-0.10/docs/Kafka-batchsource.md @@ -7,7 +7,7 @@ Kafka batch source. Emits the record from kafka. It will emit a record based on you use, or if no schema or format is specified, the message payload will be emitted. The source will remember the offset it read last run and continue from that offset for the next run. The Kafka batch source supports providing additional kafka properties for the kafka consumer, -reading from kerberos-enabled kafka and limiting the number of records read +reading from kerberos-enabled kafka and limiting the number of records read. This plugin uses kafka 0.10.2 java apis. Use Case -------- @@ -106,4 +106,3 @@ For each Kafka message read, it will output a record with the schema: | count | int | | price | double | +================================+ - \ No newline at end of file diff --git a/kafka-plugins-0.10/docs/Kafka-streamingsource.md b/kafka-plugins-0.10/docs/Kafka-streamingsource.md new file mode 100644 index 0000000..51fffdb --- /dev/null +++ b/kafka-plugins-0.10/docs/Kafka-streamingsource.md @@ -0,0 +1,118 @@ +# Kafka Streaming Source + + +Description +----------- +Kafka streaming source. Emits a record with the schema specified by the user. If no schema +is specified, it will emit a record with two fields: 'key' (nullable string) and 'message' +(bytes). This plugin uses kafka 0.10.2 java apis. + + +Use Case +-------- +This source is used whenever you want to read from Kafka. For example, you may want to read messages +from Kafka and write them to a Table. + + +Properties +---------- +**referenceName:** This will be used to uniquely identify this source for lineage, annotating metadata, etc. + +**brokers:** List of Kafka brokers specified in host1:port1,host2:port2 form. (Macro-enabled) + +**topic:** The Kafka topic to read from. (Macro-enabled) + +**partitions:** List of topic partitions to read from. If not specified, all partitions will be read. (Macro-enabled) + +**defaultInitialOffset:** The default initial offset for all topic partitions. +An offset of -2 means the smallest offset. An offset of -1 means the latest offset. Defaults to -1. +Offsets are inclusive. If an offset of 5 is used, the message at offset 5 will be read. +If you wish to set different initial offsets for different partitions, use the initialPartitionOffsets property. (Macro-enabled) + +**initialPartitionOffsets:** The initial offset for each topic partition. If this is not specified, +all partitions will use the same initial offset, which is determined by the defaultInitialOffset property. +Any partitions specified in the partitions property, but not in this property will use the defaultInitialOffset. +An offset of -2 means the smallest offset. An offset of -1 means the latest offset. +Offsets are inclusive. If an offset of 5 is used, the message at offset 5 will be read. (Macro-enabled) + +**schema:** Output schema of the source. 
If you would like the output records to contain a field with the
+Kafka message key, the schema must include a field of type bytes or nullable bytes, and you must set the
+keyField property to that field's name. Similarly, if you would like the output records to contain a field with
+the timestamp of when the record was read, the schema must include a field of type long or nullable long, and you
+must set the timeField property to that field's name. Any field that is not the timeField or keyField will be used
+in conjunction with the format to parse Kafka message payloads.
+
+**format:** Optional format of the Kafka event message. Any format supported by CDAP is supported.
+For example, a value of 'csv' will attempt to parse Kafka payloads as comma-separated values.
+If no format is given, Kafka message payloads will be treated as bytes.
+
+**timeField:** Optional name of the field containing the read time of the batch.
+If this is not set, no time field will be added to output records.
+If set, this field must be present in the schema property and must be a long.
+
+**keyField:** Optional name of the field containing the message key.
+If this is not set, no key field will be added to output records.
+If set, this field must be present in the schema property and must be bytes.
+
+**partitionField:** Optional name of the field containing the partition the message was read from.
+If this is not set, no partition field will be added to output records.
+If set, this field must be present in the schema property and must be an int.
+
+**offsetField:** Optional name of the field containing the partition offset the message was read from.
+If this is not set, no offset field will be added to output records.
+If set, this field must be present in the schema property and must be a long.
+
+**maxRatePerPartition:** Maximum number of records to read per second per partition. Defaults to 1000.
+
+**principal:** The kerberos principal used for the source when kerberos security is enabled for kafka.
+
+**keytabLocation:** The keytab location for the kerberos principal when kerberos security is enabled for kafka.
+
+Example
+-------
+This example reads from the 'purchases' topic of a Kafka instance running
+on brokers host1.example.com:9092 and host2.example.com:9092. The source will add
+a time field named 'readTime' that contains a timestamp corresponding to the micro
+batch when the record was read. Each record will also contain a field named 'key' holding
+the message key. It parses the Kafka messages using the 'csv' format
+with 'user', 'item', 'count', and 'price' as the message schema.
+
+    {
+        "name": "Kafka",
+        "type": "streamingsource",
+        "properties": {
+            "topic": "purchases",
+            "brokers": "host1.example.com:9092,host2.example.com:9092",
+            "format": "csv",
+            "timeField": "readTime",
+            "keyField": "key",
+            "schema": "{
+                \"type\":\"record\",
+                \"name\":\"purchase\",
+                \"fields\":[
+                    {\"name\":\"readTime\",\"type\":\"long\"},
+                    {\"name\":\"key\",\"type\":\"bytes\"},
+                    {\"name\":\"user\",\"type\":\"string\"},
+                    {\"name\":\"item\",\"type\":\"string\"},
+                    {\"name\":\"count\",\"type\":\"int\"},
+                    {\"name\":\"price\",\"type\":\"double\"}
+                ]
+            }"
+        }
+    }
+
+For each Kafka message read, it will output a record with the schema:
+
+    +================================+
+    | field name  | type            |
+    +================================+
+    | readTime    | long            |
+    | key         | bytes           |
+    | user        | string          |
+    | item        | string          |
+    | count       | int             |
+    | price       | double          |
+    +================================+
+
+Note that the readTime field is not derived from the Kafka message, but from the time that the
+message was read.
diff --git a/kafka-plugins-0.10/docs/KafkaAlerts-alertpublisher.md b/kafka-plugins-0.10/docs/KafkaAlerts-alertpublisher.md
new file mode 100644
index 0000000..47c3f13
--- /dev/null
+++ b/kafka-plugins-0.10/docs/KafkaAlerts-alertpublisher.md
@@ -0,0 +1,39 @@
+# Kafka Alert Publisher
+
+
+Description
+-----------
+Kafka Alert Publisher that allows you to publish alerts to kafka as json objects.
+The plugin internally uses kafka producer apis to publish alerts.
+The plugin allows you to specify the kafka topic to use for publishing and other additional
+kafka producer properties. This plugin uses kafka 0.10.2 java apis.
+
+
+Configuration
+-------------
+**brokers:** List of Kafka brokers specified in host1:port1,host2:port2 form.
+
+**topic:** The Kafka topic to write to. This topic should already exist in kafka.
+
+**producerProperties:** Specifies additional kafka producer properties, such as acks and client.id, as key-value pairs.
+
+**Kerberos Principal:** The kerberos principal used when kerberos security is enabled for kafka.
+
+**Keytab Location:** The keytab location for the kerberos principal when kerberos security is enabled for kafka.
+
+Example
+-------
+This example publishes alerts as json objects to the already existing kafka topic 'alarm'.
+The kafka broker is running at localhost on port 9092. Additional kafka producer properties
+like acks and client.id are specified as well.
+
+
+    {
+        "name": "Kafka",
+        "type": "alertpublisher",
+        "properties": {
+            "brokers": "localhost:9092",
+            "topic": "alarm",
+            "producerProperties": "acks:2,client.id:myclient"
+        }
+    }
diff --git a/kafka-plugins-0.9/docs/kafka-batch-source-plugins-config.png b/kafka-plugins-0.10/docs/kafka-batch-source-plugins-config.png
similarity index 100%
rename from kafka-plugins-0.9/docs/kafka-batch-source-plugins-config.png
rename to kafka-plugins-0.10/docs/kafka-batch-source-plugins-config.png
diff --git a/kafka-plugins-0.9/docs/kafka-sink-plugin-config.png b/kafka-plugins-0.10/docs/kafka-sink-plugin-config.png
similarity index 100%
rename from kafka-plugins-0.9/docs/kafka-sink-plugin-config.png
rename to kafka-plugins-0.10/docs/kafka-sink-plugin-config.png
diff --git a/kafka-plugins-0.10/docs/kafka-source-plugin-config.png b/kafka-plugins-0.10/docs/kafka-source-plugin-config.png
new file mode 100644
index 0000000000000000000000000000000000000000..c9b36bd0bceb37f0695b3445be268634b7e92604
GIT binary patch
literal 76013
zF!jc(=X2Tj>#66}a-OU8ht2a*ph0fB0TW~#Jb|`Wy))=@F2e@2{?{kiao@z-<^K+A zYTA4;hpdUe_zCr2zn_swZ@GL;k1$eL9i^~re8c7Sy)dm&Nh@FitJF}vC{P; zq&OX|RL08x#JVj{r&Lj$({?mNK9g;ZagecP!C+AXS?J$*n2Gu z8W=-2mda5e6Q=l9dp%Zguk0+D-m`UiE&+FrZoaT40Y_FKnu9W}PsQ~e;yI^~?#^VK zOQql`1ydaXRJu-y8`e&9D_w!Vy8dg*hWp;MoUCybxCT&eE~wY_El_VYaS8k?ZHQdc z0n@L(H$2!#&SiqmIi{febOtgWrt92LNj$k7asxAD$n7d|<@;i3B| z!FnTn+6g{IWGxlErb$gA_)dO=PuzcqRu<9wtspG^>?b)U1FRpCW}~DUG{i=1i_Dep zCA#uhQZQ=u4X8juoR=8VcvLb0%0|(kjeCdGQYtqzyT-hC7Ur&2-4zCmLgo6cy>)8m zS1~Qo_o`DG%{TlP^pAYmhlBP!3@2vjnXJM*%5JdILyf=KhravqQn}bIMR{`IaXFtd z1`Q?@RmB^249sr$tQQ9ys;_=~MkqUOYNiyIQGRzKYZd%*rxeDO8@3#{BA?h7)})xX zDZH~cb@9-CYK~bxu#Sdf47xGp^|YDvj{!Xq@%l@V!VDFzd5g}SNr62k8mztw@|F{i zA1JqNuH5~ELY;Hbk9n=)elVcra58s|Pu{iS^Kl`j_KnJhfBi#tPCZLx!;^!p?&ldi zzQTMDjQ@*%e@!$|9osH9{<@*!NWE)+)09m>VnRFGusbtP)onQGfF)OXk-W>#Hgd{z zGZ!R~k=ruv{hla+*V->L;PBUOLZ@vf7CcRdaIGj2pYVELX{_cydEye4QJ`U(1T-{m z?9<)8p~mp^>m_v$OXi~Q+$RD`!F)M})RO5l;dM~JtH5J)ZF(i0WJY$NNNrQ#fo6-U z^iAqS-d=)Yv69BhR&~D?Mb}c6BHR(aqn9L7pF4P86C+tM*i0`U8mlt$0}f4!1zUi~tjR zO!SIi%&}euqQ2BYYPUbJC!iGP;-$@>o8#VXUOG=W_V%bJ*(RVo8^;2h<{jzl{z57L zIa}81;9|I^=#Vws96qQTLwYWY+N~&AA3JwJ6zEF~(@`JjWLe(d_y~_{xRg>ebV-}< z4l-A?F2_9}%#9MTS>mro1QqXwf{UZCi4VpSq!VM!e}pyp+*pVgcxAWoaIJ%y?2b$O zEhO?;&~}Udxi1J`LIhFV+}#6`$CnTf#vzrvkarK-m_uXN)3(tJVJ&w zFP$torr)!z_~H6x(zVJFR#F_HY3KLSOp5F7qbQ8Df%wgxBSR}1P9w2Bu0e_7#vqSJ z{-u-@M3=D2JI%dr$!g;M_cDm+H+M>?qK>me`a`VG`Byk9b0aK|s9prdKol614MThDW2)^c09;knbtjS~aYbpwl>ipH;) ztT<}0#>pSooSW#z$hX_x4QX(F+pt&X{$iZA-F;|aw3KD#>zGR=XHR~pt!1|6(h&6! z`nZMLBr~pfb0ie#i(&xIo~TA#TQ%s~N19aeKa_QGF3jx<+}qYpo8mw?xMZa=XHMQ6 ziLMEB(AQY-BWA#AYl!gfZx93MV3|%ulMiUATjp8cr@nTKh{B{<1b%2@4dM*~;J*}U z7M1#=9oA>7#xmvrPoKM0A%#@a3IZS;Zq4(N4iFGOt;1HqB6IJYJ6XRWa461PZQUJd z;$Yr32dj~g5D#HXc;`lF#MjMJ!C{M~F#R-Ph*A-N) zcSIrspVRnGe3NzB@;fZ7*IviM>BKcN$z@Wo*<9VW3R{9>3AS%JZh=gdSEQ;ud0Y9e zrvAQVguJ@W7dNoj+E2aN;-!^Ko{)yodefP54G;Ta8sIaFJA3}{QE(}#MXy(wKKanx zWEikjbD=H^4wK>Ext}|_Ys*4iLqoiNd?^)N1(nX^@(k$nL)0%H&MFGHjY|dE7#_1t z{aiUzNGTm+k=5Tmn4dh1k1iR>!ia+mPhf=b+S%v%7NTXHb2;(>`I|@zl?3+BX(FyG zD`%7<{t2O=cVzuZvwX6&XY5JQK@^vO3=#}%Y=^QV<1??3`@^PtkjVncCeiUjg%o)_ zVR9>&-l~nTlUU!q11q13h@I)PcROz)CrDd|2=ZOaNJ!){HK+AVB9Xh+G2#j-t*H+e zNd-XT7%hM_Y`|9=LmC@w4V6|rgINZBQygvd38;8CDfi_$J)6r#ITfvG0nkbAQNWDW zz}}}aqk4|USB}Ll3><;Q3?~yAq1KIyS}gDlS2LhLuJ0c%w*8l>jPN~(O(E9(U}a|P zvsXE&g&V5lr{FqhI55yTeY1u$pU_bN>~1%QjPG7i@|VVcnhVb~q4GCq&GD2Urahn@ zoPmHKbuLwMDYcKJj*)kIEV30ZmbxEs6}BPP_%u>03=T4%<~y0fgH+et7s#rrtS9|S zyAyTgiAGpYLqIDFOjnP1$6pSc9h$1~R}0`vMzc1&E1R7bAG*DRgXyzvt2Uf7t+Z}7 zL^=AVn0(Qz!bHFm0)Z4Bj_gp5#*{+pgZ^lxt5q*d^90uWRSQ)L?7j`Qw9|Cxs9{X} zk=?lkwp?Ki5v9*+{V1yKTSL2C5%p%tB5)W-V@7_@z_HMti$%kuFkt3MotC3fu=^_< z_qTr%E%)M@{;{z`$fa*p31QL;9bgX7{YkXL76u6H%h4&*LdCQ2-Id9@k1#!%w8S)m zVM(tKm4F7mJr7#_x15weyq+%wXG`+w862ha4VEh<3<(kxB4}*v`|5C&k_gzCyB*X= z5-S=w`^bjZk^+=x*Ssi`F`OUIypOQ|_O5i>IStOmvbWqx8c;EBZa#EgFB}t$p;^7q zmF(TrRO3F+W3wbi_&EGHL(n-dk|Sv)f@2amP!k2TaT+=n)XnE)5%K=vdXQ!g>0V2- z+i0&z#RV09HxrRO^uMpWDV$hq9l!e{OwK{2ntx!vr8v$T_U=I7fgL5+c|QxqRsn8$ zgEm~5lzHCPkxL3<g~ z8KrEq-!mq)!`@@>_F*j9GW8vvmGCardBtca6d9!cz&RX|g0k4^=C z;eOg+9zlOW1Qi7cb>ID1hQ6JNX3sn3Wi%WF|NDsj(c~Qeq9ZM{u^2)ZM#(bO>9ooOR{CZlq*(fzoeCUZab=5DWHR+!{&;g? 
zT{hs6bxJm`HjfOyP>;=_ zR{9{8cq;^Y@20NF20x+B!^O96aVogqKqvc&osRykxQ~t@_j4SZSKYePaYyo`?>fI^ zrI_pNJlgZR-?&ic7tph@l@@~rIosK0d~;>f5p;qlK3)JHJU#Xof7y&(4yeX|RpmT1 zL@SF)f3lbOQl5q^!1a|5`gTGCPKqsJQcK31yAzVQ+Fg8NVzBoA^3v zYo@Trf{A+;rPS?ZY*=uPXjCh6J#);=bM4^vbWV$Bn#5oORTp;jL&+ZXc>h4##37&k zW8L8q_2=PHUIQ;$3(%6e!R7Z$u*1=Y6NC7BmK+{>p1AIQ4c6K`izzQ~CQSx zco`gMH8S0~I3v?5AN}-Nq!&gIE#M=gVpdknQ^y{6M7I)rt|kU^t~BRTsJqCQJu1Ns zGtwy1_`%%k8&`88Cdl-Yqzyo}Iqk0SK>!!y<})vcR~kg@mxdnikvITu#+CSXb8|QG zo=N@3L{l1FuA4;H^h6n>WSi2qw=9v&gAse%t^FH*;g`?PQ?$w(_QO^*>a z1HZQD59521ZQb6Kk{x?x5p3{|Ap43rHT9d`T9gK9BH0Gb5X8-VA7H9Jy*NzL+so#IE&T7SrTl?wC93gjCH6Q|xE zSAGu^b^|3nY&wwrf+^_u&^_|CWZ@`Am$q|+{4w*9(Q&-I@%X`lbSL2jb@fl9pJrWg z-=Cs2)}e;$Us;T!o?N{Y*qvniEUs&ts4QZo6)PoIVkY~KB75&0LhJ@UWpud@_Z@N@scNF9VpK_u5&vv zaHgH$6P*46@r;FRi4*fZNIaf-t?u;#YO*N4ItopDfF>GF7#V%Yi2a<-^?@C2yS5}6 z7{_KA#Nz-~yT0*`kQm{OU{S#hyk8!B3(P&7aYRKzGJ2ZbkFJhht)eZqCv9mhkzgVQ zKDf%@sRUda8oKX$u`H~*QZI>bODP0nr@a3r04ruwY-5YuSNh+TUW z+wfL*e^0q(@A+|Bl_6>ArR^juk;OwQ=KE`5F)2Ho*I$Y_!PkN-oUbQW?MEnq8@iOy z<4X>1R%oj7jm!+sA2&QbshcF=4h~|1pL+5vj~(VrELCv>=8lKEd@rAlVJz*X+BA+S zPv)vLQS2DyQRTL* zEg!8|3N3%uR`+xl`La7xew~c85X&V#Rv;D--&XNJVn-^nIcM)qdH>imxZ;unOVhjA z)+SHMp2c~D7JA2j=>C=E38KvE@g=guZ8Cr+Q7z>_vdrELb9$>;&xo^3v39(3I=Eql>X|+q_i;7BAA4|nbSHkd49uhRH1Th$ zK-_oQOim{*vWZAG%(_`~DNR(GQ(9W=2or^I^P)?!)CW8317O5SymJHL*hUw^UE|pp`z1x7o^zD;6&r zWxwdjo*DNw7x>&p+7@K}X%bbk&I|MP>RiIkm!Ci1Ky>m0K@(}VxbA|A>wf;iB|YJD zJPYdB>gD}J!`;U&d}}N9&yFbPNuT4@n3a|Kt3-E;b+38^-C>tFljnzr$)UX^%*UY1 zjB9Twl0S8>TJ;{y%Wuq5Ee*DP zube(;kTqJhUh<*!es8IP-Mzw%!nQE@yyM#sRm>}xT+zxZd9`}xj<%$Y{|7)Ze)TcE zMwV+kMXG{vOX>HlXJ>6nD+2B&y*l|hW`>!6FXeMKar_NYH@4!*t=KyJAhFWXmF=uX zR!PyhVp~sT`U>=}^=HJDs`?&P0f5&7tSf?Tt#HzN@H{W#!$gN)YrMijbw2pY1+upd zwqN^AFEyaamXI!2aOuN)ZVS#vLx|NYMg8|&V^s1J`>KT)?Rx!h5L^&bRObnBuLyPJ zd$v8hZU;ME)agSH-qCV9Ta8rK-Ct_#_@2V28sVY_mUdohcZ1Ann6-~@+|d>Hqv8lC zkM}Y65n4Wh+E15F=~cvewj#SqWNDmNr4!!D_S~>RQFNLyDn$f))YsgO7S_9F&3xzT zddd$)L9<6je(PPm&H*WSsjIffkx*lmb!6ZTe(1w?jCcJ5tD7vehWUE`M|)o#7UkEq zD@Z6EQlg@gf^;`ZNVgy$ASGSWIfSHy^w1#!(mix{cf-&_cQY{GAmZ;k-}jz#oxi^8 zI_G-%kGY)vZ6*UijiF2|@xGykaK74xJTpY40-&O5^_ma^E0#-%>f zFhJNiro#X5Yrc@*#iy&rDuiY>*HbYc#nP%GzDO~0>S(u1*mHLF;L>!FIu}6LuKD}| zAkW~SSdRFx33X(1*UeeIBL|y?itl*OAFk9u;MMLjGovo;KR#`qb8MvQ3`}++o2Qa zus=K2!IYQUmR;kV@u?LgXS(vFFZE;pM=`A{MBZY+YU7}i+ITV1DW>2z3H)T;t#MRg zdF2dkc6^-a6RvAgL|*=>zW+o9X2cF;G^2G*_op}#>w@8G3Pfj zc(05(9&`F1idy2xmaWaSTWfy($(gFsS8uNIW6H5vB`i{zsF_safN}G~w{LqE<18JM z7^pT@RhDU*edD{a+`BnNbz^PLZsF`|+V>L8Knl~*E#;Of-_Fe2ap)PMb3; z7IRi!$1*2bqc*ScK}C}`mg*m^7v$Ab!izE^QQ^41r)B}4vlTWWL>5ocacMf1do#wI zYZE9~xfLF&xWv(dJQyH8^Jn9j-&)rFkKDGUJgIjNU$XmWs%5^ebFrl*jTn+pJH`#w z7qgOx|FB7@&ay;waM3<(^>cgq8`0|Gk?q13p`Dv{)f1R_pCQL&xm z94)|4W@u!&na4d)JWbPPTYwo4je&$`@^=^%OfmOxJxg-DkBgbZR0gKCa4}+9$~zq# z@Oem<_>G$p@<`dDIe6-e+8Bh7*@|sHIp`pdU7DAu_KdAI|AI)>0niTGxlrgI$~V71 zS8<_d0kFEKmq*=^U2eG1JEm%waQ2FpCmPM^r%j zOnS7ZNHruM&-uU97D!lWGdQcS;09`fE?Bok>uM&WB<)ok1GJgkO5G8i@*`l-B2G*Q%L6EP<79= z$<-Hz9U@yTgx;^Y9&HTzeqx^R&7u883dIK3-rJ(=!7s$x7j?QXAN^30zAZlP8AzT& z+D+H@!9?z^+Bl6c0;Ph1e_sCG2U%Q*l4-R6KU@+>R5)qoSevxSX_EK59Q)UF54)N2 z5O9=J_Kb^m>^p(4C;U`YzPUYYl!`CdsL({=<SZQ%Lg5kEOUKT_%iB;w}SPEKlR|hdmwq?$m0j|`)xpw-6;QT%X9(YgeDEL82CzshwVxk;2)EWhF3df# z6=;=+Bmi%{+gbcpYgc(3MZJ!ru+S#MOYvv28(tA|v7n&e+& zM%Oau`7;iP*>C8ue8fu2iAw0ye#HYl^sGTk;p>cF<`3_C1Q_GE=G9cap;@Y$2t3Q&=S%IX|D|j|PeD z+cVANUVIYh+*;bE7C4}kFJr6-!}Vq$pJS6yhar%`Mk7u0x#DStTO_LJPJ^=j#6;(5^{1mzG1qgb5XJ~%0r#l-U}~`;8aGPpS=LxML3sxEfU`TK zoyNS_^3}7IBHL03@eb}k@In?S&3^u9IC3KJNt}}-RlB|D&HMN3{ z;cU~=ef6aiuZ1u8xMT<#&?2Y!qy?#z+@C5$rM|m$nGMaZ6aAH~F+v10`0wI5>MoOJ 
zd;xjXJkc9tx%f?PARRTs;;nQTl#&ckLNMN5@+BLRtXyL8x<8R`-6MZ_<;z;;-Aqu6 zKNmcFQ2fSa6v?E8QXZs^XlFA1f@4N914{DUyj#qExwP7#l?R*vJF$UDRu2PgoE!7a zcf2SYqTgHP0OmTN0i5-7Uel!An4xuDoA}9)#q7yQFmL`lVqU*31!(N zWwYHAn<|^~>2EP*d{@g9b1*P#&7Nj*f}U~ zuhe{+1D^3!bTs%bA8G;1BL> z6NOnH(n3{eCbk#CT9%W3(f-1ul&9=8s=a$f?JsafR%({k)mmms<6o$oC)ck`(Hy$-(WDjHe>@V!k~*C=Qwu)G5mqyGLl zA^(SKU$p!&{hPx6_>&aZ2k_^REv3`k4dwwe-2J@Zc#ih&u4&5wJmkoB4T?y+b$1WN zpSQqWj?aV#&6B&PC|!OY;aLaFvVn?sU&6tMJ3-z1X;ya+ry+v}9$oERuO#oj^j7)? z4*YvC|7vEQziwGn;EGay&d;C7?{e}zAt7jdGcO%&%KT>bQW-y$O!@}|q{=4ph=_=M zb#I_%eoLu9jjJkyO7)3SKAzJu!~N=l&0>bQdiL1|(Wvm7*X_wdwCBk8gwJunVa8%G zP4uU1x-EA>kD&WTVU~v-N|$7c$w*czQZ`LU^vz=U3iLQ8)7*44{}U}mio9PP#13-< zF)X~#QK1(}Qy~6ruQ>j9VV5G_``CAaA}6y|sFE>zZ}piT`QAMC9&+xIy+OU-dkJL^ zGD3p8jzU6+$jFf*1S11Acb4IUgheLs(MS#b9KKWz=>P7dLb#sROKk*dKUzN`)Gj_( zO`Cz)mMruZy`G4!Cog9s8m+?AooB~9R82~+-0}{1A|8;c@?#=m#5?YHQzXw~LLjH* zvv>}3-LYJ?qn+96+-mIeYg;PXWj~t2dosmmdM7OKT9QG->w8z$B5|L1?wihYNj;6@ zh(zEg{BD=9omKUFh+lmh=4xGH1>C9_fh4S-3vD2M0}oz*O&fsY^v6N?xRSG4r(aG7 z;(X#*Bn+;4)}<3F!%NRHjw+Yz^Z7D)UgZdUS}mqgr^sFEHOOR5=@ST7ANdT~I)|!X z>7s-h_3!Gn?>njB=abp;j%%r)7CRhl=P1Qix>8Tt_3Q1eAtb;l$m`nYfkBTCopyYN z&)U6-@U&3_!z@DocwUI&I4O?x%132oPXWDwZhWv=)$+{TbE9s$D&Z5AjpGDlu? zZ@;83dpr(1{Jd2F?tW^6#cJCkzF`sHSpSUp&Bj;Idf^S9!5uU^>+zzwU-zU%H1{eO z7k0WPL%}Z)>$3B)!5?)uwH?yGcOX2N0BcS(BjxdG=g@A6D46x;hTuNiG1wd1vN0lH zj!#y4Y-O>DL@7_O#`;qezU9ysc;IFH2(1mz`av2Mk0(1!@tQmM>Xn2?eB>6b{>0Gv zws$?3+dSRO*d?3vR0L0fU_syP8;B!*Jav-cYVcB82RrnL#}=@NTV?mFjXQpBanllY z5Y~ldbrD=s;mln<^rZA^oJ&jc2q($?yPe;n)jM-t%O-h?nj)SOEz@@(y7*{PM&#S> zQy|>2N+QHgfevG899bPy*{4XhydP<_Px9uFTdl0EAPwb@m1fsacheblszS$_Xy-U zU+T-ou(N;j3d&DBH}k7QTy`q`wzkZ`vcXSz%NMt19!}xUesK4CF8njL&{j1yL_J{b zP75H9v1mN@>f7$K#ICkoea(qtmCn~tSx*SpuPT^_9qH~UhS7ClqZkOIG$NJ4#mf7a+~!q@;mvrIDNH%=3VL;Kw^`^>VXcZ-Bwz_;tzIs zD?jRg^Z0>BG_O3-dX2g{Xf^xjG%Sy){zTY9$hdHi`vyg^`me{GM1y<6e{o4M->z@( z=jNqOCx`b=bGWSx@LQzdJ^avazImn@o7q)+8s)SsYy{TR)#Y;B`}9CGUH-Svn>Run zv!3R0S6!8mP&RmWg)@1|W4BRgI?iyiJ)Oy__=1HBP51_qN+6Dr(EPK{9tC=L@m;(q z-o?emEj1eSf^Yw4*9y+H=ZsevVK@XgTPniCP_HtR}|JNr>Ic$mT1Tu(P^}6rzw6uf+*9{v+wJ}*m=jO2g}eNpmqt0iZ+)|y(5S$-jiT4;ul`Jz z2a+Tae7^pDoB!##2@wq7sfeyD5m;Rct*01#Z$^etFjH#9!76wi{uE*SL0!t|crG{i zFq6cUv~{eMPavT+CVnVzHurC+u8iTiB%8+36WajwLH|(riE7uimhlro1e<`fK9d8+ zm0A2_&G-X+h{B zYTa-T>pyfaouTE>BgF$F#EvNnw;GCX-n257s!7% zlR(M0TOrFL6|q8s8~DcYBSznntT{R;m+~(1l&$g5iCe`bDlIpk7K@;g37OJZm4g`^ zwIm;Q$71EXFA9Ft?(Wi8IhgS8+zQz4`KEV?FY&m9tnyp`DKD2{2TpnYK8l2Im?~)0 zKhyYZ$J;e}@p+vBn*^_->vRqCb|vgrYN>6LHn^tqa9HJgoKpW}1syiujGr&^R5@Nk zr1!OwT3uCWQa9`w&{0HNW;H~oF+>c5SkM;>w@~Neu;!q5tj6&rse{3Cvzrmx*2RIH zR@}alRy`0aP8;T6(F1HLK|k!TBk7ZJ)D_`?op!OL@y>KlOsh9FMnNF~0l8A3%PEc? z@u?%y-VTOftAgZ~ELBw<-&VYBfynXe*xF~1ogKRa-NC;QI#V_!y=JYV&&ieF@~35} z$#G4Ld*1e^3bljDjGEdWmWJpkkbf9!@`_mpyJ~_=WK?}D)-4iWa6-nuviM(B zZ#+9&Mb<_%pyZvh_!tP6P8A4LS~h*_N%B9vNw2dHd7tCRIw1}-bABbrqWA#r(T&>a_W&5v1vk4_5I zRUfAYKxW#f52g68OZdhD)9mm8QyM1P8DX8EWV55P(Y*}RN)Y(o4(uI3eYBuott@*n z{|lA_Hfe)QttBM;kQ85)y(wlU_QXvewAENhs(!gKrLNiCkYsN4h^q*q217P|%3+q! 
z3xriiAPAFvwxx!(_Pv+lxZF?!IOW)___+94pXzxBaLUw}eOh{0$N0K{wfA?V2sc-& zw@4v13p5R%VP_Zd3EgtWt_K6PuX{?9#8~&Z#*62Ph{)BW=f2yOIP5GJ;rS*^2Vq#g z?$x-!r@c>i-$w7T8g#3h_DO~Ng?|j-f(Demn;B-Km|0P!;u(HHJL12dVjQ0!WmHzh zbTPCv`hv`s zqGntNST~_HbfhSe*e!$o#YB(#Dt^jDWk3A;fVV~Vjo1S$Pd2}3O?cjaa_?m) z6+JKKF%3pUA|Sck(!19rLnvAYxKPSX?j=e$Gg1N7N~-xqS#eSX?f$tHVkGOA{cLra zO|8^oAt}X3d(2+@TCd{VZb=~P{r!_ZNGj|S7`YL1ctn5c_AnB3LCMFIAxLjV=d_m8 zdB%ve3fi%I^nlB-Y~f(H;D%bU`o$PFRon!7l&syQU090rx*qi}bA}^{_=Zx!OuHjI zh$)v<@V>TV68aLfI@00Cxn*(@+m#qtGSr5@Ul69PUI|!$1bNLGT!s@3+XF+d_a9xN zf+hA}Mb~d<5S4lozU894wzzUma@`FfF9qB3;t#ehDsl`tc*TdE&U4ui19qJVvB-mEzzipp@+V*IF#tz# zi0rusn5Ghde|PyFd|ezcR1m zc_N%nF4+Ch)KwsloR4^i?Zj-to0poPRF}i3rNi*z(n6Wqn&4YKc&FNL>y}UwRLRM; z{KAv9S~k_E8CIetct>G8;PzZ{Zha%<5)maP8RxfV!c0ljj}+*{sI*T2-BqI9h8Ncz z?^e@L*Zx^U-R0aLF&&g*augP`ExFBTxHJgHClIHjCBKV6)zBm1p`O?HxU8)7LYN5A z;a9EHK1A7+T=g+@I&Ud`SGVY~n7rlk<}_WKoqKeToEp$ouGT!Ha#HTXv{Z&0CP^Or zs`ZW~g0ny6a3&^#KKJB~&&m3JlkizCG|9aC8C;X_Ph`srr|w!lr@Te`!z9sB_}fd~ zQ8-yAkJbh0UZmrmaRs;*Fv=7r}kUU*j7 zZAK@j$h}R6FAK!%I6e*pI!}0Y_Ds=lgkSyq$TSS1qW|zm9bH{7nVE5KfDisFIHEcX zP2zb?`zg`WWFW;8FDs2n{pL!XG{iB0W8M;+_0R8tOA>G4%ortKq9)037WYX_t8?__Vv(LSZ`5j`fQt4n2<8ikvaHZPM~NgN+S=VA z8lg^jB5?Fj*dN``?T0yqaxY^w=WJVIvT(uV-U4PON7xX*!ue7f_g%>NR%pHrJN3Pt zV=1|rAkt@9=~oOBNf>(7Z-0DP_adp<|)tQnz`xKz!g8-I1lSd|5+5+6!%94;V25aJ2e9z*eZ95T%3@l1F+mkxWL;Q|{^U&kT`jtpRZIs}36@t&&l-GAjD1 z^Zp2htu0psqC^zx46H% zSY!9)A(LIjX_x0hXlG+-&?^Fsi`UkxE3CT&hNfowze{bN&B!R8yRSWH%QfmS4+Tq) z%25v-Clz#Dn9@hYxHSQZ1X8pz$67@F!irp}1SyV|$@rImZ0pQgrF|$)90vs&Nh>8) zs)x%dHnEpodVG8kM#X5i&PZ-05>_3un2(C_qMSjC$z(r=At!aEbKKr8mae|qCGM;L zUE>sb#s@4{zMct_o>~yz(l|seX=(~6Nb1}r>sb=Sr-EzEd2CJ8qMr8m=}c;Ic7hq_ zdy$7@i)%4K*u24VtI(KJRl@~!_Sns&1kqJw5DV0o2~3i=FQsJd1|!6NEr&4vhLGVb z5|TkNc-dE1I(f`|s?C{HFTM<}z3ydQCt|r+c)_NzN2n?Qy71>Jd;2GZ2@*=j$9hyT zF-+Sv|1b<4LIybus&)&AM`WdSU0h# zn+-!u#{GL=aO&YC%I-;AL%6n|6=b=6J=S7=wmPbo9V0t$wnZln(wa{4hh-{6G&gAq zoV8M|h7#_WV6(q8PuLZNgn=4q#~C1R73aEG5`INWjOfI_Ijz=O8BAh2ufYwfHGg7A z@ABPu-FT~vbY!wVm>q1EaB05dr3vsIx%UX4z+7Wp&CqVka9 zN8&m0aDZC7`p2&h)zNSIK63%Rhu!eEbv=ZQ>zvCr&q|D0U&i@PYHh6=Ll6Ij)=NpS zfXuW_vaPM|kOk7>)XDOkmKJXlV~WgJzT_0SuG{vUng@ZLq*(zBdiI1u`S>C{Yowm< zNq&4iHm;q?BJFB5U{hoK>K-dkQppU}r6TN8+tbkf9b@_mNCpF&$@k!*);Cc#hSboN zW6-UOdQR-Ap&@()b3DKF;fQNH*C-?%s3%CTS~yZiDND;c6>YpEt*x;=|5cr3zu}A} zc7ilo)K@rSi5vRH+*cMxn%@zH;bw;XIS-}O-bp-gSE&Bd*UCCCuZEcuIkQAw-7D&> zh5l)~7v`%vmz1{pWSQXjzMu9>7at4*UozIg&s)w)Cu%0|O}dlzAwKjanfAaUnGABj zrVF1nHjBN+667&wD|34XxrXWQlDM5;_Qa6b4YZafuD9-3E0<++rIEWmR4zi6REv#b zUBv7+eVfPFw!eKM%>uac+5uE2kGuELDi2p}ofkl$))tN~XryMNT;R&^(;v-_N8nIv zseV@R%h?d)iq7=CS7UE6joS}8glLCsv?`!l8!^BFoYQUDvAZfDpc&W_2j+}Ap?)#xb>-;daWIrtcFs|(h zMgi=#^0j=^umk3Tal!rBSmssw&Avptx1c7>NtIqOq2 zX^i#J1w#z!@Xqs^dDBhJU;2hV_=Ikeb?A1)P%mCe%=-}BtJoy=pe^WQ2Y3E}lA2!? 
zJv0f(45wc>+-Z6j~O(EQ#IrJ^j0+yk=V6dKgv~Ad8jR6ZE~A1SC;m~I&QqUkMeA|p^B3> zl8RCxmF@A1N8fZTyG_9$%vAqJw?Zl9&z|ydO+`^wBxkhhDt79L=3UT^PY##NwQyw< z+{JS6LWkj5k{t7?71daN-nqF=1*#8PxdUeq%j2~N}B zXwK^C4$~>JSwh_(Y*!y+7ZtN0<9&imAvPb&MTW5chVaS4yG418=c=}cr zgaFUfqurAby@PSzg%sely{KsE>%SyD;*Ij`UU=Xg&GXzd4G#4J1C7-F%mrNA`aFe# z_8tYnod7@xQS>GqKrmQ*7fa(k5Q3Bc_rz)c5ts$uq(*4d4DKe70>=YBx_jT1MgHFR z5dl0upe)k;_Xj+`2Ok~6^Itc4nA-u;(86^DS^mG1%eN`Zd+^aA3Mu0Ly|277{Lu=y z^nZfNo841$$AHF~A-jM-WLm&gD z9IlSdzhu(kqqwdxlD*q^aBwR2g^k`neYX8z8rw4#Unvm1oVs};e}IP2xu3hNkv4#~ z5zOYma@v=vR>99&&~jnCL}GLKEq5XUM4J)|S294$maKjmL=Z}UTHI<;>~_SDIT*?U zoo(yah7Q3G@6_+U><)bCfF8vd zsJ(PuFz?l=m85~vZ|}JRh;s#Pj!3+Mk72HZtHaAKXEIvbR?Qf32 zC{Uh_DJ$JhgjEk>3Bc!?9y6P;V!^0`i0Tm3uuN9#v@Ob$qK)o78Gon}bsiL+h1b!W zKxFt%EN&~&o?6(Dnt2VTrL!DWcEw)oP`o_(j>LV1DTxg9T_2mG&oQcVH@DY#us1_B zS?2&iwZDt3pg#iaw_$uujn7%%o6rH&u!Ni-$nAB$gc;{qy>pAB%rjUxJpD4;d%Ayu zN*XF&)k7<*J&nwO2@wT+nl#gbpWO95cK3(%b{1^T)q>SpjAH`-YkmFzB^KcvU@CdK=lQN!QLL^%sz8$qOtux<5n3FR-VWSp{nmv+#zDd71ltiRrq|49`;R z$pKP*ORD?ERW5dT@7@D(36VF-q?udfd9zm-m8BsCrBTYRg0@I{Ds0Q;?UorEnfPnY z{i<9!JFAJD+Iqr|zt<%(m#9Xlz@*hh9Sf^}%P?33NP1Jo>(ex0m&CKIL6D@!y{8iM z!B*k_1@b{(|B75Y;55if>^TmtYwxqKC3e(vnDfyM*|dy1-ns)IgupNOpcu{hg;^;Q`C2^DF` zveAhZVsw&H|M#ejEHrd+1_EpKGyTX`(ogv+2UArW99B^Z0v@aLR%uQ~?logGS5CS2 z>`9*#$ek*f&n1Z}O$%&|q8xV2_X!!!%}gYwD%CC3(G{#1fvHa0Rc)-4E4>YOYNu3B zwgm+ZCekUNaM1;5YeHC0f4Z|FQn8u$T?9EUZayVx_PzfQV+$Mr035PwUf4ZvtM*l& z3i& z!^(yU#m5!ua?Xr02zpdc2?vXlLN5Camdy*h%IK`dXf1_Fde`7W#Q{Hal-XTpxV6-G z z)2Wui?FvO0cK+J3ciO*taNTJ$n9bXp-gJdtwE#ENMvnFzR0c6`(xAxsQi+L`sK&}T z85nX5=qf%XYe`c0R*{e`=1OlOBBr2-kueS+>|E*GMQ@goyiwLb^N7DH(eq?_gas{R z$>kPQcz?}4AJ0ZFO;P&;D;p3&<-W$7wa^;uxVAULj=xb8F-n3~cZy2saUG2#;73e- zB7xN~bxjJ~Y{XeHy)Wn&nX`Ha)&b1~mEozOc@4Rv9%BsH68lfr9KbjX`DIa@s~I>u z9%J4XFI+cWwI)@32HpCP%64vrGebjayxY{p>X?wu_DdVtQrO#kdeR<$qs8bDy++?h zH34)aFn11i3m#7DH=Wo+r9X>|2Oo1VTm`UqyX*F1SO2nWixs)zno}J=gs5l83UqjW zi<8fBl48MIW!^p%nd#Js9ofdpcQh9cU?z@b$UQmT>XMti$*%Dj?9cvCngmA5c4db; zQm}eSO`R!aLOY&IC1-d&iTh2~!_N-o-|@KpOC+AzBCJzD9GNjm~E|(lWV) z>25o2Mu1reuw*Bocx4&?e~Xz4ofe(ej{SZ;1kx8k+)vG}g&7%)xB6*KN;f4}6MQgg zSOO;*!6Ij1>Y=ht5%ASJb{XKouH2flH;iZZKf^1UClP9`4oj!q{c4m@C4zCx9`ys9 zeunCJ!14(D#>duNfYv<)BKVmgxtSB_l{9E7XtTjZk74nMzO{YeG_m|+^f9*_#uFJR z^^Y&rDYHubLz$2zjP2SUP`&|3A!uDIzIj;ppz8zc0uS!{4q^1$m^ncR%nChzEW`G% zk8xHTSZya~srwqoRqhscncCa}CJIY6R>}vCGaq8=5@g?Lc^+wYJMOGnZ`DHxu3eO_ zYL0zAqm7lUmOSOMVH|XtsTkJeO7j-aE19VXYT7@$u=^PvZGl6gn$kO@OY_L=+N*m7 zr9>$AJU+zK0^Y8*)KqnT@zlC3qwDt&;c-9pZe$!EOTU08S_EzzNU0UjPrPQBMLS|7yBMg&4_(EdoP z!dsV2(Mn8*e_P&&cEYmp{H+ff<0EN8bj!i;+c0~+1>H6adyI@}59qr)mxT|O+M}lp zK%3j?h%@&qa+HP5_q}{N88>!^5WbUtn*I39B*Fq&pphJJ#GnHIiFe$cLUVo@E<)xx zZ`=5T)}MQ#(ZEA^M67IkiMt>tf)Ng`mPvE`emvlMf>m66WXJqYXXx{d{l1|5^k1Z- za^m98;q5d7l>LMJe#0XxHh8L`IAnTkznVqPnWq4bj{LV=W~=NbYh5_uD%Q4bdp~jI z-!dt<1PQOOPe|=yIE1Y_VDXJ2P{*8hD6*xQtydA^(F2vM0a}WL} NAu219|MrvDe*kl3#OMG3 literal 0 HcmV?d00001 diff --git a/kafka-plugins-0.9/icons/Kafka-batchsink.png b/kafka-plugins-0.10/icons/Kafka-batchsink.png similarity index 100% rename from kafka-plugins-0.9/icons/Kafka-batchsink.png rename to kafka-plugins-0.10/icons/Kafka-batchsink.png diff --git a/kafka-plugins-0.9/icons/Kafka-batchsource.png b/kafka-plugins-0.10/icons/Kafka-batchsource.png similarity index 100% rename from kafka-plugins-0.9/icons/Kafka-batchsource.png rename to kafka-plugins-0.10/icons/Kafka-batchsource.png diff --git a/kafka-plugins-0.10/icons/KafkaAlerts-alertpublisher.png b/kafka-plugins-0.10/icons/KafkaAlerts-alertpublisher.png new file mode 100644 index 
0000000000000000000000000000000000000000..041cdb49c14e9bb31ddf2c2256c6dc5b72a12e4e GIT binary patch literal 2066 zcmY*adpy(c9{=h#6kbNgCe}EnVq@74Ys0X~DVbZ@VIoW_8@By6x6oYjGAfib{9H

HUT+Toh&Ka(lr#WXRzfM00Kl;V zfazcWz+D6YNObXiFTC+=Uq%4#%QwP*w;B zbvPW3j}4>ayj{sZt}Ay0#8Eao8iz)6xm*-?H;Ne>fws1{w?|uH&=`!Rl3~e;W3WSb zmJF83cai_-xQ4SrV(#8K^>3&`D_mA``eII z0N7;a?n?AMx^ea*^;(~wu2>X@vWA&HBvlY?F7~>pQM-M#4RrhY3W}B*xoN{vnQhzj z_KJ!c_&rrG-5sv$3pD=Tr0Qjm(~h0pj;*`f`sB&->igv6!r9qC0@p6y#BuRU!mQ}3 zXnwHAUM}9&-hV0eV`Z4MT;a1I5Om+Ia0a-CLunkahDu$t}fTR$Y4Pv6g>3le9VYBmUqKOS7sUOiz>1}{}*Y9@yE zVimMKHMuL<(LPt^NTsTzL2TptmFmimJL}ueg?D5^hNNamEjt4!iqI$L^jR2jHCoIt zEujbq6_;g&m~FE=aLDpf)xP(}_7vv8S*zikYpwP{BNHtJUh^Gx?&3amEs`}) zsyb$nWpB=_vY)AmCmqReP5y9_eQwlSS13G=BM+w*$HUHgrXa2g?_BNAQs3I$>^&Rs z?9v!lA~v5Xq57RPUdqqoPIV}j^zH})_Gds8?jDZNQH9OYRkS$i#znPbk zHbKINyaBj9flqFk@)k5&dl zPxtI2&WCsYYIeOOCd~r7;6DDRWFyZ$w$!Fskd7W#OY;5;=8(&V(0)no)GWPAt0h6+ z^YtTRP72>yaIoI~o+nNb?4RMvv+fe$w zPvzKxqR)LDtnn?2stUj0qUT?yD7{fIiFvZT-E^6z78)4zpt)7gFVrlyuspl9zP4F= ze?kw#9A}iXW!!U+8xqy-)F_O7b$#pGw}`jF`u<;uyC7l36K;eYqLjB~$!IBvr^AkX zbDERD=|8Giq+JmRO=wcbEeEYo`f)$-?D;Zxte$a^BKOkFx`^@S64(kO6qgA2Mr(iMtrc~J+V za5O)^)8y!CD8j&{c0FanQEoTcT;Qsi?(o=u)lM7tLY+Sq(6_CYVXA!{#Qk%oS>RjY gC);EGl}R5rhGo@$plRUgYoD(>>457k=ipQS2KYUkRR910 literal 0 HcmV?d00001 diff --git a/kafka-plugins-0.10/pom.xml b/kafka-plugins-0.10/pom.xml new file mode 100644 index 0000000..a579aa8 --- /dev/null +++ b/kafka-plugins-0.10/pom.xml @@ -0,0 +1,172 @@ + + + + kafka-plugins + co.cask.hydrator + 1.8.2-SNAPSHOT + + 4.0.0 + + Apache Kafka 0.10 plugins + kafka-plugins + 1.8.2-SNAPSHOT-0.10.2.0 + + + + org.apache.kafka + kafka_2.11 + ${kafka10.version} + + + org.slf4j + slf4j-log4j12 + + + + + org.apache.spark + spark-streaming-kafka-0-10_2.11 + ${spark2.version} + + + org.apache.kafka + kafka_2.11 + + + org.apache.spark + spark-tags_2.11 + + + + + org.apache.spark + spark-mllib_2.11 + ${spark2.version} + provided + + + org.apache.xbean + xbean-asm5-shaded + + + + + org.apache.spark + spark-streaming_2.11 + ${spark2.version} + provided + + + org.apache.spark + spark-core_2.11 + ${spark2.version} + provided + + + org.slf4j + slf4j-log4j12 + + + log4j + log4j + + + org.apache.hadoop + hadoop-client + + + com.esotericsoftware.reflectasm + reflectasm + + + org.apache.curator + curator-recipes + + + org.tachyonproject + tachyon-client + + + org.scala-lang + scala-compiler + + + org.eclipse.jetty.orbit + javax.servlet + + + + net.java.dev.jets3t + jets3t + + + org.apache.xbean + xbean-asm5-shaded + + + + + co.cask.cdap + cdap-spark-core2_2.11 + ${cdap.version} + test + + + co.cask.cdap + cdap-data-pipeline2_2.11 + ${cdap.version} + test + + + co.cask.cdap + cdap-data-streams2_2.11 + ${cdap.version} + test + + + + + + + org.apache.felix + maven-bundle-plugin + 3.3.0 + + + <_exportcontents>co.cask.hydrator.plugin.*;org.apache.spark.streaming.kafka010.*; + org.apache.kafka.*;com.google.common.base.*; + *;inline=false;scope=compile + true + lib + + + + + co.cask + cdap-maven-plugin + 1.0.0 + + + system:cdap-data-pipeline[4.3.0-SNAPSHOT,6.0.0-SNAPSHOT) + system:cdap-data-streams[4.3.0-SNAPSHOT,6.0.0-SNAPSHOT) + + + + + create-artifact-config + prepare-package + + create-plugin-json + + + + + + + + diff --git a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/alertpublisher/KafkaAlertPublisher.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/alertpublisher/KafkaAlertPublisher.java new file mode 100644 index 0000000..8872014 --- /dev/null +++ b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/alertpublisher/KafkaAlertPublisher.java @@ -0,0 +1,174 @@ +/* + * Copyright © 2018 Cask Data, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package co.cask.hydrator.plugin.alertpublisher; + +import co.cask.cdap.api.annotation.Description; +import co.cask.cdap.api.annotation.Macro; +import co.cask.cdap.api.annotation.Name; +import co.cask.cdap.api.annotation.Plugin; +import co.cask.cdap.api.dataset.lib.KeyValue; +import co.cask.cdap.api.plugin.PluginConfig; +import co.cask.cdap.etl.api.Alert; +import co.cask.cdap.etl.api.AlertPublisher; +import co.cask.cdap.etl.api.AlertPublisherContext; +import co.cask.cdap.etl.api.PipelineConfigurer; +import co.cask.hydrator.common.KeyValueListParser; +import com.google.common.base.Strings; +import com.google.gson.Gson; +import kafka.common.Topic; +import org.apache.kafka.clients.producer.KafkaProducer; +import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.errors.InvalidTopicException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.Properties; +import javax.annotation.Nullable; + +/** + * Kafka Alert Publisher + */ +@Plugin(type = AlertPublisher.PLUGIN_TYPE) +@Name("KafkaAlerts") +public class KafkaAlertPublisher extends AlertPublisher { + private static final Logger LOG = LoggerFactory.getLogger(KafkaAlertPublisher.class); + private static final Gson GSON = new Gson(); + private final Config config; + + private KafkaProducer producer; + + public KafkaAlertPublisher(Config config) { + this.config = config; + } + + @Override + public void configurePipeline(PipelineConfigurer pipelineConfigurer) throws IllegalArgumentException { + config.validate(); + } + + @Override + public void initialize(AlertPublisherContext context) throws Exception { + super.initialize(context); + config.validate(); + Properties props = new Properties(); + // Add client id property with stage name as value. + props.put(ProducerConfig.CLIENT_ID_CONFIG, context.getStageName()); + props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, config.brokers); + props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer"); + props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer"); + props.put("producer.type", "sync"); + + // Override any property set above with user specified producer properties + for (Map.Entry producerProperty : config.getProducerProperties().entrySet()) { + props.put(producerProperty.getKey(), producerProperty.getValue()); + } + + this.producer = new KafkaProducer<>(props); + } + + @Override + public void publish(Iterator iterator) throws Exception { + while (iterator.hasNext()) { + String alert = GSON.toJson(iterator.next()); + try { + // We do not specify key here. So the topic partitions will be chosen in round robin fashion. 
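+        // (With a null key, the 0.10 producer's default partitioner cycles through the
+        // topic's available partitions, spreading alerts roughly evenly. To route all
+        // alerts to one partition instead, a key could be supplied, e.g.
+        // new ProducerRecord<>(config.topic, "alerts", alert) -- a hypothetical variant,
+        // not what this plugin does.)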
+        ProducerRecord<String, String> record = new ProducerRecord<>(config.topic, alert);
+        producer.send(record);
+      } catch (Exception e) {
+        // Catch the exception and continue processing the rest of the alerts.
+        LOG.error("Exception while emitting alert {}", alert, e);
+      }
+
+    }
+  }
+
+  @Override
+  public void destroy() {
+    super.destroy();
+    producer.close();
+  }
+
+  /**
+   * Kafka Producer Configuration.
+   */
+  public static class Config extends PluginConfig {
+
+    @Name("brokers")
+    @Description("Specifies the connection string where the producer can find one or more brokers to " +
+      "determine the leader for each topic.")
+    @Macro
+    private String brokers;
+
+    @Name("topic")
+    @Description("Topic to which messages need to be published. The topic should already exist on Kafka.")
+    @Macro
+    private String topic;
+
+    @Name("producerProperties")
+    @Nullable
+    @Description("Additional Kafka producer properties to set.")
+    private String producerProperties;
+
+    @Description("The Kerberos principal used by this plugin when Kerberos security is enabled for Kafka.")
+    @Macro
+    @Nullable
+    private String principal;
+
+    @Description("The keytab location for the Kerberos principal when Kerberos security is enabled for Kafka.")
+    @Macro
+    @Nullable
+    private String keytabLocation;
+
+    public Config(String brokers, String topic, String producerProperties) {
+      this.brokers = brokers;
+      this.topic = topic;
+      this.producerProperties = producerProperties;
+    }
+
+    private Map<String, String> getProducerProperties() {
+      KeyValueListParser kvParser = new KeyValueListParser("\\s*,\\s*", ":");
+      Map<String, String> producerProps = new HashMap<>();
+      if (!Strings.isNullOrEmpty(producerProperties)) {
+        for (KeyValue<String, String> keyVal : kvParser.parse(producerProperties)) {
+          String key = keyVal.getKey();
+          String val = keyVal.getValue();
+          producerProps.put(key, val);
+        }
+      }
+      return producerProps;
+    }
+
+    private void validate() {
+      // If the topic or brokers are macros, they will not be available at configure time,
+      // so do not perform validation yet.
+      if (Strings.isNullOrEmpty(topic) || Strings.isNullOrEmpty(brokers)) {
+        return;
+      }
+
+      try {
+        Topic.validate(topic);
+      } catch (InvalidTopicException e) {
+        throw new IllegalArgumentException(String.format("Topic name %s is not a valid Kafka topic. Please provide " +
+                                                           "a valid Kafka topic name. 
%s", topic, e.getMessage())); + } + } + } +} diff --git a/kafka-plugins-0.9/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaBatchSource.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaBatchSource.java similarity index 96% rename from kafka-plugins-0.9/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaBatchSource.java rename to kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaBatchSource.java index 5855fa7..d03b018 100644 --- a/kafka-plugins-0.9/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaBatchSource.java +++ b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaBatchSource.java @@ -41,6 +41,7 @@ import co.cask.hydrator.common.ReferencePluginConfig; import co.cask.hydrator.common.SourceInputFormatProvider; import co.cask.hydrator.common.batch.JobUtils; +import co.cask.hydrator.plugin.common.KafkaHelpers; import com.google.common.base.Joiner; import com.google.common.base.Splitter; import com.google.common.base.Strings; @@ -422,18 +423,13 @@ public void prepareRun(BatchSourceContext context) throws Exception { context.createDataset(tableName, KeyValueTable.class.getName(), DatasetProperties.EMPTY); } table = context.getDataset(tableName); + Map kafkaConf = new HashMap<>(); kafkaConf.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, config.getBrokers()); + // We save offsets in datasets, no need for Kafka to save them + kafkaConf.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"); + KafkaHelpers.setupKerberosLogin(kafkaConf, config.getPrincipal(), config.getKeytabLocation()); kafkaConf.putAll(config.getKafkaProperties()); - if (config.getKeytabLocation() != null && config.getPrincipal() != null) { - kafkaConf.put("sasl.jaas.config", String.format("com.sun.security.auth.module.Krb5LoginModule required \n" + - " useKeyTab=true \n" + - " storeKey=true \n" + - " useTicketCache=false \n" + - " keyTab=\"%s\" \n" + - " principal=\"%s\";", config.getKeytabLocation(), - config.getPrincipal())); - } kafkaRequests = KafkaInputFormat.saveKafkaRequests(conf, config.getTopic(), kafkaConf, config.getPartitions(), config.getInitialPartitionOffsets(), config.getMaxNumberRecords(), table); @@ -455,6 +451,7 @@ public void onRunFinish(boolean succeeded, BatchSourceContext context) { @Override public void initialize(BatchRuntimeContext context) throws Exception { super.initialize(context); + schema = config.getSchema(); Schema messageSchema = config.getMessageSchema(); for (Schema.Field field : schema.getFields()) { diff --git a/kafka-plugins-0.9/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaInputFormat.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaInputFormat.java similarity index 76% rename from kafka-plugins-0.9/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaInputFormat.java rename to kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaInputFormat.java index aaa1e39..1aa4265 100644 --- a/kafka-plugins-0.9/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaInputFormat.java +++ b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaInputFormat.java @@ -18,6 +18,7 @@ import co.cask.cdap.api.common.Bytes; import co.cask.cdap.api.dataset.lib.KeyValueTable; +import co.cask.hydrator.plugin.common.KafkaHelpers; import com.google.common.base.Function; import com.google.common.base.Predicate; import com.google.common.collect.Collections2; @@ -40,11 +41,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import 
java.io.IOException; import java.lang.reflect.Type; import java.util.ArrayList; import java.util.Collection; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Properties; @@ -61,14 +60,13 @@ public class KafkaInputFormat extends InputFormat { private static final Type LIST_TYPE = new TypeToken>() { }.getType(); @Override - public RecordReader createRecordReader(InputSplit split, TaskAttemptContext context) - throws IOException, InterruptedException { + public RecordReader createRecordReader(InputSplit split, TaskAttemptContext context) { return new KafkaRecordReader(); } @Override - public List getSplits(JobContext context) throws IOException, InterruptedException { + public List getSplits(JobContext context) { Gson gson = new Gson(); List finalRequests = gson.fromJson(context.getConfiguration().get(KAFKA_REQUEST), LIST_TYPE); List kafkaSplits = new ArrayList<>(); @@ -84,12 +82,13 @@ public List getSplits(JobContext context) throws IOException, Interr static List saveKafkaRequests(Configuration conf, String topic, Map kafkaConf, final Set partitions, Map initOffsets, - long maxNumberRecords, KeyValueTable table) throws Exception { + long maxNumberRecords, KeyValueTable table) { Properties properties = new Properties(); properties.putAll(kafkaConf); - try (Consumer consumer = new KafkaConsumer<>(properties, new ByteArrayDeserializer(), new ByteArrayDeserializer())) { + try (Consumer consumer = + new KafkaConsumer<>(properties, new ByteArrayDeserializer(), new ByteArrayDeserializer())) { // Get Metadata for all topics - @SuppressWarnings("unchecked") List partitionInfos = consumer.partitionsFor(topic); + List partitionInfos = consumer.partitionsFor(topic); if (!partitions.isEmpty()) { Collection filteredPartitionInfos = Collections2.filter(partitionInfos, @@ -111,7 +110,8 @@ public boolean apply(PartitionInfo input) { } } - private static List createKafkaRequests(Consumer consumer, Map kafkaConf, + private static List createKafkaRequests(Consumer consumer, + Map kafkaConf, List partitionInfos, Map offsets, long maxNumberRecords, KeyValueTable table) { @@ -123,8 +123,8 @@ public TopicPartition apply(PartitionInfo input) { return new TopicPartition(input.topic(), input.partition()); } }); - Map latestOffsets = getLatestOffsets(consumer, topicPartitions); - Map earliestOffsets = getEarliestOffsets(consumer, topicPartitions); + Map latestOffsets = KafkaHelpers.getLatestOffsets(consumer, topicPartitions); + Map earliestOffsets = KafkaHelpers.getEarliestOffsets(consumer, topicPartitions); List requests = new ArrayList<>(); for (PartitionInfo partitionInfo : partitionInfos) { @@ -157,34 +157,4 @@ public TopicPartition apply(PartitionInfo input) { } return requests; } - - private static Map getLatestOffsets(Consumer consumer, - List topicAndPartitions) { - consumer.assign(topicAndPartitions); - for (TopicPartition topicPartition : topicAndPartitions) { - consumer.seekToEnd(topicPartition); - } - - Map offsets = new HashMap<>(); - for (TopicPartition topicAndPartition : topicAndPartitions) { - long offset = consumer.position(topicAndPartition); - offsets.put(topicAndPartition, offset); - } - return offsets; - } - - private static Map getEarliestOffsets(Consumer consumer, - List topicAndPartitions) { - consumer.assign(topicAndPartitions); - for (TopicPartition topicPartition : topicAndPartitions) { - consumer.seekToBeginning(topicPartition); - } - - Map offsets = new HashMap<>(); - for (TopicPartition topicAndPartition : topicAndPartitions) { - long offset = 
consumer.position(topicAndPartition); - offsets.put(topicAndPartition, offset); - } - return offsets; - } } diff --git a/kafka-plugins-0.9/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaKey.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaKey.java similarity index 100% rename from kafka-plugins-0.9/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaKey.java rename to kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaKey.java diff --git a/kafka-plugins-0.9/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaMessage.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaMessage.java similarity index 100% rename from kafka-plugins-0.9/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaMessage.java rename to kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaMessage.java diff --git a/kafka-plugins-0.9/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaReader.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaReader.java similarity index 100% rename from kafka-plugins-0.9/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaReader.java rename to kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaReader.java diff --git a/kafka-plugins-0.9/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaRecordReader.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaRecordReader.java similarity index 100% rename from kafka-plugins-0.9/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaRecordReader.java rename to kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaRecordReader.java diff --git a/kafka-plugins-0.9/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaRequest.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaRequest.java similarity index 100% rename from kafka-plugins-0.9/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaRequest.java rename to kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaRequest.java diff --git a/kafka-plugins-0.9/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaSplit.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaSplit.java similarity index 100% rename from kafka-plugins-0.9/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaSplit.java rename to kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaSplit.java diff --git a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/common/KafkaHelpers.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/common/KafkaHelpers.java new file mode 100644 index 0000000..dda5173 --- /dev/null +++ b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/common/KafkaHelpers.java @@ -0,0 +1,105 @@ +/* + * Copyright © 2018 Cask Data, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */
+
+package co.cask.hydrator.plugin.common;
+
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.common.TopicPartition;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import javax.annotation.Nullable;
+
+/**
+ * Utility class for Kafka operations.
+ */
+public final class KafkaHelpers {
+  private static final Logger LOG = LoggerFactory.getLogger(KafkaHelpers.class);
+
+  // This class cannot be instantiated.
+  private KafkaHelpers() {
+  }
+
+  /**
+   * Fetches the latest offsets for the given topic-partitions.
+   *
+   * @param consumer the Kafka consumer
+   * @param topicAndPartitions topic-partitions to fetch the offsets for
+   * @return mapping of topic-partition to its latest offset
+   */
+  public static Map<TopicPartition, Long> getLatestOffsets(Consumer<?, ?> consumer,
+                                                           List<TopicPartition> topicAndPartitions) {
+    consumer.assign(topicAndPartitions);
+    consumer.seekToEnd(topicAndPartitions);
+
+    Map<TopicPartition, Long> offsets = new HashMap<>();
+    for (TopicPartition topicAndPartition : topicAndPartitions) {
+      long offset = consumer.position(topicAndPartition);
+      offsets.put(topicAndPartition, offset);
+    }
+    return offsets;
+  }
+
+  /**
+   * Fetches the earliest offsets for the given topic-partitions.
+   *
+   * @param consumer the Kafka consumer
+   * @param topicAndPartitions topic-partitions to fetch the offsets for
+   * @return mapping of topic-partition to its earliest offset
+   */
+  public static Map<TopicPartition, Long> getEarliestOffsets(Consumer<?, ?> consumer,
+                                                             List<TopicPartition> topicAndPartitions) {
+    consumer.assign(topicAndPartitions);
+    consumer.seekToBeginning(topicAndPartitions);
+
+    Map<TopicPartition, Long> offsets = new HashMap<>();
+    for (TopicPartition topicAndPartition : topicAndPartitions) {
+      long offset = consumer.position(topicAndPartition);
+      offsets.put(topicAndPartition, offset);
+    }
+    return offsets;
+  }
+
+  /**
+   * Adds the JAAS conf to the Kafka configuration object for Kafka client login, if needed.
+   * The JAAS conf is not added if either the principal or the keytab is null.
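+   * For example, a principal of "user/host@EXAMPLE.COM" with a keytab of
+   * "/etc/security/user.keytab" (hypothetical values) yields a "sasl.jaas.config" entry that
+   * makes the Kafka client log in through Krb5LoginModule using that keytab rather than a
+   * ticket cache.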
+ * + * @param conf Kafka configuration object to add the JAAS conf to + * @param principal Kerberos principal + * @param keytabLocation Kerberos keytab for the principal + */ + public static void setupKerberosLogin(Map conf, @Nullable String principal, + @Nullable String keytabLocation) { + if (principal != null && keytabLocation != null) { + LOG.debug("Adding Kerberos login conf to Kafka for principal {} and keytab {}", + principal, keytabLocation); + conf.put("sasl.jaas.config", String.format("com.sun.security.auth.module.Krb5LoginModule required \n" + + " useKeyTab=true \n" + + " storeKey=true \n" + + " useTicketCache=false \n" + + " renewTicket=true \n" + + " keyTab=\"%s\" \n" + + " principal=\"%s\";", + keytabLocation, principal)); + } else { + LOG.debug("Not adding Kerberos login conf to Kafka since either the principal {} or the keytab {} is null", + principal, keytabLocation); + } + } +} diff --git a/kafka-plugins-0.9/src/main/java/co/cask/hydrator/plugin/sink/Kafka.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/Kafka.java similarity index 89% rename from kafka-plugins-0.9/src/main/java/co/cask/hydrator/plugin/sink/Kafka.java rename to kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/Kafka.java index db14de8..ceded55 100644 --- a/kafka-plugins-0.9/src/main/java/co/cask/hydrator/plugin/sink/Kafka.java +++ b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/Kafka.java @@ -33,10 +33,13 @@ import co.cask.hydrator.common.KeyValueListParser; import co.cask.hydrator.common.ReferenceBatchSink; import co.cask.hydrator.common.ReferencePluginConfig; +import co.cask.hydrator.plugin.common.KafkaHelpers; import com.google.common.base.Strings; import com.google.common.collect.Lists; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.io.Text; +import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.common.serialization.StringSerializer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -60,7 +63,6 @@ public class Kafka extends ReferenceBatchSink { private final KafkaOutputFormatProvider kafkaOutputFormatProvider; // Static constants for configuring Kafka producer. - private static final String BROKER_LIST = "bootstrap.servers"; private static final String ACKS_REQUIRED = "acks"; public Kafka(Config producerConfig) { @@ -169,7 +171,7 @@ public static class Config extends ReferencePluginConfig { private String keytabLocation; @Name("compressionType") - @Description("Additional kafka producer properties to set") + @Description("Compression type to be applied on message") @Macro private String compressionType; @@ -193,21 +195,13 @@ private static class KafkaOutputFormatProvider implements OutputFormatProvider { this.conf = new HashMap<>(); conf.put("topic", kafkaSinkConfig.topic); - conf.put(BROKER_LIST, kafkaSinkConfig.brokers); + conf.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaSinkConfig.brokers); conf.put("compression.type", kafkaSinkConfig.compressionType); + conf.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName()); + conf.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName()); - + KafkaHelpers.setupKerberosLogin(conf, kafkaSinkConfig.principal, kafkaSinkConfig.keytabLocation); addKafkaProperties(kafkaSinkConfig.kafkaProperties); - if (kafkaSinkConfig.principal != null && kafkaSinkConfig.keytabLocation != null) { - conf.put("additional." 
+ "sasl.jaas.config", - String.format("com.sun.security.auth.module.Krb5LoginModule required \n" + - " useKeyTab=true \n" + - " storeKey=true \n" + - " useTicketCache=false \n" + - " keyTab=\"%s\" \n" + - " principal=\"%s\";", kafkaSinkConfig.keytabLocation, - kafkaSinkConfig.principal)); - } conf.put("async", kafkaSinkConfig.async); if (kafkaSinkConfig.async.equalsIgnoreCase("true")) { diff --git a/kafka-plugins-0.9/src/main/java/co/cask/hydrator/plugin/sink/KafkaOutputFormat.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/KafkaOutputFormat.java similarity index 88% rename from kafka-plugins-0.9/src/main/java/co/cask/hydrator/plugin/sink/KafkaOutputFormat.java rename to kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/KafkaOutputFormat.java index 7dc52d6..f3a1e42 100644 --- a/kafka-plugins-0.9/src/main/java/co/cask/hydrator/plugin/sink/KafkaOutputFormat.java +++ b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/KafkaOutputFormat.java @@ -26,7 +26,6 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.kafka.clients.producer.KafkaProducer; import org.apache.kafka.clients.producer.ProducerConfig; -import org.apache.kafka.common.serialization.StringSerializer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -40,13 +39,8 @@ public class KafkaOutputFormat extends OutputFormat { private static final Logger LOG = LoggerFactory.getLogger(KafkaOutputFormat.class); - // Static constants for configuring Kafka producer. - private static final String BROKER_LIST = "bootstrap.servers"; - private static final String KEY_SERIALIZER = "key.serializer"; - private static final String VAL_SERIALIZER = "value.serializer"; private KafkaProducer producer; - @Override public void checkOutputSpecs(JobContext jobContext) throws IOException, InterruptedException { } @@ -92,7 +86,12 @@ public RecordWriter getRecordWriter(TaskAttemptContext context) Properties props = new Properties(); // Configure the properties for kafka. 
- props.put(BROKER_LIST, configuration.get(BROKER_LIST)); + props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, + configuration.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)); + props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, + configuration.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)); + props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, + configuration.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)); props.put("compression.type", configuration.get("compression.type")); if (!Strings.isNullOrEmpty(configuration.get("hasKey"))) { @@ -117,8 +116,7 @@ public RecordWriter getRecordWriter(TaskAttemptContext context) // CDAP-9178: cached the producer object to avoid being created on every batch interval if (producer == null) { - producer = new org.apache.kafka.clients.producer.KafkaProducer<>(props, new StringSerializer(), - new StringSerializer()); + producer = new org.apache.kafka.clients.producer.KafkaProducer<>(props); } return new KafkaRecordWriter(producer, topic); diff --git a/kafka-plugins-0.9/src/main/java/co/cask/hydrator/plugin/sink/KafkaRecordWriter.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/KafkaRecordWriter.java similarity index 100% rename from kafka-plugins-0.9/src/main/java/co/cask/hydrator/plugin/sink/KafkaRecordWriter.java rename to kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/KafkaRecordWriter.java diff --git a/kafka-plugins-0.9/src/main/java/co/cask/hydrator/plugin/sink/StringPartitioner.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/StringPartitioner.java similarity index 100% rename from kafka-plugins-0.9/src/main/java/co/cask/hydrator/plugin/sink/StringPartitioner.java rename to kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/StringPartitioner.java diff --git a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/source/KafkaConfig.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/source/KafkaConfig.java new file mode 100644 index 0000000..9f38083 --- /dev/null +++ b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/source/KafkaConfig.java @@ -0,0 +1,430 @@ +/* + * Copyright © 2018 Cask Data, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package co.cask.hydrator.plugin.source; + +import co.cask.cdap.api.annotation.Description; +import co.cask.cdap.api.annotation.Macro; +import co.cask.cdap.api.data.format.FormatSpecification; +import co.cask.cdap.api.data.schema.Schema; +import co.cask.cdap.api.dataset.lib.KeyValue; +import co.cask.cdap.format.RecordFormats; +import co.cask.hydrator.common.KeyValueListParser; +import co.cask.hydrator.common.ReferencePluginConfig; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Joiner; +import com.google.common.base.Splitter; +import com.google.common.base.Strings; +import org.apache.kafka.common.TopicPartition; + +import java.io.IOException; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import javax.annotation.Nullable; + +/** + * Conf for Kafka streaming source. + */ +@SuppressWarnings("unused") +public class KafkaConfig extends ReferencePluginConfig implements Serializable { + + private static final long serialVersionUID = 8069169417140954175L; + + @Description("List of Kafka brokers specified in host1:port1,host2:port2 form. For example, " + + "host1.example.com:9092,host2.example.com:9092.") + @Macro + private String brokers; + + @Description("Kafka topic to read from.") + @Macro + private String topic; + + @Description("The topic partitions to read from. If not specified, all partitions will be read.") + @Nullable + @Macro + private String partitions; + + @Description("The initial offset for each topic partition. If this is not specified, " + + "all partitions will have the same initial offset, which is determined by the defaultInitialOffset property. " + + "An offset of -2 means the smallest offset. An offset of -1 means the latest offset. " + + "Offsets are inclusive. If an offset of 5 is used, the message at offset 5 will be read.") + @Nullable + @Macro + private String initialPartitionOffsets; + + @Description("The default initial offset for all topic partitions. " + + "An offset of -2 means the smallest offset. An offset of -1 means the latest offset. Defaults to -1. " + + "Offsets are inclusive. If an offset of 5 is used, the message at offset 5 will be read. " + + "If you wish to set different initial offsets for different partitions, use the initialPartitionOffsets property.") + @Nullable + @Macro + private Long defaultInitialOffset; + + @Description("Output schema of the source, including the timeField and keyField. " + + "The fields excluding the timeField and keyField are used in conjunction with the format " + + "to parse Kafka payloads.") + private String schema; + + @Description("Optional format of the Kafka event. Any format supported by CDAP is supported. " + + "For example, a value of 'csv' will attempt to parse Kafka payloads as comma-separated values. " + + "If no format is given, Kafka message payloads will be treated as bytes.") + @Nullable + private String format; + + @Description("Optional name of the field containing the read time of the batch. " + + "If this is not set, no time field will be added to output records. " + + "If set, this field must be present in the schema property and must be a long.") + @Nullable + private String timeField; + + @Description("Optional name of the field containing the message key. " + + "If this is not set, no key field will be added to output records. 
" + + "If set, this field must be present in the schema property and must be bytes.") + @Nullable + private String keyField; + + @Description("Optional name of the field containing the kafka partition that was read from. " + + "If this is not set, no partition field will be added to output records. " + + "If set, this field must be present in the schema property and must be an integer.") + @Nullable + private String partitionField; + + @Description("Optional name of the field containing the kafka offset that the message was read from. " + + "If this is not set, no offset field will be added to output records. " + + "If set, this field must be present in the schema property and must be a long.") + @Nullable + private String offsetField; + + @Description("Max number of records to read per second per partition. 0 means there is no limit. Defaults to 1000.") + @Nullable + private Integer maxRatePerPartition; + + @Description("Additional kafka consumer properties to set.") + @Macro + @Nullable + private String kafkaProperties; + + @Description("The kerberos principal used for the source when kerberos security is enabled for kafka.") + @Macro + @Nullable + private String principal; + + @Description("The keytab location for the kerberos principal when kerberos security is enabled for kafka.") + @Macro + @Nullable + private String keytabLocation; + + public KafkaConfig() { + super(""); + defaultInitialOffset = -1L; + maxRatePerPartition = 1000; + } + + @VisibleForTesting + public KafkaConfig(String referenceName, String brokers, String topic, String schema, String format, + String timeField, String keyField, String partitionField, String offsetField) { + this(referenceName, brokers, topic, null, null, null, schema, format, + timeField, keyField, partitionField, offsetField); + } + + public KafkaConfig(String referenceName, String brokers, String topic, String partitions, + String initialPartitionOffsets, Long defaultInitialOffset, String schema, String format, + String timeField, String keyField, String partitionField, String offsetField) { + super(referenceName); + this.brokers = brokers; + this.topic = topic; + this.partitions = partitions; + this.initialPartitionOffsets = initialPartitionOffsets; + this.defaultInitialOffset = defaultInitialOffset; + this.schema = schema; + this.format = format; + this.timeField = timeField; + this.keyField = keyField; + this.partitionField = partitionField; + this.offsetField = offsetField; + } + + public String getTopic() { + return topic; + } + + public String getBrokers() { + return brokers; + } + + @Nullable + public String getTimeField() { + return Strings.isNullOrEmpty(timeField) ? null : timeField; + } + + @Nullable + public String getKeyField() { + return Strings.isNullOrEmpty(keyField) ? null : keyField; + } + + @Nullable + public String getPartitionField() { + return Strings.isNullOrEmpty(partitionField) ? null : partitionField; + } + + @Nullable + public String getOffsetField() { + return Strings.isNullOrEmpty(offsetField) ? null : offsetField; + } + + @Nullable + public String getFormat() { + return Strings.isNullOrEmpty(format) ? null : format; + } + + @Nullable + public Integer getMaxRatePerPartition() { + return maxRatePerPartition; + } + + public Schema getSchema() { + try { + return Strings.isNullOrEmpty(schema) ? null : Schema.parseJson(schema); + } catch (IOException e) { + throw new IllegalArgumentException("Unable to parse schema: " + e.getMessage()); + } + } + + // gets the message schema from the schema field. 
If the time, key, partition, or offset fields are in the configured
+  // schema, they will be removed.
+  public Schema getMessageSchema() {
+    Schema schema = getSchema();
+    List<Schema.Field> messageFields = new ArrayList<>();
+    boolean timeFieldExists = false;
+    boolean keyFieldExists = false;
+    boolean partitionFieldExists = false;
+    boolean offsetFieldExists = false;
+
+    for (Schema.Field field : schema.getFields()) {
+      String fieldName = field.getName();
+      Schema fieldSchema = field.getSchema();
+      Schema.Type fieldType = fieldSchema.isNullable() ? fieldSchema.getNonNullable().getType() : fieldSchema.getType();
+      // if the field is not the time, key, partition, or offset field, it is a message field.
+      if (fieldName.equals(timeField)) {
+        if (fieldType != Schema.Type.LONG) {
+          throw new IllegalArgumentException("The time field must be of type long or nullable long.");
+        }
+        timeFieldExists = true;
+      } else if (fieldName.equals(keyField)) {
+        if (fieldType != Schema.Type.BYTES) {
+          throw new IllegalArgumentException("The key field must be of type bytes or nullable bytes.");
+        }
+        keyFieldExists = true;
+      } else if (fieldName.equals(partitionField)) {
+        if (fieldType != Schema.Type.INT) {
+          throw new IllegalArgumentException("The partition field must be of type int.");
+        }
+        partitionFieldExists = true;
+      } else if (fieldName.equals(offsetField)) {
+        if (fieldType != Schema.Type.LONG) {
+          throw new IllegalArgumentException("The offset field must be of type long.");
+        }
+        offsetFieldExists = true;
+      } else {
+        messageFields.add(field);
+      }
+    }
+    if (messageFields.isEmpty()) {
+      throw new IllegalArgumentException(
+        "Schema must contain at least one field besides the time, key, partition, and offset fields.");
+    }
+
+    if (getTimeField() != null && !timeFieldExists) {
+      throw new IllegalArgumentException(String.format(
+        "timeField '%s' does not exist in the schema. Please add it to the schema.", timeField));
+    }
+    if (getKeyField() != null && !keyFieldExists) {
+      throw new IllegalArgumentException(String.format(
+        "keyField '%s' does not exist in the schema. Please add it to the schema.", keyField));
+    }
+    if (getPartitionField() != null && !partitionFieldExists) {
+      throw new IllegalArgumentException(String.format(
+        "partitionField '%s' does not exist in the schema. Please add it to the schema.", partitionField));
+    }
+    if (getOffsetField() != null && !offsetFieldExists) {
+      throw new IllegalArgumentException(String.format(
+        "offsetField '%s' does not exist in the schema. Please add it to the schema.", offsetField));
+    }
+    return Schema.recordOf("kafka.message", messageFields);
+  }
+
+  /**
+   * Get the initial partition offsets for the specified partitions. If an initial offset is specified in the
+   * initialPartitionOffsets property, that value will be used. Otherwise, the defaultInitialOffset will be used.
+   *
+   * @param partitionsToRead the partitions to read
+   * @return initial partition offsets.
+   */
+  public Map<TopicPartition, Long> getInitialPartitionOffsets(Set<Integer> partitionsToRead) {
+    Map<TopicPartition, Long> partitionOffsets = new HashMap<>();
+
+    // set default initial partitions
+    for (Integer partition : partitionsToRead) {
+      partitionOffsets.put(new TopicPartition(topic, partition), defaultInitialOffset);
+    }
+
+    // if initial partition offsets are specified, overwrite the defaults.
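+    // Each entry is parsed as a partition:offset pair; a malformed partition or offset fails fast below.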
+    if (initialPartitionOffsets != null) {
+      for (KeyValue<String, String> partitionAndOffset : KeyValueListParser.DEFAULT.parse(initialPartitionOffsets)) {
+        String partitionStr = partitionAndOffset.getKey();
+        String offsetStr = partitionAndOffset.getValue();
+        int partition;
+        try {
+          partition = Integer.parseInt(partitionStr);
+        } catch (NumberFormatException e) {
+          throw new IllegalArgumentException(String.format(
+            "Invalid partition '%s' in initialPartitionOffsets.", partitionStr));
+        }
+        long offset;
+        try {
+          offset = Long.parseLong(offsetStr);
+        } catch (NumberFormatException e) {
+          throw new IllegalArgumentException(String.format(
+            "Invalid offset '%s' in initialPartitionOffsets for partition %d.", offsetStr, partition));
+        }
+        partitionOffsets.put(new TopicPartition(topic, partition), offset);
+      }
+    }
+
+    return partitionOffsets;
+  }
+
+  /**
+   * @return broker host to broker port mapping.
+   */
+  public Map<String, Integer> getBrokerMap() {
+    Map<String, Integer> brokerMap = new HashMap<>();
+    for (KeyValue<String, String> hostAndPort : KeyValueListParser.DEFAULT.parse(brokers)) {
+      String host = hostAndPort.getKey();
+      String portStr = hostAndPort.getValue();
+      try {
+        brokerMap.put(host, Integer.parseInt(portStr));
+      } catch (NumberFormatException e) {
+        throw new IllegalArgumentException(String.format(
+          "Invalid port '%s' for host '%s'.", portStr, host));
+      }
+    }
+    if (brokerMap.isEmpty()) {
+      throw new IllegalArgumentException("Must specify kafka brokers.");
+    }
+    return brokerMap;
+  }
+
+  /**
+   * @return set of partitions to read from. Returns an empty set if no partitions were specified.
+   */
+  public Set<Integer> getPartitions() {
+    Set<Integer> partitionSet = new HashSet<>();
+    if (partitions == null) {
+      return partitionSet;
+    }
+    for (String partition : Splitter.on(',').trimResults().split(partitions)) {
+      try {
+        partitionSet.add(Integer.parseInt(partition));
+      } catch (NumberFormatException e) {
+        throw new IllegalArgumentException(
+          String.format("Invalid partition '%s'. Partitions must be integers.", partition));
+      }
+    }
+    return partitionSet;
+  }
+
+  @Nullable
+  public String getPrincipal() {
+    return principal;
+  }
+
+  @Nullable
+  public String getKeytabLocation() {
+    return keytabLocation;
+  }
+
+  public Map<String, String> getKafkaProperties() {
+    KeyValueListParser kvParser = new KeyValueListParser("\\s*,\\s*", ":");
+    Map<String, String> conf = new HashMap<>();
+    if (!Strings.isNullOrEmpty(kafkaProperties)) {
+      for (KeyValue<String, String> keyVal : kvParser.parse(kafkaProperties)) {
+        conf.put(keyVal.getKey(), keyVal.getValue());
+      }
+    }
+    return conf;
+  }
+
+  public void validate() {
+    // brokers can be null since it is macro enabled.
+    if (brokers != null) {
+      getBrokerMap();
+    }
+    getPartitions();
+    getInitialPartitionOffsets(getPartitions());
+
+    if (maxRatePerPartition == null || maxRatePerPartition < 0) {
+      throw new IllegalArgumentException(String.format("Invalid maxRatePerPartition %d. Rate must be 0 or greater.",
+                                                       maxRatePerPartition));
+    }
+
+    if (!Strings.isNullOrEmpty(timeField) && !Strings.isNullOrEmpty(keyField) && timeField.equals(keyField)) {
+      throw new IllegalArgumentException(String.format(
+        "The timeField and keyField cannot both have the same name (%s).", timeField));
+    }
+
+    Schema messageSchema = getMessageSchema();
+    // if format is empty, there must be just a single message field of type bytes or nullable bytes.
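+    // (in that case the raw Kafka payload is copied into that field without any parsing)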
+    if (Strings.isNullOrEmpty(format)) {
+      List<Schema.Field> messageFields = messageSchema.getFields();
+      if (messageFields.size() > 1) {
+        List<String> fieldNames = new ArrayList<>();
+        for (Schema.Field messageField : messageFields) {
+          fieldNames.add(messageField.getName());
+        }
+        throw new IllegalArgumentException(String.format(
+          "Without a format, the schema must contain just a single message field of type bytes or nullable bytes. " +
+            "Found %s message fields (%s).", messageFields.size(), Joiner.on(',').join(fieldNames)));
+      }
+
+      Schema.Field messageField = messageFields.get(0);
+      Schema messageFieldSchema = messageField.getSchema();
+      Schema.Type messageFieldType = messageFieldSchema.isNullable() ?
+        messageFieldSchema.getNonNullable().getType() : messageFieldSchema.getType();
+      if (messageFieldType != Schema.Type.BYTES) {
+        throw new IllegalArgumentException(String.format(
+          "Without a format, the message field must be of type bytes or nullable bytes, but field %s is of type %s.",
+          messageField.getName(), messageField.getSchema()));
+      }
+    } else {
+      // otherwise, if there is a format, make sure we can instantiate it.
+      FormatSpecification formatSpec = new FormatSpecification(format, messageSchema, new HashMap<String, String>());
+
+      try {
+        RecordFormats.createInitializedFormat(formatSpec);
+      } catch (Exception e) {
+        throw new IllegalArgumentException(String.format(
+          "Unable to instantiate a message parser from format '%s' and message schema '%s': %s",
+          format, messageSchema, e.getMessage()), e);
+      }
+    }
+  }
+}
diff --git a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/source/KafkaStreamingSource.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/source/KafkaStreamingSource.java
new file mode 100644
index 0000000..55d679e
--- /dev/null
+++ b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/source/KafkaStreamingSource.java
@@ -0,0 +1,285 @@
+/*
+ * Copyright © 2018 Cask Data, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package co.cask.hydrator.plugin.source;
+
+import co.cask.cdap.api.annotation.Description;
+import co.cask.cdap.api.annotation.Name;
+import co.cask.cdap.api.annotation.Plugin;
+import co.cask.cdap.api.data.format.FormatSpecification;
+import co.cask.cdap.api.data.format.RecordFormat;
+import co.cask.cdap.api.data.format.StructuredRecord;
+import co.cask.cdap.api.data.schema.Schema;
+import co.cask.cdap.api.flow.flowlet.StreamEvent;
+import co.cask.cdap.etl.api.PipelineConfigurer;
+import co.cask.cdap.etl.api.streaming.StreamingContext;
+import co.cask.cdap.etl.api.streaming.StreamingSource;
+import co.cask.cdap.format.RecordFormats;
+import co.cask.hydrator.plugin.common.KafkaHelpers;
+import com.google.common.base.Joiner;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Sets;
+import kafka.api.OffsetRequest;
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.PartitionInfo;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.serialization.ByteArrayDeserializer;
+import org.apache.spark.api.java.JavaRDD;
+import org.apache.spark.api.java.function.Function;
+import org.apache.spark.api.java.function.Function2;
+import org.apache.spark.streaming.Time;
+import org.apache.spark.streaming.api.java.JavaDStream;
+import org.apache.spark.streaming.kafka010.ConsumerStrategies;
+import org.apache.spark.streaming.kafka010.KafkaUtils;
+import org.apache.spark.streaming.kafka010.LocationStrategies;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+
+/**
+ * Kafka Streaming source.
+ */
+@Plugin(type = StreamingSource.PLUGIN_TYPE)
+@Name("Kafka")
+@Description("Kafka streaming source.")
+public class KafkaStreamingSource extends ReferenceStreamingSource<StructuredRecord> {
+  private static final Logger LOG = LoggerFactory.getLogger(KafkaStreamingSource.class);
+  private final KafkaConfig conf;
+
+  public KafkaStreamingSource(KafkaConfig conf) {
+    super(conf);
+    this.conf = conf;
+  }
+
+  @Override
+  public void configurePipeline(PipelineConfigurer pipelineConfigurer) throws IllegalArgumentException {
+    super.configurePipeline(pipelineConfigurer);
+    conf.validate();
+    pipelineConfigurer.getStageConfigurer().setOutputSchema(conf.getSchema());
+    if (conf.getMaxRatePerPartition() != null && conf.getMaxRatePerPartition() > 0) {
+      Map<String, String> pipelineProperties = new HashMap<>();
+      pipelineProperties.put("spark.streaming.kafka.maxRatePerPartition", conf.getMaxRatePerPartition().toString());
+      pipelineConfigurer.setPipelineProperties(pipelineProperties);
+    }
+  }
+
+  @Override
+  public JavaDStream<StructuredRecord> getStream(StreamingContext context) throws Exception {
+    context.registerLineage(conf.referenceName);
+
+    Map<String, Object> kafkaParams = new HashMap<>();
+    kafkaParams.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, conf.getBrokers());
+    // Spark saves the offsets in checkpoints, no need for Kafka to save them
+    kafkaParams.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
+    kafkaParams.put("key.deserializer", ByteArrayDeserializer.class.getCanonicalName());
+    kafkaParams.put("value.deserializer", ByteArrayDeserializer.class.getCanonicalName());
+    KafkaHelpers.setupKerberosLogin(kafkaParams, conf.getPrincipal(), conf.getKeytabLocation());
+    // Create a unique string for the group.id using the pipeline name and the topic
+    kafkaParams.put("group.id", Joiner.on("-").join(context.getPipelineName().length(), conf.getTopic().length(),
+                                                    context.getPipelineName(), conf.getTopic()));
+    kafkaParams.putAll(conf.getKafkaProperties());
+
+    Properties properties = new Properties();
+    properties.putAll(kafkaParams);
+    try (Consumer<byte[], byte[]> consumer = new KafkaConsumer<>(properties, new ByteArrayDeserializer(),
+                                                                 new ByteArrayDeserializer())) {
+      Map<TopicPartition, Long> offsets = conf.getInitialPartitionOffsets(getPartitions(consumer));
+      // KafkaUtils doesn't understand -1 as the latest offset and -2 as the smallest offset,
+      // so we have to replace them with the actual latest and smallest offsets.
+      List<TopicPartition> earliestOffsetRequest = new ArrayList<>();
+      List<TopicPartition> latestOffsetRequest = new ArrayList<>();
+      for (Map.Entry<TopicPartition, Long> entry : offsets.entrySet()) {
+        TopicPartition topicAndPartition = entry.getKey();
+        Long offset = entry.getValue();
+        if (offset == OffsetRequest.EarliestTime()) {
+          earliestOffsetRequest.add(topicAndPartition);
+        } else if (offset == OffsetRequest.LatestTime()) {
+          latestOffsetRequest.add(topicAndPartition);
+        }
+      }
+
+      Set<TopicPartition> allOffsetRequest =
+        Sets.newHashSet(Iterables.concat(earliestOffsetRequest, latestOffsetRequest));
+      Map<TopicPartition, Long> offsetsFound = new HashMap<>();
+      offsetsFound.putAll(KafkaHelpers.getEarliestOffsets(consumer, earliestOffsetRequest));
+      offsetsFound.putAll(KafkaHelpers.getLatestOffsets(consumer, latestOffsetRequest));
+      for (TopicPartition topicAndPartition : allOffsetRequest) {
+        offsets.put(topicAndPartition, offsetsFound.get(topicAndPartition));
+      }
+
+      Set<TopicPartition> missingOffsets = Sets.difference(allOffsetRequest, offsetsFound.keySet());
+      if (!missingOffsets.isEmpty()) {
+        throw new IllegalStateException(String.format(
+          "Could not find offsets for %s. Please check all brokers were included in the broker list.", missingOffsets));
+      }
+      LOG.info("Using initial offsets {}", offsets);
+
+      return KafkaUtils.createDirectStream(
+        context.getSparkStreamingContext(), LocationStrategies.PreferConsistent(),
+        ConsumerStrategies.<byte[], byte[]>Subscribe(Collections.singleton(conf.getTopic()), kafkaParams, offsets)
+      ).transform(new RecordTransform(conf));
+    }
+  }
+
+  private Set<Integer> getPartitions(Consumer<byte[], byte[]> consumer) {
+    Set<Integer> partitions = conf.getPartitions();
+    if (!partitions.isEmpty()) {
+      return partitions;
+    }
+
+    partitions = new HashSet<>();
+    for (PartitionInfo partitionInfo : consumer.partitionsFor(conf.getTopic())) {
+      partitions.add(partitionInfo.partition());
+    }
+    return partitions;
+  }
+
+  /**
+   * Applies the format function to each rdd.
+   */
+  private static class RecordTransform
+    implements Function2<JavaRDD<ConsumerRecord<byte[], byte[]>>, Time, JavaRDD<StructuredRecord>> {
+
+    private final KafkaConfig conf;
+
+    RecordTransform(KafkaConfig conf) {
+      this.conf = conf;
+    }
+
+    @Override
+    public JavaRDD<StructuredRecord> call(JavaRDD<ConsumerRecord<byte[], byte[]>> input, Time batchTime) {
+      Function<ConsumerRecord<byte[], byte[]>, StructuredRecord> recordFunction = conf.getFormat() == null ?
+        new BytesFunction(batchTime.milliseconds(), conf) : new FormatFunction(batchTime.milliseconds(), conf);
+      return input.map(recordFunction);
+    }
+  }
+
+  /**
+   * Common logic for transforming kafka key, message, partition, and offset into a structured record.
+   * Everything here should be serializable, as Spark Streaming will serialize all functions.
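+   * Non-serializable pieces (the parsed schema and resolved field names) are therefore kept in
+   * transient fields and initialized lazily on the first call.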
+   */
+  private abstract static class BaseFunction implements Function<ConsumerRecord<byte[], byte[]>, StructuredRecord> {
+    private final long ts;
+    protected final KafkaConfig conf;
+    private transient String messageField;
+    private transient String timeField;
+    private transient String keyField;
+    private transient String partitionField;
+    private transient String offsetField;
+    private transient Schema schema;
+
+    BaseFunction(long ts, KafkaConfig conf) {
+      this.ts = ts;
+      this.conf = conf;
+    }
+
+    @Override
+    public StructuredRecord call(ConsumerRecord<byte[], byte[]> in) throws Exception {
+      // first time this was called, initialize schema and the time, key, partition, offset, and message fields.
+      if (schema == null) {
+        schema = conf.getSchema();
+        timeField = conf.getTimeField();
+        keyField = conf.getKeyField();
+        partitionField = conf.getPartitionField();
+        offsetField = conf.getOffsetField();
+        for (Schema.Field field : schema.getFields()) {
+          String name = field.getName();
+          // the message field is the first field that is not the time, key, partition, or offset field.
+          if (!name.equals(timeField) && !name.equals(keyField)
+            && !name.equals(partitionField) && !name.equals(offsetField)) {
+            messageField = name;
+            break;
+          }
+        }
+      }
+
+      StructuredRecord.Builder builder = StructuredRecord.builder(schema);
+      if (timeField != null) {
+        builder.set(timeField, ts);
+      }
+      if (keyField != null) {
+        builder.set(keyField, in.key());
+      }
+      if (partitionField != null) {
+        builder.set(partitionField, in.partition());
+      }
+      if (offsetField != null) {
+        builder.set(offsetField, in.offset());
+      }
+      addMessage(builder, messageField, in.value());
+      return builder.build();
+    }
+
+    protected abstract void addMessage(StructuredRecord.Builder builder, String messageField,
+                                       byte[] message) throws Exception;
+  }
+
+  /**
+   * Transforms kafka key and message into a structured record when message format is not given.
+   * Everything here should be serializable, as Spark Streaming will serialize all functions.
+   */
+  private static class BytesFunction extends BaseFunction {
+
+    BytesFunction(long ts, KafkaConfig conf) {
+      super(ts, conf);
+    }
+
+    @Override
+    protected void addMessage(StructuredRecord.Builder builder, String messageField, byte[] message) {
+      builder.set(messageField, message);
+    }
+  }
+
+  /**
+   * Transforms kafka key and message into a structured record when message format and schema are given.
+   * Everything here should be serializable, as Spark Streaming will serialize all functions.
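+   * The RecordFormat itself is not serializable, so it is likewise transient and created on the first call.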
+   */
+  private static class FormatFunction extends BaseFunction {
+    private transient RecordFormat<StreamEvent, StructuredRecord> recordFormat;
+
+    FormatFunction(long ts, KafkaConfig conf) {
+      super(ts, conf);
+    }
+
+    @Override
+    protected void addMessage(StructuredRecord.Builder builder, String messageField, byte[] message) throws Exception {
+      // first time this was called, initialize record format
+      if (recordFormat == null) {
+        Schema messageSchema = conf.getMessageSchema();
+        FormatSpecification spec =
+          new FormatSpecification(conf.getFormat(), messageSchema, new HashMap<String, String>());
+        recordFormat = RecordFormats.createInitializedFormat(spec);
+      }
+
+      StructuredRecord messageRecord = recordFormat.read(new StreamEvent(ByteBuffer.wrap(message)));
+      for (Schema.Field field : messageRecord.getSchema().getFields()) {
+        String fieldName = field.getName();
+        builder.set(fieldName, messageRecord.get(fieldName));
+      }
+    }
+  }
+
+}
diff --git a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/source/ReferenceStreamingSource.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/source/ReferenceStreamingSource.java
new file mode 100644
index 0000000..6fb6d96
--- /dev/null
+++ b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/source/ReferenceStreamingSource.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright © 2018 Cask Data, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package co.cask.hydrator.plugin.source;
+
+import co.cask.cdap.api.dataset.DatasetProperties;
+import co.cask.cdap.etl.api.PipelineConfigurer;
+import co.cask.cdap.etl.api.streaming.StreamingSource;
+import co.cask.hydrator.common.Constants;
+import co.cask.hydrator.common.IdUtils;
+import co.cask.hydrator.common.ReferencePluginConfig;
+
+/**
+ * Base streaming source that adds an External Dataset for a reference name, and performs a single getDataset()
+ * call to make sure CDAP records that it was accessed.
+ *
+ * @param <T> type of object read by the source.
+ */
+public abstract class ReferenceStreamingSource<T> extends StreamingSource<T> {
+  private final ReferencePluginConfig conf;
+
+  public ReferenceStreamingSource(ReferencePluginConfig conf) {
+    this.conf = conf;
+  }
+
+  @Override
+  public void configurePipeline(PipelineConfigurer pipelineConfigurer) throws IllegalArgumentException {
+    super.configurePipeline(pipelineConfigurer);
+    // Verify that reference name meets dataset id constraints
+    IdUtils.validateId(conf.referenceName);
+    pipelineConfigurer.createDataset(conf.referenceName, Constants.EXTERNAL_DATASET_TYPE, DatasetProperties.EMPTY);
+  }
+}
diff --git a/kafka-plugins-0.9/src/test/java/co/cask/hydrator/EmbeddedKafkaServer.java b/kafka-plugins-0.10/src/test/java/co/cask/hydrator/EmbeddedKafkaServer.java
similarity index 89%
rename from kafka-plugins-0.9/src/test/java/co/cask/hydrator/EmbeddedKafkaServer.java
rename to kafka-plugins-0.10/src/test/java/co/cask/hydrator/EmbeddedKafkaServer.java
index 068185b..e0721d2 100644
--- a/kafka-plugins-0.9/src/test/java/co/cask/hydrator/EmbeddedKafkaServer.java
+++ b/kafka-plugins-0.10/src/test/java/co/cask/hydrator/EmbeddedKafkaServer.java
@@ -19,16 +19,20 @@
 import com.google.common.base.Preconditions;
 import com.google.common.base.Throwables;
 import com.google.common.util.concurrent.AbstractIdleService;
+import kafka.metrics.KafkaMetricsReporter;
 import kafka.server.KafkaConfig;
 import kafka.server.KafkaServer;
-import kafka.utils.Time;
 import org.I0Itec.zkclient.exception.ZkTimeoutException;
+import org.apache.kafka.common.utils.Time;
 import org.apache.twill.internal.utils.Networks;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.Option;
+import scala.collection.JavaConverters;
+import scala.collection.Seq;
 
 import java.net.BindException;
+import java.util.Collections;
 import java.util.Properties;
 import java.util.Random;
 import java.util.concurrent.TimeUnit;
@@ -97,6 +101,9 @@ protected void shutDown() throws Exception {
   }
 
   private KafkaServer createKafkaServer(KafkaConfig kafkaConfig) {
+    Seq<KafkaMetricsReporter> metricsReporters =
+      JavaConverters.collectionAsScalaIterableConverter(
+        Collections.<KafkaMetricsReporter>emptyList()).asScala().toSeq();
     return new KafkaServer(kafkaConfig, new Time() {
 
       @Override
@@ -117,7 +124,12 @@ public void sleep(long ms) {
           Thread.interrupted();
         }
       }
-    }, Option.apply("embedded-server"));
+
+      @Override
+      public long hiResClockMs() {
+        return System.currentTimeMillis();
+      }
+    }, Option.apply("embedded-server"), metricsReporters);
   }
 
   /**
diff --git a/kafka-plugins-0.9/src/test/java/co/cask/hydrator/Kafka9BatchSourceTest.java b/kafka-plugins-0.10/src/test/java/co/cask/hydrator/KafkaBatchSourceTest.java
similarity index 97%
rename from kafka-plugins-0.9/src/test/java/co/cask/hydrator/Kafka9BatchSourceTest.java
rename to kafka-plugins-0.10/src/test/java/co/cask/hydrator/KafkaBatchSourceTest.java
index 65acc7c..f6505ce 100644
--- a/kafka-plugins-0.9/src/test/java/co/cask/hydrator/Kafka9BatchSourceTest.java
+++ b/kafka-plugins-0.10/src/test/java/co/cask/hydrator/KafkaBatchSourceTest.java
@@ -52,6 +52,7 @@
 import org.apache.twill.kafka.client.KafkaClientService;
 import org.apache.twill.kafka.client.KafkaPublisher;
 import org.apache.twill.zookeeper.ZKClientService;
+import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -68,7 +69,7 @@
 /**
  * Unit tests for our plugins.
*/ -public class Kafka9BatchSourceTest extends HydratorTestBase { +public class KafkaBatchSourceTest extends HydratorTestBase { private static final ArtifactSummary APP_ARTIFACT = new ArtifactSummary("data-pipeline", "1.0.0"); @ClassRule public static final TestConfiguration CONFIG = new TestConfiguration("explore.enabled", false); @@ -108,6 +109,14 @@ public static void setupTestClass() throws Exception { kafkaClient.startAndWait(); } + @AfterClass + public static void cleanup() { + kafkaClient.stopAndWait(); + kafkaServer.stopAndWait(); + zkClient.stopAndWait(); + zkServer.stopAndWait(); + } + @Test public void testKafkaSource() throws Exception { Schema schema = Schema.recordOf( diff --git a/kafka-plugins-0.10/src/test/java/co/cask/hydrator/KafkaSinkAndAlertsPublisherTest.java b/kafka-plugins-0.10/src/test/java/co/cask/hydrator/KafkaSinkAndAlertsPublisherTest.java new file mode 100644 index 0000000..0318aca --- /dev/null +++ b/kafka-plugins-0.10/src/test/java/co/cask/hydrator/KafkaSinkAndAlertsPublisherTest.java @@ -0,0 +1,278 @@ +/* + * Copyright © 2018 Cask Data, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package co.cask.hydrator; + +import co.cask.cdap.api.artifact.ArtifactSummary; +import co.cask.cdap.api.data.format.StructuredRecord; +import co.cask.cdap.api.data.schema.Schema; +import co.cask.cdap.api.dataset.table.Table; +import co.cask.cdap.datapipeline.DataPipelineApp; +import co.cask.cdap.datapipeline.SmartWorkflow; +import co.cask.cdap.etl.api.Alert; +import co.cask.cdap.etl.mock.alert.NullAlertTransform; +import co.cask.cdap.etl.mock.batch.MockSource; +import co.cask.cdap.etl.mock.test.HydratorTestBase; +import co.cask.cdap.etl.proto.v2.ETLBatchConfig; +import co.cask.cdap.etl.proto.v2.ETLPlugin; +import co.cask.cdap.etl.proto.v2.ETLStage; +import co.cask.cdap.proto.ProgramRunStatus; +import co.cask.cdap.proto.artifact.AppRequest; +import co.cask.cdap.proto.id.ApplicationId; +import co.cask.cdap.proto.id.ArtifactId; +import co.cask.cdap.proto.id.NamespaceId; +import co.cask.cdap.test.ApplicationManager; +import co.cask.cdap.test.DataSetManager; +import co.cask.cdap.test.TestConfiguration; +import co.cask.cdap.test.WorkflowManager; +import co.cask.hydrator.plugin.alertpublisher.KafkaAlertPublisher; +import co.cask.hydrator.plugin.sink.Kafka; +import com.google.common.base.Charsets; +import com.google.common.base.Function; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Iterables; +import com.google.gson.Gson; +import org.apache.kafka.clients.consumer.RangeAssignor; +import org.apache.kafka.common.serialization.StringSerializer; +import org.apache.twill.common.Cancellable; +import org.apache.twill.internal.kafka.client.ZKKafkaClientService; +import org.apache.twill.internal.utils.Networks; +import org.apache.twill.internal.zookeeper.InMemoryZKServer; +import org.apache.twill.kafka.client.FetchedMessage; +import org.apache.twill.kafka.client.KafkaClientService; +import org.apache.twill.kafka.client.KafkaConsumer; 
+import org.apache.twill.zookeeper.ZKClientService; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; + +import java.io.File; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +/** + * Kafka Sink and Alerts Publisher test + */ +public class KafkaSinkAndAlertsPublisherTest extends HydratorTestBase { + private static final ArtifactSummary APP_ARTIFACT = new ArtifactSummary("data-pipeline", "1.0.0"); + @ClassRule + public static final TestConfiguration CONFIG = new TestConfiguration("explore.enabled", false); + + private static final Gson GSON = new Gson(); + + private static ZKClientService zkClient; + private static KafkaClientService kafkaClient; + private static InMemoryZKServer zkServer; + private static EmbeddedKafkaServer kafkaServer; + private static int kafkaPort; + + @BeforeClass + public static void setupTestClass() throws Exception { + ArtifactId parentArtifact = NamespaceId.DEFAULT.artifact(APP_ARTIFACT.getName(), APP_ARTIFACT.getVersion()); + + // add the data-pipeline artifact and mock plugins + setupBatchArtifacts(parentArtifact, DataPipelineApp.class); + + // add our plugins artifact with the data-pipeline artifact as its parent. + // this will make our plugins available to data-pipeline. + addPluginArtifact(NamespaceId.DEFAULT.artifact("example-plugins", "1.0.0"), + parentArtifact, + Kafka.class, + KafkaAlertPublisher.class, + RangeAssignor.class, + StringSerializer.class); + + zkServer = InMemoryZKServer.builder().setDataDir(TMP_FOLDER.newFolder()).build(); + zkServer.startAndWait(); + + kafkaPort = Networks.getRandomPort(); + kafkaServer = new EmbeddedKafkaServer(generateKafkaConfig(zkServer.getConnectionStr(), + kafkaPort, TMP_FOLDER.newFolder())); + kafkaServer.startAndWait(); + + zkClient = ZKClientService.Builder.of(zkServer.getConnectionStr()).build(); + zkClient.startAndWait(); + + kafkaClient = new ZKKafkaClientService(zkClient); + kafkaClient.startAndWait(); + } + + @AfterClass + public static void cleanup() { + kafkaClient.stopAndWait(); + kafkaServer.stopAndWait(); + zkClient.stopAndWait(); + zkServer.stopAndWait(); + } + + @Test + public void testKafkaSinkAndAlertsPublisher() throws Exception { + Schema schema = Schema.recordOf( + "user", + Schema.Field.of("id", Schema.nullableOf(Schema.of(Schema.Type.LONG))), + Schema.Field.of("first", Schema.of(Schema.Type.STRING)), + Schema.Field.of("last", Schema.of(Schema.Type.STRING))); + + // create the pipeline config + String inputName = "sinkTestInput"; + + String usersTopic = "records"; + String alertsTopic = "alerts"; + Map sinkProperties = new HashMap<>(); + sinkProperties.put("brokers", "localhost:" + kafkaPort); + sinkProperties.put("referenceName", "kafkaTest"); + sinkProperties.put("topic", usersTopic); + sinkProperties.put("schema", schema.toString()); + sinkProperties.put("format", "csv"); + sinkProperties.put("key", "last"); + sinkProperties.put("async", "FALSE"); + sinkProperties.put("compressionType", "none"); + + Map alertProperties = new HashMap<>(); + alertProperties.put("brokers", "localhost:" + kafkaPort); + alertProperties.put("topic", alertsTopic); + + ETLStage source = new ETLStage("source", MockSource.getPlugin(inputName)); + ETLStage sink = + new ETLStage("sink", new 
ETLPlugin("Kafka", Kafka.PLUGIN_TYPE, sinkProperties, null)); + ETLStage transform = new ETLStage("nullAlert", NullAlertTransform.getPlugin("id")); + ETLStage alert = + new ETLStage("alert", new ETLPlugin("KafkaAlerts", KafkaAlertPublisher.PLUGIN_TYPE, alertProperties)); + + ETLBatchConfig pipelineConfig = ETLBatchConfig.builder("* * * * *") + .addStage(source) + .addStage(transform) + .addStage(sink) + .addStage(alert) + .addConnection(source.getName(), transform.getName()) + .addConnection(transform.getName(), sink.getName()) + .addConnection(transform.getName(), alert.getName()) + .build(); + + // create the pipeline + ApplicationId pipelineId = NamespaceId.DEFAULT.app("testKafkaSink"); + ApplicationManager appManager = deployApplication(pipelineId, new AppRequest<>(APP_ARTIFACT, pipelineConfig)); + + + Set expected = ImmutableSet.of("100,samuel,jackson", + "200,dwayne,johnson", + "300,christopher,walken", + "400,donald,trump"); + + List records = new ArrayList<>(); + for (String e : expected) { + String[] splits = e.split(","); + StructuredRecord record = + StructuredRecord.builder(schema) + .set("id", Long.parseLong(splits[0])) + .set("first", splits[1]) + .set("last", splits[2]) + .build(); + records.add(record); + } + + // Add a null record to get an alert + StructuredRecord nullRecord = + StructuredRecord.builder(schema) + .set("first", "terry") + .set("last", "crews") + .build(); + records.add(nullRecord); + + DataSetManager sourceTable = getDataset(inputName); + MockSource.writeInput(sourceTable, records); + + WorkflowManager workflowManager = appManager.getWorkflowManager(SmartWorkflow.NAME); + workflowManager.start(); + workflowManager.waitForRun(ProgramRunStatus.COMPLETED, 1, TimeUnit.MINUTES); + + // Assert users + Set actual = readKafkaRecords(usersTopic, expected.size()); + Assert.assertEquals(expected, actual); + + // Assert alerts + Set actualAlerts = readKafkaRecords(alertsTopic, 1); + // NullAlertTransform always returns empty hash map in alert + Assert.assertEquals(ImmutableSet.of(new Alert(transform.getName(), new HashMap())), + ImmutableSet.copyOf(Iterables.transform(actualAlerts, + new Function() { + @Override + public Alert apply(String s) { + return GSON.fromJson(s, Alert.class); + } + } + ))); + } + + private Set readKafkaRecords(String topic, final int maxMessages) throws InterruptedException { + KafkaConsumer kafkaConsumer = kafkaClient.getConsumer(); + + final Set kafkaMessages = new HashSet<>(); + KafkaConsumer.Preparer preparer = kafkaConsumer.prepare(); + preparer.addFromBeginning(topic, 0); + + final CountDownLatch stopLatch = new CountDownLatch(1); + Cancellable cancellable = preparer.consume(new KafkaConsumer.MessageCallback() { + @Override + public long onReceived(Iterator messages) { + long nextOffset = 0; + while (messages.hasNext()) { + FetchedMessage message = messages.next(); + nextOffset = message.getNextOffset(); + String payload = Charsets.UTF_8.decode(message.getPayload()).toString(); + kafkaMessages.add(payload); + } + // We are done when maxMessages are received + if (kafkaMessages.size() >= maxMessages) { + stopLatch.countDown(); + } + return nextOffset; + } + + @Override + public void finished() { + // nothing to do + } + }); + + stopLatch.await(30, TimeUnit.SECONDS); + cancellable.cancel(); + return kafkaMessages; + } + + private static Properties generateKafkaConfig(String zkConnectStr, int port, File logDir) { + Properties prop = new Properties(); + prop.setProperty("log.dir", logDir.getAbsolutePath()); + prop.setProperty("port", 
Integer.toString(port)); + prop.setProperty("broker.id", "1"); + prop.setProperty("num.partitions", "1"); + prop.setProperty("zookeeper.connect", zkConnectStr); + prop.setProperty("zookeeper.connection.timeout.ms", "1000000"); + prop.setProperty("default.replication.factor", "1"); + return prop; + } + +} diff --git a/kafka-plugins-0.10/src/test/java/co/cask/hydrator/KafkaStreamingSourceTest.java b/kafka-plugins-0.10/src/test/java/co/cask/hydrator/KafkaStreamingSourceTest.java new file mode 100644 index 0000000..5ba26dc --- /dev/null +++ b/kafka-plugins-0.10/src/test/java/co/cask/hydrator/KafkaStreamingSourceTest.java @@ -0,0 +1,267 @@ +/* + * Copyright © 2018 Cask Data, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package co.cask.hydrator; + +import co.cask.cdap.api.artifact.ArtifactRange; +import co.cask.cdap.api.artifact.ArtifactSummary; +import co.cask.cdap.api.artifact.ArtifactVersion; +import co.cask.cdap.api.data.format.StructuredRecord; +import co.cask.cdap.api.data.schema.Schema; +import co.cask.cdap.api.dataset.table.Table; +import co.cask.cdap.common.utils.Networks; +import co.cask.cdap.common.utils.Tasks; +import co.cask.cdap.datapipeline.DataPipelineApp; +import co.cask.cdap.datastreams.DataStreamsApp; +import co.cask.cdap.datastreams.DataStreamsSparkLauncher; +import co.cask.cdap.etl.api.streaming.StreamingSource; +import co.cask.cdap.etl.mock.batch.MockSink; +import co.cask.cdap.etl.mock.test.HydratorTestBase; +import co.cask.cdap.etl.proto.v2.DataStreamsConfig; +import co.cask.cdap.etl.proto.v2.ETLPlugin; +import co.cask.cdap.etl.proto.v2.ETLStage; +import co.cask.cdap.proto.artifact.AppRequest; +import co.cask.cdap.proto.id.ApplicationId; +import co.cask.cdap.proto.id.ArtifactId; +import co.cask.cdap.proto.id.NamespaceId; +import co.cask.cdap.test.ApplicationManager; +import co.cask.cdap.test.DataSetManager; +import co.cask.cdap.test.SparkManager; +import co.cask.cdap.test.TestConfiguration; +import co.cask.hydrator.common.http.HTTPPollConfig; +import co.cask.hydrator.plugin.source.KafkaStreamingSource; +import com.google.common.base.Charsets; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.util.concurrent.Uninterruptibles; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.serialization.ByteArrayDeserializer; +import org.apache.spark.streaming.kafka010.KafkaUtils; +import org.apache.twill.internal.kafka.client.ZKKafkaClientService; +import org.apache.twill.internal.zookeeper.InMemoryZKServer; +import org.apache.twill.kafka.client.Compression; +import org.apache.twill.kafka.client.KafkaClientService; +import org.apache.twill.kafka.client.KafkaPublisher; +import org.apache.twill.zookeeper.ZKClientService; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.File; +import java.util.HashMap; +import java.util.Map; +import 
java.util.Properties; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.concurrent.TimeUnit; + +/** + * Tests for Spark plugins. + */ +public class KafkaStreamingSourceTest extends HydratorTestBase { + + @ClassRule + public static final TestConfiguration CONFIG = new TestConfiguration("explore.enabled", false); + + private static final ArtifactId DATAPIPELINE_ARTIFACT_ID = + NamespaceId.DEFAULT.artifact("data-pipeline", "4.3.2"); + private static final ArtifactId DATASTREAMS_ARTIFACT_ID = + NamespaceId.DEFAULT.artifact("data-streams", "4.3.2"); + private static final ArtifactSummary DATASTREAMS_ARTIFACT = + new ArtifactSummary("data-streams", "4.3.2"); + + private static ZKClientService zkClient; + private static KafkaClientService kafkaClient; + private static InMemoryZKServer zkServer; + private static EmbeddedKafkaServer kafkaServer; + private static int kafkaPort; + + @ClassRule + public static TemporaryFolder tmpFolder = new TemporaryFolder(); + + + @BeforeClass + public static void setupTest() throws Exception { + // add the artifact for data pipeline app + setupBatchArtifacts(DATAPIPELINE_ARTIFACT_ID, DataPipelineApp.class); + + setupStreamingArtifacts(DATASTREAMS_ARTIFACT_ID, DataStreamsApp.class); + + // add artifact for spark plugins + Set parents = ImmutableSet.of( + new ArtifactRange(NamespaceId.DEFAULT.getNamespace(), DATAPIPELINE_ARTIFACT_ID.getArtifact(), + new ArtifactVersion(DATAPIPELINE_ARTIFACT_ID.getVersion()), true, + new ArtifactVersion(DATAPIPELINE_ARTIFACT_ID.getVersion()), true), + new ArtifactRange(NamespaceId.DEFAULT.getNamespace(), DATASTREAMS_ARTIFACT_ID.getArtifact(), + new ArtifactVersion(DATASTREAMS_ARTIFACT_ID.getVersion()), true, + new ArtifactVersion(DATASTREAMS_ARTIFACT_ID.getVersion()), true) + ); + addPluginArtifact(NamespaceId.DEFAULT.artifact("spark-plugins", "1.0.0"), parents, + KafkaStreamingSource.class, KafkaUtils.class, ByteArrayDeserializer.class, TopicPartition.class, + HTTPPollConfig.class); + + zkServer = InMemoryZKServer.builder().setDataDir(TMP_FOLDER.newFolder()).build(); + zkServer.startAndWait(); + + kafkaPort = Networks.getRandomPort(); + kafkaServer = new EmbeddedKafkaServer(generateKafkaConfig(zkServer.getConnectionStr(), + kafkaPort, TMP_FOLDER.newFolder())); + kafkaServer.startAndWait(); + + zkClient = ZKClientService.Builder.of(zkServer.getConnectionStr()).build(); + zkClient.startAndWait(); + + kafkaClient = new ZKKafkaClientService(zkClient); + kafkaClient.startAndWait(); + } + + @AfterClass + public static void cleanup() { + kafkaClient.stopAndWait(); + kafkaServer.stopAndWait(); + zkClient.stopAndWait(); + zkServer.stopAndWait(); + } + + @Test + public void testKafkaStreamingSource() throws Exception { + Schema schema = Schema.recordOf( + "user", + Schema.Field.of("id", Schema.of(Schema.Type.LONG)), + Schema.Field.of("first", Schema.of(Schema.Type.STRING)), + Schema.Field.of("last", Schema.of(Schema.Type.STRING))); + + Map properties = new HashMap<>(); + properties.put("referenceName", "kafkaPurchases"); + properties.put("brokers", "localhost:" + kafkaPort); + properties.put("topic", "users"); + properties.put("defaultInitialOffset", "-2"); + properties.put("format", "csv"); + properties.put("schema", schema.toString()); + + ETLStage source = new ETLStage("source", new ETLPlugin("Kafka", StreamingSource.PLUGIN_TYPE, properties, null)); + + DataStreamsConfig etlConfig = DataStreamsConfig.builder() + .addStage(source) + .addStage(new ETLStage("sink", MockSink.getPlugin("kafkaOutput"))) + 
.addConnection("source", "sink")
+      .setBatchInterval("1s")
+      .setStopGracefully(true)
+      .build();
+
+    AppRequest<DataStreamsConfig> appRequest = new AppRequest<>(DATASTREAMS_ARTIFACT, etlConfig);
+    ApplicationId appId = NamespaceId.DEFAULT.app("KafkaSourceApp");
+    ApplicationManager appManager = deployApplication(appId, appRequest);
+
+    // write some messages to kafka
+    Map<String, String> messages = new HashMap<>();
+    messages.put("a", "1,samuel,jackson");
+    messages.put("b", "2,dwayne,johnson");
+    messages.put("c", "3,christopher,walken");
+    sendKafkaMessage("users", messages);
+
+    SparkManager sparkManager = appManager.getSparkManager(DataStreamsSparkLauncher.NAME);
+    sparkManager.start();
+    sparkManager.waitForStatus(true, 10, 1);
+
+    final DataSetManager<Table> outputManager = getDataset("kafkaOutput");
+    Tasks.waitFor(
+      ImmutableMap.of(1L, "samuel jackson", 2L, "dwayne johnson", 3L, "christopher walken"),
+      new Callable<Map<Long, String>>() {
+        @Override
+        public Map<Long, String> call() throws Exception {
+          outputManager.flush();
+          Map<Long, String> actual = new HashMap<>();
+          for (StructuredRecord outputRecord : MockSink.readOutput(outputManager)) {
+            actual.put((Long) outputRecord.get("id"), outputRecord.get("first") + " " + outputRecord.get("last"));
+          }
+          return actual;
+        }
+      },
+      2,
+      TimeUnit.MINUTES);
+
+    sparkManager.stop();
+    sparkManager.waitForStatus(false, 10, 1);
+
+    // clear the output table
+    MockSink.clear(outputManager);
+
+    // now write some more messages to kafka and start the program again to make sure it picks up where it left off
+    messages = new HashMap<>();
+    messages.put("d", "4,terry,crews");
+    messages.put("e", "5,sylvester,stallone");
+    sendKafkaMessage("users", messages);
+
+    sparkManager.start();
+    sparkManager.waitForStatus(true, 10, 1);
+
+    Tasks.waitFor(
+      ImmutableMap.of(4L, "terry crews", 5L, "sylvester stallone"),
+      new Callable<Map<Long, String>>() {
+        @Override
+        public Map<Long, String> call() throws Exception {
+          outputManager.flush();
+          Map<Long, String> actual = new HashMap<>();
+          for (StructuredRecord outputRecord : MockSink.readOutput(outputManager)) {
+            actual.put((Long) outputRecord.get("id"), outputRecord.get("first") + " " + outputRecord.get("last"));
+          }
+          return actual;
+        }
+      },
+      2,
+      TimeUnit.MINUTES);
+
+    sparkManager.stop();
+  }
+
+  private static Properties generateKafkaConfig(String zkConnectStr, int port, File logDir) {
+    Properties prop = new Properties();
+    prop.setProperty("log.dir", logDir.getAbsolutePath());
+    prop.setProperty("port", Integer.toString(port));
+    prop.setProperty("broker.id", "1");
+    prop.setProperty("num.partitions", "1");
+    prop.setProperty("zookeeper.connect", zkConnectStr);
+    prop.setProperty("zookeeper.connection.timeout.ms", "1000000");
+    prop.setProperty("default.replication.factor", "1");
+    return prop;
+  }
+
+  private void sendKafkaMessage(@SuppressWarnings("SameParameterValue") String topic, Map<String, String> messages) {
+    KafkaPublisher publisher = kafkaClient.getPublisher(KafkaPublisher.Ack.ALL_RECEIVED, Compression.NONE);
+
+    // If publish failed, retry up to 20 times, with 100ms delay between each retry
+    // This is because leader election in Kafka 08 takes time when a topic is being created upon publish request.
+    int count = 0;
+    do {
+      KafkaPublisher.Preparer preparer = publisher.prepare(topic);
+      for (Map.Entry<String, String> entry : messages.entrySet()) {
+        preparer.add(Charsets.UTF_8.encode(entry.getValue()), entry.getKey());
+      }
+      try {
+        preparer.send().get();
+        break;
+      } catch (Exception e) {
+        // Backoff if send failed.
+ Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS); + } + } while (count++ < 20); + } + + +} diff --git a/kafka-plugins-0.9/widgets/Kafka-batchsink.json b/kafka-plugins-0.10/widgets/Kafka-batchsink.json similarity index 100% rename from kafka-plugins-0.9/widgets/Kafka-batchsink.json rename to kafka-plugins-0.10/widgets/Kafka-batchsink.json diff --git a/kafka-plugins-0.9/widgets/Kafka-batchsource.json b/kafka-plugins-0.10/widgets/Kafka-batchsource.json similarity index 100% rename from kafka-plugins-0.9/widgets/Kafka-batchsource.json rename to kafka-plugins-0.10/widgets/Kafka-batchsource.json diff --git a/kafka-plugins-0.10/widgets/Kafka-streamingsource.json b/kafka-plugins-0.10/widgets/Kafka-streamingsource.json new file mode 100644 index 0000000..56187f5 --- /dev/null +++ b/kafka-plugins-0.10/widgets/Kafka-streamingsource.json @@ -0,0 +1,151 @@ +{ + "metadata": { + "spec-version": "1.5" + }, + "display-name": "Kafka Consumer", + "configuration-groups": [ + { + "label": "Kafka Configuration", + "properties": [ + { + "widget-type": "textbox", + "label": "Reference Name", + "name": "referenceName" + }, + { + "widget-type": "csv", + "label": "Kafka Brokers", + "name": "brokers", + "widget-attributes": { + "delimiter": "," + } + }, + { + "widget-type": "textbox", + "label": "Kafka Topic", + "name": "topic" + }, + { + "widget-type": "csv", + "label": "Topic Partitions", + "name": "partitions", + "widget-attributes": { + "delimiter": "," + } + }, + { + "widget-type": "textbox", + "label": "Default Initial Offset", + "name": "defaultInitialOffset" + }, + { + "widget-type": "keyvalue", + "label": "Initial Partition Offsets", + "name": "initialPartitionOffsets", + "widget-attributes": { + "showDelimiter": "false", + "key-placeholder": "Partition", + "value-placeholder": "Offset" + } + }, + { + "widget-type": "textbox", + "label": "Time Field", + "name": "timeField" + }, + { + "widget-type": "textbox", + "label": "Key Field", + "name": "keyField" + }, + { + "widget-type": "textbox", + "label": "Partition Field", + "name": "partitionField" + }, + { + "widget-type": "textbox", + "label": "Offset Field", + "name": "offsetField" + }, + { + "widget-type": "textbox", + "label": "Max Rate Per Partition", + "name": "maxRatePerPartition", + "widget-attributes": { + "default": "1000" + } + }, + { + "widget-type": "keyvalue", + "label": "Additional Kafka Consumer Properties", + "name": "kafkaProperties", + "widget-attributes": { + "showDelimiter": "false", + "key-placeholder": "Kafka consumer property", + "value-placeholder": "Kafka consumer property value" + } + } + ] + }, + { + "label": "Format", + "properties": [ + { + "widget-type": "select", + "label": "Format", + "name": "format", + "widget-attributes": { + "values": [ + "", + "avro", + "binary", + "clf", + "csv", + "grok", + "syslog", + "text", + "tsv" + ], + "default": "" + } + } + ] + }, + { + "label": "Authentication", + "properties": [ + { + "widget-type": "textbox", + "label": "Kerberos Principal", + "name": "principal" + }, + { + "widget-type": "textbox", + "label": "Keytab Location", + "name": "keytabLocation" + } + ] + } + ], + "outputs": [ + { + "name": "schema", + "widget-type": "schema", + "widget-attributes": { + "default-schema": { + "name": "etlSchemaBody", + "type": "record", + "fields": [ + { + "name": "message", + "type": ["bytes", "null"] + } + ] + }, + "schema-default-type": "string", + "property-watch": "format" + } + } + ] +} diff --git a/kafka-plugins-0.10/widgets/KafkaAlerts-alertpublisher.json 
b/kafka-plugins-0.10/widgets/KafkaAlerts-alertpublisher.json
new file mode 100644
index 0000000..3ae173f
--- /dev/null
+++ b/kafka-plugins-0.10/widgets/KafkaAlerts-alertpublisher.json
@@ -0,0 +1,52 @@
+{
+  "metadata": {
+    "spec-version": "1.5"
+  },
+  "display-name": "Kafka Alert Publisher",
+  "configuration-groups": [
+    {
+      "label": "Kafka Alert Publisher Config",
+      "properties": [
+        {
+          "widget-type": "csv",
+          "label": "Kafka Brokers",
+          "name": "brokers",
+          "widget-attributes": {
+            "delimiter": ","
+          }
+        },
+        {
+          "widget-type": "textbox",
+          "label": "Kafka Topic",
+          "name": "topic"
+        },
+        {
+          "widget-type": "keyvalue",
+          "label": "Additional Kafka Producer Properties",
+          "name": "producerProperties",
+          "widget-attributes": {
+            "showDelimiter": "false",
+            "key-placeholder": "Kafka producer property",
+            "value-placeholder": "Kafka producer property value"
+          }
+        }
+      ]
+    },
+    {
+      "label": "Authentication",
+      "properties": [
+        {
+          "widget-type": "textbox",
+          "label": "Kerberos Principal",
+          "name": "principal"
+        },
+        {
+          "widget-type": "textbox",
+          "label": "Keytab Location",
+          "name": "keytabLocation"
+        }
+      ]
+    }
+  ],
+  "outputs": [ ]
+}
diff --git a/kafka-plugins-0.8/docs/KAFKASOURCE.md b/kafka-plugins-0.8/docs/KAFKASOURCE.md
index 46e95ea..6d1c0ca 100644
--- a/kafka-plugins-0.8/docs/KAFKASOURCE.md
+++ b/kafka-plugins-0.8/docs/KAFKASOURCE.md
@@ -12,7 +12,7 @@ Usage Notes
 
 Kafka Streaming Source can be used to read events from a kafka topic. It uses kafka consumer [0.8.2 apis](https://kafka.apache.org/082/documentation.html) to read events from a kafka topic. Kafka Source converts incoming kafka events into cdap structured records which then can be used for further transformations.
 
-The source provides capabilities to read from latest offset or from beginning or from the provided kafka offset. The plugin relies on Spark Streaming offset [storage capabilities](https://spark.apache.org/docs/latest/streaming-kafka-0-10-integration.html) to manager offsets and checkpoints.
+The source can read from the latest offset, from the beginning, or from a provided kafka offset. The plugin relies on Spark Streaming offset [storage capabilities](https://spark.apache.org/docs/latest/streaming-kafka-0-8-integration.html) to manage offsets and checkpoints.
 
 Plugin Configuration
 ---------------------
diff --git a/kafka-plugins-0.8/docs/Kafka-alert-publisher.md b/kafka-plugins-0.8/docs/Kafka-alert-publisher.md
index bbda8a0..78031fe 100644
--- a/kafka-plugins-0.8/docs/Kafka-alert-publisher.md
+++ b/kafka-plugins-0.8/docs/Kafka-alert-publisher.md
@@ -4,7 +4,7 @@
 Kafka Alert Publisher that allows you to publish alerts to kafka as json objects.
 The plugin internally uses kafka producer apis to publish alerts. The plugin allows
 to specify kafka topic to use for publishing and other additional kafka producer properties.
-Please note that this plugin uses kafka 0.8.2 java apis, so it may not be compatible with higher versions of kafka.
+This plugin uses kafka 0.8.2 java apis.
 
 Build
 -----
@@ -60,4 +60,4 @@ and limitations under the License.
 
 Cask is a trademark of Cask Data, Inc. All rights reserved. Apache, Apache HBase, and
 HBase are trademarks of The Apache Software Foundation. Used with
-permission. No endorsement by The Apache Software Foundation is implied by the use of these marks. \ No newline at end of file
+permission. No endorsement by The Apache Software Foundation is implied by the use of these marks.
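The publish path described in the alert publisher docs above can be pictured with a short, self-contained sketch. This is illustrative only, not the plugin's actual implementation: the broker address, the "alerts" topic name, and the payload shape are assumptions for the example, while the producer and Gson calls are standard Kafka client and Gson APIs.

import com.google.gson.Gson;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

public final class AlertPublishSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    // Hypothetical broker address; the real plugin builds this from its 'brokers' config.
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName());
    // Extra settings like 'acks' or 'client.id' are passed through the same way the
    // plugin's 'producerProperties' config is.
    props.put("acks", "1");

    // Stand-in for an alert payload; the real plugin serializes CDAP Alert objects to json.
    Map<String, String> payload = new HashMap<>();
    payload.put("stage", "nullAlert");
    String alertJson = new Gson().toJson(payload);

    try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
      // Publish the alert as a json string to the configured topic.
      producer.send(new ProducerRecord<String, String>("alerts", alertJson));
    }
  }
}

The key design point the docs rely on is that alerts are plain json strings, so any consumer can read the alert topic without the plugin's classes on its classpath.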
diff --git a/kafka-plugins-0.8/docs/Kafka-batchsink.md b/kafka-plugins-0.8/docs/Kafka-batchsink.md index 104c1c0..967edea 100644 --- a/kafka-plugins-0.8/docs/Kafka-batchsink.md +++ b/kafka-plugins-0.8/docs/Kafka-batchsink.md @@ -7,7 +7,7 @@ Kafka sink that allows you to write events into CSV or JSON to kafka. Plugin has the capability to push the data to a Kafka topic. It can also be configured to partition events being written to kafka based on a configurable key. The sink can also be configured to operate in sync or async mode and apply different -compression types to events. Kafka sink is compatible with Kafka 0.8, 0.9 and 0.10 +compression types to events. This plugin uses kafka 0.8.2 java apis. Configuration @@ -50,4 +50,4 @@ Additional properties like number of acknowledgements and client id can also be "kafkaProperties": "acks:2,client.id:myclient", "key": "message" } - } \ No newline at end of file + } diff --git a/kafka-plugins-0.8/docs/Kafka-batchsource.md b/kafka-plugins-0.8/docs/Kafka-batchsource.md index 6456a44..fdb062d 100644 --- a/kafka-plugins-0.8/docs/Kafka-batchsource.md +++ b/kafka-plugins-0.8/docs/Kafka-batchsource.md @@ -5,7 +5,7 @@ Description ----------- Kafka batch source. Emits the record from kafka. It will emit a record based on the schema and format you use, or if no schema or format is specified, the message payload will be emitted. The source will -remember the offset it read last run and continue from that offset for the next run. +remember the offset it read last run and continue from that offset for the next run. This plugin uses kafka 0.8.2 java apis. Use Case -------- @@ -94,4 +94,3 @@ For each Kafka message read, it will output a record with the schema: | count | int | | price | double | +================================+ - \ No newline at end of file diff --git a/kafka-plugins-0.8/docs/Kafka-streamingsource.md b/kafka-plugins-0.8/docs/Kafka-streamingsource.md index d047e18..44e0761 100644 --- a/kafka-plugins-0.8/docs/Kafka-streamingsource.md +++ b/kafka-plugins-0.8/docs/Kafka-streamingsource.md @@ -5,7 +5,7 @@ Description ----------- Kafka streaming source. Emits a record with the schema specified by the user. If no schema is specified, it will emit a record with two fields: 'key' (nullable string) and 'message' -(bytes). Kafka source is compatible with Kafka 0.8, 0.9 and 0.10 +(bytes). This plugin uses kafka 0.8.2 java apis. Use Case diff --git a/kafka-plugins-0.8/docs/KafkaAlerts-alertpublisher.md b/kafka-plugins-0.8/docs/KafkaAlerts-alertpublisher.md index 763e2b3..b1000f3 100644 --- a/kafka-plugins-0.8/docs/KafkaAlerts-alertpublisher.md +++ b/kafka-plugins-0.8/docs/KafkaAlerts-alertpublisher.md @@ -6,8 +6,7 @@ Description Kafka Alert Publisher that allows you to publish alerts to kafka as json objects. The plugin internally uses kafka producer apis to publish alerts. The plugin allows to specify kafka topic to use for publishing and other additional -kafka producer properties. Please note that this plugin uses kafka 0.8.2 java apis -so it may not be compatible with higher versions of kafka. +kafka producer properties. This plugin uses kafka 0.8.2 java apis. Configuration @@ -33,4 +32,4 @@ are like acks and client.id are specified as well. 
"topic": "alarm", "producerProperties": "acks:2,client.id:myclient" } - } \ No newline at end of file + } diff --git a/kafka-plugins-0.8/icons/KafkaAlerts-alertpublisher.png b/kafka-plugins-0.8/icons/KafkaAlerts-alertpublisher.png new file mode 100644 index 0000000000000000000000000000000000000000..041cdb49c14e9bb31ddf2c2256c6dc5b72a12e4e GIT binary patch literal 2066 zcmY*adpy(c9{=h#6kbNgCe}EnVq@74Ys0X~DVbZ@VIoW_8@By6x6oYjGAfib{9H

HUT+Toh&Ka(lr#WXRzfM00Kl;V zfazcWz+D6YNObXiFTC+=Uq%4#%QwP*w;B zbvPW3j}4>ayj{sZt}Ay0#8Eao8iz)6xm*-?H;Ne>fws1{w?|uH&=`!Rl3~e;W3WSb zmJF83cai_-xQ4SrV(#8K^>3&`D_mA``eII z0N7;a?n?AMx^ea*^;(~wu2>X@vWA&HBvlY?F7~>pQM-M#4RrhY3W}B*xoN{vnQhzj z_KJ!c_&rrG-5sv$3pD=Tr0Qjm(~h0pj;*`f`sB&->igv6!r9qC0@p6y#BuRU!mQ}3 zXnwHAUM}9&-hV0eV`Z4MT;a1I5Om+Ia0a-CLunkahDu$t}fTR$Y4Pv6g>3le9VYBmUqKOS7sUOiz>1}{}*Y9@yE zVimMKHMuL<(LPt^NTsTzL2TptmFmimJL}ueg?D5^hNNamEjt4!iqI$L^jR2jHCoIt zEujbq6_;g&m~FE=aLDpf)xP(}_7vv8S*zikYpwP{BNHtJUh^Gx?&3amEs`}) zsyb$nWpB=_vY)AmCmqReP5y9_eQwlSS13G=BM+w*$HUHgrXa2g?_BNAQs3I$>^&Rs z?9v!lA~v5Xq57RPUdqqoPIV}j^zH})_Gds8?jDZNQH9OYRkS$i#znPbk zHbKINyaBj9flqFk@)k5&dl zPxtI2&WCsYYIeOOCd~r7;6DDRWFyZ$w$!Fskd7W#OY;5;=8(&V(0)no)GWPAt0h6+ z^YtTRP72>yaIoI~o+nNb?4RMvv+fe$w zPvzKxqR)LDtnn?2stUj0qUT?yD7{fIiFvZT-E^6z78)4zpt)7gFVrlyuspl9zP4F= ze?kw#9A}iXW!!U+8xqy-)F_O7b$#pGw}`jF`u<;uyC7l36K;eYqLjB~$!IBvr^AkX zbDERD=|8Giq+JmRO=wcbEeEYo`f)$-?D;Zxte$a^BKOkFx`^@S64(kO6qgA2Mr(iMtrc~J+V za5O)^)8y!CD8j&{c0FanQEoTcT;Qsi?(o=u)lM7tLY+Sq(6_CYVXA!{#Qk%oS>RjY gC);EGl}R5rhGo@$plRUgYoD(>>457k=ipQS2KYUkRR910 literal 0 HcmV?d00001 diff --git a/kafka-plugins-0.8/pom.xml b/kafka-plugins-0.8/pom.xml index 85a05f6..c812ab7 100644 --- a/kafka-plugins-0.8/pom.xml +++ b/kafka-plugins-0.8/pom.xml @@ -5,13 +5,13 @@ kafka-plugins co.cask.hydrator - 1.8.1 + 1.8.2-SNAPSHOT 4.0.0 Apache Kafka 0.8 plugins kafka-plugins - 1.8.1-0.8.2.2 + 1.8.2-SNAPSHOT-0.8.2.2 @@ -25,6 +25,93 @@ + + org.apache.spark + spark-streaming-kafka_2.10 + ${spark1.version} + + + org.apache.spark + spark-mllib_2.10 + ${spark1.version} + provided + + + org.apache.spark + spark-streaming_2.10 + ${spark1.version} + provided + + + org.apache.spark + spark-core_2.10 + ${spark1.version} + provided + + + org.slf4j + slf4j-log4j12 + + + log4j + log4j + + + org.apache.hadoop + hadoop-client + + + com.esotericsoftware.reflectasm + reflectasm + + + org.apache.curator + curator-recipes + + + org.tachyonproject + tachyon-client + + + org.scala-lang + scala-compiler + + + org.eclipse.jetty.orbit + javax.servlet + + + + net.java.dev.jets3t + jets3t + + + asm + asm + + + + + co.cask.cdap + cdap-spark-core + ${cdap.version} + test + + + co.cask.cdap + cdap-data-pipeline + ${cdap.version} + test + + + co.cask.cdap + cdap-data-streams + ${cdap.version} + test + @@ -33,24 +120,15 @@ org.apache.felix maven-bundle-plugin 3.3.0 - true <_exportcontents>co.cask.hydrator.plugin.*;org.apache.spark.streaming.kafka.*; - kafka.serializer.*;kafka.common; + kafka.serializer.*;kafka.common;com.google.common.base.*; *;inline=false;scope=compile true lib - - - package - - bundle - - - co.cask @@ -59,6 +137,7 @@ system:cdap-data-pipeline[4.3.0-SNAPSHOT,6.0.0-SNAPSHOT) + system:cdap-data-streams[4.3.0-SNAPSHOT,6.0.0-SNAPSHOT) diff --git a/kafka-plugins-0.8/src/main/java/co/cask/hydrator/plugin/alertpublisher/KafkaAlertPublisher.java b/kafka-plugins-0.8/src/main/java/co/cask/hydrator/plugin/alertpublisher/KafkaAlertPublisher.java index 340181a..aed09a5 100644 --- a/kafka-plugins-0.8/src/main/java/co/cask/hydrator/plugin/alertpublisher/KafkaAlertPublisher.java +++ b/kafka-plugins-0.8/src/main/java/co/cask/hydrator/plugin/alertpublisher/KafkaAlertPublisher.java @@ -1,3 +1,19 @@ +/* + * Copyright © 2018 Cask Data, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + package co.cask.hydrator.plugin.alertpublisher; import co.cask.cdap.api.annotation.Description; @@ -34,7 +50,7 @@ @Name("KafkaAlerts") public class KafkaAlertPublisher extends AlertPublisher { private static final Logger LOG = LoggerFactory.getLogger(KafkaAlertPublisher.class); - public static final Gson GSON = new Gson(); + private static final Gson GSON = new Gson(); private final Config config; private KafkaProducer producer; diff --git a/kafka-plugins-0.8/src/main/java/co/cask/hydrator/plugin/sink/Kafka.java b/kafka-plugins-0.8/src/main/java/co/cask/hydrator/plugin/sink/Kafka.java index f07637e..185f0d9 100644 --- a/kafka-plugins-0.8/src/main/java/co/cask/hydrator/plugin/sink/Kafka.java +++ b/kafka-plugins-0.8/src/main/java/co/cask/hydrator/plugin/sink/Kafka.java @@ -38,6 +38,8 @@ import org.apache.avro.reflect.Nullable; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.io.Text; +import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.common.serialization.StringSerializer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -60,9 +62,6 @@ public class Kafka extends ReferenceBatchSink { private final KafkaOutputFormatProvider kafkaOutputFormatProvider; // Static constants for configuring Kafka producer. - private static final String BROKER_LIST = "bootstrap.servers"; - private static final String KEY_SERIALIZER = "key.serializer"; - private static final String VAL_SERIALIZER = "value.serializer"; private static final String ACKS_REQUIRED = "acks"; public Kafka(Config producerConfig) { @@ -161,7 +160,7 @@ public static class Config extends ReferencePluginConfig { private String kafkaProperties; @Name("compressionType") - @Description("Additional kafka producer properties to set") + @Description("Compression type to be applied on message") @Macro private String compressionType; @@ -185,10 +184,10 @@ private static class KafkaOutputFormatProvider implements OutputFormatProvider { this.conf = new HashMap<>(); conf.put("topic", kafkaSinkConfig.topic); - conf.put(BROKER_LIST, kafkaSinkConfig.brokers); + conf.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaSinkConfig.brokers); conf.put("compression.type", kafkaSinkConfig.compressionType); - conf.put(KEY_SERIALIZER, "org.apache.kafka.common.serialization.StringSerializer"); - conf.put(VAL_SERIALIZER, "org.apache.kafka.common.serialization.StringSerializer"); + conf.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName()); + conf.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName()); addKafkaProperties(kafkaSinkConfig.kafkaProperties); diff --git a/kafka-plugins-0.8/src/main/java/co/cask/hydrator/plugin/sink/KafkaOutputFormat.java b/kafka-plugins-0.8/src/main/java/co/cask/hydrator/plugin/sink/KafkaOutputFormat.java index 62ca176..f3a1e42 100644 --- a/kafka-plugins-0.8/src/main/java/co/cask/hydrator/plugin/sink/KafkaOutputFormat.java +++ b/kafka-plugins-0.8/src/main/java/co/cask/hydrator/plugin/sink/KafkaOutputFormat.java @@ -39,13 +39,8 @@ public class KafkaOutputFormat extends OutputFormat { 
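The two hunks above swap hard-coded string literals for kafka-clients' ProducerConfig constants and the StringSerializer class reference. A minimal sketch of the equivalent producer setup; the broker address and acks value below are illustrative, not taken from the patch:

```java
import java.util.Properties;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;

public class SinkProducerPropsSketch {
  static Properties producerProps() {
    Properties props = new Properties();
    // Same settings the sink's OutputFormatProvider now builds via constants.
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName());
    // "compression.type" and "acks" are still set by their literal keys in the plugin.
    props.put("compression.type", "none");
    props.put("acks", "1");
    return props;
  }
}
```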
private static final Logger LOG = LoggerFactory.getLogger(KafkaOutputFormat.class); - // Static constants for configuring Kafka producer. - private static final String BROKER_LIST = "bootstrap.servers"; - private static final String KEY_SERIALIZER = "key.serializer"; - private static final String VAL_SERIALIZER = "value.serializer"; private KafkaProducer producer; - @Override public void checkOutputSpecs(JobContext jobContext) throws IOException, InterruptedException { } @@ -91,9 +86,12 @@ public RecordWriter getRecordWriter(TaskAttemptContext context) Properties props = new Properties(); // Configure the properties for kafka. - props.put(BROKER_LIST, configuration.get(BROKER_LIST)); - props.put(KEY_SERIALIZER, configuration.get(KEY_SERIALIZER)); - props.put(VAL_SERIALIZER, configuration.get(VAL_SERIALIZER)); + props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, + configuration.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)); + props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, + configuration.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)); + props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, + configuration.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)); props.put("compression.type", configuration.get("compression.type")); if (!Strings.isNullOrEmpty(configuration.get("hasKey"))) { diff --git a/kafka-plugins-0.8/src/test/java/co/cask/hydrator/Kafka8BatchSourceTest.java b/kafka-plugins-0.8/src/test/java/co/cask/hydrator/KafkaBatchSourceTest.java similarity index 97% rename from kafka-plugins-0.8/src/test/java/co/cask/hydrator/Kafka8BatchSourceTest.java rename to kafka-plugins-0.8/src/test/java/co/cask/hydrator/KafkaBatchSourceTest.java index 706f773..91f9ea4 100644 --- a/kafka-plugins-0.8/src/test/java/co/cask/hydrator/Kafka8BatchSourceTest.java +++ b/kafka-plugins-0.8/src/test/java/co/cask/hydrator/KafkaBatchSourceTest.java @@ -52,6 +52,7 @@ import org.apache.twill.kafka.client.KafkaClientService; import org.apache.twill.kafka.client.KafkaPublisher; import org.apache.twill.zookeeper.ZKClientService; +import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -68,7 +69,7 @@ /** * Unit tests for our plugins. */ -public class Kafka8BatchSourceTest extends HydratorTestBase { +public class KafkaBatchSourceTest extends HydratorTestBase { private static final ArtifactSummary APP_ARTIFACT = new ArtifactSummary("data-pipeline", "1.0.0"); @ClassRule public static final TestConfiguration CONFIG = new TestConfiguration("explore.enabled", false); @@ -107,6 +108,14 @@ public static void setupTestClass() throws Exception { kafkaClient.startAndWait(); } + @AfterClass + public static void cleanup() { + kafkaClient.stopAndWait(); + kafkaServer.stopAndWait(); + zkClient.stopAndWait(); + zkServer.stopAndWait(); + } + @Test public void testKafkaSource() throws Exception { Schema schema = Schema.recordOf( diff --git a/kafka-plugins-0.8/src/test/java/co/cask/hydrator/KafkaSinkAndAlertsPublisherTest.java b/kafka-plugins-0.8/src/test/java/co/cask/hydrator/KafkaSinkAndAlertsPublisherTest.java new file mode 100644 index 0000000..ac6ab15 --- /dev/null +++ b/kafka-plugins-0.8/src/test/java/co/cask/hydrator/KafkaSinkAndAlertsPublisherTest.java @@ -0,0 +1,277 @@ +/* + * Copyright © 2018 Cask Data, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package co.cask.hydrator; + +import co.cask.cdap.api.artifact.ArtifactSummary; +import co.cask.cdap.api.data.format.StructuredRecord; +import co.cask.cdap.api.data.schema.Schema; +import co.cask.cdap.api.dataset.table.Table; +import co.cask.cdap.datapipeline.DataPipelineApp; +import co.cask.cdap.datapipeline.SmartWorkflow; +import co.cask.cdap.etl.api.Alert; +import co.cask.cdap.etl.mock.alert.NullAlertTransform; +import co.cask.cdap.etl.mock.batch.MockSource; +import co.cask.cdap.etl.mock.test.HydratorTestBase; +import co.cask.cdap.etl.proto.v2.ETLBatchConfig; +import co.cask.cdap.etl.proto.v2.ETLPlugin; +import co.cask.cdap.etl.proto.v2.ETLStage; +import co.cask.cdap.proto.ProgramRunStatus; +import co.cask.cdap.proto.artifact.AppRequest; +import co.cask.cdap.proto.id.ApplicationId; +import co.cask.cdap.proto.id.ArtifactId; +import co.cask.cdap.proto.id.NamespaceId; +import co.cask.cdap.test.ApplicationManager; +import co.cask.cdap.test.DataSetManager; +import co.cask.cdap.test.TestConfiguration; +import co.cask.cdap.test.WorkflowManager; +import co.cask.hydrator.plugin.alertpublisher.KafkaAlertPublisher; +import co.cask.hydrator.plugin.sink.Kafka; +import com.google.common.base.Charsets; +import com.google.common.base.Function; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Iterables; +import com.google.gson.Gson; +import org.apache.kafka.common.serialization.StringSerializer; +import org.apache.twill.common.Cancellable; +import org.apache.twill.internal.kafka.EmbeddedKafkaServer; +import org.apache.twill.internal.kafka.client.ZKKafkaClientService; +import org.apache.twill.internal.utils.Networks; +import org.apache.twill.internal.zookeeper.InMemoryZKServer; +import org.apache.twill.kafka.client.FetchedMessage; +import org.apache.twill.kafka.client.KafkaClientService; +import org.apache.twill.kafka.client.KafkaConsumer; +import org.apache.twill.zookeeper.ZKClientService; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; + +import java.io.File; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +/** + * Kafka Sink and Alerts Publisher test + */ +public class KafkaSinkAndAlertsPublisherTest extends HydratorTestBase { + private static final ArtifactSummary APP_ARTIFACT = new ArtifactSummary("data-pipeline", "1.0.0"); + @ClassRule + public static final TestConfiguration CONFIG = new TestConfiguration("explore.enabled", false); + + private static final Gson GSON = new Gson(); + + private static ZKClientService zkClient; + private static KafkaClientService kafkaClient; + private static InMemoryZKServer zkServer; + private static EmbeddedKafkaServer kafkaServer; + private static int kafkaPort; + + @BeforeClass + public static void setupTestClass() throws Exception { + ArtifactId parentArtifact = 
NamespaceId.DEFAULT.artifact(APP_ARTIFACT.getName(), APP_ARTIFACT.getVersion()); + + // add the data-pipeline artifact and mock plugins + setupBatchArtifacts(parentArtifact, DataPipelineApp.class); + + // add our plugins artifact with the data-pipeline artifact as its parent. + // this will make our plugins available to data-pipeline. + addPluginArtifact(NamespaceId.DEFAULT.artifact("example-plugins", "1.0.0"), + parentArtifact, + Kafka.class, + KafkaAlertPublisher.class, + StringSerializer.class); + + zkServer = InMemoryZKServer.builder().setDataDir(TMP_FOLDER.newFolder()).build(); + zkServer.startAndWait(); + + kafkaPort = Networks.getRandomPort(); + kafkaServer = new EmbeddedKafkaServer(generateKafkaConfig(zkServer.getConnectionStr(), + kafkaPort, TMP_FOLDER.newFolder())); + kafkaServer.startAndWait(); + + zkClient = ZKClientService.Builder.of(zkServer.getConnectionStr()).build(); + zkClient.startAndWait(); + + kafkaClient = new ZKKafkaClientService(zkClient); + kafkaClient.startAndWait(); + } + + @AfterClass + public static void cleanup() { + kafkaClient.stopAndWait(); + kafkaServer.stopAndWait(); + zkClient.stopAndWait(); + zkServer.stopAndWait(); + } + + @Test + public void testKafkaSinkAndAlertsPublisher() throws Exception { + Schema schema = Schema.recordOf( + "user", + Schema.Field.of("id", Schema.nullableOf(Schema.of(Schema.Type.LONG))), + Schema.Field.of("first", Schema.of(Schema.Type.STRING)), + Schema.Field.of("last", Schema.of(Schema.Type.STRING))); + + // create the pipeline config + String inputName = "sinkTestInput"; + + String usersTopic = "records"; + String alertsTopic = "alerts"; + Map sinkProperties = new HashMap<>(); + sinkProperties.put("brokers", "localhost:" + kafkaPort); + sinkProperties.put("referenceName", "kafkaTest"); + sinkProperties.put("topic", usersTopic); + sinkProperties.put("schema", schema.toString()); + sinkProperties.put("format", "csv"); + sinkProperties.put("key", "last"); + sinkProperties.put("async", "FALSE"); + sinkProperties.put("compressionType", "none"); + + Map alertProperties = new HashMap<>(); + alertProperties.put("brokers", "localhost:" + kafkaPort); + alertProperties.put("topic", alertsTopic); + + ETLStage source = new ETLStage("source", MockSource.getPlugin(inputName)); + ETLStage sink = + new ETLStage("sink", new ETLPlugin("Kafka", Kafka.PLUGIN_TYPE, sinkProperties, null)); + ETLStage transform = new ETLStage("nullAlert", NullAlertTransform.getPlugin("id")); + ETLStage alert = + new ETLStage("alert", new ETLPlugin("KafkaAlerts", KafkaAlertPublisher.PLUGIN_TYPE, alertProperties)); + + ETLBatchConfig pipelineConfig = ETLBatchConfig.builder("* * * * *") + .addStage(source) + .addStage(transform) + .addStage(sink) + .addStage(alert) + .addConnection(source.getName(), transform.getName()) + .addConnection(transform.getName(), sink.getName()) + .addConnection(transform.getName(), alert.getName()) + .build(); + + // create the pipeline + ApplicationId pipelineId = NamespaceId.DEFAULT.app("testKafkaSink"); + ApplicationManager appManager = deployApplication(pipelineId, new AppRequest<>(APP_ARTIFACT, pipelineConfig)); + + + Set expected = ImmutableSet.of("100,samuel,jackson", + "200,dwayne,johnson", + "300,christopher,walken", + "400,donald,trump"); + + List records = new ArrayList<>(); + for (String e : expected) { + String[] splits = e.split(","); + StructuredRecord record = + StructuredRecord.builder(schema) + .set("id", Long.parseLong(splits[0])) + .set("first", splits[1]) + .set("last", splits[2]) + .build(); + records.add(record); + } + 
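For reference, with "format" set to "csv" and "key" set to "last" as configured above, each record written through the sink should surface as one Kafka message on the records topic, keyed by the record's "last" field. A sketch of the expected shape of the first message; this is an illustration, not the plugin's code:

```java
import org.apache.kafka.clients.producer.ProducerRecord;

// Expected first message on the "records" topic:
// key = the record's "last" field, value = the CSV-formatted row.
ProducerRecord<String, String> firstMessage =
    new ProducerRecord<>("records", "jackson", "100,samuel,jackson");
```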
+    // Add a null record to get an alert
+    StructuredRecord nullRecord =
+      StructuredRecord.builder(schema)
+        .set("first", "terry")
+        .set("last", "crews")
+        .build();
+    records.add(nullRecord);
+
+    DataSetManager<Table>
sourceTable = getDataset(inputName); + MockSource.writeInput(sourceTable, records); + + WorkflowManager workflowManager = appManager.getWorkflowManager(SmartWorkflow.NAME); + workflowManager.start(); + workflowManager.waitForRun(ProgramRunStatus.COMPLETED, 1, TimeUnit.MINUTES); + + // Assert users + Set actual = readKafkaRecords(usersTopic, expected.size()); + Assert.assertEquals(expected, actual); + + // Assert alerts + Set actualAlerts = readKafkaRecords(alertsTopic, 1); + // NullAlertTransform always returns empty hash map in alert + Assert.assertEquals(ImmutableSet.of(new Alert(transform.getName(), new HashMap())), + ImmutableSet.copyOf(Iterables.transform(actualAlerts, + new Function() { + @Override + public Alert apply(String s) { + return GSON.fromJson(s, Alert.class); + } + } + ))); + } + + private Set readKafkaRecords(String topic, final int maxMessages) throws InterruptedException { + KafkaConsumer kafkaConsumer = kafkaClient.getConsumer(); + + final Set kafkaMessages = new HashSet<>(); + KafkaConsumer.Preparer preparer = kafkaConsumer.prepare(); + preparer.addFromBeginning(topic, 0); + + final CountDownLatch stopLatch = new CountDownLatch(1); + Cancellable cancellable = preparer.consume(new KafkaConsumer.MessageCallback() { + @Override + public long onReceived(Iterator messages) { + long nextOffset = 0; + while (messages.hasNext()) { + FetchedMessage message = messages.next(); + nextOffset = message.getNextOffset(); + String payload = Charsets.UTF_8.decode(message.getPayload()).toString(); + kafkaMessages.add(payload); + } + // We are done when maxMessages are received + if (kafkaMessages.size() >= maxMessages) { + stopLatch.countDown(); + } + return nextOffset; + } + + @Override + public void finished() { + // nothing to do + } + }); + + stopLatch.await(30, TimeUnit.SECONDS); + cancellable.cancel(); + return kafkaMessages; + } + + private static Properties generateKafkaConfig(String zkConnectStr, int port, File logDir) { + Properties prop = new Properties(); + prop.setProperty("log.dir", logDir.getAbsolutePath()); + prop.setProperty("port", Integer.toString(port)); + prop.setProperty("broker.id", "1"); + prop.setProperty("num.partitions", "1"); + prop.setProperty("zookeeper.connect", zkConnectStr); + prop.setProperty("zookeeper.connection.timeout.ms", "1000000"); + prop.setProperty("default.replication.factor", "1"); + return prop; + } + +} diff --git a/kafka-plugins-0.8/src/test/java/co/cask/hydrator/KafkaStreamingSourceTest.java b/kafka-plugins-0.8/src/test/java/co/cask/hydrator/KafkaStreamingSourceTest.java new file mode 100644 index 0000000..0950556 --- /dev/null +++ b/kafka-plugins-0.8/src/test/java/co/cask/hydrator/KafkaStreamingSourceTest.java @@ -0,0 +1,268 @@ +/* + * Copyright © 2018 Cask Data, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package co.cask.hydrator; + +import co.cask.cdap.api.artifact.ArtifactRange; +import co.cask.cdap.api.artifact.ArtifactSummary; +import co.cask.cdap.api.artifact.ArtifactVersion; +import co.cask.cdap.api.data.format.StructuredRecord; +import co.cask.cdap.api.data.schema.Schema; +import co.cask.cdap.api.dataset.table.Table; +import co.cask.cdap.common.utils.Networks; +import co.cask.cdap.common.utils.Tasks; +import co.cask.cdap.datapipeline.DataPipelineApp; +import co.cask.cdap.datastreams.DataStreamsApp; +import co.cask.cdap.datastreams.DataStreamsSparkLauncher; +import co.cask.cdap.etl.api.streaming.StreamingSource; +import co.cask.cdap.etl.mock.batch.MockSink; +import co.cask.cdap.etl.mock.test.HydratorTestBase; +import co.cask.cdap.etl.proto.v2.DataStreamsConfig; +import co.cask.cdap.etl.proto.v2.ETLPlugin; +import co.cask.cdap.etl.proto.v2.ETLStage; +import co.cask.cdap.proto.artifact.AppRequest; +import co.cask.cdap.proto.id.ApplicationId; +import co.cask.cdap.proto.id.ArtifactId; +import co.cask.cdap.proto.id.NamespaceId; +import co.cask.cdap.test.ApplicationManager; +import co.cask.cdap.test.DataSetManager; +import co.cask.cdap.test.SparkManager; +import co.cask.cdap.test.TestConfiguration; +import co.cask.hydrator.common.http.HTTPPollConfig; +import co.cask.hydrator.plugin.source.KafkaStreamingSource; +import com.google.common.base.Charsets; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.util.concurrent.Uninterruptibles; +import kafka.common.TopicAndPartition; +import kafka.serializer.DefaultDecoder; +import org.apache.spark.streaming.kafka.KafkaUtils; +import org.apache.twill.internal.kafka.EmbeddedKafkaServer; +import org.apache.twill.internal.kafka.client.ZKKafkaClientService; +import org.apache.twill.internal.zookeeper.InMemoryZKServer; +import org.apache.twill.kafka.client.Compression; +import org.apache.twill.kafka.client.KafkaClientService; +import org.apache.twill.kafka.client.KafkaPublisher; +import org.apache.twill.zookeeper.ZKClientService; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.File; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.concurrent.TimeUnit; + +/** + * Tests for Spark plugins. 
+ */ +public class KafkaStreamingSourceTest extends HydratorTestBase { + + @ClassRule + public static final TestConfiguration CONFIG = new TestConfiguration("explore.enabled", false); + + private static final ArtifactId DATAPIPELINE_ARTIFACT_ID = + NamespaceId.DEFAULT.artifact("data-pipeline", "4.3.2"); + private static final ArtifactId DATASTREAMS_ARTIFACT_ID = + NamespaceId.DEFAULT.artifact("data-streams", "4.3.2"); + private static final ArtifactSummary DATASTREAMS_ARTIFACT = + new ArtifactSummary("data-streams", "4.3.2"); + + private static ZKClientService zkClient; + private static KafkaClientService kafkaClient; + private static InMemoryZKServer zkServer; + private static EmbeddedKafkaServer kafkaServer; + private static int kafkaPort; + + @ClassRule + public static TemporaryFolder tmpFolder = new TemporaryFolder(); + + + @BeforeClass + public static void setupTest() throws Exception { + // add the artifact for data pipeline app + setupBatchArtifacts(DATAPIPELINE_ARTIFACT_ID, DataPipelineApp.class); + + setupStreamingArtifacts(DATASTREAMS_ARTIFACT_ID, DataStreamsApp.class); + + // add artifact for spark plugins + Set parents = ImmutableSet.of( + new ArtifactRange(NamespaceId.DEFAULT.getNamespace(), DATAPIPELINE_ARTIFACT_ID.getArtifact(), + new ArtifactVersion(DATAPIPELINE_ARTIFACT_ID.getVersion()), true, + new ArtifactVersion(DATAPIPELINE_ARTIFACT_ID.getVersion()), true), + new ArtifactRange(NamespaceId.DEFAULT.getNamespace(), DATASTREAMS_ARTIFACT_ID.getArtifact(), + new ArtifactVersion(DATASTREAMS_ARTIFACT_ID.getVersion()), true, + new ArtifactVersion(DATASTREAMS_ARTIFACT_ID.getVersion()), true) + ); + addPluginArtifact(NamespaceId.DEFAULT.artifact("spark-plugins", "1.0.0"), parents, + KafkaStreamingSource.class, KafkaUtils.class, DefaultDecoder.class, TopicAndPartition.class, + HTTPPollConfig.class); + + zkServer = InMemoryZKServer.builder().setDataDir(TMP_FOLDER.newFolder()).build(); + zkServer.startAndWait(); + + kafkaPort = Networks.getRandomPort(); + kafkaServer = new EmbeddedKafkaServer(generateKafkaConfig(zkServer.getConnectionStr(), + kafkaPort, TMP_FOLDER.newFolder())); + kafkaServer.startAndWait(); + + zkClient = ZKClientService.Builder.of(zkServer.getConnectionStr()).build(); + zkClient.startAndWait(); + + kafkaClient = new ZKKafkaClientService(zkClient); + kafkaClient.startAndWait(); + } + + @AfterClass + public static void cleanup() { + kafkaClient.stopAndWait(); + kafkaServer.stopAndWait(); + zkClient.stopAndWait(); + zkServer.stopAndWait(); + } + + @Test + public void testKafkaStreamingSource() throws Exception { + Schema schema = Schema.recordOf( + "user", + Schema.Field.of("id", Schema.of(Schema.Type.LONG)), + Schema.Field.of("first", Schema.of(Schema.Type.STRING)), + Schema.Field.of("last", Schema.of(Schema.Type.STRING))); + + Map properties = new HashMap<>(); + properties.put("referenceName", "kafkaPurchases"); + properties.put("brokers", "localhost:" + kafkaPort); + properties.put("topic", "users"); + properties.put("defaultInitialOffset", "-2"); + properties.put("format", "csv"); + properties.put("schema", schema.toString()); + + ETLStage source = new ETLStage("source", new ETLPlugin("Kafka", StreamingSource.PLUGIN_TYPE, properties, null)); + + DataStreamsConfig etlConfig = DataStreamsConfig.builder() + .addStage(source) + .addStage(new ETLStage("sink", MockSink.getPlugin("kafkaOutput"))) + .addConnection("source", "sink") + .setBatchInterval("1s") + .setStopGracefully(true) + .build(); + + AppRequest appRequest = new AppRequest<>(DATASTREAMS_ARTIFACT, 
etlConfig);
+    ApplicationId appId = NamespaceId.DEFAULT.app("KafkaSourceApp");
+    ApplicationManager appManager = deployApplication(appId, appRequest);
+
+    // write some messages to kafka
+    Map<String, String> messages = new HashMap<>();
+    messages.put("a", "1,samuel,jackson");
+    messages.put("b", "2,dwayne,johnson");
+    messages.put("c", "3,christopher,walken");
+    sendKafkaMessage("users", messages);
+
+    SparkManager sparkManager = appManager.getSparkManager(DataStreamsSparkLauncher.NAME);
+    sparkManager.start();
+    sparkManager.waitForStatus(true, 10, 1);
+
+    final DataSetManager<Table>
outputManager = getDataset("kafkaOutput"); + Tasks.waitFor( + ImmutableMap.of(1L, "samuel jackson", 2L, "dwayne johnson", 3L, "christopher walken"), + new Callable>() { + @Override + public Map call() throws Exception { + outputManager.flush(); + Map actual = new HashMap<>(); + for (StructuredRecord outputRecord : MockSink.readOutput(outputManager)) { + actual.put((Long) outputRecord.get("id"), outputRecord.get("first") + " " + outputRecord.get("last")); + } + return actual; + } + }, + 2, + TimeUnit.MINUTES); + + sparkManager.stop(); + sparkManager.waitForStatus(false, 10, 1); + + // clear the output table + MockSink.clear(outputManager); + + // now write some more messages to kafka and start the program again to make sure it picks up where it left off + messages = new HashMap<>(); + messages.put("d", "4,terry,crews"); + messages.put("e", "5,sylvester,stallone"); + sendKafkaMessage("users", messages); + + sparkManager.start(); + sparkManager.waitForStatus(true, 10, 1); + + Tasks.waitFor( + ImmutableMap.of(4L, "terry crews", 5L, "sylvester stallone"), + new Callable>() { + @Override + public Map call() throws Exception { + outputManager.flush(); + Map actual = new HashMap<>(); + for (StructuredRecord outputRecord : MockSink.readOutput(outputManager)) { + actual.put((Long) outputRecord.get("id"), outputRecord.get("first") + " " + outputRecord.get("last")); + } + return actual; + } + }, + 2, + TimeUnit.MINUTES); + + sparkManager.stop(); + } + + private static Properties generateKafkaConfig(String zkConnectStr, int port, File logDir) { + Properties prop = new Properties(); + prop.setProperty("log.dir", logDir.getAbsolutePath()); + prop.setProperty("port", Integer.toString(port)); + prop.setProperty("broker.id", "1"); + prop.setProperty("num.partitions", "1"); + prop.setProperty("zookeeper.connect", zkConnectStr); + prop.setProperty("zookeeper.connection.timeout.ms", "1000000"); + prop.setProperty("default.replication.factor", "1"); + return prop; + } + + private void sendKafkaMessage(@SuppressWarnings("SameParameterValue") String topic, Map messages) { + KafkaPublisher publisher = kafkaClient.getPublisher(KafkaPublisher.Ack.ALL_RECEIVED, Compression.NONE); + + // If publish failed, retry up to 20 times, with 100ms delay between each retry + // This is because leader election in Kafka 08 takes time when a topic is being created upon publish request. + int count = 0; + do { + KafkaPublisher.Preparer preparer = publisher.prepare(topic); + for (Map.Entry entry : messages.entrySet()) { + preparer.add(Charsets.UTF_8.encode(entry.getValue()), entry.getKey()); + } + try { + preparer.send().get(); + break; + } catch (Exception e) { + // Backoff if send failed. 
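+        // (Each retry sleeps 100 ms, so the loop allows roughly two seconds
+        // in total for leader election on the newly created topic to finish.)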
+ Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS); + } + } while (count++ < 20); + } + + +} diff --git a/kafka-plugins-0.9/pom.xml b/kafka-plugins-0.9/pom.xml deleted file mode 100644 index 933edb0..0000000 --- a/kafka-plugins-0.9/pom.xml +++ /dev/null @@ -1,78 +0,0 @@ - - - - kafka-plugins - co.cask.hydrator - 1.8.1 - - 4.0.0 - - Apache Kafka 0.9 plugins - kafka-plugins - 1.8.1-0.9.0.1 - - - - org.apache.kafka - kafka_2.10 - ${kafka9.version} - - - org.slf4j - slf4j-log4j12 - - - - - - - - - org.apache.felix - maven-bundle-plugin - 3.3.0 - true - - - <_exportcontents>co.cask.hydrator.plugin.*;org.apache.spark.streaming.kafka.*; - kafka.serializer.*;kafka.common;org.apache.kafka.common.serialization.*; - org.apache.kafka.clients.consumer.* - *;inline=false;scope=compile - true - lib - - - - - package - - bundle - - - - - - co.cask - cdap-maven-plugin - 1.0.0 - - - system:cdap-data-pipeline[4.3.0-SNAPSHOT,6.0.0-SNAPSHOT) - - - - - create-artifact-config - prepare-package - - create-plugin-json - - - - - - - - diff --git a/pom.xml b/pom.xml index ed89620..d10a316 100644 --- a/pom.xml +++ b/pom.xml @@ -8,7 +8,7 @@ co.cask.hydrator kafka-plugins pom - 1.8.1 + 1.8.2-SNAPSHOT @@ -20,7 +20,7 @@ kafka-plugins-0.8 - kafka-plugins-0.9 + kafka-plugins-0.10 @@ -66,11 +66,12 @@ UTF-8 4.3.2 1.8.2 - 1.6.1 + 1.6.1 + 2.2.0 widgets docs 0.8.2.2 - 0.9.0.1 + 0.10.2.0 2.3.0 ${project.basedir} @@ -80,39 +81,50 @@ co.cask.cdap cdap-etl-api ${cdap.version} + provided co.cask.cdap cdap-etl-api-spark ${cdap.version} - - - co.cask.cdap - cdap-common - ${cdap.version} - - - co.cask.cdap - cdap-formats - ${cdap.version} + provided co.cask.cdap - hydrator-test + cdap-common ${cdap.version} - test + provided + + + org.apache.twill + twill-core + + + org.apache.twill + twill-yarn + + co.cask.cdap - cdap-data-pipeline + cdap-formats ${cdap.version} - test co.cask.cdap - cdap-data-streams + hydrator-test ${cdap.version} test + + + org.apache.kafka + kafka_2.10 + + + org.apache.spark + spark-core_2.10 + + co.cask.hydrator @@ -242,69 +254,9 @@ 3.0 - org.apache.spark - spark-streaming-kafka_2.10 - ${spark.version} - - - org.apache.spark - spark-mllib_2.10 - ${spark.version} - provided - - - org.apache.spark - spark-streaming_2.10 - ${spark.version} - provided - - - org.apache.spark - spark-core_2.10 - ${spark.version} - provided - - - org.slf4j - slf4j-log4j12 - - - log4j - log4j - - - org.apache.hadoop - hadoop-client - - - com.esotericsoftware.reflectasm - reflectasm - - - org.apache.curator - curator-recipes - - - org.tachyonproject - tachyon-client - - - org.scala-lang - scala-compiler - - - org.eclipse.jetty.orbit - javax.servlet - - - - net.java.dev.jets3t - jets3t - - + com.google.guava + guava + 13.0.1 @@ -325,15 +277,6 @@ maven-bundle-plugin 2.5.4 true - - - <_exportcontents>co.cask.hydrator.plugin.*;org.apache.spark.streaming.kafka.*; - kafka.serializer.*;kafka.common; - *;inline=false;scope=compile - true - lib - - package @@ -350,6 +293,10 @@ org.apache.maven.plugins maven-surefire-plugin 2.14.1 + + -Xmx2048m -Djava.awt.headless=true -XX:MaxPermSize=256m -XX:+UseConcMarkSweepGC -XX:OnOutOfMemoryError="kill -9 %p" -XX:+HeapDumpOnOutOfMemoryError + false + org.apache.felix From d91236ff4792610511e2e576ed539a9c2e53b449 Mon Sep 17 00:00:00 2001 From: Poorna Chandra Date: Mon, 4 Jun 2018 14:38:27 -0700 Subject: [PATCH 2/5] Address review comments --- kafka-plugins-0.10/docs/KAFKASOURCE.md | 2 +- .../docs/Kafka-alert-publisher.md | 2 +- .../alertpublisher/KafkaAlertPublisher.java | 5 ++- 
.../plugin/batchSource/KafkaBatchSource.java | 11 ++---- .../hydrator/plugin/common/KafkaHelpers.java | 19 +++++++++- .../sink/{Kafka.java => KafkaBatchSink.java} | 20 ++++++---- .../hydrator/plugin/source/KafkaConfig.java | 37 +++---------------- .../plugin/source/KafkaStreamingSource.java | 13 ++----- .../KafkaSinkAndAlertsPublisherTest.java | 21 +++-------- .../hydrator/KafkaStreamingSourceTest.java | 1 - 10 files changed, 55 insertions(+), 76 deletions(-) rename kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/{Kafka.java => KafkaBatchSink.java} (94%) diff --git a/kafka-plugins-0.10/docs/KAFKASOURCE.md b/kafka-plugins-0.10/docs/KAFKASOURCE.md index 912d358..4d00321 100644 --- a/kafka-plugins-0.10/docs/KAFKASOURCE.md +++ b/kafka-plugins-0.10/docs/KAFKASOURCE.md @@ -67,7 +67,7 @@ to the users. ## License and Trademarks -Copyright © 2017 Cask Data, Inc. +Copyright © 2018 Cask Data, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/kafka-plugins-0.10/docs/Kafka-alert-publisher.md b/kafka-plugins-0.10/docs/Kafka-alert-publisher.md index f358fa5..2d53d74 100644 --- a/kafka-plugins-0.10/docs/Kafka-alert-publisher.md +++ b/kafka-plugins-0.10/docs/Kafka-alert-publisher.md @@ -45,7 +45,7 @@ CDAP IRC Channel: #cdap on irc.freenode.net ## License and Trademarks -Copyright © 2017 Cask Data, Inc. +Copyright © 2018 Cask Data, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/alertpublisher/KafkaAlertPublisher.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/alertpublisher/KafkaAlertPublisher.java index 8872014..5f71edd 100644 --- a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/alertpublisher/KafkaAlertPublisher.java +++ b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/alertpublisher/KafkaAlertPublisher.java @@ -27,6 +27,7 @@ import co.cask.cdap.etl.api.AlertPublisherContext; import co.cask.cdap.etl.api.PipelineConfigurer; import co.cask.hydrator.common.KeyValueListParser; +import co.cask.hydrator.plugin.common.KafkaHelpers; import com.google.common.base.Strings; import com.google.gson.Gson; import kafka.common.Topic; @@ -37,11 +38,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.annotation.Nullable; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.Properties; -import javax.annotation.Nullable; /** * Kafka Alert Publisher @@ -169,6 +170,8 @@ private void validate() { throw new IllegalArgumentException(String.format("Topic name %s is not a valid kafka topic. Please provide " + "valid kafka topic name. 
%s", topic, e.getMessage())); } + + KafkaHelpers.validateKerberosSetting(principal, keytabLocation); } } } diff --git a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaBatchSource.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaBatchSource.java index d03b018..54f05c1 100644 --- a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaBatchSource.java +++ b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaBatchSource.java @@ -50,14 +50,9 @@ import org.apache.hadoop.mapreduce.Job; import org.apache.kafka.clients.consumer.ConsumerConfig; -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; import javax.annotation.Nullable; +import java.io.IOException; +import java.util.*; /** * Kafka batch source. @@ -398,6 +393,8 @@ public void validate() { format, messageSchema, e.getMessage()), e); } } + + KafkaHelpers.validateKerberosSetting(principal, keytabLocation); } } diff --git a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/common/KafkaHelpers.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/common/KafkaHelpers.java index dda5173..b59b91a 100644 --- a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/common/KafkaHelpers.java +++ b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/common/KafkaHelpers.java @@ -16,15 +16,16 @@ package co.cask.hydrator.plugin.common; +import com.google.common.base.Strings; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.common.TopicPartition; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.annotation.Nullable; import java.util.HashMap; import java.util.List; import java.util.Map; -import javax.annotation.Nullable; /** * Utility class for Kafka operations @@ -102,4 +103,20 @@ public static void setupKerberosLogin(Map conf, @Nullabl principal, keytabLocation); } } + + /** + * Validates whether the principal and keytab are both set or both of them are null/empty + * + * @param principal Kerberos principal + * @param keytab Kerberos keytab for the principal + */ + public static void validateKerberosSetting(@Nullable String principal, @Nullable String keytab) { + if (Strings.isNullOrEmpty(principal) != Strings.isNullOrEmpty(keytab)) { + String emptyField = Strings.isNullOrEmpty(principal) ? "principal" : "keytab"; + String message = emptyField + " is empty. When Kerberos security is enabled for Kafka, " + + "then both the principal and the keytab have " + + "to be specified. 
If Kerberos is not enabled, then both should be empty."; + throw new IllegalArgumentException(message); + } + } } diff --git a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/Kafka.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/KafkaBatchSink.java similarity index 94% rename from kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/Kafka.java rename to kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/KafkaBatchSink.java index ceded55..f97d6f9 100644 --- a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/Kafka.java +++ b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/KafkaBatchSink.java @@ -43,10 +43,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.annotation.Nullable; import java.util.HashMap; import java.util.List; import java.util.Map; -import javax.annotation.Nullable; /** * Kafka sink to write to Kafka @@ -54,8 +54,8 @@ @Plugin(type = BatchSink.PLUGIN_TYPE) @Name("Kafka") @Description("KafkaSink to write events to kafka") -public class Kafka extends ReferenceBatchSink { - private static final Logger LOG = LoggerFactory.getLogger(Kafka.class); +public class KafkaBatchSink extends ReferenceBatchSink { + private static final Logger LOG = LoggerFactory.getLogger(KafkaBatchSink.class); // Configuration for the plugin. private final Config producerConfig; @@ -65,7 +65,7 @@ public class Kafka extends ReferenceBatchSink { // Static constants for configuring Kafka producer. private static final String ACKS_REQUIRED = "acks"; - public Kafka(Config producerConfig) { + public KafkaBatchSink(Config producerConfig) { super(producerConfig); this.producerConfig = producerConfig; this.kafkaOutputFormatProvider = new KafkaOutputFormatProvider(producerConfig); @@ -75,9 +75,7 @@ public Kafka(Config producerConfig) { public void configurePipeline(PipelineConfigurer pipelineConfigurer) { super.configurePipeline(pipelineConfigurer); - if (!producerConfig.async.equalsIgnoreCase("true") && !producerConfig.async.equalsIgnoreCase("false")) { - throw new IllegalArgumentException("Async flag has to be either TRUE or FALSE."); - } + producerConfig.validate(); } @Override @@ -186,6 +184,14 @@ public Config(String brokers, String async, String key, String topic, String for this.kafkaProperties = kafkaProperties; this.compressionType = compressionType; } + + private void validate() { + if (!async.equalsIgnoreCase("true") && !async.equalsIgnoreCase("false")) { + throw new IllegalArgumentException("Async flag has to be either TRUE or FALSE."); + } + + KafkaHelpers.validateKerberosSetting(principal, keytabLocation); + } } private static class KafkaOutputFormatProvider implements OutputFormatProvider { diff --git a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/source/KafkaConfig.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/source/KafkaConfig.java index 9f38083..46a33a2 100644 --- a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/source/KafkaConfig.java +++ b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/source/KafkaConfig.java @@ -24,21 +24,16 @@ import co.cask.cdap.format.RecordFormats; import co.cask.hydrator.common.KeyValueListParser; import co.cask.hydrator.common.ReferencePluginConfig; -import com.google.common.annotations.VisibleForTesting; +import co.cask.hydrator.plugin.common.KafkaHelpers; import com.google.common.base.Joiner; import com.google.common.base.Splitter; import com.google.common.base.Strings; import org.apache.kafka.common.TopicPartition; 
+import javax.annotation.Nullable; import java.io.IOException; import java.io.Serializable; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import javax.annotation.Nullable; +import java.util.*; /** * Conf for Kafka streaming source. @@ -138,30 +133,6 @@ public KafkaConfig() { maxRatePerPartition = 1000; } - @VisibleForTesting - public KafkaConfig(String referenceName, String brokers, String topic, String schema, String format, - String timeField, String keyField, String partitionField, String offsetField) { - this(referenceName, brokers, topic, null, null, null, schema, format, - timeField, keyField, partitionField, offsetField); - } - - public KafkaConfig(String referenceName, String brokers, String topic, String partitions, - String initialPartitionOffsets, Long defaultInitialOffset, String schema, String format, - String timeField, String keyField, String partitionField, String offsetField) { - super(referenceName); - this.brokers = brokers; - this.topic = topic; - this.partitions = partitions; - this.initialPartitionOffsets = initialPartitionOffsets; - this.defaultInitialOffset = defaultInitialOffset; - this.schema = schema; - this.format = format; - this.timeField = timeField; - this.keyField = keyField; - this.partitionField = partitionField; - this.offsetField = offsetField; - } - public String getTopic() { return topic; } @@ -426,5 +397,7 @@ public void validate() { format, messageSchema, e.getMessage()), e); } } + + KafkaHelpers.validateKerberosSetting(principal, keytabLocation); } } diff --git a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/source/KafkaStreamingSource.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/source/KafkaStreamingSource.java index 55d679e..e44622e 100644 --- a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/source/KafkaStreamingSource.java +++ b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/source/KafkaStreamingSource.java @@ -52,14 +52,7 @@ import org.slf4j.LoggerFactory; import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.Set; +import java.util.*; /** * Kafka Streaming source @@ -99,7 +92,9 @@ public JavaDStream getStream(StreamingContext context) throws kafkaParams.put("key.deserializer", ByteArrayDeserializer.class.getCanonicalName()); kafkaParams.put("value.deserializer", ByteArrayDeserializer.class.getCanonicalName()); KafkaHelpers.setupKerberosLogin(kafkaParams, conf.getPrincipal(), conf.getKeytabLocation()); - // Create a unique string for the group.id using the pipeline name and the topic + // Create a unique string for the group.id using the pipeline name and the topic. + // group.id is a Kafka consumer property that uniquely identifies the group of + // consumer processes to which this consumer belongs. 
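+    // For example, a pipeline named "purchases" reading topic "users" gets the
+    // group.id "9-5-purchases-users"; prefixing the two lengths keeps different
+    // (pipeline, topic) pairs from colliding once the strings are joined.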
kafkaParams.put("group.id", Joiner.on("-").join(context.getPipelineName().length(), conf.getTopic().length(), context.getPipelineName(), conf.getTopic())); kafkaParams.putAll(conf.getKafkaProperties()); diff --git a/kafka-plugins-0.10/src/test/java/co/cask/hydrator/KafkaSinkAndAlertsPublisherTest.java b/kafka-plugins-0.10/src/test/java/co/cask/hydrator/KafkaSinkAndAlertsPublisherTest.java index 0318aca..411c356 100644 --- a/kafka-plugins-0.10/src/test/java/co/cask/hydrator/KafkaSinkAndAlertsPublisherTest.java +++ b/kafka-plugins-0.10/src/test/java/co/cask/hydrator/KafkaSinkAndAlertsPublisherTest.java @@ -39,7 +39,7 @@ import co.cask.cdap.test.TestConfiguration; import co.cask.cdap.test.WorkflowManager; import co.cask.hydrator.plugin.alertpublisher.KafkaAlertPublisher; -import co.cask.hydrator.plugin.sink.Kafka; +import co.cask.hydrator.plugin.sink.KafkaBatchSink; import com.google.common.base.Charsets; import com.google.common.base.Function; import com.google.common.collect.ImmutableSet; @@ -55,21 +55,10 @@ import org.apache.twill.kafka.client.KafkaClientService; import org.apache.twill.kafka.client.KafkaConsumer; import org.apache.twill.zookeeper.ZKClientService; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; +import org.junit.*; import java.io.File; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.Set; +import java.util.*; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -100,7 +89,7 @@ public static void setupTestClass() throws Exception { // this will make our plugins available to data-pipeline. 
addPluginArtifact(NamespaceId.DEFAULT.artifact("example-plugins", "1.0.0"), parentArtifact, - Kafka.class, + KafkaBatchSink.class, KafkaAlertPublisher.class, RangeAssignor.class, StringSerializer.class); @@ -157,7 +146,7 @@ public void testKafkaSinkAndAlertsPublisher() throws Exception { ETLStage source = new ETLStage("source", MockSource.getPlugin(inputName)); ETLStage sink = - new ETLStage("sink", new ETLPlugin("Kafka", Kafka.PLUGIN_TYPE, sinkProperties, null)); + new ETLStage("sink", new ETLPlugin("Kafka", KafkaBatchSink.PLUGIN_TYPE, sinkProperties, null)); ETLStage transform = new ETLStage("nullAlert", NullAlertTransform.getPlugin("id")); ETLStage alert = new ETLStage("alert", new ETLPlugin("KafkaAlerts", KafkaAlertPublisher.PLUGIN_TYPE, alertProperties)); diff --git a/kafka-plugins-0.10/src/test/java/co/cask/hydrator/KafkaStreamingSourceTest.java b/kafka-plugins-0.10/src/test/java/co/cask/hydrator/KafkaStreamingSourceTest.java index 5ba26dc..e124cc6 100644 --- a/kafka-plugins-0.10/src/test/java/co/cask/hydrator/KafkaStreamingSourceTest.java +++ b/kafka-plugins-0.10/src/test/java/co/cask/hydrator/KafkaStreamingSourceTest.java @@ -94,7 +94,6 @@ public class KafkaStreamingSourceTest extends HydratorTestBase { @ClassRule public static TemporaryFolder tmpFolder = new TemporaryFolder(); - @BeforeClass public static void setupTest() throws Exception { // add the artifact for data pipeline app From c9c3699420ffb35ec3326157d3036974dd9753f6 Mon Sep 17 00:00:00 2001 From: Poorna Chandra Date: Tue, 5 Jun 2018 17:14:19 -0700 Subject: [PATCH 3/5] Add SASL config to Kafka output format --- .../co/cask/hydrator/plugin/common/KafkaHelpers.java | 3 ++- .../cask/hydrator/plugin/sink/KafkaOutputFormat.java | 12 +++++++----- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/common/KafkaHelpers.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/common/KafkaHelpers.java index b59b91a..99608bf 100644 --- a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/common/KafkaHelpers.java +++ b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/common/KafkaHelpers.java @@ -19,6 +19,7 @@ import com.google.common.base.Strings; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.config.SaslConfigs; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -90,7 +91,7 @@ public static void setupKerberosLogin(Map conf, @Nullabl if (principal != null && keytabLocation != null) { LOG.debug("Adding Kerberos login conf to Kafka for principal {} and keytab {}", principal, keytabLocation); - conf.put("sasl.jaas.config", String.format("com.sun.security.auth.module.Krb5LoginModule required \n" + + conf.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("com.sun.security.auth.module.Krb5LoginModule required \n" + " useKeyTab=true \n" + " storeKey=true \n" + " useTicketCache=false \n" + diff --git a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/KafkaOutputFormat.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/KafkaOutputFormat.java index f3a1e42..1910991 100644 --- a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/KafkaOutputFormat.java +++ b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/KafkaOutputFormat.java @@ -19,13 +19,10 @@ import com.google.common.base.Strings; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.Text; -import org.apache.hadoop.mapreduce.JobContext; 
-import org.apache.hadoop.mapreduce.OutputCommitter; -import org.apache.hadoop.mapreduce.OutputFormat; -import org.apache.hadoop.mapreduce.RecordWriter; -import org.apache.hadoop.mapreduce.TaskAttemptContext; +import org.apache.hadoop.mapreduce.*; import org.apache.kafka.clients.producer.KafkaProducer; import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.common.config.SaslConfigs; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -114,6 +111,11 @@ public RecordWriter getRecordWriter(TaskAttemptContext context) LOG.info("Property key: {}, value: {}", entry.getKey().substring(11), entry.getValue()); } + // Add Kerberos login information if any + if (!Strings.isNullOrEmpty(configuration.get(SaslConfigs.SASL_JAAS_CONFIG))) { + props.put(SaslConfigs.SASL_JAAS_CONFIG, configuration.get(SaslConfigs.SASL_JAAS_CONFIG)); + } + // CDAP-9178: cached the producer object to avoid being created on every batch interval if (producer == null) { producer = new org.apache.kafka.clients.producer.KafkaProducer<>(props); From eed1b99ba3b3a25e01512c05d3dc713f946479a0 Mon Sep 17 00:00:00 2001 From: Poorna Chandra Date: Tue, 5 Jun 2018 17:35:52 -0700 Subject: [PATCH 4/5] Fix imports --- .../hydrator/plugin/batchSource/KafkaBatchSource.java | 8 ++++++-- .../co/cask/hydrator/plugin/sink/KafkaOutputFormat.java | 6 +++++- .../java/co/cask/hydrator/plugin/source/KafkaConfig.java | 7 ++++++- .../hydrator/plugin/source/KafkaStreamingSource.java | 9 ++++++++- .../cask/hydrator/KafkaSinkAndAlertsPublisherTest.java | 6 +++++- 5 files changed, 30 insertions(+), 6 deletions(-) diff --git a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaBatchSource.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaBatchSource.java index 54f05c1..3eda57e 100644 --- a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaBatchSource.java +++ b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/batchSource/KafkaBatchSource.java @@ -52,8 +52,12 @@ import javax.annotation.Nullable; import java.io.IOException; -import java.util.*; - +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; /** * Kafka batch source. 
*/ diff --git a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/KafkaOutputFormat.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/KafkaOutputFormat.java index 1910991..5a5bd40 100644 --- a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/KafkaOutputFormat.java +++ b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/KafkaOutputFormat.java @@ -19,7 +19,11 @@ import com.google.common.base.Strings; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.Text; -import org.apache.hadoop.mapreduce.*; +import org.apache.hadoop.mapreduce.JobContext; +import org.apache.hadoop.mapreduce.OutputCommitter; +import org.apache.hadoop.mapreduce.OutputFormat; +import org.apache.hadoop.mapreduce.RecordWriter; +import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.kafka.clients.producer.KafkaProducer; import org.apache.kafka.clients.producer.ProducerConfig; import org.apache.kafka.common.config.SaslConfigs; diff --git a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/source/KafkaConfig.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/source/KafkaConfig.java index 46a33a2..fcbbe6e 100644 --- a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/source/KafkaConfig.java +++ b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/source/KafkaConfig.java @@ -33,7 +33,12 @@ import javax.annotation.Nullable; import java.io.IOException; import java.io.Serializable; -import java.util.*; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; /** * Conf for Kafka streaming source. diff --git a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/source/KafkaStreamingSource.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/source/KafkaStreamingSource.java index e44622e..58f67d3 100644 --- a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/source/KafkaStreamingSource.java +++ b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/source/KafkaStreamingSource.java @@ -52,7 +52,14 @@ import org.slf4j.LoggerFactory; import java.nio.ByteBuffer; -import java.util.*; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Set; /** * Kafka Streaming source diff --git a/kafka-plugins-0.10/src/test/java/co/cask/hydrator/KafkaSinkAndAlertsPublisherTest.java b/kafka-plugins-0.10/src/test/java/co/cask/hydrator/KafkaSinkAndAlertsPublisherTest.java index 411c356..d9dc825 100644 --- a/kafka-plugins-0.10/src/test/java/co/cask/hydrator/KafkaSinkAndAlertsPublisherTest.java +++ b/kafka-plugins-0.10/src/test/java/co/cask/hydrator/KafkaSinkAndAlertsPublisherTest.java @@ -55,7 +55,11 @@ import org.apache.twill.kafka.client.KafkaClientService; import org.apache.twill.kafka.client.KafkaConsumer; import org.apache.twill.zookeeper.ZKClientService; -import org.junit.*; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; import java.io.File; import java.util.*; From 1e7a9d51c7e5afbf3dae71720c9d539a560ca75e Mon Sep 17 00:00:00 2001 From: Poorna Chandra Date: Thu, 14 Jun 2018 16:00:38 -0700 Subject: [PATCH 5/5] Replace Kafka constant to prevent class not found exception --- .../java/co/cask/hydrator/plugin/common/KafkaHelpers.java | 6 +++--- 
.../co/cask/hydrator/plugin/sink/KafkaOutputFormat.java | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/common/KafkaHelpers.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/common/KafkaHelpers.java index 99608bf..3ea2704 100644 --- a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/common/KafkaHelpers.java +++ b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/common/KafkaHelpers.java @@ -19,20 +19,20 @@ import com.google.common.base.Strings; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.config.SaslConfigs; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.annotation.Nullable; import java.util.HashMap; import java.util.List; import java.util.Map; +import javax.annotation.Nullable; /** * Utility class for Kafka operations */ public final class KafkaHelpers { private static final Logger LOG = LoggerFactory.getLogger(KafkaHelpers.class); + public static final String SASL_JAAS_CONFIG = "sasl.jaas.config"; // This class cannot be instantiated private KafkaHelpers() { @@ -91,7 +91,7 @@ public static void setupKerberosLogin(Map conf, @Nullabl if (principal != null && keytabLocation != null) { LOG.debug("Adding Kerberos login conf to Kafka for principal {} and keytab {}", principal, keytabLocation); - conf.put(SaslConfigs.SASL_JAAS_CONFIG, String.format("com.sun.security.auth.module.Krb5LoginModule required \n" + + conf.put(SASL_JAAS_CONFIG, String.format("com.sun.security.auth.module.Krb5LoginModule required \n" + " useKeyTab=true \n" + " storeKey=true \n" + " useTicketCache=false \n" + diff --git a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/KafkaOutputFormat.java b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/KafkaOutputFormat.java index 5a5bd40..5578629 100644 --- a/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/KafkaOutputFormat.java +++ b/kafka-plugins-0.10/src/main/java/co/cask/hydrator/plugin/sink/KafkaOutputFormat.java @@ -16,6 +16,7 @@ package co.cask.hydrator.plugin.sink; +import co.cask.hydrator.plugin.common.KafkaHelpers; import com.google.common.base.Strings; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.Text; @@ -26,7 +27,6 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.kafka.clients.producer.KafkaProducer; import org.apache.kafka.clients.producer.ProducerConfig; -import org.apache.kafka.common.config.SaslConfigs; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -116,8 +116,8 @@ public RecordWriter getRecordWriter(TaskAttemptContext context) } // Add Kerberos login information if any - if (!Strings.isNullOrEmpty(configuration.get(SaslConfigs.SASL_JAAS_CONFIG))) { - props.put(SaslConfigs.SASL_JAAS_CONFIG, configuration.get(SaslConfigs.SASL_JAAS_CONFIG)); + if (!Strings.isNullOrEmpty(configuration.get(KafkaHelpers.SASL_JAAS_CONFIG))) { + props.put(KafkaHelpers.SASL_JAAS_CONFIG, configuration.get(KafkaHelpers.SASL_JAAS_CONFIG)); } // CDAP-9178: cached the producer object to avoid being created on every batch interval
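For reference, the JAAS configuration that setupKerberosLogin builds, and that KafkaOutputFormat now forwards under the KafkaHelpers.SASL_JAAS_CONFIG key, renders to something like the sketch below. The keyTab path and principal are hypothetical, and the final two lines are assumed from the method's principal/keytab parameters since the format string is truncated in this excerpt:

```java
// Hypothetical rendered value of the "sasl.jaas.config" producer/consumer property.
String jaasConf =
    "com.sun.security.auth.module.Krb5LoginModule required \n" +
    " useKeyTab=true \n" +
    " storeKey=true \n" +
    " useTicketCache=false \n" +
    " keyTab=\"/etc/security/keytabs/kafka-client.keytab\" \n" +  // assumed
    " principal=\"kafka-client@EXAMPLE.COM\";";                   // assumed
```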