/
KafkaSourceITCase.java
100 lines (86 loc) · 3.55 KB
/
KafkaSourceITCase.java
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
/*
* Copyright 2022-2023 Bytedance Ltd. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.bytedance.bitsail.test.integration.legacy;
import com.bytedance.bitsail.common.configuration.BitSailConfiguration;
import com.bytedance.bitsail.test.integration.AbstractIntegrationTest;
import com.bytedance.bitsail.test.integration.legacy.container.KafkaCluster;
import com.bytedance.bitsail.test.integration.utils.JobConfUtils;
import com.alibaba.fastjson.JSONObject;
import com.google.common.collect.Maps;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Map;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Integration test for the legacy Kafka source: spins up an embedded Kafka cluster,
 * continuously produces JSON records into a test topic, and runs a kafka-to-print job
 * against it.
 */
public class KafkaSourceITCase extends AbstractIntegrationTest {
  private static final Logger LOG = LoggerFactory.getLogger(KafkaSourceITCase.class);

  private final String topicName = "testTopic";
  private final KafkaCluster kafkaCluster = new KafkaCluster();

  // Held as fields so after() can release them; the original leaked both the
  // producer and the scheduler, leaving the produce loop running past teardown.
  private KafkaProducer<String, String> producer;
  private ScheduledThreadPoolExecutor produceService;

  /**
   * Builds a single JSON record with an ID, a synthetic NAME, and the current timestamp.
   *
   * @param index monotonically increasing record id
   * @return the record serialized as a JSON string
   */
  private static String constructARecord(int index) {
    JSONObject jsonObject = new JSONObject();
    jsonObject.put("ID", index);
    jsonObject.put("NAME", "text_" + index);
    jsonObject.put("DATE", System.currentTimeMillis());
    return jsonObject.toJSONString();
  }

  @Before
  public void before() {
    kafkaCluster.startService();
    kafkaCluster.createTopic(topicName);
    startSendDataToKafka();
  }

  /**
   * Starts a background task that pushes 5000 records into the test topic once per second
   * until {@link #after()} shuts it down.
   */
  private void startSendDataToKafka() {
    producer = kafkaCluster.getProducer(topicName);
    produceService = new ScheduledThreadPoolExecutor(1);
    AtomicInteger sendCount = new AtomicInteger(0);
    produceService.scheduleAtFixedRate(() -> {
      try {
        for (int i = 0; i < 5000; ++i) {
          String record = constructARecord(sendCount.getAndIncrement());
          producer.send(new ProducerRecord<>(topicName, record));
        }
      } catch (Exception e) {
        // Log the cause; the original dropped the exception entirely.
        LOG.error("failed to send a record", e);
      } finally {
        LOG.info(">>> kafka produce count: {}", sendCount.get());
      }
    }, 0, 1, TimeUnit.SECONDS);
  }

  @Test
  public void testKafkaSource() throws Exception {
    BitSailConfiguration configuration = JobConfUtils.fromClasspath("kafka_to_print.json");
    updateConfiguration(configuration);
    submitJob(configuration);
  }

  /**
   * Points the job's reader at the embedded Kafka cluster and the test topic.
   *
   * @param jobConfiguration job configuration loaded from the classpath template
   */
  protected void updateConfiguration(BitSailConfiguration jobConfiguration) {
    Map<String, String> properties = Maps.newHashMap();
    properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, KafkaCluster.getBootstrapServer());
    properties.put("topic", topicName);
    jobConfiguration.set("job.reader.connector.connector", properties);
  }

  @After
  public void after() {
    // Stop the produce loop and close the client before tearing down the cluster,
    // so no sends race against the broker shutdown.
    if (produceService != null) {
      produceService.shutdownNow();
    }
    if (producer != null) {
      producer.close();
    }
    kafkaCluster.stopService();
  }
}