
KAA-1279: Fixed some log appenders and verifiers errors
sashadidukh committed Oct 6, 2016
1 parent f43d862 commit fb3de14
Showing 11 changed files with 119 additions and 94 deletions.
@@ -78,7 +78,9 @@ protected Map<String, GenericAvroConverter<GenericRecord>> initialValue() {

};

-
+ /**
+  * Instantiates a new CassandraLogAppender.
+  */
public CassandraLogAppender() {
super(CassandraConfig.class);
scheduler.scheduleWithFixedDelay(new Runnable() {
@@ -137,13 +139,15 @@ public void run() {
listener.onSuccess();
cassandraSuccessLogCount.getAndAdd(logCount);
break;
+ default:
+   break;
}
LOG.debug("[{}] appended {} logs to cassandra collection", tableName, logEventPack.getEvents().size());
} else {
listener.onInternalError();
}
- } catch (Exception e) {
-   LOG.warn("Got exception. Can't process log events", e);
+ } catch (Exception ex) {
+   LOG.warn("Got exception. Can't process log events", ex);
listener.onInternalError();
}
}
@@ -167,8 +171,8 @@ protected void initFromConfiguration(LogAppenderDto appender, CassandraConfig co
executor = Executors.newFixedThreadPool(executorPoolSize);
callbackExecutor = Executors.newFixedThreadPool(callbackPoolSize);
LOG.info("Cassandra log appender initialized");
- } catch (Exception e) {
-   LOG.error("Failed to init cassandra log appender: ", e);
+ } catch (Exception ex) {
+   LOG.error("Failed to init cassandra log appender: ", ex);
}
}

@@ -229,9 +233,9 @@ protected List<CassandraLogEventDto> generateCassandraLogEvent(LogEventPack logE
GenericRecord decodedLog = eventConverter.decodeBinary(logEvent.getLogData());
events.add(new CassandraLogEventDto(header, decodedLog));
}
- } catch (IOException e) {
-   LOG.error("Unexpected IOException while decoding LogEvents", e);
-   throw e;
+ } catch (IOException ex) {
+   LOG.error("Unexpected IOException while decoding LogEvents", ex);
+   throw ex;
}
return events;
}
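For reference, decodeBinary on Kaa's GenericAvroConverter wraps standard Avro binary decoding. A minimal sketch of the equivalent flow using plain Avro APIs (the helper class and its signature are assumptions for illustration, not Kaa code):

    import java.io.IOException;
    import org.apache.avro.Schema;
    import org.apache.avro.generic.GenericDatumReader;
    import org.apache.avro.generic.GenericRecord;
    import org.apache.avro.io.BinaryDecoder;
    import org.apache.avro.io.DecoderFactory;

    public class AvroDecodeSketch {
      // Decode one Avro-encoded log payload into a GenericRecord.
      public static GenericRecord decodeBinary(byte[] logData, Schema schema) throws IOException {
        GenericDatumReader<GenericRecord> reader = new GenericDatumReader<>(schema);
        BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(logData, null);
        return reader.read(null, decoder); // throws IOException on malformed input
      }
    }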
@@ -286,12 +290,12 @@ public void onSuccess(ResultSet result) {
}

@Override
- public void onFailure(Throwable t) {
+ public void onFailure(Throwable throwable) {
cassandraFailureLogCount.getAndAdd(size);
- LOG.warn("Failed to store record", t);
- if (t instanceof UnsupportedFeatureException) {
+ LOG.warn("Failed to store record", throwable);
+ if (throwable instanceof UnsupportedFeatureException) {
callback.onRemoteError();
- } else if (t instanceof IOException) {
+ } else if (throwable instanceof IOException) {
callback.onConnectionError();
} else {
callback.onInternalError();
@@ -87,6 +87,9 @@ public class CassandraLogEventDao implements LogEventDao {
private String keyspaceName;
private CassandraConfig configuration;

+ /**
+  * Instantiates a new CassandraLogEventDao.
+  */
public CassandraLogEventDao(CassandraConfig configuration) throws UnknownHostException {
if (configuration == null) {
throw new IllegalArgumentException("Configuration shouldn't be null");
@@ -249,8 +252,10 @@ public List<CassandraLogEventDto> save(List<CassandraLogEventDto> logEventDtoLis

@Override
public ListenableFuture<ResultSet> saveAsync(List<CassandraLogEventDto> logEventDtoList, String tableName,
- GenericAvroConverter<GenericRecord> eventConverter, GenericAvroConverter<GenericRecord> headerConverter,
- GenericAvroConverter<GenericRecord> clientProfileConverter, GenericAvroConverter<GenericRecord> serverProfileConverter,
+ GenericAvroConverter<GenericRecord> eventConverter,
+ GenericAvroConverter<GenericRecord> headerConverter,
+ GenericAvroConverter<GenericRecord> clientProfileConverter,
+ GenericAvroConverter<GenericRecord> serverProfileConverter,
String clientProfileJson, String serverProfileJson)
throws IOException {
LOG.debug("Execute async batch request for cassandra table {}", tableName);
@@ -388,6 +393,7 @@ private Insert[] prepareQuery(List<CassandraLogEventDto> logEventDtoList, String
} else {
throw new RuntimeException(ABSENT_SERVER_PROFILE_ERROR);
}
+ break;
case SERVER_BINARY:
if (serverProfileBinary != null) {
insert.value(element.getColumnName(), clientProfileBinary);
@@ -402,6 +408,8 @@
reuseTsValue = formatTs(reuseTsValue, element);
insert.value(element.getColumnName(), reuseTsValue);
break;
+ default:
+   break;
}
}
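The break added after the SERVER case above is a behavioral fix, not just style: without it, a matching SERVER case falls through and executes the SERVER_BINARY branch as well. A self-contained demonstration of the fall-through (illustrative enum, not the actual Kaa column type):

    public class FallThroughDemo {
      enum ColumnKind { SERVER, SERVER_BINARY, OTHER }

      public static void main(String[] args) {
        switch (ColumnKind.SERVER) {
          case SERVER:
            System.out.println("SERVER branch");
            // no break here: control falls through into the next case
          case SERVER_BINARY:
            System.out.println("SERVER_BINARY branch"); // printed too!
            break;
          default:
            break;
        }
      }
    }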

@@ -421,20 +429,20 @@ private String formatTs(String tsValue, ColumnMappingElement element) {
if (pattern == null || pattern.isEmpty()) {
tsValue = ts + "";
} else {
- ThreadLocal<SimpleDateFormat> formatterTL = dateFormatMap.get(pattern);
- if (formatterTL == null) {
-   formatterTL = new ThreadLocal<SimpleDateFormat>() {
+ ThreadLocal<SimpleDateFormat> formatterTl = dateFormatMap.get(pattern);
+ if (formatterTl == null) {
+   formatterTl = new ThreadLocal<SimpleDateFormat>() {
@Override
protected SimpleDateFormat initialValue() {
return new SimpleDateFormat(pattern);
}
};
- dateFormatMap.putIfAbsent(pattern, formatterTL);
+ dateFormatMap.putIfAbsent(pattern, formatterTl);
}
- SimpleDateFormat formatter = formatterTL.get();
+ SimpleDateFormat formatter = formatterTl.get();
if (formatter == null) {
formatter = new SimpleDateFormat(pattern);
- formatterTL.set(formatter);
+ formatterTl.set(formatter);
}
tsValue = formatter.format(new Date(ts));
}
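The formatterTL-to-formatterTl renames above are naming-convention fixes, but the pattern itself is worth noting: SimpleDateFormat is not thread-safe, so formatTs caches one instance per thread per pattern. A compact Java 8 sketch of the same idea (class and method names are illustrative, not Kaa code):

    import java.text.SimpleDateFormat;
    import java.util.Date;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    public class TsFormatters {
      // One SimpleDateFormat per (pattern, thread): the class is not thread-safe.
      private static final ConcurrentMap<String, ThreadLocal<SimpleDateFormat>> CACHE =
          new ConcurrentHashMap<>();

      public static String format(long ts, String pattern) {
        ThreadLocal<SimpleDateFormat> formatter = CACHE.computeIfAbsent(
            pattern, p -> ThreadLocal.withInitial(() -> new SimpleDateFormat(p)));
        return formatter.get().format(new Date(ts));
      }

      public static void main(String[] args) {
        System.out.println(format(System.currentTimeMillis(), "yyyy-MM-dd HH:mm:ss"));
      }
    }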
@@ -24,6 +24,9 @@ public class CassandraLogEventDto {
private final RecordHeader header;
private final GenericRecord event;

+ /**
+  * Instantiates a new CassandraLogEventDto.
+  */
public CassandraLogEventDto(RecordHeader header, GenericRecord event) {
super();
this.header = header;
@@ -66,6 +66,9 @@ protected Map<String, GenericAvroConverter<GenericRecord>> initialValue() {

};

+ /**
+  * Instantiates a new KafkaLogAppender.
+  */
public KafkaLogAppender() {
super(KafkaConfig.class);
scheduler.scheduleWithFixedDelay(new Runnable() {
@@ -123,8 +126,8 @@ public void run() {
} else {
listener.onInternalError();
}
- } catch (Exception e) {
-   LOG.warn("Got exception. Can't process log events", e);
+ } catch (Exception ex) {
+   LOG.warn("Got exception. Can't process log events", ex);
listener.onInternalError();
}
}
@@ -145,8 +148,8 @@ protected void initFromConfiguration(LogAppenderDto appender, KafkaConfig config
executor = Executors.newFixedThreadPool(executorPoolSize);
topicName = configuration.getTopic();
LOG.info("Kafka log appender initialized");
- } catch (Exception e) {
-   LOG.error("Failed to init kafka log appender: ", e);
+ } catch (Exception ex) {
+   LOG.error("Failed to init kafka log appender: ", ex);
}

}
@@ -165,9 +168,9 @@ protected List<KafkaLogEventDto> generateKafkaLogEvent(LogEventPack logEventPack
GenericRecord decodedLog = eventConverter.decodeBinary(logEvent.getLogData());
events.add(new KafkaLogEventDto(header, decodedLog));
}
- } catch (IOException e) {
-   LOG.error("Unexpected IOException while decoding LogEvents", e);
-   throw e;
+ } catch (IOException ex) {
+   LOG.error("Unexpected IOException while decoding LogEvents", ex);
+   throw ex;
}
return events;
}
@@ -208,14 +211,14 @@ private LogAppenderCallback(LogDeliveryCallback callback, AtomicInteger kafkaSuc
}

@Override
- public void onCompletion(RecordMetadata record, Exception e) {
-   if (e == null) {
+ public void onCompletion(RecordMetadata record, Exception ex) {
+   if (ex == null) {
kafkaSuccessLogCount.getAndAdd(size);
callback.onSuccess();
} else {
kafkaFailureLogCount.getAndAdd(size);
- LOG.warn("Failed to store record", e);
- if (e instanceof IOException) {
+ LOG.warn("Failed to store record", ex);
+ if (ex instanceof IOException) {
callback.onConnectionError();
} else {
callback.onInternalError();
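The renamed ex parameter follows the contract of Kafka's producer Callback: onCompletion fires once per send, with a null exception on success and a non-null one on failure. A minimal standalone callback illustrating that contract (the logging choices are illustrative):

    import org.apache.kafka.clients.producer.Callback;
    import org.apache.kafka.clients.producer.RecordMetadata;

    public class LoggingCallback implements Callback {
      @Override
      public void onCompletion(RecordMetadata metadata, Exception ex) {
        if (ex == null) {
          // success: metadata is non-null and carries topic/partition/offset
          System.out.printf("stored at %s-%d@%d%n",
              metadata.topic(), metadata.partition(), metadata.offset());
        } else {
          // failure: metadata may be null; ex explains the cause
          System.err.println("send failed: " + ex);
        }
      }
    }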
@@ -43,13 +43,16 @@ public class KafkaLogEventDao implements LogEventDao {
private static final String KEY_SERIALIZER = "org.apache.kafka.common.serialization.StringSerializer";
private static final String VALUE_SERIALIZER = "org.apache.kafka.common.serialization.StringSerializer";

- private final Random RANDOM = new Random();
+ private static final Random RANDOM = new Random();

private KafkaProducer<String, String> producer;
private KafkaConfig configuration;
private String topicName;
private int partitionCount;

+ /**
+  * Instantiates a new KafkaLogEventDao.
+  */
public KafkaLogEventDao(KafkaConfig configuration) {
if (configuration == null) {
throw new IllegalArgumentException("Configuration shouldn't be null");
@@ -58,13 +61,13 @@ public KafkaLogEventDao(KafkaConfig configuration) {
this.configuration = configuration;
this.topicName = configuration.getTopic();
this.partitionCount = configuration.getPartitionCount();
- Properties kafkaProperties = new Properties();
StringBuilder serverList = new StringBuilder();
for (KafkaServer server : configuration.getKafkaServers()) {
serverList.append(server.getHost() + ":" + server.getPort() + ",");
}
serverList = serverList.deleteCharAt(serverList.length() - 1);
LOG.info("Init kafka cluster with property {}={}", ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, serverList);
+ Properties kafkaProperties = new Properties();
kafkaProperties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, serverList.toString());
LOG.info("Init kafka cluster with property {}={}", ProducerConfig.ACKS_CONFIG,
configuration.getKafkaAcknowledgement());
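Moving the Properties construction below the server-list loop keeps the bootstrap wiring in reading order: join the host:port pairs, then populate the producer config. A sketch of that wiring with placeholder hosts (the serializer class strings are taken from the constants earlier in this file; everything else is an assumption):

    import java.util.Properties;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerConfig;

    public class ProducerBootstrap {
      public static KafkaProducer<String, String> create() {
        // Placeholder hosts; in the DAO this string is joined from KafkaServer entries.
        String serverList = "kafka1:9092,kafka2:9092";
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, serverList);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
            "org.apache.kafka.common.serialization.StringSerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
            "org.apache.kafka.common.serialization.StringSerializer");
        return new KafkaProducer<>(props);
      }
    }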
@@ -97,11 +100,11 @@ public List<Future<RecordMetadata>> save(List<KafkaLogEventDto> logEventDtoList,
for (KafkaLogEventDto dto : logEventDtoList) {
ProducerRecord<String, String> recordToWrite;
if (configuration.getUseDefaultPartitioner()) {
- recordToWrite = new ProducerRecord<String, String>(topicName, getKey(dto), formKafkaJSON(dto,
+ recordToWrite = new ProducerRecord<String, String>(topicName, getKey(dto), formKafkaJson(dto,
eventConverter, headerConverter));
} else {
- recordToWrite = new ProducerRecord<String, String>(topicName, calculatePartitionID(dto), getKey(dto),
-   formKafkaJSON(dto, eventConverter, headerConverter));
+ recordToWrite = new ProducerRecord<String, String>(topicName, calculatePartitionId(dto), getKey(dto),
+   formKafkaJson(dto, eventConverter, headerConverter));
}
results.add(producer.send(recordToWrite, callback));
}
@@ -116,7 +119,7 @@ public void close() {
}
}

- private int calculatePartitionID(KafkaLogEventDto eventDto) {
+ private int calculatePartitionId(KafkaLogEventDto eventDto) {
return eventDto.hashCode() % partitionCount;
}
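calculatePartitionId maps an event's hashCode onto the configured partition count. Note that hashCode may be negative, so a hardened variant (an assumption on my part, not part of this commit) would use Math.floorMod to keep the result in range:

    public class Partitioning {
      // Math.floorMod keeps the result in [0, partitionCount) even for negative hashes.
      public static int partitionFor(Object eventDto, int partitionCount) {
        return Math.floorMod(eventDto.hashCode(), partitionCount);
      }

      public static void main(String[] args) {
        System.out.println(partitionFor("some-event-key", 8));
      }
    }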

@@ -135,15 +138,15 @@ private String parseAcknowledgement(String record) {
}
}

- private String formKafkaJSON(KafkaLogEventDto dto, GenericAvroConverter<GenericRecord> eventConverter,
+ private String formKafkaJson(KafkaLogEventDto dto, GenericAvroConverter<GenericRecord> eventConverter,
GenericAvroConverter<GenericRecord> headerConverter) throws IOException {
- String eventJSON = eventConverter.encodeToJson(dto.getEvent());
- String headerJSON = headerConverter.encodeToJson(dto.getHeader());
+ String eventJson = eventConverter.encodeToJson(dto.getEvent());
+ String headerJson = headerConverter.encodeToJson(dto.getHeader());
StringBuilder result = new StringBuilder("{");
- if (headerJSON != null && !headerJSON.isEmpty()) {
-   result.append("\"header\":" + headerJSON + ",");
+ if (headerJson != null && !headerJson.isEmpty()) {
+   result.append("\"header\":" + headerJson + ",");
}
- result.append("\"event\":" + eventJSON + "}");
+ result.append("\"event\":" + eventJson + "}");
return result.toString();
}
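formKafkaJson splices two pre-encoded JSON fragments into one envelope of the shape {"header":...,"event":...}, omitting the header when it is empty. A runnable sketch with placeholder fragments showing the resulting payload:

    public class KafkaJsonShape {
      public static void main(String[] args) {
        String headerJson = "{\"endpointKeyHash\":\"abc\"}"; // placeholder fragment
        String eventJson = "{\"level\":\"INFO\",\"message\":\"hello\"}"; // placeholder fragment
        StringBuilder result = new StringBuilder("{");
        if (headerJson != null && !headerJson.isEmpty()) {
          result.append("\"header\":").append(headerJson).append(",");
        }
        result.append("\"event\":").append(eventJson).append("}");
        // prints {"header":{"endpointKeyHash":"abc"},"event":{"level":"INFO","message":"hello"}}
        System.out.println(result);
      }
    }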

@@ -28,6 +28,9 @@ public class KafkaLogEventDto implements Serializable {
private final RecordHeader header;
private final GenericRecord event;

+ /**
+  * Instantiates a new KafkaLogEventDto.
+  */
public KafkaLogEventDto(RecordHeader header, GenericRecord event) {
super();
this.header = header;
@@ -83,7 +83,7 @@ public OracleNoSqlLogAppender() {
private static String getHostName() {
try {
return InetAddress.getLocalHost().getHostName();
- } catch (UnknownHostException e) {
+ } catch (UnknownHostException ex) {
return "";
}
}
@@ -96,11 +96,11 @@ public void doAppend(LogEventPack logEventPack, RecordHeader header, LogDelivery
try {
doAppendGenericAvro(logEventPack, header);
listener.onSuccess();
- } catch (FaultException e) {
-   LOG.error("Unable to append logs due to remote exception!", e);
+ } catch (FaultException ex) {
+   LOG.error("Unable to append logs due to remote exception!", ex);
listener.onRemoteError();
- } catch (Exception e) {
-   LOG.error("Unable to append logs!", e);
+ } catch (Exception ex) {
+   LOG.error("Unable to append logs!", ex);
listener.onInternalError();
}
} else {
@@ -117,8 +117,8 @@ public void doAppend(LogEventPack logEventPack, RecordHeader header, LogDelivery
protected void initFromConfiguration(LogAppenderDto appender, OracleNoSqlConfig configuration) {
try {
kvStore = initKvStore(configuration);
- } catch (Exception e) {
-   LOG.error("Failed to init kvStore: ", e);
+ } catch (Exception ex) {
+   LOG.error("Failed to init kvStore: ", ex);
}
}

@@ -141,9 +141,9 @@ private void doAppendGenericAvro(LogEventPack logEventPack, RecordHeader header)
binaryDecoder = DecoderFactory.get().binaryDecoder(event.getLogData(), binaryDecoder);
try {
recordData = datumReader.read(recordData, binaryDecoder);
- } catch (IOException e) {
-   LOG.error("[{}] Unable to read log event!", e);
-   throw e;
+ } catch (IOException ex) {
+   LOG.error("[{}] Unable to read log event!", ex);
+   throw ex;
}
wrapperRecord.put(RecordWrapperSchemaGenerator.RECORD_HEADER_FIELD, header);
wrapperRecord.put(RecordWrapperSchemaGenerator.RECORD_DATA_FIELD, recordData);
@@ -165,9 +165,9 @@ private void initialize(LogEventPack logEventPack) throws Exception {
Schema userSchema = new Schema.Parser().parse(logEventPack.getLogSchema().getSchema());
datumReader = new GenericDatumReader<GenericRecord>(userSchema);
wrapperRecord = new GenericData.Record(recordWrapperSchema);
- } catch (Exception e) {
+ } catch (Exception ex) {
LOG.error("[{}] Unable to initialize parameters for log event pack.", getName());
- throw e;
+ throw ex;
}

}
@@ -231,8 +231,6 @@ private KVStore initKvStore(OracleNoSqlConfig configuration) throws Exception {
helperHostPorts[i] = node.getHost() + ":" + node.getPort();
}

- KVStoreConfig config = new KVStoreConfig(configuration.getStoreName(), helperHostPorts);
-
Properties securityProperties = new Properties();
if (configuration.getUsername() != null) {
username = configuration.getUsername();
@@ -270,6 +268,8 @@ private KVStore initKvStore(OracleNoSqlConfig configuration) throws Exception {
if (configuration.getSslTrustStoreType() != null) {
securityProperties.put(KVSecurityConstants.SSL_TRUSTSTORE_TYPE_PROPERTY, configuration.getSslTrustStoreType());
}
+
+ KVStoreConfig config = new KVStoreConfig(configuration.getStoreName(), helperHostPorts);
config.setSecurityProperties(securityProperties);

KVStore kvStore = KVStoreFactory.getStore(config);
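The relocation above creates the KVStoreConfig only after every security property has been collected, keeping construction next to its single use. A sketch of the resulting ordering, reusing only the Oracle NoSQL calls visible in this diff (store name and helper host are placeholders):

    import java.util.Properties;
    import oracle.kv.KVStore;
    import oracle.kv.KVStoreConfig;
    import oracle.kv.KVStoreFactory;

    public class KvStoreBootstrap {
      public static KVStore connect() {
        Properties securityProperties = new Properties();
        // ...populate username, transport, and SSL entries as initKvStore does...

        // Build the config only once the property set is complete.
        KVStoreConfig config = new KVStoreConfig("kvstore", "nosql1:5000");
        config.setSecurityProperties(securityProperties);
        return KVStoreFactory.getStore(config);
      }
    }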
