One more fix of code style
Kirill380 committed Sep 30, 2016
1 parent 28b70aa commit 94d9e16
Showing 19 changed files with 118 additions and 142 deletions.
@@ -47,9 +47,11 @@ public void close() throws IOException {
   @Override
   public void init(LogAppenderDto appenderDto, FileConfig config, Path filePath) {
     LOG.info(
-        "[{}][{}] Initializing with rollingFileNamePatern: {}, rollingMaxHistory: {}, triggerMaxFileSize: {}, encoderPattern: {}",
+        "[{}][{}] Initializing with rollingFileNamePatern: {}, rollingMaxHistory: {},"
+            + " triggerMaxFileSize: {}, encoderPattern: {}",
         appenderDto.getTenantId(), appenderDto.getApplicationId(),
-        config.getRollingFileNamePatern(), config.getRollingMaxHistory(), config.getTriggerMaxFileSize(), config.getEncoderPattern());
+        config.getRollingFileNamePatern(), config.getRollingMaxHistory(),
+        config.getTriggerMaxFileSize(), config.getEncoderPattern());

     LoggerContext loggerContext = (LoggerContext) LoggerFactory.getILoggerFactory();
     rfAppender = new RollingFileAppender();
@@ -62,7 +64,8 @@ public void init(LogAppenderDto appenderDto, FileConfig config, Path filePath) {
     rollingPolicy.setParent(rfAppender);
     rollingPolicy.start();

-    SizeBasedTriggeringPolicy triggeringPolicy = new ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy();
+    SizeBasedTriggeringPolicy triggeringPolicy =
+        new ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy();
     triggeringPolicy.setMaxFileSize(config.getTriggerMaxFileSize());
     triggeringPolicy.start();

@@ -76,7 +79,8 @@ public void init(LogAppenderDto appenderDto, FileConfig config, Path filePath) {
     rfAppender.setTriggeringPolicy(triggeringPolicy);
     rfAppender.start();

-    logger = loggerContext.getLogger(appenderDto.getTenantId() + "." + appenderDto.getApplicationToken());
+    logger = loggerContext.getLogger(appenderDto.getTenantId() + "."
+        + appenderDto.getApplicationToken());
     logger.setLevel(Level.ALL);
     logger.addAppender(rfAppender);
     LOG.debug("[{}][{}] Initialized with context {}", appenderDto.getTenantId(),
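Most of the wrapping above is purely cosmetic, and it is free at runtime: the Java compiler folds adjacent string literals joined with + into a single constant, so a split log message compiles to exactly the same string as the one-line original. A minimal sketch demonstrating the folding (class name and literals are illustrative, not from the Kaa codebase):

public class ConstantFoldingDemo {
  public static void main(String[] args) {
    String oneLine = "triggerMaxFileSize: {}, encoderPattern: {}";
    String wrapped = "triggerMaxFileSize: {},"
        + " encoderPattern: {}";
    // Both are compile-time constant expressions, so both are folded and
    // interned to the same String object; reference equality holds.
    System.out.println(oneLine == wrapped);  // prints: true
  }
}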
@@ -28,20 +28,20 @@

 import java.util.List;

-/**
- * The Interface FlumeEventBuilder.
- */
+
 public abstract class FlumeEventBuilder {

   private static final Logger LOG = LoggerFactory.getLogger(FlumeEventBuilder.class);

   public abstract void init(FlumeConfig configuration);

-  public abstract List<Event> generateEvents(String appToken, LogSchema schema, List<LogEvent> logEvents,
-      ProfileInfo clientProfile, ProfileInfo serverProfile, RecordHeader header);
+  public abstract List<Event> generateEvents(String appToken, LogSchema schema,
+                                             List<LogEvent> logEvents,
+                                             ProfileInfo clientProfile,
+                                             ProfileInfo serverProfile, RecordHeader header);

   /**
-   * This method generate flume events from own data structure <code>LogEventPack</code>
+   * This method generate flume events from own data structure <code>LogEventPack</code>.
    *
    * @param eventPack the event pack
    * @param header the record header
@@ -48,7 +48,7 @@ public KaaRestTemplate(String host, int port) {
   }

   /**
-   * Initialize KaaRestTempalte using following format host1:port1,host2:port2
+   * Initialize KaaRestTempalte using following format host1:port1,host2:port2.
    */
   public KaaRestTemplate(String hostPortList) {
     if (hostPortList == null) {
@@ -73,8 +73,17 @@ public KaaRestTemplate(String hostPortList) {
   }

   private void checkHostPortLists(String[] hosts, int[] ports) {
-    if ((hosts.length != ports.length) && (hosts != null)) {
-      throw new IllegalArgumentException("Length of arrays of hosts and ports must be the same length and not null");
+    if (hosts == null) {
+      throw new IllegalArgumentException("Parameter hosts can't be null.");
+    }
+
+    if (ports == null) {
+      throw new IllegalArgumentException("Parameter ports can't be null.");
+    }
+
+    if (hosts.length != ports.length) {
+      throw new IllegalArgumentException("Length of arrays of hosts "
+          + "and ports must be the same length");
     } else {
       this.hosts = hosts;
       this.ports = ports;
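This hunk fixes a real defect rather than style: in the old guard, hosts.length is dereferenced before hosts != null is ever evaluated, so a null array triggers a NullPointerException instead of the intended IllegalArgumentException (and with &&, the null check could never fire on its own). The rewrite validates each precondition before any dereference. A self-contained sketch of that validate-early pattern (class and method names are illustrative, not from the Kaa codebase):

public final class HostPortValidator {

  private HostPortValidator() {
  }

  /** Fails fast on null input before any dereference, then compares lengths. */
  public static void validate(String[] hosts, int[] ports) {
    if (hosts == null) {
      throw new IllegalArgumentException("Parameter hosts can't be null.");
    }
    if (ports == null) {
      throw new IllegalArgumentException("Parameter ports can't be null.");
    }
    // Both arrays are known to be non-null here, so .length is safe.
    if (hosts.length != ports.length) {
      throw new IllegalArgumentException("hosts and ports must be the same length");
    }
  }

  public static void main(String[] args) {
    validate(new String[] {"host1", "host2"}, new int[] {80, 443});  // passes
    validate(null, new int[] {80});  // clear IllegalArgumentException, not an NPE
  }
}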
@@ -115,21 +124,23 @@ protected <T> T doExecute(URI url, HttpMethod method, RequestCallback requestCal
         if (maxRetry <= 0) {
           logger.error("Failed to connect to ({}:{})", getCurHost(), getCurPort(), ex);
           throw new ResourceAccessException(
-              "I/O error on " + method.name() + " request for \"" + url + "\":" + ex.getMessage(), new IOException(ex));
+              "I/O error on " + method.name() + " request for \"" + url + "\":"
+                  + ex.getMessage(), new IOException(ex));
         } else {
           maxRetry--;
         }
         try {
           setNewRequestFactory(index);
-        } catch (Exception e) {
-          logger.info("Failed to initialize new request factory ({}:{})", getCurHost(), getCurPort(), e);
+        } catch (Exception exception) {
+          logger.info("Failed to initialize new request factory ({}:{})",
+              getCurHost(), getCurPort(), exception);
           continue;
         }
         url = updateURL(url);
         isRequestFactorySet = true;
       }
-    } catch (RestClientException e) {
-      throw e;
+    } catch (RestClientException ex) {
+      throw ex;
     }
   }
 }
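For context, the loop this hunk edits implements host failover: each connection failure burns one unit of the retry budget and switches the request factory to the next host:port pair until a request succeeds or the budget runs out. A stripped-down sketch of that pattern (all names here are illustrative; the real KaaRestTemplate logic is more involved):

import java.io.IOException;

public class FailoverDemo {

  interface Request<T> {
    T execute(String host, int port) throws IOException;
  }

  private final String[] hosts = {"host1", "host2"};
  private final int[] ports = {8080, 8081};
  private int index = 0;

  /** Tries endpoints in turn, giving up once the retry budget is spent. */
  <T> T executeWithFailover(Request<T> request, int maxRetry) throws IOException {
    while (true) {
      try {
        return request.execute(hosts[index], ports[index]);
      } catch (IOException ex) {
        if (maxRetry <= 0) {
          throw new IOException("All endpoints failed", ex);
        }
        maxRetry--;
        index = (index + 1) % hosts.length;  // rotate to the next endpoint
      }
    }
  }

  public static void main(String[] args) throws IOException {
    FailoverDemo demo = new FailoverDemo();
    String body = demo.executeWithFailover(
        (host, port) -> host + ":" + port,  // stub request that always succeeds
        3);
    System.out.println(body);
  }
}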
@@ -37,7 +37,7 @@ public interface LogSchemaService {
   List<LogSchemaDto> findLogSchemasByAppId(String applicationId);

   /**
-   * Find all Log Schema versions for Application with specific id
+   * Find all Log Schema versions for Application with specific id.
    *
    * @param applicationId the id of Application
    * @return List of Log Schema versions
@@ -21,9 +21,6 @@

 import java.util.List;

-/**
- * Server profile service
- */
 public interface ServerProfileService {

   /**
@@ -33,42 +33,25 @@
 import java.util.List;
 import java.util.Map;

-/**
- * The Class LogAppender.
- */
+
 public abstract class AbstractLogAppender<T extends SpecificRecordBase> implements LogAppender {

-  /**
-   * The Constant LOG.
-   */
   private static final Logger LOG = LoggerFactory.getLogger(AbstractLogAppender.class);

-  /**
-   * The Constant LOG_HEADER_VERSION.
-   */
   private static final int LOG_HEADER_VERSION = 1;
   private final Class<T> configurationClass;
-  /**
-   * The converters.
-   */
-  Map<String, GenericAvroConverter<GenericRecord>> converters = new HashMap<>();
-  /**
-   * The appender id.
-   */
+
+  private Map<String, GenericAvroConverter<GenericRecord>> converters = new HashMap<>();
+
   private String appenderId;
-  /**
-   * The name.
-   */
+
   private String name;
-  /**
-   * The application token.
-   */
+
   private String applicationToken;
-  /**
-   * The header.
-   */
+
   private List<LogHeaderStructureDto> header;
-  private int minSchemaVersion, maxSchemaVersion;
+  private int minSchemaVersion;
+  private int maxSchemaVersion;
   private boolean confirmDelivery;

   public AbstractLogAppender(Class<T> configurationClass) {
@@ -82,17 +65,27 @@ public AbstractLogAppender(Class<T> configurationClass) {
    * @param header the header
    * @param listener the listener
    */
-  public abstract void doAppend(LogEventPack logEventPack, RecordHeader header, LogDeliveryCallback listener);
+  public abstract void doAppend(LogEventPack logEventPack, RecordHeader header,
+                                LogDeliveryCallback listener);

+  @Override
+  public void doAppend(LogEventPack logEventPack, LogDeliveryCallback listener) {
+    if (logEventPack != null) {
+      doAppend(logEventPack, generateHeader(logEventPack), listener);
+    } else {
+      LOG.warn("Can't append log events. LogEventPack object is null.");
+    }
+  }
+
   /**
    * Change parameters of log appender.
    *
    * @param appender the appender
    * @param configuration the configuration
    */
+
   protected abstract void initFromConfiguration(LogAppenderDto appender, T configuration);

+
   public void initLogAppender(LogAppenderDto appender) {
     this.minSchemaVersion = appender.getMinLogSchemaVersion();
     this.maxSchemaVersion = appender.getMaxLogSchemaVersion();
@@ -102,8 +95,8 @@ public void initLogAppender(LogAppenderDto appender) {
       AvroByteArrayConverter<T> converter = new AvroByteArrayConverter<>(configurationClass);
       T configuration = converter.fromByteArray(rawConfiguration);
       initFromConfiguration(appender, configuration);
-    } catch (IOException e) {
-      LOG.error("Unable to parse configuration for appender '" + getName() + "'", e);
+    } catch (IOException ex) {
+      LOG.error("Unable to parse configuration for appender '" + getName() + "'", ex);
     }
   }

@@ -165,14 +158,7 @@ public void init(LogAppenderDto appender) {
     initLogAppender(appender);
   }

-  @Override
-  public void doAppend(LogEventPack logEventPack, LogDeliveryCallback listener) {
-    if (logEventPack != null) {
-      doAppend(logEventPack, generateHeader(logEventPack), listener);
-    } else {
-      LOG.warn("Can't append log events. LogEventPack object is null.");
-    }
-  }
+

   @Override
   public boolean isSchemaVersionSupported(int version) {
@@ -192,27 +178,32 @@ public boolean isDeliveryConfirmationRequired() {
    * @return the list
    * @throws IOException the io exception
    */
-  protected List<LogEventDto> generateLogEvent(LogEventPack logEventPack, RecordHeader header) throws IOException {
-    LOG.debug("Generate LogEventDto objects from LogEventPack [{}] and header [{}]", logEventPack, header);
+  protected List<LogEventDto> generateLogEvent(LogEventPack logEventPack, RecordHeader header)
+      throws IOException {
+    LOG.debug("Generate LogEventDto objects from LogEventPack [{}] and header [{}]",
+        logEventPack, header);
     List<LogEventDto> events = new ArrayList<>(logEventPack.getEvents().size());
-    GenericAvroConverter<GenericRecord> eventConverter = getConverter(logEventPack.getLogSchema().getSchema());
-    GenericAvroConverter<GenericRecord> headerConverter = getConverter(header.getSchema().toString());
+    GenericAvroConverter<GenericRecord> eventConverter = getConverter(
+        logEventPack.getLogSchema().getSchema());
+    GenericAvroConverter<GenericRecord> headerConverter = getConverter(
+        header.getSchema().toString());
     try {
       for (LogEvent logEvent : logEventPack.getEvents()) {
         LOG.debug("Convert log events [{}] to dto objects.", logEvent);
-        if (logEvent == null | logEvent.getLogData() == null) {
+        if (logEvent == null || logEvent.getLogData() == null) {
           continue;
         }
-        LOG.trace("Avro record converter [{}] with log data [{}]", eventConverter, logEvent.getLogData());
+        LOG.trace("Avro record converter [{}] with log data [{}]",
+            eventConverter, logEvent.getLogData());
         GenericRecord decodedLog = eventConverter.decodeBinary(logEvent.getLogData());
         LOG.trace("Avro header record converter [{}]", headerConverter);
         String encodedJsonLogHeader = headerConverter.encodeToJson(header);
         String encodedJsonLog = eventConverter.encodeToJson(decodedLog);
         events.add(new LogEventDto(encodedJsonLogHeader, encodedJsonLog));
       }
-    } catch (IOException e) {
-      LOG.error("Unexpected IOException while decoding LogEvents", e);
-      throw e;
+    } catch (IOException ex) {
+      LOG.error("Unexpected IOException while decoding LogEvents", ex);
+      throw ex;
     }
     return events;
   }
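The switch from | to || in this hunk is another genuine bug fix: | is the non-short-circuiting OR, so the right operand logEvent.getLogData() was evaluated even when logEvent was null, throwing a NullPointerException instead of skipping the broken event. A self-contained demonstration (the LogEvent stub is hypothetical, just enough to show the difference):

public class ShortCircuitDemo {

  /** Hypothetical stand-in for the real LogEvent class. */
  static class LogEvent {
    byte[] getLogData() {
      return null;
    }
  }

  public static void main(String[] args) {
    LogEvent logEvent = null;

    // '||' short-circuits: the right operand is never evaluated when the
    // left one is already true, so this safely prints "skipped".
    if (logEvent == null || logEvent.getLogData() == null) {
      System.out.println("skipped");
    }

    // '|' always evaluates BOTH operands, so this throws NullPointerException.
    if (logEvent == null | logEvent.getLogData() == null) {
      System.out.println("never reached");
    }
  }
}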
@@ -228,7 +219,7 @@ private GenericAvroConverter<GenericRecord> getConverter(String schema) {
     GenericAvroConverter<GenericRecord> genAvroConverter = converters.get(schema);
     if (genAvroConverter == null) {
       LOG.trace("Create new converter for schema [{}]", schema);
-      genAvroConverter = new GenericAvroConverter<GenericRecord>(schema);
+      genAvroConverter = new GenericAvroConverter<>(schema);
       converters.put(schema, genAvroConverter);
     }
     LOG.trace("Get converter [{}] from map.", genAvroConverter);
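The last change in this file swaps the explicit type argument for the Java 7 diamond operator, letting the compiler infer the argument from the declaration; behavior is identical. A trivial illustration:

import java.util.HashMap;
import java.util.Map;

public class DiamondDemo {
  public static void main(String[] args) {
    // Pre-Java 7 style: the type argument is repeated on the right-hand side.
    Map<String, Integer> explicit = new HashMap<String, Integer>();
    // Diamond operator: the compiler infers <String, Integer> from the left.
    Map<String, Integer> inferred = new HashMap<>();
    inferred.put("answer", 42);
    System.out.println(explicit.isEmpty() + " " + inferred);
  }
}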
@@ -31,16 +31,16 @@ public class Environment {

   private static final String DEFAULT_SERVER_HOME_DIR = ".";

-  /**
-   * The Constant LOG.
-   */
+
   private static final Logger LOG = LoggerFactory.getLogger(Environment.class);

   /**
    * The Constant SYSTEM_PROPERTIES.
    */
-  private static final List<String> SYSTEM_PROPERTIES = Arrays.asList("java.version", "java.vendor", "java.home",
-      "java.class.path", "java.library.path", "java.io.tmpdir", "java.compiler", "os.name", "os.arch", "os.version",
+  private static final List<String> SYSTEM_PROPERTIES = Arrays.asList(
+      "java.version", "java.vendor", "java.home",
+      "java.class.path", "java.library.path", "java.io.tmpdir",
+      "java.compiler", "os.name", "os.arch", "os.version",
       "user.name", "user.home", "user.dir", SERVER_HOME_DIR);

   /**
@@ -50,7 +50,7 @@ private Environment() {
   }

   /**
-   * Logs environment state using {@link Logger}
+   * Logs environment state using {@link Logger}.
    */
   public static void logState() {
     LOG.info("Kaa version: {}, commit: {}", Version.PROJECT_VERSION, Version.COMMIT_HASH);
@@ -20,7 +20,7 @@
 import org.kaaproject.kaa.server.common.zk.operations.OperationsNodeListener;

 /**
- * Resolves {@link OperationsNodeInfo operations service node} for given entities
+ * Resolves {@link OperationsNodeInfo operations service node} for given entities.
  *
  * @author Andrew Shvayka
  */
@@ -18,9 +18,7 @@

 import java.util.List;

-/**
- * @author Andrew Shvayka
- */
+
 public final class BootstrapClientSync {

   private final int requestId;
@@ -18,9 +18,7 @@

 import java.util.Set;

-/**
- * @author Andrew Shvayka
- */
+
 public final class BootstrapServerSync {
   private final int requestId;
   private final Set<ProtocolConnectionData> protocolList;
@@ -18,9 +18,6 @@

 import java.util.Arrays;

-/**
- * @author Andrew Shvayka
- */
 public final class ProtocolConnectionData {
   private final int accessPointId;
   private final ProtocolVersionId protocolVersionId;
@@ -1421,6 +1421,8 @@ List<Integer> getAllCtlSchemaVersionsByFqnTenantIdAndApplicationId(
   SdkProfileDto getSdkProfile(String sdkProfileId) throws ControlServiceException;

   /**
+   * Get SDK profile by uniq application identifier.
+   *
    * @param applicationId the application id
    * @return the list sdk profile dto
    * @throws ControlServiceException the control service exception
@@ -1429,12 +1431,18 @@ List<SdkProfileDto> getSdkProfilesByApplicationId(String applicationId)
       throws ControlServiceException;

   /**
+   * Delete SDK profile.
+   *
    * @param sdkProfileId the sdk profile id
    * @throws ControlServiceException the control service exception
    */
   void deleteSdkProfile(String sdkProfileId) throws ControlServiceException;

+
+
   /**
+   * Return true if SDK profile has been already used.
+   *
    * @param token the token
    * @return boolean the sdk profile usage
    * @throws ControlServiceException the control service exception
