
[HOTFIX] change log level for data loading
In the current data loading code, many log messages intended for debugging are written at INFO level. To reduce the number of such entries, this PR changes them to DEBUG level; the pattern is sketched after the changed-file summary below.

This closes #2911
jackylk authored and xuchuanyin committed Nov 9, 2018
1 parent 6707db6 commit 6f3b9d3
Showing 10 changed files with 73 additions and 50 deletions.
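The change follows one pattern throughout: progress and bookkeeping messages move from INFO to DEBUG, and message construction is wrapped in an isDebugEnabled() guard so the string concatenation only happens when DEBUG output is actually enabled. The sketch below illustrates that pattern; the class, the logger wiring (plain log4j here), and the example message are assumptions for illustration, not code from this commit.

import org.apache.log4j.Logger;

public class DataLoadingLogLevelExample {

  // Illustrative logger setup; CarbonData obtains its loggers through its own factory.
  private static final Logger LOGGER =
      Logger.getLogger(DataLoadingLogLevelExample.class.getName());

  void logRowsProcessed(String stepName, long rowCount) {
    // Before this commit: routine progress was logged unconditionally at INFO level.
    // LOGGER.info("Rows processed in step " + stepName + " : " + rowCount);

    // After this commit: the same message goes to DEBUG, and the concatenation is
    // guarded so the string is only built when DEBUG logging is enabled.
    if (LOGGER.isDebugEnabled()) {
      LOGGER.debug("Rows processed in step " + stepName + " : " + rowCount);
    }
  }
}

Messages that remain operationally useful, such as the file-copy timing in CarbonUtil.java, stay at INFO but are reformatted with String.format and tagged with an operation id, as the corresponding hunks below show.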
@@ -258,9 +258,12 @@ int getTableBlockSizeInMB() {
}
if (null == tableBlockSize) {
tableBlockSize = CarbonCommonConstants.BLOCK_SIZE_DEFAULT_VAL;
LOGGER.info("Table block size not specified for " + getTableUniqueName()
+ ". Therefore considering the default value "
+ CarbonCommonConstants.BLOCK_SIZE_DEFAULT_VAL + " MB");
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(
"Table block size not specified for " + getTableUniqueName() +
". Therefore considering the default value " +
CarbonCommonConstants.BLOCK_SIZE_DEFAULT_VAL + " MB");
}
}
return Integer.parseInt(tableBlockSize);
}
39 changes: 22 additions & 17 deletions core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
@@ -2474,7 +2474,7 @@ public static Map<String, Long> calculateDataIndexSize(CarbonTable carbonTable,
lockAcquired = carbonLock.lockWithRetries();
}
if (lockAcquired) {
LOGGER.info("Acquired lock for table for table status updation");
LOGGER.debug("Acquired lock for table for table status updation");
String metadataPath = carbonTable.getMetadataPath();
LoadMetadataDetails[] loadMetadataDetails =
SegmentStatusManager.readLoadMetadata(metadataPath);
@@ -2488,7 +2488,7 @@ public static Map<String, Long> calculateDataIndexSize(CarbonTable carbonTable,
// If it is old segment, need to calculate data size and index size again
if (null == dsize || null == isize) {
needUpdate = true;
LOGGER.info("It is an old segment, need calculate data size and index size again");
LOGGER.debug("It is an old segment, need calculate data size and index size again");
HashMap<String, Long> map = CarbonUtil.getDataSizeAndIndexSize(
identifier.getTablePath(), loadMetadataDetail.getLoadName());
dsize = String.valueOf(map.get(CarbonCommonConstants.CARBON_TOTAL_DATA_SIZE));
@@ -2524,7 +2524,7 @@ public static Map<String, Long> calculateDataIndexSize(CarbonTable carbonTable,
}
} finally {
if (carbonLock.unlock()) {
LOGGER.info("Table unlocked successfully after table status updation");
LOGGER.debug("Table unlocked successfully after table status updation");
} else {
LOGGER.error("Unable to unlock Table lock for table during table status updation");
}
@@ -2727,7 +2727,8 @@ public static void copyCarbonDataFileToCarbonStorePath(String localFilePath,
String carbonDataDirectoryPath, long fileSizeInBytes)
throws CarbonDataWriterException {
long copyStartTime = System.currentTimeMillis();
LOGGER.info("Copying " + localFilePath + " --> " + carbonDataDirectoryPath);
LOGGER.info(String.format("Copying %s to %s, operation id %d", localFilePath,
carbonDataDirectoryPath, copyStartTime));
try {
CarbonFile localCarbonFile =
FileFactory.getCarbonFile(localFilePath, FileFactory.getFileType(localFilePath));
@@ -2740,9 +2741,8 @@ public static void copyCarbonDataFileToCarbonStorePath(String localFilePath,
throw new CarbonDataWriterException(
"Problem while copying file from local store to carbon store", e);
}
LOGGER.info(
"Total copy time (ms) to copy file " + localFilePath + " is " + (System.currentTimeMillis()
- copyStartTime));
LOGGER.info(String.format("Total copy time is %d ms, operation id %d",
System.currentTimeMillis() - copyStartTime, copyStartTime));
}

/**
@@ -2797,10 +2797,12 @@ private static long getMaxOfBlockAndFileSize(long blockSize, long fileSize) {
String readableBlockSize = ByteUtil.convertByteToReadable(blockSize);
String readableFileSize = ByteUtil.convertByteToReadable(fileSize);
String readableMaxSize = ByteUtil.convertByteToReadable(maxSize);
LOGGER.info(
"The configured block size is " + readableBlockSize + ", the actual carbon file size is "
+ readableFileSize + ", choose the max value " + readableMaxSize
+ " as the block size on HDFS");
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(
"The configured block size is " + readableBlockSize + ", the actual carbon file size is "
+ readableFileSize + ", choose the max value " + readableMaxSize
+ " as the block size on HDFS");
}
return maxSize;
}

@@ -3117,10 +3119,11 @@ public static Map<String, LocalDictionaryGenerator> getLocalDictionaryModel(
}
}
if (islocalDictEnabled) {
LOGGER.info("Local dictionary is enabled for table: " + carbonTable.getTableUniqueName());
LOGGER.info(
"Local dictionary threshold for table: " + carbonTable.getTableUniqueName() + " is: "
+ carbonTable.getLocalDictionaryThreshold());
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Local dictionary is enabled for table: " + carbonTable.getTableUniqueName());
LOGGER.debug(String.format("Local dictionary threshold for table %s is %d",
carbonTable.getTableUniqueName(), carbonTable.getLocalDictionaryThreshold()));
}
Iterator<Map.Entry<String, LocalDictionaryGenerator>> iterator =
columnLocalDictGenMap.entrySet().iterator();
StringBuilder stringBuilder = new StringBuilder();
@@ -3129,8 +3132,10 @@ public static Map<String, LocalDictionaryGenerator> getLocalDictionaryModel(
stringBuilder.append(next.getKey());
stringBuilder.append(',');
}
LOGGER.info("Local dictionary will be generated for the columns:" + stringBuilder.toString()
+ " for table: " + carbonTable.getTableUniqueName());
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(String.format("Local dictionary will be generated for the columns: %s for"
+ " table %s", stringBuilder.toString(), carbonTable.getTableUniqueName()));
}
}
return columnLocalDictGenMap;
}
@@ -72,13 +72,13 @@ public DataField[] getOutput() {
* @throws IOException
*/
public void initialize() throws IOException {
if (LOGGER.isInfoEnabled()) {
if (LOGGER.isDebugEnabled()) {
// This thread prints the rows processed in each step for every 10 seconds.
new Thread() {
@Override public void run() {
while (!closed) {
try {
LOGGER.info("Rows processed in step " + getStepName() + " : " + rowCounter.get());
LOGGER.debug("Rows processed in step " + getStepName() + " : " + rowCounter.get());
Thread.sleep(10000);
} catch (InterruptedException e) {
//ignore
@@ -53,8 +53,6 @@ public void execute(CarbonLoadModel loadModel, String[] storeLocation,
// check and remove any bad record key from bad record entry logger static map
if (CarbonBadRecordUtil.hasBadRecord(loadModel)) {
LOGGER.error("Data Load is partially success for table " + loadModel.getTableName());
} else {
LOGGER.info("Data loading is successful for table " + loadModel.getTableName());
}
} catch (CarbonDataLoadingException e) {
if (e instanceof BadRecordFoundException) {
@@ -200,11 +200,13 @@ private void finish(CarbonFactHandler dataHandler, int iteratorIndex) {
exception = new CarbonDataWriterException(
"Failed for table: " + tableName + " in finishing data handler", e);
}
LOGGER.info("Record Processed For table: " + tableName);
String logMessage =
"Finished Carbon DataWriterProcessorStepImpl: Read: " + readCounter[iteratorIndex]
+ ": Write: " + readCounter[iteratorIndex];
LOGGER.info(logMessage);
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Record Processed For table: " + tableName);
String logMessage =
"Finished Carbon DataWriterProcessorStepImpl: Read: " + readCounter[iteratorIndex] +
": Write: " + readCounter[iteratorIndex];
LOGGER.debug(logMessage);
}
CarbonTimeStatisticsFactory.getLoadStatisticsInstance().recordTotalRecords(rowCounter.get());
try {
processingComplete(dataHandler);
@@ -194,11 +194,11 @@ public void finish(CarbonFactHandler dataHandler) {
configuration.getTableIdentifier().getCarbonTableIdentifier();
String tableName = tableIdentifier.getTableName();
dataHandler.finish();
LOGGER.info("Record Processed For table: " + tableName);
String logMessage =
"Finished Carbon DataWriterProcessorStepImpl: Read: " + readCounter + ": Write: "
+ rowCounter.get();
LOGGER.info(logMessage);
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Record Processed For table: " + tableName);
LOGGER.debug("Finished Carbon DataWriterProcessorStepImpl: Read: " + readCounter +
": Write: " + rowCounter.get());
}
CarbonTimeStatisticsFactory.getLoadStatisticsInstance().recordTotalRecords(rowCounter.get());
processingComplete(dataHandler);
CarbonTimeStatisticsFactory.getLoadStatisticsInstance()
@@ -235,7 +235,7 @@ public void processRow(CarbonRow row, CarbonFactHandler dataHandler) {
super.close();
if (listener != null) {
try {
LOGGER.info("closing all the DataMap writers registered to DataMap writer listener");
LOGGER.debug("closing all the DataMap writers registered to DataMap writer listener");
listener.finish();
} catch (IOException e) {
LOGGER.error("error while closing the datamap writers", e);
@@ -134,7 +134,9 @@ public CarbonFactDataHandlerColumnar(CarbonFactDataHandlerModel model) {
}
}

LOGGER.info("Columns considered as NoInverted Index are " + noInvertedIdxCol.toString());
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Columns considered as NoInverted Index are " + noInvertedIdxCol.toString());
}
}

private void initParameters(CarbonFactDataHandlerModel model) {
@@ -146,7 +148,7 @@ private void initParameters(CarbonFactDataHandlerModel model) {
.getBucketId()));
producerExecutorServiceTaskList =
new ArrayList<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
LOGGER.info("Initializing writer executors");
LOGGER.debug("Initializing writer executors");
consumerExecutorService = Executors.newFixedThreadPool(1, new CarbonThreadFactory(
"ConsumerPool_" + System.nanoTime() + ":" + model.getTableName() + ", range: " + model
.getBucketId()));
@@ -210,7 +212,10 @@ public void addDataToStore(CarbonRow row) throws CarbonDataWriterException {
blockletProcessingCount.incrementAndGet();
// set the entry count to zero
processedDataCount += entryCount;
LOGGER.info("Total Number Of records added to store: " + processedDataCount);

if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Total Number Of records added to store: " + processedDataCount);
}
dataRows = new ArrayList<>(this.pageSize);
this.entryCount = 0;
} catch (InterruptedException e) {
@@ -256,7 +261,7 @@ private boolean isVarcharColumnFull(CarbonRow row) {
}
if (SnappyCompressor.MAX_BYTE_TO_COMPRESS -
(varcharColumnSizeInByte[i] + dataRows.size() * 4) < (2 << 20)) {
LOGGER.info("Limited by varchar column, page size is " + dataRows.size());
LOGGER.debug("Limited by varchar column, page size is " + dataRows.size());
// re-init for next page
varcharColumnSizeInByte = new int[model.getVarcharDimIdxInNoDict().size()];
return true;
@@ -284,7 +289,9 @@ private TablePage processDataRows(List<CarbonRow> dataRows)

tablePage.encode();

LOGGER.info("Number Of records processed: " + dataRows.size());
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Number Of records processed: " + dataRows.size());
}
return tablePage;
}

@@ -302,14 +309,18 @@ public void finish() throws CarbonDataWriterException {
if (producerExecutorService.isShutdown()) {
return;
}
LOGGER.info("Started Finish Operation");
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Started Finish Operation");
}
try {
semaphore.acquire();
producerExecutorServiceTaskList.add(producerExecutorService
.submit(new Producer(tablePageList, dataRows, ++writerTaskSequenceCounter, true)));
blockletProcessingCount.incrementAndGet();
processedDataCount += entryCount;
LOGGER.info("Total Number Of records added to store: " + processedDataCount);
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Total Number Of records added to store: " + processedDataCount);
}
closeWriterExecutionService(producerExecutorService);
processWriteTaskSubmitList(producerExecutorServiceTaskList);
processingComplete = true;
@@ -377,7 +388,9 @@ public void closeHandler() throws CarbonDataWriterException {
consumerExecutorService.shutdownNow();
processWriteTaskSubmitList(consumerExecutorServiceTaskList);
this.dataWriter.writeFooter();
LOGGER.info("All blocklets have been finished writing");
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("All blocklets have been finished writing");
}
// close all the open stream for both the files
this.dataWriter.closeWriter();
}
@@ -402,12 +415,12 @@ private void setWritingConfiguration() throws CarbonDataWriterException {
pageSize :
CarbonV3DataFormatConstants.NUMBER_OF_ROWS_PER_BLOCKLET_COLUMN_PAGE_DEFAULT;
}
LOGGER.info("Number of rows per column page is configured as pageSize = " + pageSize);
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Number of rows per column page is configured as pageSize = " + pageSize);
}
dataRows = new ArrayList<>(this.pageSize);

if (model.getVarcharDimIdxInNoDict().size() > 0) {
LOGGER.info("Number of rows per column blocklet is constrained by pageSize and actual size " +
"of long string column(s)");
varcharColumnSizeInByte = new int[model.getVarcharDimIdxInNoDict().size()];
}

@@ -282,7 +282,7 @@ protected void commitCurrentFile(boolean copyInCurrentThread) {
.add(executorService.submit(new CompleteHdfsBackendThread(carbonDataFileTempPath)));
}
} catch (IOException e) {
LOGGER.error("Failed to delete carbondata file from temp location" + e.getMessage());
LOGGER.error(e);
}
}
}
@@ -144,8 +144,11 @@ protected void writeFooterToFile() throws CarbonDataWriterException {
addPageData(tablePage);
}

LOGGER.info("Number of Pages for blocklet is: " + blockletDataHolder.getNumberOfPagesAdded()
+ " :Rows Added: " + blockletDataHolder.getTotalRows());
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Number of Pages for blocklet is: " +
blockletDataHolder.getNumberOfPagesAdded() +
" :Rows Added: " + blockletDataHolder.getTotalRows());
}

// write the data
writeBlockletToFile();
@@ -522,7 +522,6 @@ public static SortScopeOptions.SortScope getSortScope(CarbonDataLoadConfiguratio
configuration.getDataLoadProperty(CarbonCommonConstants.LOAD_SORT_SCOPE)
.toString());
}
LOGGER.info("sort scope is set to " + sortScope);
} catch (Exception e) {
sortScope = SortScopeOptions.getSortScope(CarbonCommonConstants.LOAD_SORT_SCOPE_DEFAULT);
LOGGER.warn("Exception occured while resolving sort scope. " +