[CARBONDATA-3360] Fix NullPointerException in delete and clean files operation

Problem:
When a delete operation fails because the HDFS quota is exceeded or the disk is full, a tableUpdateStatus.write file is left behind in the store. If a clean files operation runs after that, the timestamp parsed from this file is null and is assigned to a primitive long, which throws a NullPointerException at runtime, and the .write file (which is treated as an invalid file) is never deleted.
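
For illustration, here is a minimal, self-contained sketch of the failure mode (the class and helper names are hypothetical, not the CarbonData code): a boxed Long that is null gets auto-unboxed into a primitive long and throws a NullPointerException.

// Sketch only. parseTimestampOrNull stands in for a parser that returns null when
// the file name carries no parsable timestamp, e.g. a leftover tableUpdateStatus.write.
public class UnboxingNpeSketch {

  static Long parseTimestampOrNull(String fileName) {
    try {
      return Long.parseLong(fileName.replaceAll("\\D", ""));
    } catch (NumberFormatException e) {
      return null; // nothing numeric in the name -> no timestamp
    }
  }

  public static void main(String[] args) {
    Long parsed = parseTimestampOrNull("tableUpdateStatus.write"); // null here
    long fileTimestamp = parsed; // auto-unboxing a null Long throws NullPointerException
    System.out.println(fileTimestamp);
  }
}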

Solution:
If a .write file is present, the clean files operation no longer fails. Instead, the timestamp of the tableUpdateStatus.write file is checked against the maximum query timeout, and any clean files operation run after that timeout deletes the .write file.
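
For orientation, a rough, self-contained sketch of this timeout guard (the file-name format, the timeout value, and all helper names are assumptions for illustration, not the CarbonData API): a leftover .write file is deleted only once it is older than the maximum query timeout, so a file that a still-recent operation might need is left untouched.

// Illustrative sketch only; see the actual diff below for the committed change.
public class WriteFileCleanupSketch {

  // Assumed value: the real timeout comes from CarbonData configuration.
  static final long MAX_QUERY_TIMEOUT_MS = 60L * 60 * 1000; // 60 minutes

  // Pull the trailing timestamp out of a name like "tableupdatestatus-1557300000000.write".
  static long timestampFromWriteFile(String fileName) {
    String digits = fileName.substring(fileName.lastIndexOf('-') + 1).replace(".write", "");
    return Long.parseLong(digits);
  }

  // Delete only when the file is older than the query timeout, so a file that a recent
  // delete or update might still rely on is left alone.
  static boolean shouldDelete(String fileName, long nowMs) {
    return fileName.endsWith(".write")
        && nowMs - timestampFromWriteFile(fileName) > MAX_QUERY_TIMEOUT_MS;
  }

  public static void main(String[] args) {
    System.out.println(
        shouldDelete("tableupdatestatus-1557300000000.write", System.currentTimeMillis()));
  }
}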

This closes #3191
akashrn5 authored and ravipesala committed May 8, 2019
1 parent affb40f commit 3268a45
Showing 1 changed file with 34 additions and 14 deletions.
@@ -673,7 +673,8 @@ public static boolean isMaxQueryTimeoutExceeded(long fileTimestamp) {
   private static boolean compareTimestampsAndDelete(
       CarbonFile invalidFile,
       boolean forceDelete, boolean isUpdateStatusFile) {
-    long fileTimestamp = 0L;
+    boolean isDeleted = false;
+    Long fileTimestamp;
 
     if (isUpdateStatusFile) {
       fileTimestamp = CarbonUpdateUtil.getTimeStampAsLong(invalidFile.getName()
@@ -683,21 +684,40 @@ private static boolean compareTimestampsAndDelete(
           CarbonTablePath.DataFileUtil.getTimeStampFromFileName(invalidFile.getName()));
     }
 
-    // if the timestamp of the file is more than the current time by query execution timeout.
-    // then delete that file.
-    if (CarbonUpdateUtil.isMaxQueryTimeoutExceeded(fileTimestamp) || forceDelete) {
-      // delete the files.
-      try {
-        LOGGER.info("deleting the invalid file : " + invalidFile.getName());
-        CarbonUtil.deleteFoldersAndFiles(invalidFile);
-        return true;
-      } catch (IOException e) {
-        LOGGER.error("error in clean up of compacted files." + e.getMessage(), e);
-      } catch (InterruptedException e) {
-        LOGGER.error("error in clean up of compacted files." + e.getMessage(), e);
+    // This check is because, when there are some invalid files like tableStatusUpdate.write files
+    // present in store [[which can happen during delete or update if the disk is full or hdfs quota
+    // is finished]] then fileTimestamp will be null, in that case check for max query out and
+    // delete the .write file after timeout
+    if (fileTimestamp == null) {
+      String tableUpdateStatusFilename = invalidFile.getName();
+      if (tableUpdateStatusFilename.endsWith(".write")) {
+        long tableUpdateStatusFileTimeStamp = Long.parseLong(
+            CarbonTablePath.DataFileUtil.getTimeStampFromFileName(tableUpdateStatusFilename));
+        if (isMaxQueryTimeoutExceeded(tableUpdateStatusFileTimeStamp)) {
+          isDeleted = deleteInvalidFiles(invalidFile);
+        }
+      }
+    } else {
+      // if the timestamp of the file is more than the current time by query execution timeout.
+      // then delete that file.
+      if (CarbonUpdateUtil.isMaxQueryTimeoutExceeded(fileTimestamp) || forceDelete) {
+        isDeleted = deleteInvalidFiles(invalidFile);
       }
     }
-    return false;
+    return isDeleted;
   }
 
+  private static boolean deleteInvalidFiles(CarbonFile invalidFile) {
+    boolean isDeleted;
+    try {
+      LOGGER.info("deleting the invalid file : " + invalidFile.getName());
+      CarbonUtil.deleteFoldersAndFiles(invalidFile);
+      isDeleted = true;
+    } catch (IOException | InterruptedException e) {
+      LOGGER.error("error in clean up of invalid files." + e.getMessage(), e);
+      isDeleted = false;
+    }
+    return isDeleted;
+  }
+
   public static boolean isBlockInvalid(SegmentStatus blockStatus) {
