HBASE-21905: [FIFOCompaction] Don't compact already inflight store files (apache#1829)

This one surfaced as a flaky test but turns out to be a legit bug
in the FIFOCompaction code. FIFOCompaction does not check whether an empty
store file is already being compacted by an in-flight compaction
request and enqueues it anyway. It can potentially race with a running
compaction (as in this test case; see the jira for the exact exception).

Fixes the edge case and cleans up the test code a bit.

Signed-off-by: Andrew Purtell <apurtell@apache.org>
bharathv authored and HomminLee committed Jun 12, 2020
1 parent b33966f commit c33b190
Showing 2 changed files with 17 additions and 26 deletions.
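
To make the race described in the commit message concrete: if candidate selection does not consult the set of files already being compacted, the same empty store file can end up in two compaction requests at once. The following is a minimal, self-contained sketch of that idea in plain Java, not HBase's actual compaction classes; the names FifoSelection, StoreFile, selectExpired, and inFlight are all illustrative.

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Simplified model of FIFO-style candidate selection; not HBase code.
public class FifoSelection {

  /** Stand-in for a store file: just a name and an entry count. */
  record StoreFile(String name, long entries) {}

  /**
   * Picks compaction candidates, skipping anything already being compacted.
   * Without the inFlight check, an empty file that is still part of a running
   * compaction would be selected again, producing two requests for one file.
   */
  static List<StoreFile> selectExpired(Collection<StoreFile> files, Set<StoreFile> inFlight) {
    List<StoreFile> candidates = new ArrayList<>();
    for (StoreFile sf : files) {
      boolean empty = sf.entries() == 0;
      if (empty && !inFlight.contains(sf)) { // the guard this commit adds, in spirit
        candidates.add(sf);
      }
    }
    return candidates;
  }

  public static void main(String[] args) {
    StoreFile emptyFile = new StoreFile("hfile-0", 0);
    Set<StoreFile> inFlight = new HashSet<>(Set.of(emptyFile)); // already being compacted
    // With the guard, nothing is selected, so no second request is enqueued.
    System.out.println(selectExpired(List.of(emptyFile), inFlight)); // prints []
  }
}

Dropping the inFlight guard in the sketch would return the empty file again even though a compaction for it is still running, which is the double enqueue the fix below prevents.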
FIFOCompactionPolicy.java
@@ -97,11 +97,11 @@ public boolean needsCompaction(Collection<HStoreFile> storeFiles,
   }
 
   /**
-   * The FIFOCompactionPolicy only choose those TTL expired HFiles as the compaction candidates. So
-   * if all HFiles are TTL expired, then the compaction will generate a new empty HFile. While its
-   * max timestamp will be Long.MAX_VALUE. If not considered separately, the HFile will never be
-   * archived because its TTL will be never expired. So we'll check the empty store file separately.
-   * (See HBASE-21504)
+   * The FIFOCompactionPolicy only choose the TTL expired store files as the compaction candidates.
+   * If all the store files are TTL expired, then the compaction will generate a new empty file.
+   * While its max timestamp will be Long.MAX_VALUE. If not considered separately, the store file
+   * will never be archived because its TTL will be never expired. So we'll check the empty store
+   * file separately (See HBASE-21504).
   */
   private boolean isEmptyStoreFile(HStoreFile sf) {
     return sf.getReader().getEntries() == 0;
@@ -130,7 +130,7 @@ private Collection<HStoreFile> getExpiredStores(Collection<HStoreFile> files,
     long currentTime = EnvironmentEdgeManager.currentTime();
     Collection<HStoreFile> expiredStores = new ArrayList<>();
     for (HStoreFile sf : files) {
-      if (isEmptyStoreFile(sf)) {
+      if (isEmptyStoreFile(sf) && !filesCompacting.contains(sf)) {
         expiredStores.add(sf);
         continue;
       }
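
The javadoc above captures the HBASE-21504 subtlety: a file produced by compacting away nothing but expired cells contains no cells at all, its max timestamp is treated as Long.MAX_VALUE, and a pure age-based TTL check therefore never marks it expired. A rough sketch of that arithmetic, assuming a simplified FileMeta record and isExpired helper (illustrative only, not HBase's API):

// Illustrative only: why an empty store file never passes a pure TTL check.
public class EmptyFileTtl {

  /** Simplified view of a store file's metadata. */
  record FileMeta(long entries, long maxTimestamp) {}

  /** Age-based expiry: expired once the newest cell is older than the TTL. */
  static boolean isExpired(FileMeta f, long nowMs, long ttlMs) {
    return nowMs - f.maxTimestamp() > ttlMs;
  }

  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    long ttl = 60_000L; // one minute

    FileMeta oldFile = new FileMeta(10, now - 120_000L);  // newest cell two minutes old
    FileMeta emptyFile = new FileMeta(0, Long.MAX_VALUE); // no cells, timestamp defaults high

    System.out.println(isExpired(oldFile, now, ttl));   // true  -> can be dropped by age
    System.out.println(isExpired(emptyFile, now, ttl)); // false -> would never be archived
    // Hence the separate check in isEmptyStoreFile: zero entries marks the file
    // as droppable regardless of its age.
    System.out.println(emptyFile.entries() == 0);       // true
  }
}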
TestFIFOCompactionPolicy.java
@@ -30,6 +30,7 @@
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
@@ -58,6 +59,7 @@
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.ExpectedException;
+import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
 
 @Category({ RegionServerTests.class, MediumTests.class })
 public class TestFIFOCompactionPolicy {
@@ -216,23 +218,18 @@ public void testFIFOCompactionPolicyExpiredEmptyHFiles() throws Exception {
     TEST_UTIL.getAdmin().flush(tableName); // HFile-0
     put = new Put(Bytes.toBytes("row2")).addColumn(family, qualifier, ts, Bytes.toBytes("value1"));
     table.put(put);
+    final int testWaitTimeoutMs = 20000;
     TEST_UTIL.getAdmin().flush(tableName); // HFile-1
 
-    HStore store = getStoreWithName(tableName);
-    Assert.assertNotNull(store);
+    HStore store = Preconditions.checkNotNull(getStoreWithName(tableName));
     Assert.assertEquals(2, store.getStorefilesCount());
 
     TEST_UTIL.getAdmin().majorCompact(tableName);
-    for (int i = 0; i < 100; i++) {
-      if (store.getStorefilesCount() > 1) {
-        Thread.sleep(100);
-      } else {
-        break;
-      }
-    }
+    TEST_UTIL.waitFor(testWaitTimeoutMs,
+      (Waiter.Predicate<Exception>) () -> store.getStorefilesCount() == 1);
 
     Assert.assertEquals(1, store.getStorefilesCount());
-    HStoreFile sf = store.getStorefiles().iterator().next();
-    Assert.assertNotNull(sf);
+    HStoreFile sf = Preconditions.checkNotNull(store.getStorefiles().iterator().next());
     Assert.assertEquals(0, sf.getReader().getEntries());
 
     put = new Put(Bytes.toBytes("row3")).addColumn(family, qualifier, ts, Bytes.toBytes("value1"));
@@ -241,17 +238,11 @@ public void testFIFOCompactionPolicyExpiredEmptyHFiles() throws Exception {
     Assert.assertEquals(2, store.getStorefilesCount());
 
     TEST_UTIL.getAdmin().majorCompact(tableName);
-    for (int i = 0; i < 100; i++) {
-      if (store.getStorefilesCount() > 1) {
-        Thread.sleep(100);
-      } else {
-        break;
-      }
-    }
+    TEST_UTIL.waitFor(testWaitTimeoutMs,
+      (Waiter.Predicate<Exception>) () -> store.getStorefilesCount() == 1);
 
     Assert.assertEquals(1, store.getStorefilesCount());
-    sf = store.getStorefiles().iterator().next();
-    Assert.assertNotNull(sf);
+    sf = Preconditions.checkNotNull(store.getStorefiles().iterator().next());
     Assert.assertEquals(0, sf.getReader().getEntries());
   }
 }
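
The test-side cleanup replaces the hand-rolled sleep loops with the test utility's waitFor plus a predicate, which fails loudly on timeout instead of silently falling out of the loop after 100 iterations. As a generic illustration of that polling pattern, here is a standalone helper, not HBase's Waiter API; awaitCondition and its parameters are invented for this sketch:

import java.util.function.BooleanSupplier;

// Generic poll-until-true helper illustrating the pattern behind the test rewrite.
public final class PollUntil {

  /**
   * Re-evaluates the condition until it holds or the timeout elapses.
   * Unlike a fixed-iteration sleep loop, this surfaces a clear failure on timeout.
   */
  static void awaitCondition(long timeoutMs, long intervalMs, BooleanSupplier condition)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!condition.getAsBoolean()) {
      if (System.currentTimeMillis() > deadline) {
        throw new AssertionError("Condition not met within " + timeoutMs + " ms");
      }
      Thread.sleep(intervalMs);
    }
  }

  public static void main(String[] args) throws InterruptedException {
    long start = System.currentTimeMillis();
    // Example: wait until at least 200 ms have passed, polling every 50 ms.
    awaitCondition(20_000, 50, () -> System.currentTimeMillis() - start >= 200);
    System.out.println("condition met");
  }
}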
