HBASE-18038 Rename StoreFile to HStoreFile and add a StoreFile interface for CP
Apache9 committed Jun 6, 2017
1 parent 8bfa8aa commit ee0f148
Showing 67 changed files with 1,257 additions and 1,042 deletions.
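Most of the diff is mechanical: call sites now construct the new concrete class HStoreFile, while StoreFile stays behind as the interface that coprocessors (CP) see. A minimal before/after sketch of that pattern, assuming only the constructor arguments and initReader() call visible in the hunks below; the helper method and its name are illustrative and not part of the commit:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFile;

public class StoreFileRenameSketch {
  // Opens a store file the way the MOB and HStore call sites in this diff do.
  static StoreFile openStoreFile(FileSystem fs, Path path, Configuration conf,
      CacheConfig cacheConf) throws IOException {
    // Before this commit: new StoreFile(fs, path, conf, cacheConf, BloomType.NONE, true)
    // After: the concrete type is HStoreFile; StoreFile remains as the CP-facing interface.
    // The trailing boolean is the primaryReplica flag discussed in the XXX comments below.
    StoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
    sf.initReader(); // readers are initialized explicitly, as in validateMobFile() below
    return sf;
  }
}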
@@ -56,7 +56,7 @@
 import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
 import org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
@@ -605,11 +605,8 @@ private void mergeStoreFiles(
       final CacheConfig cacheConf = new CacheConfig(conf, hcd);
       for (StoreFileInfo storeFileInfo: storeFiles) {
         // Create reference file(s) of the region in mergedDir
-        regionFs.mergeStoreFile(
-          mergedRegion,
-          family,
-          new StoreFile(
-            mfs.getFileSystem(), storeFileInfo, conf, cacheConf, hcd.getBloomFilterType()),
-          mergedDir);
+        regionFs.mergeStoreFile(mergedRegion, family, new HStoreFile(mfs.getFileSystem(),
+          storeFileInfo, conf, cacheConf, hcd.getBloomFilterType(), true),
+          mergedDir);
       }
     }
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.hbase.master.assignment;
 
+import com.google.common.annotations.VisibleForTesting;
+
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InterruptedIOException;
@@ -62,6 +64,7 @@
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.HStore;
+import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
@@ -71,8 +74,6 @@
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
 
-import com.google.common.annotations.VisibleForTesting;
-
 /**
  * The procedure to split a region in a table.
  * Takes lock on the parent region.
@@ -525,11 +526,9 @@ private Pair<Integer, Integer> splitStoreFiles(
     if (storeFiles != null && storeFiles.size() > 0) {
       final CacheConfig cacheConf = new CacheConfig(conf, hcd);
       for (StoreFileInfo storeFileInfo: storeFiles) {
-        StoreFileSplitter sfs = new StoreFileSplitter(
-          regionFs,
-          family.getBytes(),
-          new StoreFile(
-            mfs.getFileSystem(), storeFileInfo, conf, cacheConf, hcd.getBloomFilterType()));
+        StoreFileSplitter sfs =
+            new StoreFileSplitter(regionFs, family.getBytes(), new HStoreFile(mfs.getFileSystem(),
+                storeFileInfo, conf, cacheConf, hcd.getBloomFilterType(), true));
         futures.add(threadPool.submit(sfs));
       }
     }
@@ -28,7 +28,6 @@
 import java.util.Map.Entry;
 import java.util.Random;
 
-import com.google.common.collect.Lists;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -51,6 +50,8 @@
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
+import com.google.common.collect.Lists;
+
 /**
  * <p>This is a best effort load balancer. Given a Cost function F(C) =&gt; x It will
  * randomly try and mutate the cluster to Cprime. If F(Cprime) &lt; F(C) then the
@@ -27,6 +27,7 @@
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 
 /**
@@ -46,7 +47,7 @@ public static CachedMobFile create(FileSystem fs, Path path, Configuration conf,
       CacheConfig cacheConf) throws IOException {
     // XXX: primaryReplica is only used for constructing the key of block cache so it is not a
     // critical problem if we pass the wrong value, so here we always pass true. Need to fix later.
-    StoreFile sf = new StoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
+    StoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
     return new CachedMobFile(sf);
   }
 
@@ -29,6 +29,7 @@
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
 
@@ -146,7 +147,7 @@ public static MobFile create(FileSystem fs, Path path, Configuration conf, Cache
       throws IOException {
     // XXX: primaryReplica is only used for constructing the key of block cache so it is not a
     // critical problem if we pass the wrong value, so here we always pass true. Need to fix later.
-    StoreFile sf = new StoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
+    StoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
     return new MobFile(sf);
   }
 }
@@ -69,6 +69,7 @@
 import org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactor;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.HStore;
+import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -334,7 +335,7 @@ public static void cleanExpiredMobFiles(FileSystem fs, Configuration conf, Table
             LOG.debug(fileName + " is an expired file");
           }
           filesToClean
-              .add(new StoreFile(fs, file.getPath(), conf, cacheConfig, BloomType.NONE, true));
+              .add(new HStoreFile(fs, file.getPath(), conf, cacheConfig, BloomType.NONE, true));
         }
       } catch (Exception e) {
         LOG.error("Cannot parse the fileName " + fileName, e);
@@ -722,7 +723,7 @@ private static void validateMobFile(Configuration conf, FileSystem fs, Path path
       CacheConfig cacheConfig, boolean primaryReplica) throws IOException {
     StoreFile storeFile = null;
     try {
-      storeFile = new StoreFile(fs, path, conf, cacheConfig, BloomType.NONE, primaryReplica);
+      storeFile = new HStoreFile(fs, path, conf, cacheConfig, BloomType.NONE, primaryReplica);
       storeFile.initReader();
     } catch (IOException e) {
       LOG.error("Failed to open mob file[" + path + "], keep it in temp directory.", e);
@@ -36,7 +36,6 @@
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -74,6 +73,7 @@
 import org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.CompactionPartitionId;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.HStore;
+import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.ScanInfo;
 import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.ScannerContext;
@@ -87,6 +87,8 @@
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * An implementation of {@link MobCompactor} that compacts the mob files in partitions.
  */
@@ -335,7 +337,7 @@ protected List<Path> performCompaction(PartitionedMobCompactionRequest request)
     for (CompactionDelPartition delPartition : request.getDelPartitions()) {
       for (Path newDelPath : delPartition.listDelFiles()) {
         StoreFile sf =
-            new StoreFile(fs, newDelPath, conf, compactionCacheConfig, BloomType.NONE, true);
+            new HStoreFile(fs, newDelPath, conf, compactionCacheConfig, BloomType.NONE, true);
         // pre-create reader of a del file to avoid race condition when opening the reader in each
         // partition.
         sf.initReader();
@@ -551,7 +553,7 @@ private List<Path> compactMobFilePartition(PartitionedMobCompactionRequest reque
     // add the selected mob files and del files into filesToCompact
     List<StoreFile> filesToCompact = new ArrayList<>();
     for (int i = offset; i < batch + offset; i++) {
-      StoreFile sf = new StoreFile(fs, files.get(i).getPath(), conf, compactionCacheConfig,
+      StoreFile sf = new HStoreFile(fs, files.get(i).getPath(), conf, compactionCacheConfig,
          BloomType.NONE, true);
      filesToCompact.add(sf);
     }
@@ -733,7 +735,7 @@ protected List<Path> compactDelFiles(PartitionedMobCompactionRequest request,
         continue;
       }
       for (int i = offset; i < batch + offset; i++) {
-        batchedDelFiles.add(new StoreFile(fs, delFilePaths.get(i), conf, compactionCacheConfig,
+        batchedDelFiles.add(new HStoreFile(fs, delFilePaths.get(i), conf, compactionCacheConfig,
            BloomType.NONE, true));
       }
       // compact the del files in a batch.
@@ -27,41 +27,39 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.NullWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.util.LineReader;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.hadoop.mapreduce.InputSplit;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.JobContext;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.mapreduce.lib.input.FileSplit;
-import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
-import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
-import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.mapreduce.JobUtil;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
+import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.lib.input.FileSplit;
+import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
+import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
+import org.apache.hadoop.util.LineReader;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
 
 /*
  * The CompactionTool allows to execute a compaction specifying a:
@@ -58,7 +58,7 @@ protected void createComponents(Configuration conf, Store store, CellComparator
       throws IOException {
     this.compactionPolicy = new DateTieredCompactionPolicy(conf, store);
     this.storeFileManager =
-        new DefaultStoreFileManager(kvComparator, StoreFile.Comparators.SEQ_ID_MAX_TIMESTAMP, conf,
+        new DefaultStoreFileManager(kvComparator, StoreFileComparators.SEQ_ID_MAX_TIMESTAMP, conf,
            compactionPolicy.getConf());
     this.storeFlusher = new DefaultStoreFlusher(conf, store);
     this.compactor = new DateTieredCompactor(conf, store);
@@ -69,7 +69,7 @@ protected void createComponents(
     createCompactionPolicy(conf, store);
     createStoreFlusher(conf, store);
     storeFileManager =
-        new DefaultStoreFileManager(kvComparator, StoreFile.Comparators.SEQ_ID, conf,
+        new DefaultStoreFileManager(kvComparator, StoreFileComparators.SEQ_ID, conf,
            compactionPolicy.getConf());
   }
 
@@ -25,6 +25,7 @@
 import java.util.Comparator;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Optional;
 
 import com.google.common.collect.ImmutableCollection;
 import com.google.common.collect.ImmutableList;
@@ -172,10 +173,13 @@ public Iterator<StoreFile> updateCandidateFilesForRowKeyBefore(
 
   @Override
   public final byte[] getSplitPoint() throws IOException {
-    if (this.storefiles.isEmpty()) {
+    List<StoreFile> storefiles = this.storefiles;
+    if (storefiles.isEmpty()) {
       return null;
     }
-    return StoreUtils.getLargestFile(this.storefiles).getFileSplitPoint(this.kvComparator);
+    Optional<StoreFile> largestFile = StoreUtils.getLargestFile(storefiles);
+    return largestFile.isPresent()
+        ? StoreUtils.getFileSplitPoint(largestFile.get(), kvComparator).orElse(null) : null;
   }
 
   @Override
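The getSplitPoint() hunk above is the one place in this file where logic is restructured rather than just renamed: the largest-file lookup and the split-point computation now go through StoreUtils and return Optional values instead of relying on nulls. A hedged sketch of the same flow, assuming StoreUtils sits in org.apache.hadoop.hbase.regionserver, getLargestFile yields an Optional<StoreFile>, and getFileSplitPoint yields an Optional split key, as the new code suggests:

import java.io.IOException;
import java.util.List;
import java.util.Optional;

import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.StoreUtils;

public class SplitPointSketch {
  // Mirrors the new DefaultStoreFileManager.getSplitPoint() flow shown above.
  static byte[] splitPoint(List<StoreFile> storefiles, CellComparator comparator)
      throws IOException {
    if (storefiles.isEmpty()) {
      return null; // nothing to split yet
    }
    Optional<StoreFile> largestFile = StoreUtils.getLargestFile(storefiles);
    return largestFile.isPresent()
        ? StoreUtils.getFileSplitPoint(largestFile.get(), comparator).orElse(null)
        : null;
  }
}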
@@ -293,7 +293,7 @@ public void commitFile(final Path sourceFile, Path targetPath) throws IOExceptio
   private void validateMobFile(Path path) throws IOException {
     StoreFile storeFile = null;
     try {
-      storeFile = new StoreFile(region.getFilesystem(), path, conf, this.mobCacheConfig,
+      storeFile = new HStoreFile(region.getFilesystem(), path, conf, this.mobCacheConfig,
          BloomType.NONE, isPrimaryReplicaStore());
       storeFile.initReader();
     } catch (IOException e) {
@@ -1456,7 +1456,7 @@ public boolean isLoadingCfsOnDemandDefault() {
    * time-sensitive thread.
    *
    * @return Vector of all the storage files that the HRegion's component
-   * HStores make use of. It's a list of all HStoreFile objects. Returns empty
+   * HStores make use of. It's a list of all StoreFile objects. Returns empty
    * vector if already closed and null if judged that it should not close.
    *
    * @throws IOException e
@@ -1497,7 +1497,7 @@ public Map<byte[], List<StoreFile>> close() throws IOException {
    *
    * @param abort true if server is aborting (only during testing)
    * @return Vector of all the storage files that the HRegion's component
-   * HStores make use of. It's a list of HStoreFile objects. Can be null if
+   * HStores make use of. It's a list of StoreFile objects. Can be null if
    * we are not to close at this time or we are already closed.
    *
    * @throws IOException e
@@ -4204,7 +4204,7 @@ protected long replayRecoveredEditsIfAny(final Path regiondir,
       Set<StoreFile> fakeStoreFiles = new HashSet<>(files.size());
       for (Path file: files) {
         fakeStoreFiles.add(
-          new StoreFile(getRegionFileSystem().getFileSystem(), file, this.conf, null, null, true));
+          new HStoreFile(getRegionFileSystem().getFileSystem(), file, this.conf, null, null, true));
       }
       getRegionFileSystem().removeStoreFiles(fakeFamilyName, fakeStoreFiles);
     } else {
@@ -18,13 +18,6 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableCollection;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.net.InetSocketAddress;
@@ -53,7 +46,15 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.CompoundConfiguration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MemoryCompactionPolicy;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.FailedArchiveException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Scan;
@@ -90,6 +91,13 @@
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableCollection;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+
 /**
  * A Store holds a column family in a Region. Its a memstore and a set of zero
  * or more StoreFiles, which stretch backwards over time.
@@ -455,12 +463,12 @@ public HColumnDescriptor getFamily() {
    */
   @Override
   public long getMaxSequenceId() {
-    return StoreFile.getMaxSequenceIdInList(this.getStorefiles());
+    return StoreUtils.getMaxSequenceIdInList(this.getStorefiles());
   }
 
   @Override
   public long getMaxMemstoreTS() {
-    return StoreFile.getMaxMemstoreTSInList(this.getStorefiles());
+    return StoreUtils.getMaxMemstoreTSInList(this.getStorefiles());
   }
 
   /**
@@ -655,7 +663,7 @@ private StoreFile createStoreFileAndReader(final Path p) throws IOException {
 
   private StoreFile createStoreFileAndReader(final StoreFileInfo info) throws IOException {
     info.setRegionCoprocessorHost(this.region.getCoprocessorHost());
-    StoreFile storeFile = new StoreFile(this.getFileSystem(), info, this.conf, this.cacheConf,
+    StoreFile storeFile = new HStoreFile(this.getFileSystem(), info, this.conf, this.cacheConf,
        this.family.getBloomFilterType(), isPrimaryReplicaStore());
     storeFile.initReader();
     return storeFile;
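The HStore hunks above also show where the old static helpers went: getMaxSequenceIdInList and getMaxMemstoreTSInList now resolve through StoreUtils, and the per-file comparators move from StoreFile.Comparators to StoreFileComparators (see the DateTieredStoreEngine and DefaultStoreEngine hunks). A small caller-side sketch under those assumptions, with StoreFileComparators and StoreUtils assumed to live in org.apache.hadoop.hbase.regionserver alongside StoreFile; the method itself is illustrative and not part of the commit:

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFileComparators;
import org.apache.hadoop.hbase.regionserver.StoreUtils;

public class StoreFileHelpersSketch {
  // Orders files by sequence id and reports the highest id, using the relocated helpers.
  static long maxSeqIdOfSorted(Collection<StoreFile> files) {
    List<StoreFile> sorted = new ArrayList<>(files);
    sorted.sort(StoreFileComparators.SEQ_ID);          // was StoreFile.Comparators.SEQ_ID
    return StoreUtils.getMaxSequenceIdInList(sorted);  // was StoreFile.getMaxSequenceIdInList(...)
  }
}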
