Skip to content

Commit

Permalink
HDFS-8372. Erasure coding: compute storage type quotas for striped files, to be consistent with HDFS-8327. Contributed by Zhe Zhang.
Browse files Browse the repository at this point in the history
  • Loading branch information
Jing9 authored and Zhe Zhang committed May 26, 2015
1 parent 8d3030f commit 97a2396
Show file tree
Hide file tree
Showing 4 changed files with 64 additions and 26 deletions.
3 changes: 3 additions & 0 deletions hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
Expand Up @@ -198,3 +198,6 @@

HDFS-7678. Erasure coding: DFSInputStream with decode functionality (pread).
(Zhe Zhang)

HDFS-8372. Erasure coding: compute storage type quotas for striped files,
to be consistent with HDFS-8327. (Zhe Zhang via jing9)
Expand Up @@ -21,6 +21,7 @@
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStripedUnderConstruction;

/**
* Feature for file with striped blocks
Expand Down Expand Up @@ -78,20 +79,23 @@ void addBlock(BlockInfoStriped newBlock) {
}
}

boolean removeLastBlock(Block oldblock) {
BlockInfoStripedUnderConstruction removeLastBlock(
Block oldblock) {
if (blocks == null || blocks.length == 0) {
return false;
return null;
}
int newSize = blocks.length - 1;
if (!blocks[newSize].equals(oldblock)) {
return false;
return null;
}

BlockInfoStripedUnderConstruction uc =
(BlockInfoStripedUnderConstruction) blocks[newSize];
//copy to a new list
BlockInfoStriped[] newlist = new BlockInfoStriped[newSize];
System.arraycopy(blocks, 0, newlist, 0, newSize);
setBlocks(newlist);
return true;
return uc;
}

void truncateStripedBlocks(int n) {
Expand Down
Expand Up @@ -43,6 +43,7 @@
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStripedUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
Expand Down Expand Up @@ -295,7 +296,7 @@ public void convertLastBlockToUC(BlockInfo lastBlock,
* Remove a block from the block list. This block should be
* the last one on the list.
*/
BlockInfoContiguousUnderConstruction removeLastBlock(Block oldblock) {
BlockInfoUnderConstruction removeLastBlock(Block oldblock) {
Preconditions.checkState(isUnderConstruction(),
"file is no longer under construction");
FileWithStripedBlocksFeature sb = getStripedBlocksFeature();
Expand All @@ -317,7 +318,7 @@ BlockInfoContiguousUnderConstruction removeLastBlock(Block oldblock) {
return uc;
} else {
assert hasNoContiguousBlock();
return null;
return sb.removeLastBlock(oldblock);
}
}

Expand Down Expand Up @@ -676,7 +677,7 @@ public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps,
final long ssDeltaNoReplication;
short replication;
if (isStriped()) {
return computeQuotaUsageWithStriped(bsps, counts);
return computeQuotaUsageWithStriped(bsp, counts);
}

if (last < lastSnapshotId) {
Expand All @@ -702,11 +703,15 @@ public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps,
}

/**
* Compute quota of striped file
* Compute quota of striped file. Note that currently EC files do not support
* append/hflush/hsync, thus the file length recorded in snapshots should be
* the same with the current file length.
*/
public final QuotaCounts computeQuotaUsageWithStriped(
BlockStoragePolicySuite bsps, QuotaCounts counts) {
return null;
BlockStoragePolicy bsp, QuotaCounts counts) {
counts.addNameSpace(1);
counts.add(storagespaceConsumed(bsp));
return counts;
}

@Override
Expand Down Expand Up @@ -828,21 +833,44 @@ public final long computeFileSize(boolean includesLastUcBlock,
* Use preferred block size for the last block if it is under construction.
*/
public final QuotaCounts storagespaceConsumed(BlockStoragePolicy bsp) {
QuotaCounts counts = new QuotaCounts.Builder().build();
if (isStriped()) {
return storagespaceConsumedWithStriped(bsp);
return storagespaceConsumedWithStriped();
} else {
return storagespaceConsumedWithReplication(bsp);
}
}

public final QuotaCounts storagespaceConsumedWithStriped(
BlockStoragePolicy bsp) {
return null;
// TODO: support EC with heterogeneous storage
public final QuotaCounts storagespaceConsumedWithStriped() {
QuotaCounts counts = new QuotaCounts.Builder().build();
BlockInfo[] blockInfos = getBlocks();
if (blockInfos == null || blockInfos.length == 0) {
return counts;
}

long size;
final int last = blockInfos.length - 1;
if (blockInfos[last] instanceof BlockInfoStripedUnderConstruction) {
BlockInfoStripedUnderConstruction blockInfoStripedUC
=(BlockInfoStripedUnderConstruction)blockInfos[last];
size = getPreferredBlockSize() * blockInfoStripedUC.getTotalBlockNum();
} else {
// In case of last block is complete
BlockInfoStriped blockInfoStriped = (BlockInfoStriped)blockInfos[last];
size = blockInfoStriped.spaceConsumed();
}
for (int i = 0; i < last; i++) {
BlockInfoStriped blockInfoStriped = (BlockInfoStriped)blockInfos[i];
size += blockInfoStriped.spaceConsumed();
}

counts.addStorageSpace(size);
return counts;
}

public final QuotaCounts storagespaceConsumedWithReplication(
BlockStoragePolicy bsp) { QuotaCounts counts = new QuotaCounts.Builder().build();
BlockStoragePolicy bsp) {
QuotaCounts counts = new QuotaCounts.Builder().build();
final Iterable<BlockInfo> blocks;
FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
if (sf == null) {
Expand Down Expand Up @@ -965,6 +993,7 @@ public long collectBlocksBeyondMax(final long max,
/**
* compute the quota usage change for a truncate op
* @param newLength the length for truncation
* TODO: properly handle striped blocks (HDFS-7622)
**/
void computeQuotaDeltaForTruncate(
long newLength, BlockStoragePolicy bsps,
Expand Down
Expand Up @@ -27,6 +27,7 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStripedUnderConstruction;
Expand All @@ -45,6 +46,11 @@ public class TestStripedINodeFile {
private static final PermissionStatus perm = new PermissionStatus(
"userName", null, FsPermission.getDefault());

private final BlockStoragePolicySuite defaultSuite =
BlockStoragePolicySuite.createDefaultSuite();
private final BlockStoragePolicy defaultPolicy =
defaultSuite.getDefaultPolicy();

private static INodeFile createStripedINodeFile() {
return new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
null, (short)0, 1024L, HdfsServerConstants.COLD_STORAGE_POLICY_ID);
Expand Down Expand Up @@ -109,8 +115,8 @@ public void testBlockStripedConsumedSpace()
// a. <Cell Size> * (<Num Stripes> - 1) * <Total Block Num> = 0
// b. <Num Bytes> % <Num Bytes per Stripes> = 1
// c. <Last Stripe Length> * <Parity Block Num> = 1 * 3
assertEquals(4, inf.storagespaceConsumedWithStriped(null));
assertEquals(4, inf.storagespaceConsumed(null));
assertEquals(4, inf.storagespaceConsumedWithStriped().getStorageSpace());
assertEquals(4, inf.storagespaceConsumed(defaultPolicy).getStorageSpace());
}

@Test
Expand All @@ -134,8 +140,8 @@ public void testMultipleBlockStripedConsumedSpace()
inf.addBlock(blockInfoStriped1);
inf.addBlock(blockInfoStriped2);
// This is the double size of one block in above case.
assertEquals(4 * 2, inf.storagespaceConsumedWithStriped(null));
assertEquals(4 * 2, inf.storagespaceConsumed(null));
assertEquals(4 * 2, inf.storagespaceConsumedWithStriped().getStorageSpace());
assertEquals(4 * 2, inf.storagespaceConsumed(defaultPolicy).getStorageSpace());
}

@Test
Expand Down Expand Up @@ -188,10 +194,8 @@ public void testBlockStripedComputeQuotaUsage()
blockInfoStriped.setNumBytes(100);
inf.addBlock(blockInfoStriped);

BlockStoragePolicySuite suite =
BlockStoragePolicySuite.createDefaultSuite();
QuotaCounts counts =
inf.computeQuotaUsageWithStriped(suite,
inf.computeQuotaUsageWithStriped(defaultPolicy,
new QuotaCounts.Builder().build());
assertEquals(1, counts.getNameSpace());
// The total consumed space is the sum of
Expand All @@ -215,10 +219,8 @@ public void testBlockStripedUCComputeQuotaUsage()
bInfoStripedUC.setNumBytes(100);
inf.addBlock(bInfoStripedUC);

BlockStoragePolicySuite suite
= BlockStoragePolicySuite.createDefaultSuite();
QuotaCounts counts
= inf.computeQuotaUsageWithStriped(suite,
= inf.computeQuotaUsageWithStriped(defaultPolicy,
new QuotaCounts.Builder().build());
assertEquals(1024, inf.getPreferredBlockSize());
assertEquals(1, counts.getNameSpace());
Expand Down

0 comments on commit 97a2396

Please sign in to comment.