@@ -45,7 +45,6 @@
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
@@ -65,7 +64,6 @@
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.metadata.DeleteTransactionStore;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
 import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
 import org.apache.hadoop.ozone.protocol.commands.DeleteBlockCommandStatus;
 import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
@@ -646,15 +644,14 @@ private void updateMetaData(KeyValueContainerData containerData,
         pendingDeleteBlocks);

     // Update pending deletion blocks count, blocks bytes and delete transaction ID in in-memory container status.
-    // Persist pending bytes only if the feature is finalized.
-    if (VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.DATA_DISTRIBUTION) && delTX.hasTotalBlockSize()) {
-      long pendingBytes = containerData.getBlockPendingDeletionBytes();
+    long pendingBytes = containerData.getBlockPendingDeletionBytes();
+    if (delTX.hasTotalBlockSize()) {
       pendingBytes += delTX.getTotalBlockSize();
-      metadataTable
-          .putWithBatch(batchOperation,
-          containerData.getPendingDeleteBlockBytesKey(),
-          pendingBytes);
     }
+    metadataTable
+        .putWithBatch(batchOperation,
+        containerData.getPendingDeleteBlockBytesKey(),
+        pendingBytes);
     containerData.incrPendingDeletionBlocks(newDeletionBlocks,
         delTX.hasTotalBlockSize() ? delTX.getTotalBlockSize() : 0);
     containerData.updateDeleteTransactionId(delTX.getTxID());
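A note on the updateMetaData hunk above: with the layout-feature gate removed, the pending-delete byte counter is persisted for every delete transaction, while the hasTotalBlockSize() presence check stays because the totalBlockSize proto field is optional and may be absent. The following is a hedged sketch of that accounting using only methods visible in the diff; the helper name nextPendingDeleteBytes is illustrative and not part of the patched class.

  // Hedged sketch: derive the new pending-delete byte total for a container
  // from one incoming delete transaction. hasTotalBlockSize() still guards
  // the addition because the field may not be populated.
  private static long nextPendingDeleteBytes(KeyValueContainerData containerData,
      DeletedBlocksTransaction delTX) {
    long pendingBytes = containerData.getBlockPendingDeletionBytes();
    if (delTX.hasTotalBlockSize()) {
      pendingBytes += delTX.getTotalBlockSize();
    }
    return pendingBytes;
  }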
@@ -51,15 +51,13 @@
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
 import org.apache.hadoop.ozone.container.common.interfaces.DBHandle;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
-import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
 import org.yaml.snakeyaml.nodes.Tag;

 /**
@@ -387,10 +385,8 @@ public void updateAndCommitDBCounters(DBHandle db,
     metadataTable.putWithBatch(batchOperation, getBlockCountKey(), b.getCount() - deletedBlockCount);
     metadataTable.putWithBatch(batchOperation, getPendingDeleteBlockCountKey(),
         b.getPendingDeletion() - deletedBlockCount);
-    if (VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.DATA_DISTRIBUTION)) {
-      metadataTable.putWithBatch(batchOperation, getPendingDeleteBlockBytesKey(),
-          b.getPendingDeletionBytes() - releasedBytes);
-    }
+    metadataTable.putWithBatch(batchOperation, getPendingDeleteBlockBytesKey(),
+        b.getPendingDeletionBytes() - releasedBytes);

     db.getStore().getBatchHandler().commitBatchOperation(batchOperation);
   }
@@ -401,9 +397,7 @@ public void resetPendingDeleteBlockCount(DBHandle db) throws IOException {
     // Reset the metadata on disk.
     Table<String, Long> metadataTable = db.getStore().getMetadataTable();
     metadataTable.put(getPendingDeleteBlockCountKey(), 0L);
-    if (VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.DATA_DISTRIBUTION)) {
-      metadataTable.put(getPendingDeleteBlockBytesKey(), 0L);
-    }
+    metadataTable.put(getPendingDeleteBlockBytesKey(), 0L);
   }

   // NOTE: Below are some helper functions to format keys according
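As context for the updateAndCommitDBCounters hunk above: the block count, pending-delete block count, and pending-delete bytes are all staged in one BatchOperation and committed together, so the on-disk counters cannot drift apart mid-update. Below is a rough sketch of that pattern, not the exact patched method; it assumes the try-with-resources BatchOperationHandler idiom and the key-name accessors of the surrounding container-data class, and the method name writeCountersAtomically is hypothetical.

  // Hedged sketch of the batched counter update: stage all writes in one
  // batch against the metadata table, then commit them atomically.
  void writeCountersAtomically(DBHandle db, long blockCount,
      long pendingDeleteCount, long pendingDeleteBytes) throws IOException {
    try (BatchOperation batch = db.getStore().getBatchHandler().initBatchOperation()) {
      Table<String, Long> metadataTable = db.getStore().getMetadataTable();
      metadataTable.putWithBatch(batch, getBlockCountKey(), blockCount);
      metadataTable.putWithBatch(batch, getPendingDeleteBlockCountKey(), pendingDeleteCount);
      metadataTable.putWithBatch(batch, getPendingDeleteBlockBytesKey(), pendingDeleteBytes);
      db.getStore().getBatchHandler().commitBatchOperation(batch);
    }
  }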
@@ -34,7 +34,6 @@
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
 import org.apache.hadoop.hdds.server.JsonUtils;
-import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -46,7 +45,6 @@
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreWithIncrementalChunkList;
-import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -438,30 +436,28 @@ private boolean checkAndRepair(ObjectNode parent,
       errors.add(deleteCountError);
     }

-    if (VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.DATA_DISTRIBUTION)) {
-      // check and repair if db delete bytes mismatches delete transaction
-      JsonNode pendingDeletionBlockSize = dBMetadata.path(
-          OzoneConsts.PENDING_DELETE_BLOCK_BYTES);
-      final long dbDeleteBytes = jsonToLong(pendingDeletionBlockSize);
-      final JsonNode pendingDeleteBytesAggregate = aggregates.path(PendingDelete.BYTES);
-      final long deleteTransactionBytes = jsonToLong(pendingDeleteBytesAggregate);
-      if (dbDeleteBytes != deleteTransactionBytes) {
-        passed = false;
-        final BooleanSupplier deleteBytesRepairAction = () -> {
-          final String key = containerData.getPendingDeleteBlockBytesKey();
-          try {
-            metadataTable.put(key, deleteTransactionBytes);
-          } catch (IOException ex) {
-            LOG.error("Failed to reset {} for container {}.",
-                key, containerData.getContainerID(), ex);
-          }
-          return false;
-        };
-        final ObjectNode deleteBytesError = buildErrorAndRepair(
-            "dBMetadata." + OzoneConsts.PENDING_DELETE_BLOCK_BYTES,
-            pendingDeleteBytesAggregate, pendingDeletionBlockSize, deleteBytesRepairAction);
-        errors.add(deleteBytesError);
-      }
+    // check and repair if db delete bytes mismatches delete transaction
+    JsonNode pendingDeletionBlockSize = dBMetadata.path(
+        OzoneConsts.PENDING_DELETE_BLOCK_BYTES);
+    final long dbDeleteBytes = jsonToLong(pendingDeletionBlockSize);
+    final JsonNode pendingDeleteBytesAggregate = aggregates.path(PendingDelete.BYTES);
+    final long deleteTransactionBytes = jsonToLong(pendingDeleteBytesAggregate);
+    if (dbDeleteBytes != deleteTransactionBytes) {
+      passed = false;
+      final BooleanSupplier deleteBytesRepairAction = () -> {
+        final String key = containerData.getPendingDeleteBlockBytesKey();
+        try {
+          metadataTable.put(key, deleteTransactionBytes);
+        } catch (IOException ex) {
+          LOG.error("Failed to reset {} for container {}.",
+              key, containerData.getContainerID(), ex);
+        }
+        return false;
+      };
+      final ObjectNode deleteBytesError = buildErrorAndRepair(
+          "dBMetadata." + OzoneConsts.PENDING_DELETE_BLOCK_BYTES,
+          pendingDeleteBytesAggregate, pendingDeletionBlockSize, deleteBytesRepairAction);
+      errors.add(deleteBytesError);
     }

     // check and repair chunks dir.
@@ -31,7 +31,6 @@
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerChecksumInfo;
-import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.checksum.ContainerChecksumTreeManager;
@@ -48,7 +47,6 @@
 import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaOneImpl;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl;
-import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -369,32 +367,17 @@ private static PendingDelete populatePendingDeletionMetadata(
     Long pendingDeletionBlockBytes = metadataTable.get(kvContainerData.getPendingDeleteBlockBytesKey());
     Long pendingDeleteBlockCount = metadataTable.get(kvContainerData.getPendingDeleteBlockCountKey());

-    if (!VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.DATA_DISTRIBUTION)) {
-      return handlePreDataDistributionFeature(pendingDeleteBlockCount, metadataTable, store, kvContainerData);
-    } else if (pendingDeleteBlockCount != null) {
-      return handlePostDataDistributionFeature(pendingDeleteBlockCount, pendingDeletionBlockBytes,
-          metadataTable, store, kvContainerData);
-    } else {
-      LOG.warn("Missing pendingDeleteBlockCount/size from {}: recalculate them from delete txn tables",
-          metadataTable.getName());
-      return getAggregatePendingDelete(store, kvContainerData, kvContainerData.getSchemaVersion());
-    }
-  }
-
-  private static PendingDelete handlePreDataDistributionFeature(
-      Long pendingDeleteBlockCount, Table<String, Long> metadataTable,
-      DatanodeStore store, KeyValueContainerData kvContainerData) throws IOException {
-
     if (pendingDeleteBlockCount != null) {
-      return new PendingDelete(pendingDeleteBlockCount, 0L);
+      return handlePendingDeletionBlockCountAndBytes(pendingDeleteBlockCount, pendingDeletionBlockBytes,
+          metadataTable, store, kvContainerData);
     } else {
       LOG.warn("Missing pendingDeleteBlockCount/size from {}: recalculate them from delete txn tables",
           metadataTable.getName());
       return getAggregatePendingDelete(store, kvContainerData, kvContainerData.getSchemaVersion());
     }
   }

-  private static PendingDelete handlePostDataDistributionFeature(
+  private static PendingDelete handlePendingDeletionBlockCountAndBytes(
       Long pendingDeleteBlockCount, Long pendingDeletionBlockBytes,
       Table<String, Long> metadataTable, DatanodeStore store,
       KeyValueContainerData kvContainerData) throws IOException {
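To summarize the control flow of the last hunk: populatePendingDeletionMetadata now prefers the counters stored in the metadata table and only falls back to re-aggregating the delete transaction tables when the count key is missing. The sketch below compresses that decision into one hypothetical helper; the loadPendingDelete name is illustrative, and treating an absent bytes value as 0L is an assumption about handlePendingDeletionBlockCountAndBytes rather than something the diff states.

  // Hedged sketch of the simplified lookup-or-recompute flow.
  // PendingDelete(count, bytes) is the same value type used in the diff.
  private static PendingDelete loadPendingDelete(Table<String, Long> metadataTable,
      DatanodeStore store, KeyValueContainerData kvContainerData) throws IOException {
    Long count = metadataTable.get(kvContainerData.getPendingDeleteBlockCountKey());
    Long bytes = metadataTable.get(kvContainerData.getPendingDeleteBlockBytesKey());
    if (count == null) {
      // Counters missing from the metadata table: recompute both from the
      // delete transaction tables for this container's schema version.
      return getAggregatePendingDelete(store, kvContainerData, kvContainerData.getSchemaVersion());
    }
    return new PendingDelete(count, bytes != null ? bytes : 0L);
  }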