From 42591c7de80f584fc7cbff1f0ae27f4fd746c48c Mon Sep 17 00:00:00 2001
From: Priyesh Karatha
Date: Wed, 29 Oct 2025 12:19:11 +0530
Subject: [PATCH] HDDS-13853. Removing HDDSLayout feature dependency from DNs.

---
 .../DeleteBlocksCommandHandler.java          | 15 +++---
 .../keyvalue/KeyValueContainerData.java      | 12 ++---
 .../KeyValueContainerMetadataInspector.java  | 48 +++++++++----------
 .../helpers/KeyValueContainerUtil.java       | 23 ++-------
 4 files changed, 34 insertions(+), 64 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
index 73be8a113342..6495c6af5883 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
@@ -45,7 +45,6 @@
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
@@ -65,7 +64,6 @@
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.metadata.DeleteTransactionStore;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
 import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
 import org.apache.hadoop.ozone.protocol.commands.DeleteBlockCommandStatus;
 import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
@@ -646,15 +644,14 @@ private void updateMetaData(KeyValueContainerData containerData,
         pendingDeleteBlocks);
     // Update pending deletion blocks count, blocks bytes and delete transaction ID in in-memory container status.
-    // Persist pending bytes only if the feature is finalized.
-    if (VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.DATA_DISTRIBUTION) && delTX.hasTotalBlockSize()) {
-      long pendingBytes = containerData.getBlockPendingDeletionBytes();
+    long pendingBytes = containerData.getBlockPendingDeletionBytes();
+    if (delTX.hasTotalBlockSize()) {
       pendingBytes += delTX.getTotalBlockSize();
-      metadataTable
-          .putWithBatch(batchOperation,
-              containerData.getPendingDeleteBlockBytesKey(),
-              pendingBytes);
     }
+    metadataTable
+        .putWithBatch(batchOperation,
+            containerData.getPendingDeleteBlockBytesKey(),
+            pendingBytes);
     containerData.incrPendingDeletionBlocks(newDeletionBlocks, delTX.hasTotalBlockSize() ?
         delTX.getTotalBlockSize() : 0);
     containerData.updateDeleteTransactionId(delTX.getTxID());

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
index b33c78d72fc0..68af4d72bc21 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
@@ -51,7 +51,6 @@
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.Table;
@@ -59,7 +58,6 @@
 import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
 import org.apache.hadoop.ozone.container.common.interfaces.DBHandle;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
-import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
 import org.yaml.snakeyaml.nodes.Tag;

 /**
@@ -387,10 +385,8 @@ public void updateAndCommitDBCounters(DBHandle db,
     metadataTable.putWithBatch(batchOperation, getBlockCountKey(), b.getCount() - deletedBlockCount);
     metadataTable.putWithBatch(batchOperation, getPendingDeleteBlockCountKey(),
         b.getPendingDeletion() - deletedBlockCount);
-    if (VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.DATA_DISTRIBUTION)) {
-      metadataTable.putWithBatch(batchOperation, getPendingDeleteBlockBytesKey(),
-          b.getPendingDeletionBytes() - releasedBytes);
-    }
+    metadataTable.putWithBatch(batchOperation, getPendingDeleteBlockBytesKey(),
+        b.getPendingDeletionBytes() - releasedBytes);

     db.getStore().getBatchHandler().commitBatchOperation(batchOperation);
   }
@@ -401,9 +397,7 @@ public void resetPendingDeleteBlockCount(DBHandle db) throws IOException {
     // Reset the metadata on disk.
     Table<String, Long> metadataTable = db.getStore().getMetadataTable();
     metadataTable.put(getPendingDeleteBlockCountKey(), 0L);
-    if (VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.DATA_DISTRIBUTION)) {
-      metadataTable.put(getPendingDeleteBlockBytesKey(), 0L);
-    }
+    metadataTable.put(getPendingDeleteBlockBytesKey(), 0L);
   }

   // NOTE: Below are some helper functions to format keys according
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java
index 3cbc8f9c5219..43b24b86dbb3 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java
@@ -34,7 +34,6 @@
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
 import org.apache.hadoop.hdds.server.JsonUtils;
-import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -46,7 +45,6 @@
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreWithIncrementalChunkList;
-import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -438,30 +436,28 @@ private boolean checkAndRepair(ObjectNode parent,
       errors.add(deleteCountError);
     }

-    if (VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.DATA_DISTRIBUTION)) {
-      // check and repair if db delete bytes mismatches delete transaction
-      JsonNode pendingDeletionBlockSize = dBMetadata.path(
-          OzoneConsts.PENDING_DELETE_BLOCK_BYTES);
-      final long dbDeleteBytes = jsonToLong(pendingDeletionBlockSize);
-      final JsonNode pendingDeleteBytesAggregate = aggregates.path(PendingDelete.BYTES);
-      final long deleteTransactionBytes = jsonToLong(pendingDeleteBytesAggregate);
-      if (dbDeleteBytes != deleteTransactionBytes) {
-        passed = false;
-        final BooleanSupplier deleteBytesRepairAction = () -> {
-          final String key = containerData.getPendingDeleteBlockBytesKey();
-          try {
-            metadataTable.put(key, deleteTransactionBytes);
-          } catch (IOException ex) {
-            LOG.error("Failed to reset {} for container {}.",
-                key, containerData.getContainerID(), ex);
-          }
-          return false;
-        };
-        final ObjectNode deleteBytesError = buildErrorAndRepair(
-            "dBMetadata." + OzoneConsts.PENDING_DELETE_BLOCK_BYTES,
-            pendingDeleteBytesAggregate, pendingDeletionBlockSize, deleteBytesRepairAction);
-        errors.add(deleteBytesError);
-      }
+    // check and repair if db delete bytes mismatches delete transaction
+    JsonNode pendingDeletionBlockSize = dBMetadata.path(
+        OzoneConsts.PENDING_DELETE_BLOCK_BYTES);
+    final long dbDeleteBytes = jsonToLong(pendingDeletionBlockSize);
+    final JsonNode pendingDeleteBytesAggregate = aggregates.path(PendingDelete.BYTES);
+    final long deleteTransactionBytes = jsonToLong(pendingDeleteBytesAggregate);
+    if (dbDeleteBytes != deleteTransactionBytes) {
+      passed = false;
+      final BooleanSupplier deleteBytesRepairAction = () -> {
+        final String key = containerData.getPendingDeleteBlockBytesKey();
+        try {
+          metadataTable.put(key, deleteTransactionBytes);
+        } catch (IOException ex) {
+          LOG.error("Failed to reset {} for container {}.",
+              key, containerData.getContainerID(), ex);
+        }
+        return false;
+      };
+      final ObjectNode deleteBytesError = buildErrorAndRepair(
+          "dBMetadata." + OzoneConsts.PENDING_DELETE_BLOCK_BYTES,
+          pendingDeleteBytesAggregate, pendingDeletionBlockSize, deleteBytesRepairAction);
+      errors.add(deleteBytesError);
     }

     // check and repair chunks dir.
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index b30fe57db893..0150355ed7ce 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -31,7 +31,6 @@
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerChecksumInfo;
-import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.checksum.ContainerChecksumTreeManager;
@@ -48,7 +47,6 @@
 import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaOneImpl;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl;
-import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -369,24 +367,9 @@ private static PendingDelete populatePendingDeletionMetadata(
     Long pendingDeletionBlockBytes = metadataTable.get(kvContainerData.getPendingDeleteBlockBytesKey());
     Long pendingDeleteBlockCount = metadataTable.get(kvContainerData.getPendingDeleteBlockCountKey());
-    if (!VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.DATA_DISTRIBUTION)) {
-      return handlePreDataDistributionFeature(pendingDeleteBlockCount, metadataTable, store, kvContainerData);
-    } else if (pendingDeleteBlockCount != null) {
-      return handlePostDataDistributionFeature(pendingDeleteBlockCount, pendingDeletionBlockBytes,
-          metadataTable, store, kvContainerData);
-    } else {
-      LOG.warn("Missing pendingDeleteBlockCount/size from {}: recalculate them from delete txn tables",
-          metadataTable.getName());
-      return getAggregatePendingDelete(store, kvContainerData, kvContainerData.getSchemaVersion());
-    }
-  }
-
-  private static PendingDelete handlePreDataDistributionFeature(
-      Long pendingDeleteBlockCount, Table<String, Long> metadataTable,
-      DatanodeStore store, KeyValueContainerData kvContainerData) throws IOException {
-    if (pendingDeleteBlockCount != null) {
-      return new PendingDelete(pendingDeleteBlockCount, 0L);
+    if (pendingDeleteBlockCount != null) {
+      return handlePendingDeletionBlockCountAndBytes(pendingDeleteBlockCount, pendingDeletionBlockBytes,
+          metadataTable, store, kvContainerData);
     } else {
       LOG.warn("Missing pendingDeleteBlockCount/size from {}: recalculate them from delete txn tables",
           metadataTable.getName());
@@ -394,7 +377,7 @@ private static PendingDelete handlePreDataDistributionFeature(
     }
   }

-  private static PendingDelete handlePostDataDistributionFeature(
+  private static PendingDelete handlePendingDeletionBlockCountAndBytes(
       Long pendingDeleteBlockCount, Long pendingDeletionBlockBytes,
       Table<String, Long> metadataTable, DatanodeStore store, KeyValueContainerData kvContainerData) throws IOException {
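
After this change the datanode reads, updates, and persists the pending-delete byte counter on every delete transaction; there is no HDDSLayoutFeature.DATA_DISTRIBUTION finalization check in front of it any more. Below is a minimal, self-contained sketch of that bookkeeping, not Ozone code: a HashMap stands in for the RocksDB-backed container metadata table, and the key string and variable names are illustrative only.

import java.util.HashMap;
import java.util.Map;

// Illustrative sketch of the unconditional pending-delete-bytes bookkeeping.
// The map below is a stand-in for the datanode's container metadata table.
public class PendingDeleteBytesSketch {
  private static final String PENDING_DELETE_BLOCK_BYTES_KEY = "pendingDeleteBlockBytes";

  public static void main(String[] args) {
    Map<String, Long> metadataTable = new HashMap<>();

    // A delete transaction may or may not report the total size of the blocks it deletes.
    boolean hasTotalBlockSize = true;
    long totalBlockSize = 4096L;

    // Read the current counter, add the reported bytes if present, and always write it back.
    long pendingBytes = metadataTable.getOrDefault(PENDING_DELETE_BLOCK_BYTES_KEY, 0L);
    if (hasTotalBlockSize) {
      pendingBytes += totalBlockSize;
    }
    metadataTable.put(PENDING_DELETE_BLOCK_BYTES_KEY, pendingBytes);

    System.out.println("pending delete bytes = " + metadataTable.get(PENDING_DELETE_BLOCK_BYTES_KEY));
  }
}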