Skip to content
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@

package org.apache.hadoop.ozone.container.keyvalue;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
Expand Down Expand Up @@ -69,6 +70,7 @@
import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl;
import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerDummyImpl;
import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerFactory;
import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
Expand Down Expand Up @@ -98,6 +100,7 @@
import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.putBlockResponseSuccess;
import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.unsupportedRequest;
import static org.apache.hadoop.hdds.scm.utils.ClientCommandsUtils.getReadChunkVersion;
import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_BLOCK;

import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
import org.slf4j.Logger;
Expand Down Expand Up @@ -460,6 +463,20 @@ ContainerCommandResponseProto handlePutBlock(
ContainerProtos.BlockData data = request.getPutBlock().getBlockData();
BlockData blockData = BlockData.getFromProtoBuf(data);
Preconditions.checkNotNull(blockData);
ChunkLayOutVersion layoutVersion =
ChunkLayOutVersion.getConfiguredVersion(conf);
if (layoutVersion == FILE_PER_BLOCK &&
// ChunkManagerDummyImpl doesn't persist to disk, don't check here
!chunkManager.getClass()
.isAssignableFrom(ChunkManagerDummyImpl.class)) {
Comment on lines +468 to +471
Copy link
Contributor

@adoroszlai adoroszlai Aug 17, 2021

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think layout-specific code would be better added to each ChunkManager implementation instead of such == and isAssignableFrom checks. Take a look at finishWriteChunks() and shutdown().

File blockFile = layoutVersion
.getChunkFile(kvContainer.getContainerData(),
blockData.getBlockID(), null);
// ensure that the putBlock length <= blockFile length
Preconditions.checkArgument(blockFile.length() >= blockData.getSize(),
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Under what conditions could this be violated?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@smengcl This is just a safety check. The Ozone streaming write path will not use the current ratisAsyncApi, which ensures sequential transactions, i.e., putBlock would always occur after writeChunk. To be on the safe side, this check is added. We may remove this after the complete streaming implementation if this check turns out to be redundant.
cc : @mukul1987

"BlockData in putBlock() has more length "
+ "than the actual data written into the disk");
}

boolean incrKeyCount = false;
if (!request.getPutBlock().hasEof() || request.getPutBlock().getEof()) {
Expand All @@ -478,7 +495,7 @@ ContainerCommandResponseProto handlePutBlock(
metrics.incContainerBytesStats(Type.PutBlock, numBytes);
} catch (StorageContainerException ex) {
return ContainerUtils.logAndReturnError(LOG, ex, request);
} catch (IOException ex) {
} catch (IOException | IllegalArgumentException ex) {
return ContainerUtils.logAndReturnError(LOG,
new StorageContainerException("Put Key failed", ex, IO_EXCEPTION),
request);
Expand Down