diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
index 13a5e26423b90..d2998b6e9d1af 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
@@ -21,6 +21,8 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.util.DataChecksum;
+import org.apache.htrace.NullScope;
+import org.apache.htrace.TraceScope;
 
 import java.io.IOException;
 import java.io.OutputStream;
@@ -194,16 +196,26 @@ protected int getChecksumSize() {
     return sum.getChecksumSize();
   }
 
+  protected TraceScope createWriteTraceScope() {
+    return NullScope.INSTANCE;
+  }
+
   /** Generate checksums for the given data chunks and output chunks & checksums
    * to the underlying output stream.
    */
   private void writeChecksumChunks(byte b[], int off, int len)
   throws IOException {
     sum.calculateChunkedSums(b, off, len, checksum, 0);
-    for (int i = 0; i < len; i += sum.getBytesPerChecksum()) {
-      int chunkLen = Math.min(sum.getBytesPerChecksum(), len - i);
-      int ckOffset = i / sum.getBytesPerChecksum() * getChecksumSize();
-      writeChunk(b, off + i, chunkLen, checksum, ckOffset, getChecksumSize());
+    TraceScope scope = createWriteTraceScope();
+    try {
+      for (int i = 0; i < len; i += sum.getBytesPerChecksum()) {
+        int chunkLen = Math.min(sum.getBytesPerChecksum(), len - i);
+        int ckOffset = i / sum.getBytesPerChecksum() * getChecksumSize();
+        writeChunk(b, off + i, chunkLen, checksum, ckOffset,
+            getChecksumSize());
+      }
+    } finally {
+      scope.close();
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 435fdd7b8150d..b5591e0723dfc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -378,6 +378,9 @@ Release 2.8.0 - UNRELEASED
 
     OPTIMIZATIONS
 
+    HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
+    DFSOutputStream#writeChunk (cmccabe)
+
   BUG FIXES
 
     HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 933d8e6a78677..c88639da030d5 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -372,21 +372,14 @@ private void computePacketChunkSize(int psize, int csize) {
     }
   }
 
+  protected TraceScope createWriteTraceScope() {
+    return dfsClient.getPathTraceScope("DFSOutputStream#write", src);
+  }
+
   // @see FSOutputSummer#writeChunk()
   @Override
   protected synchronized void writeChunk(byte[] b, int offset, int len,
       byte[] checksum, int ckoff, int cklen) throws IOException {
-    TraceScope scope =
-        dfsClient.getPathTraceScope("DFSOutputStream#writeChunk", src);
-    try {
-      writeChunkImpl(b, offset, len, checksum, ckoff, cklen);
-    } finally {
-      scope.close();
-    }
-  }
-
-  private synchronized void writeChunkImpl(byte[] b, int offset, int len,
-      byte[] checksum, int ckoff, int cklen) throws IOException {
     dfsClient.checkOpen();
     checkClosed();
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
index 3720abe660312..01361b5d8a22e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
@@ -89,7 +89,7 @@ public void testWriteTraceHooks() throws Exception {
       "org.apache.hadoop.hdfs.protocol.ClientProtocol.complete",
       "ClientNamenodeProtocol#complete",
       "newStreamForCreate",
-      "DFSOutputStream#writeChunk",
+      "DFSOutputStream#write",
       "DFSOutputStream#close",
       "dataStreamer",
       "OpWriteBlockProto",
@@ -117,7 +117,7 @@ public void testWriteTraceHooks() throws Exception {
       "org.apache.hadoop.hdfs.protocol.ClientProtocol.complete",
       "ClientNamenodeProtocol#complete",
       "newStreamForCreate",
-      "DFSOutputStream#writeChunk",
+      "DFSOutputStream#write",
       "DFSOutputStream#close",
     };
     for (String desc : spansInTopTrace) {
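The patch hinges on a template-method hook: FSOutputSummer owns the chunk loop
and opens a single trace scope around it, while DFSOutputStream supplies the
real scope by overriding createWriteTraceScope(). Below is a minimal,
self-contained Java sketch of that pattern. The TraceScope and NullScope types
here are simplified stand-ins for the org.apache.htrace classes, and the
ChecksumSummer/TracedStream names are invented for illustration; this is a
sketch of the design, not the actual Hadoop code.

// Stand-in for org.apache.htrace.TraceScope: a single abstract method,
// so a lambda can implement it in the demo below.
interface TraceScope extends AutoCloseable {
  @Override
  void close();
}

// Stand-in for org.apache.htrace.NullScope: a shared no-op scope returned
// when the subclass does not override the hook.
final class NullScope implements TraceScope {
  static final NullScope INSTANCE = new NullScope();
  private NullScope() {
  }
  @Override
  public void close() {
    // no-op: nothing was traced, so there is nothing to close
  }
}

// Plays the role of FSOutputSummer: owns the chunk loop and wraps the whole
// loop in one scope, instead of opening one scope per chunk.
abstract class ChecksumSummer {
  private static final int BYTES_PER_CHECKSUM = 512; // illustrative value

  // The hook. The base class traces nothing, so generic (non-HDFS)
  // subclasses pay no tracing cost on the write path.
  protected TraceScope createWriteTraceScope() {
    return NullScope.INSTANCE;
  }

  void writeChecksumChunks(byte[] b, int off, int len) {
    TraceScope scope = createWriteTraceScope();
    try {
      for (int i = 0; i < len; i += BYTES_PER_CHECKSUM) {
        writeChunk(b, off + i, Math.min(BYTES_PER_CHECKSUM, len - i));
      }
    } finally {
      scope.close();
    }
  }

  protected abstract void writeChunk(byte[] b, int off, int len);
}

// Plays the role of DFSOutputStream: overrides the hook to open a real span.
class TracedStream extends ChecksumSummer {
  @Override
  protected TraceScope createWriteTraceScope() {
    // The real code returns
    // dfsClient.getPathTraceScope("DFSOutputStream#write", src).
    System.out.println("open span: DFSOutputStream#write");
    return () -> System.out.println("close span: DFSOutputStream#write");
  }

  @Override
  protected void writeChunk(byte[] b, int off, int len) {
    System.out.println("writeChunk(off=" + off + ", len=" + len + ")");
  }

  public static void main(String[] args) {
    // A 1300-byte write spans three chunks but opens only one trace scope.
    new TracedStream().writeChecksumChunks(new byte[1300], 0, 1300);
  }
}

With scope creation hoisted out of writeChunk, a large write produces one span
covering all of its chunks rather than a span per 512-byte chunk, which is why
TestTracing now expects the span name "DFSOutputStream#write" instead of
"DFSOutputStream#writeChunk".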