Skip to content

Commit

Permalink
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than DFSOutputStream#writeChunk (cmccabe)
Browse files Browse the repository at this point in the history
  • Loading branch information
Colin Patrick Mccabe committed Apr 1, 2015
1 parent 8366a36 commit c94d594
Show file tree
Hide file tree
Showing 4 changed files with 25 additions and 17 deletions.
Expand Up @@ -21,6 +21,8 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.DataChecksum;
import org.apache.htrace.NullScope;
import org.apache.htrace.TraceScope;

import java.io.IOException;
import java.io.OutputStream;
Expand Down Expand Up @@ -194,16 +196,26 @@ protected int getChecksumSize() {
return sum.getChecksumSize();
}

/**
 * Hook allowing subclasses to open a trace scope around a run of chunked
 * checksum writes. The base implementation does not trace.
 *
 * @return a no-op scope; subclasses override to return a real one.
 */
protected TraceScope createWriteTraceScope() {
  // NullScope.INSTANCE is htrace's shared no-op scope; close() does nothing.
  TraceScope noOpScope = NullScope.INSTANCE;
  return noOpScope;
}

/** Generate checksums for the given data chunks and output chunks & checksums
* to the underlying output stream.
*/
private void writeChecksumChunks(byte b[], int off, int len)
throws IOException {
sum.calculateChunkedSums(b, off, len, checksum, 0);
for (int i = 0; i < len; i += sum.getBytesPerChecksum()) {
int chunkLen = Math.min(sum.getBytesPerChecksum(), len - i);
int ckOffset = i / sum.getBytesPerChecksum() * getChecksumSize();
writeChunk(b, off + i, chunkLen, checksum, ckOffset, getChecksumSize());
TraceScope scope = createWriteTraceScope();
try {
for (int i = 0; i < len; i += sum.getBytesPerChecksum()) {
int chunkLen = Math.min(sum.getBytesPerChecksum(), len - i);
int ckOffset = i / sum.getBytesPerChecksum() * getChecksumSize();
writeChunk(b, off + i, chunkLen, checksum, ckOffset,
getChecksumSize());
}
} finally {
scope.close();
}
}

Expand Down
3 changes: 3 additions & 0 deletions hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
Expand Up @@ -378,6 +378,9 @@ Release 2.8.0 - UNRELEASED

OPTIMIZATIONS

HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
DFSOutputStream#writeChunk (cmccabe)

BUG FIXES

HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.
Expand Down
Expand Up @@ -372,21 +372,14 @@ private void computePacketChunkSize(int psize, int csize) {
}
}

/**
 * Open a trace scope covering a full checksummed write on this stream,
 * tagged with the file path being written.
 *
 * @return an active scope named "DFSOutputStream#write" for {@code src}.
 */
protected TraceScope createWriteTraceScope() {
  TraceScope writeScope =
      dfsClient.getPathTraceScope("DFSOutputStream#write", src);
  return writeScope;
}

/**
 * Write one data chunk plus its checksum, tracing the call against the
 * file path being written.
 *
 * @see FSOutputSummer#writeChunk()
 */
@Override
protected synchronized void writeChunk(byte[] b, int offset, int len,
    byte[] checksum, int ckoff, int cklen) throws IOException {
  final TraceScope traceScope =
      dfsClient.getPathTraceScope("DFSOutputStream#writeChunk", src);
  try {
    // Delegate the actual work; the scope only brackets it for tracing.
    writeChunkImpl(b, offset, len, checksum, ckoff, cklen);
  } finally {
    traceScope.close();
  }
}

private synchronized void writeChunkImpl(byte[] b, int offset, int len,
byte[] checksum, int ckoff, int cklen) throws IOException {
dfsClient.checkOpen();
checkClosed();

Expand Down
Expand Up @@ -89,7 +89,7 @@ public void testWriteTraceHooks() throws Exception {
"org.apache.hadoop.hdfs.protocol.ClientProtocol.complete",
"ClientNamenodeProtocol#complete",
"newStreamForCreate",
"DFSOutputStream#writeChunk",
"DFSOutputStream#write",
"DFSOutputStream#close",
"dataStreamer",
"OpWriteBlockProto",
Expand Down Expand Up @@ -117,7 +117,7 @@ public void testWriteTraceHooks() throws Exception {
"org.apache.hadoop.hdfs.protocol.ClientProtocol.complete",
"ClientNamenodeProtocol#complete",
"newStreamForCreate",
"DFSOutputStream#writeChunk",
"DFSOutputStream#write",
"DFSOutputStream#close",
};
for (String desc : spansInTopTrace) {
Expand Down

0 comments on commit c94d594

Please sign in to comment.