Skip to content

Commit

Permalink
Review round
Browse files Browse the repository at this point in the history
  • Loading branch information
jpountz committed May 29, 2015
1 parent b1139c2 commit 2bc10bf
Show file tree
Hide file tree
Showing 4 changed files with 18 additions and 16 deletions.
Expand Up @@ -19,9 +19,10 @@

package org.elasticsearch.common.compress;

/** Exception indicating that we were expecting something compressed, which
* was not compressed or corrupted so that the compression format could not
* be detected. */
import org.elasticsearch.common.xcontent.XContent;

/** Exception indicating that we were expecting some {@link XContent} but could
* not detect its type. */
public class NotXContentException extends RuntimeException {

public NotXContentException(String message) {
Expand Down
Expand Up @@ -49,6 +49,9 @@ public class DeflateCompressor implements Compressor {
private static final byte[] HEADER = new byte[] { 'D', 'F', 'L', '\0' };
// 3 is a good trade-off between speed and compression ratio
private static final int LEVEL = 3;
// We use buffering on the input and output of in/def-laters in order to
// limit the number of JNI calls
private static final int BUFFER_SIZE = 4096;

@Override
public boolean isCompressed(BytesReference bytes) {
Expand Down Expand Up @@ -94,9 +97,8 @@ public StreamInput streamInput(StreamInput in) throws IOException {

final boolean nowrap = true;
final Inflater inflater = new Inflater(nowrap);
InputStream decompressedIn = new InflaterInputStream(in, inflater);
// Buffering is important to avoid JNI calls on every call to read
decompressedIn = new BufferedInputStream(decompressedIn);
InputStream decompressedIn = new InflaterInputStream(in, inflater, BUFFER_SIZE);
decompressedIn = new BufferedInputStream(decompressedIn, BUFFER_SIZE);
return new InputStreamStreamInput(decompressedIn) {
private boolean closed = false;

Expand All @@ -120,9 +122,8 @@ public StreamOutput streamOutput(StreamOutput out) throws IOException {
final boolean nowrap = true;
final Deflater deflater = new Deflater(LEVEL, nowrap);
final boolean syncFlush = true;
OutputStream compressedOut = new DeflaterOutputStream(out, deflater, syncFlush);
// Buffering is important to avoid JNI calls on every call to write
compressedOut = new BufferedOutputStream(compressedOut);
OutputStream compressedOut = new DeflaterOutputStream(out, deflater, BUFFER_SIZE, syncFlush);
compressedOut = new BufferedOutputStream(compressedOut, BUFFER_SIZE);
return new OutputStreamStreamOutput(compressedOut) {
private boolean closed = false;

Expand Down
Expand Up @@ -417,10 +417,10 @@ public static void writeRawField(String field, BytesReference source, XContentBu
Compressor compressor = CompressorFactory.compressor(source);
if (compressor != null) {
InputStream compressedStreamInput = compressor.streamInput(source.streamInput());
XContentType contentType = XContentFactory.xContentType(compressedStreamInput);
if (compressedStreamInput.markSupported() == false) {
compressedStreamInput = new BufferedInputStream(compressedStreamInput);
}
XContentType contentType = XContentFactory.xContentType(compressedStreamInput);
if (contentType == builder.contentType()) {
builder.rawField(field, compressedStreamInput);
} else {
Expand Down
Expand Up @@ -37,8 +37,6 @@
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.compress.NotXContentException;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.common.xcontent.XContentParser;
Expand Down Expand Up @@ -150,10 +148,12 @@ public BytesReference value(Object value) {
try {
return CompressorFactory.uncompressIfNeeded(bytes);
} catch (NotXContentException e) {
// This is a BUG! We try to decompress by detecting a header in
// the stored bytes but since we accept arbitrary bytes, we have
// no guarantee that uncompressed bytes will be detected as
// compressed!
// NOTE: previous versions of Elasticsearch used to try to detect if
// data were compressed. However this is illegal as a user may have
// submitted arbitrary data which looks like it is compressed to
// elasticsearch but is not. So we removed the ability to compress
// binary fields and keep this empty catch block for backward
// compatibility with 1.x
}
}
return bytes;
Expand Down

0 comments on commit 2bc10bf

Please sign in to comment.