Reformat files in compress.lzf to use the formatting standards used by Voldemort.
commit f96f18dd8e0dbb4ba432e4d362c5f066c2cc019f (1 parent: a345e2c)
authored by @ijuma
src/java/voldemort/store/compress/lzf/ChunkEncoder.java (117 lines changed)
@@ -1,29 +1,32 @@
-/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
- * file except in compliance with the License. You may obtain a copy of the License at
- *
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
* http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under
- * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
- * OF ANY KIND, either express or implied. See the License for the specific language
- * governing permissions and limitations under the License.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
*/
package voldemort.store.compress.lzf;
/**
- * Class that handles actual encoding of individual chunks.
- * Resulting chunks can be compressed or non-compressed; compression
- * is only used if it actually reduces chunk size (including overhead
- * of additional header bytes)
+ * Class that handles actual encoding of individual chunks. Resulting chunks can
+ * be compressed or non-compressed; compression is only used if it actually
+ * reduces chunk size (including overhead of additional header bytes)
*<p>
- * Code adapted from H2 project (http://www.h2database.com) Java LZF implementation
- * by Thomas (which itself was inspired by original C code by Marc A Lehmann)
+ * Code adapted from H2 project (http://www.h2database.com) Java LZF
+ * implementation by Thomas (which itself was inspired by original C code by
+ * Marc A Lehmann)
*
* @author Thomas Mueller
* @author Tatu Saloranta
*/
-public class ChunkEncoder
-{
+public class ChunkEncoder {
+
// Beyond certain point we won't be able to compress:
private static final int MIN_BLOCK_TO_COMPRESS = 16;
@@ -33,29 +36,28 @@
private static final int MAX_OFF = 1 << 13; // 8k
private static final int MAX_REF = (1 << 8) + (1 << 3); // 264
-
+
// // Encoding tables
/**
* Buffer in which encoded content is stored during processing
*/
- private final byte[] _encodeBuffer;
-
+ private final byte[] _encodeBuffer;
+
private final int[] _hashTable;
-
+
private final int _hashModulo;
/**
- * @param totalLength Total encoded length; used for calculating size
- * of hash table to use
+ * @param totalLength Total encoded length; used for calculating size of
+ * hash table to use
*/
- public ChunkEncoder(int totalLength)
- {
+ public ChunkEncoder(int totalLength) {
int largestChunkLen = Math.max(totalLength, LZFChunk.MAX_CHUNK_LEN);
-
+
int hashLen = calcHashLen(largestChunkLen);
_hashTable = new int[hashLen];
- _hashModulo = hashLen-1;
+ _hashModulo = hashLen - 1;
// Ok, then, what's the worst case output buffer length?
// length indicator for each 32 literals, so:
int bufferLen = largestChunkLen + ((largestChunkLen + 31) >> 5);
@@ -65,37 +67,36 @@ public ChunkEncoder(int totalLength)
/**
* Method for compressing (or not) individual chunks
*/
- public LZFChunk encodeChunk(byte[] data, int offset, int len)
- {
- if (len >= MIN_BLOCK_TO_COMPRESS) {
- /* If we have non-trivial block, and can compress it by at least
- * 2 bytes (since header is 2 bytes longer), let's compress:
+ public LZFChunk encodeChunk(byte[] data, int offset, int len) {
+ if(len >= MIN_BLOCK_TO_COMPRESS) {
+ /*
+ * If we have non-trivial block, and can compress it by at least 2
+ * bytes (since header is 2 bytes longer), let's compress:
*/
- int compLen = tryCompress(data, offset, offset+len, _encodeBuffer, 0);
- if (compLen < (len-2)) { // nah; just return uncompressed
+ int compLen = tryCompress(data, offset, offset + len, _encodeBuffer, 0);
+ if(compLen < (len - 2)) { // compression helped; return compressed chunk
return LZFChunk.createCompressed(len, _encodeBuffer, 0, compLen);
}
}
// Otherwise leave uncompressed:
return LZFChunk.createNonCompressed(data, offset, len);
}
-
- private static int calcHashLen(int chunkSize)
- {
+
+ private static int calcHashLen(int chunkSize) {
// in general try get hash table size of 2x input size
chunkSize += chunkSize;
// but no larger than max size:
- if (chunkSize >= MAX_HASH_SIZE) {
+ if(chunkSize >= MAX_HASH_SIZE) {
return MAX_HASH_SIZE;
}
// otherwise just need to round up to nearest 2x
int hashLen = MIN_HASH_SIZE;
- while (hashLen < chunkSize) {
+ while(hashLen < chunkSize) {
hashLen += hashLen;
}
return hashLen;
}
-
+
private int first(byte[] in, int inPos) {
return (in[inPos] << 8) + (in[inPos + 1] & 255);
}
@@ -104,52 +105,48 @@ private static int next(int v, byte[] in, int inPos) {
return (v << 8) + (in[inPos + 2] & 255);
}
-
private int hash(int h) {
// or 184117; but this seems to give better hashing?
return ((h * 57321) >> 9) & _hashModulo;
// original lzf-c.c used this:
- //return (((h ^ (h << 5)) >> (24 - HLOG) - h*5) & _hashModulo;
+ // return (((h ^ (h << 5)) >> (24 - HLOG) - h*5) & _hashModulo;
// but that didn't seem to provide better matches
}
-
- private int tryCompress(byte[] in, int inPos, int inEnd, byte[] out, int outPos)
- {
+
+ private int tryCompress(byte[] in, int inPos, int inEnd, byte[] out, int outPos) {
int literals = 0;
outPos++;
int hash = first(in, 0);
inEnd -= 4;
- final int firstPos = inPos; // so that we won't have back references across block boundary
- while (inPos < inEnd) {
+ final int firstPos = inPos; // so that we won't have back references
+ // across block boundary
+ while(inPos < inEnd) {
byte p2 = in[inPos + 2];
// next
hash = (hash << 8) + (p2 & 255);
int off = hash(hash);
int ref = _hashTable[off];
_hashTable[off] = inPos;
- if (ref < inPos
- && ref >= firstPos
- && (off = inPos - ref - 1) < MAX_OFF
- && in[ref + 2] == p2
- && in[ref + 1] == (byte) (hash >> 8)
- && in[ref] == (byte) (hash >> 16)) {
+ if(ref < inPos && ref >= firstPos && (off = inPos - ref - 1) < MAX_OFF
+ && in[ref + 2] == p2 && in[ref + 1] == (byte) (hash >> 8)
+ && in[ref] == (byte) (hash >> 16)) {
// match
int maxLen = inEnd - inPos + 2;
- if (maxLen > MAX_REF) {
+ if(maxLen > MAX_REF) {
maxLen = MAX_REF;
}
- if (literals == 0) {
+ if(literals == 0) {
outPos--;
} else {
out[outPos - literals - 1] = (byte) (literals - 1);
literals = 0;
}
int len = 3;
- while (len < maxLen && in[ref + len] == in[inPos + len]) {
+ while(len < maxLen && in[ref + len] == in[inPos + len]) {
len++;
}
len -= 2;
- if (len < 7) {
+ if(len < 7) {
out[outPos++] = (byte) ((off >> 8) + (len << 5));
} else {
out[outPos++] = (byte) ((off >> 8) + (7 << 5));
@@ -166,7 +163,7 @@ private int tryCompress(byte[] in, int inPos, int inEnd, byte[] out, int outPos)
} else {
out[outPos++] = in[inPos++];
literals++;
- if (literals == LZFChunk.MAX_LITERAL) {
+ if(literals == LZFChunk.MAX_LITERAL) {
out[outPos - literals - 1] = (byte) (literals - 1);
literals = 0;
outPos++;
@@ -174,17 +171,17 @@ private int tryCompress(byte[] in, int inPos, int inEnd, byte[] out, int outPos)
}
}
inEnd += 4;
- while (inPos < inEnd) {
+ while(inPos < inEnd) {
out[outPos++] = in[inPos++];
literals++;
- if (literals == LZFChunk.MAX_LITERAL) {
+ if(literals == LZFChunk.MAX_LITERAL) {
out[outPos - literals - 1] = (byte) (literals - 1);
literals = 0;
outPos++;
}
}
out[outPos - literals - 1] = (byte) (literals - 1);
- if (literals == 0) {
+ if(literals == 0) {
outPos--;
}
return outPos;
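
The encoder only emits a compressed chunk when compression pays for the larger header. A minimal sketch of driving it directly, using only the constructor and methods shown in this diff (the 64-byte zero-filled input is hypothetical):

    byte[] data = new byte[64];                       // hypothetical, highly compressible input
    ChunkEncoder enc = new ChunkEncoder(data.length);
    LZFChunk chunk = enc.encodeChunk(data, 0, data.length);
    byte[] wire = chunk.getData();                    // 'Z','V' header plus chunk body
    // Blocks shorter than MIN_BLOCK_TO_COMPRESS (16 bytes), or ones that
    // compression cannot shrink by more than 2 bytes, come back non-compressed.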
src/java/voldemort/store/compress/lzf/LZF.java (73 lines changed)
@@ -1,76 +1,84 @@
-/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
- * file except in compliance with the License. You may obtain a copy of the License at
- *
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
* http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under
- * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
- * OF ANY KIND, either express or implied. See the License for the specific language
- * governing permissions and limitations under the License.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
*/
package voldemort.store.compress.lzf;
-import java.io.*;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
/**
- * Simple command-line utility that can be used for testing LZF
- * compression.
+ * Simple command-line utility that can be used for testing LZF compression.
*
* @author Tatu Saloranta
*/
-public class LZF
-{
+public class LZF {
+
final static String SUFFIX = ".lzf";
- void process(String[] args) throws IOException
- {
- if (args.length == 2) {
+ void process(String[] args) throws IOException {
+ if(args.length == 2) {
String oper = args[0];
boolean compress = "-c".equals(oper);
- if (compress || "-d".equals(oper)) {
+ if(compress || "-d".equals(oper)) {
String filename = args[1];
File src = new File(filename);
- if (!src.exists()) {
- System.err.println("File '"+filename+"' does not exist.");
+ if(!src.exists()) {
+ System.err.println("File '" + filename + "' does not exist.");
System.exit(1);
}
- if (!compress && !filename.endsWith(SUFFIX)) {
- System.err.println("File '"+filename+"' does end with expected suffix ('"+SUFFIX+"', won't decompress.");
+ if(!compress && !filename.endsWith(SUFFIX)) {
+ System.err.println("File '" + filename + "' does end with expected suffix ('"
+ + SUFFIX + "', won't decompress.");
System.exit(1);
}
byte[] data = readData(src);
- System.out.println("Read "+data.length+" bytes.");
+ System.out.println("Read " + data.length + " bytes.");
byte[] result = compress ? LZFEncoder.encode(data) : LZFDecoder.decode(data);
- System.out.println("Processed into "+result.length+" bytes.");
- File resultFile = compress ? new File(filename+SUFFIX) : new File(filename.substring(0, filename.length() - SUFFIX.length()));
+ System.out.println("Processed into " + result.length + " bytes.");
+ File resultFile = compress ? new File(filename + SUFFIX)
+ : new File(filename.substring(0, filename.length()
+ - SUFFIX.length()));
FileOutputStream out = new FileOutputStream(resultFile);
out.write(result);
out.close();
- System.out.println("Wrote in file '"+resultFile.getAbsolutePath()+"'.");
+ System.out.println("Wrote in file '" + resultFile.getAbsolutePath() + "'.");
return;
}
}
- System.err.println("Usage: java "+getClass().getName()+" -c/-d file");
+ System.err.println("Usage: java " + getClass().getName() + " -c/-d file");
System.exit(1);
}
- private byte[] readData(File in) throws IOException
- {
+ private byte[] readData(File in) throws IOException {
int len = (int) in.length();
byte[] result = new byte[len];
int offset = 0;
FileInputStream fis = new FileInputStream(in);
- while (len > 0) {
+ while(len > 0) {
int count = fis.read(result, offset, len);
- if (count < 0) break;
+ if(count < 0)
+ break;
len -= count;
offset += count;
}
fis.close();
- if (len > 0) { // should never occur...
- throw new IOException("Could not read the whole file -- received EOF when there was "+len+" bytes left to read");
+ if(len > 0) { // should never occur...
+ throw new IOException("Could not read the whole file -- received EOF when there was "
+ + len + " bytes left to read");
}
return result;
}
@@ -79,4 +87,3 @@ public static void main(String[] args) throws IOException {
new LZF().process(args);
}
}
-
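
Per the usage message printed by process(), the utility takes a flag and a file name (assuming the Voldemort classes are on the classpath):

    java voldemort.store.compress.lzf.LZF -c file        compresses file into file.lzf
    java voldemort.store.compress.lzf.LZF -d file.lzf    decompresses, stripping the .lzf suffix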
src/java/voldemort/store/compress/lzf/LZFChunk.java (62 lines changed)
@@ -1,24 +1,27 @@
-/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
- * file except in compliance with the License. You may obtain a copy of the License at
- *
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
* http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under
- * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
- * OF ANY KIND, either express or implied. See the License for the specific language
- * governing permissions and limitations under the License.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
*/
package voldemort.store.compress.lzf;
/**
- * Helper class used to store LZF encoded segments (compressed and non-compressed)
- * that can be sequenced to produce LZF files/streams.
- *
+ * Helper class used to store LZF encoded segments (compressed and
+ * non-compressed) that can be sequenced to produce LZF files/streams.
+ *
* @author Tatu Saloranta
*/
-public class LZFChunk
-{
+public class LZFChunk {
+
/**
* Maximum length of literal run for LZF encoding.
*/
@@ -33,17 +36,17 @@
public final static int BLOCK_TYPE_NON_COMPRESSED = 0;
public final static int BLOCK_TYPE_COMPRESSED = 1;
-
final byte[] _data;
LZFChunk _next;
- private LZFChunk(byte[] data) { _data = data; }
+ private LZFChunk(byte[] data) {
+ _data = data;
+ }
/**
* Factory method for constructing compressed chunk
*/
- public static LZFChunk createCompressed(int origLen, byte[] encData, int encPtr, int encLen)
- {
+ public static LZFChunk createCompressed(int origLen, byte[] encData, int encPtr, int encLen) {
byte[] result = new byte[encLen + 7];
result[0] = BYTE_Z;
result[1] = BYTE_V;
@@ -59,8 +62,7 @@ public static LZFChunk createCompressed(int origLen, byte[] encData, int encPtr,
/**
* Factory method for constructing compressed chunk
*/
- public static LZFChunk createNonCompressed(byte[] plainData, int ptr, int len)
- {
+ public static LZFChunk createNonCompressed(byte[] plainData, int ptr, int len) {
byte[] result = new byte[len + 5];
result[0] = BYTE_Z;
result[1] = BYTE_V;
@@ -70,16 +72,26 @@ public static LZFChunk createNonCompressed(byte[] plainData, int ptr, int len)
System.arraycopy(plainData, ptr, result, 5, len);
return new LZFChunk(result);
}
-
- public void setNext(LZFChunk next) { _next = next; }
- public LZFChunk next() { return _next; }
- public int length() { return _data.length; }
- public byte[] getData() { return _data; }
+ public void setNext(LZFChunk next) {
+ _next = next;
+ }
+
+ public LZFChunk next() {
+ return _next;
+ }
+
+ public int length() {
+ return _data.length;
+ }
+
+ public byte[] getData() {
+ return _data;
+ }
public int copyTo(byte[] dst, int ptr) {
int len = _data.length;
System.arraycopy(_data, 0, dst, ptr, len);
- return ptr+len;
+ return ptr + len;
}
}
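
Read together, the factory methods above and the decoder's header parsing imply the following on-the-wire chunk layout (a reading of the code, not a spec quoted from the commit):

    // Non-compressed chunk, 5-byte header:
    //   'Z' 'V' | type = 0 | length (uint16, big-endian) | raw bytes
    // Compressed chunk, 7-byte header:
    //   'Z' 'V' | type = 1 | compressed length (uint16) | uncompressed length (uint16) | LZF body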
src/java/voldemort/store/compress/lzf/LZFDecoder.java (128 lines changed)
@@ -1,12 +1,15 @@
-/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
- * file except in compliance with the License. You may obtain a copy of the License at
- *
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
* http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under
- * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
- * OF ANY KIND, either express or implied. See the License for the specific language
- * governing permissions and limitations under the License.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
*/
package voldemort.store.compress.lzf;
@@ -14,50 +17,51 @@
import java.io.IOException;
/**
- * Decoder that handles decoding of sequence of encoded LZF chunks,
- * combining them into a single contiguous result byte array
+ * Decoder that handles decoding of sequence of encoded LZF chunks, combining
+ * them into a single contiguous result byte array
*<p>
- * Code adapted from H2 project (http://www.h2database.com) Java LZF implementation
- * by Thomas (which itself was inspired by original C code by Marc A Lehmann)
+ * Code adapted from H2 project (http://www.h2database.com) Java LZF
+ * implementation by Thomas (which itself was inspired by original C code by
+ * Marc A Lehmann)
*
* @author Thomas Mueller
* @author tsaloranta@gmail.com
*/
-public class LZFDecoder
-{
- final static byte BYTE_NULL = 0;
+public class LZFDecoder {
+
+ final static byte BYTE_NULL = 0;
// static methods, no need to instantiate
- private LZFDecoder() { }
-
+ private LZFDecoder() {}
+
/**
- * Method for decompressing whole input data, which encoded in LZF
- * block structure (compatible with lzf command line utility),
- * and can consist of any number of blocks
+ * Method for decompressing whole input data, which encoded in LZF block
+ * structure (compatible with lzf command line utility), and can consist of
+ * any number of blocks
*/
- public static byte[] decode(byte[] data) throws IOException
- {
- /* First: let's calculate actual size, so we can allocate
- * exact result size. Also useful for basic sanity checking;
- * so that after call we know header structure is not corrupt
- * (to the degree that lengths etc seem valid)
+ public static byte[] decode(byte[] data) throws IOException {
+ /*
+ * First: let's calculate actual size, so we can allocate exact result
+ * size. Also useful for basic sanity checking; so that after call we
+ * know header structure is not corrupt (to the degree that lengths etc
+ * seem valid)
*/
byte[] result = new byte[calculateUncompressedSize(data)];
int inPtr = 0;
int outPtr = 0;
- while (inPtr < (data.length - 1)) { // -1 to offset possible end marker
+ while(inPtr < (data.length - 1)) { // -1 to offset possible end marker
inPtr += 2; // skip 'ZV' marker
int type = data[inPtr++];
int len = uint16(data, inPtr);
inPtr += 2;
- if (type == LZFChunk.BLOCK_TYPE_NON_COMPRESSED) { // uncompressed
+ if(type == LZFChunk.BLOCK_TYPE_NON_COMPRESSED) { // uncompressed
System.arraycopy(data, inPtr, result, outPtr, len);
outPtr += len;
} else { // compressed
int uncompLen = uint16(data, inPtr);
inPtr += 2;
- decompressChunk(data, inPtr, result, outPtr, outPtr+uncompLen);
+ decompressChunk(data, inPtr, result, outPtr, outPtr + uncompLen);
outPtr += uncompLen;
}
inPtr += len;
@@ -65,43 +69,47 @@ private LZFDecoder() { }
return result;
}
- private static int calculateUncompressedSize(byte[] data) throws IOException
- {
+ private static int calculateUncompressedSize(byte[] data) throws IOException {
int uncompressedSize = 0;
int ptr = 0;
int blockNr = 0;
- while (ptr < data.length) {
+ while(ptr < data.length) {
// can use optional end marker
- if (ptr == (data.length + 1) && data[ptr] == BYTE_NULL) {
+ if(ptr == (data.length - 1) && data[ptr] == BYTE_NULL) {
++ptr; // so that we'll be at end
break;
}
// simpler to handle bounds checks by catching exception here...
try {
- if (data[ptr] != LZFChunk.BYTE_Z || data[ptr+1] != LZFChunk.BYTE_V) {
- throw new IOException("Corrupt input data, block #"+blockNr+" (at offset "+ptr+"): did not start with 'ZV' signature bytes");
+ if(data[ptr] != LZFChunk.BYTE_Z || data[ptr + 1] != LZFChunk.BYTE_V) {
+ throw new IOException("Corrupt input data, block #" + blockNr + " (at offset "
+ + ptr + "): did not start with 'ZV' signature bytes");
}
- int type = (int) data[ptr+2];
- int blockLen = uint16(data, ptr+3);
- if (type == LZFChunk.BLOCK_TYPE_NON_COMPRESSED) { // uncompressed
+ int type = (int) data[ptr + 2];
+ int blockLen = uint16(data, ptr + 3);
+ if(type == LZFChunk.BLOCK_TYPE_NON_COMPRESSED) { // uncompressed
ptr += 5;
uncompressedSize += blockLen;
- } else if (type == LZFChunk.BLOCK_TYPE_COMPRESSED) { // compressed
- uncompressedSize += uint16(data, ptr+5);
+ } else if(type == LZFChunk.BLOCK_TYPE_COMPRESSED) { // compressed
+ uncompressedSize += uint16(data, ptr + 5);
ptr += 7;
- } else { // unknown... CRC-32 would be 2, but that's not implemented by cli tool
- throw new IOException("Corrupt input data, block #"+blockNr+" (at offset "+ptr+"): unrecognized block type "+(type & 0xFF));
+ } else { // unknown... CRC-32 would be 2, but that's not
+ // implemented by cli tool
+ throw new IOException("Corrupt input data, block #" + blockNr + " (at offset "
+ + ptr + "): unrecognized block type " + (type & 0xFF));
}
ptr += blockLen;
- } catch (ArrayIndexOutOfBoundsException e) {
- throw new IOException("Corrupt input data, block #"+blockNr+" (at offset "+ptr+"): truncated block header");
+ } catch(ArrayIndexOutOfBoundsException e) {
+ throw new IOException("Corrupt input data, block #" + blockNr + " (at offset "
+ + ptr + "): truncated block header");
}
++blockNr;
}
// one more sanity check:
- if (ptr != data.length) {
- throw new IOException("Corrupt input data: block #"+blockNr+" extends "+(data.length - ptr)+" beyond end of input");
+ if(ptr != data.length) {
+ throw new IOException("Corrupt input data: block #" + blockNr + " extends "
+ + (data.length - ptr) + " beyond end of input");
}
return uncompressedSize;
}
@@ -110,27 +118,26 @@ private static int calculateUncompressedSize(byte[] data) throws IOException
* Main decode method for individual chunks.
*/
public static void decompressChunk(byte[] in, int inPos, byte[] out, int outPos, int outEnd)
- throws IOException
- {
+ throws IOException {
do {
int ctrl = in[inPos++] & 255;
- if (ctrl < LZFChunk.MAX_LITERAL) { // literal run
+ if(ctrl < LZFChunk.MAX_LITERAL) { // literal run
ctrl += inPos;
do {
out[outPos++] = in[inPos];
- } while (inPos++ < ctrl);
+ } while(inPos++ < ctrl);
} else {
// back reference
int len = ctrl >> 5;
ctrl = -((ctrl & 0x1f) << 8) - 1;
- if (len == 7) {
+ if(len == 7) {
len += in[inPos++] & 255;
}
ctrl -= in[inPos++] & 255;
len += outPos + 2;
out[outPos] = out[outPos++ + ctrl];
out[outPos] = out[outPos++ + ctrl];
- while (outPos < len - 8) {
+ while(outPos < len - 8) {
out[outPos] = out[outPos++ + ctrl];
out[outPos] = out[outPos++ + ctrl];
out[outPos] = out[outPos++ + ctrl];
@@ -140,18 +147,19 @@ public static void decompressChunk(byte[] in, int inPos, byte[] out, int outPos,
out[outPos] = out[outPos++ + ctrl];
out[outPos] = out[outPos++ + ctrl];
}
- while (outPos < len) {
+ while(outPos < len) {
out[outPos] = out[outPos++ + ctrl];
}
}
- } while (outPos < outEnd);
+ } while(outPos < outEnd);
// sanity check to guard against corrupt data:
- if (outPos != outEnd) throw new IOException("Corrupt data: overrun in decompress, input offset "+inPos+", output offset "+outPos);
+ if(outPos != outEnd)
+ throw new IOException("Corrupt data: overrun in decompress, input offset " + inPos
+ + ", output offset " + outPos);
+ }
+
+ private static int uint16(byte[] data, int ptr) {
+ return ((data[ptr] & 0xFF) << 8) + (data[ptr + 1] & 0xFF);
}
-
- private static int uint16(byte[] data, int ptr)
- {
- return ((data[ptr] & 0xFF) << 8) + (data[ptr+1] & 0xFF);
- }
}
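
The control-byte format decompressChunk implements can be exercised with a tiny hand-built chunk body (byte values worked out by hand from the code above; the input bytes and buffer size are illustrative):

    byte[] body = { 2, 'a', 'b', 'c', 0x60, 0x02 };   // chunk body only, no 'ZV' header
    byte[] out = new byte[8];
    LZFDecoder.decompressChunk(body, 0, out, 0, 8);   // declared to throw IOException
    // ctrl = 2 (< MAX_LITERAL): literal run of ctrl + 1 = 3 bytes, "abc"
    // ctrl = 0x60: back-reference, length (0x60 >> 5) + 2 = 5,
    //              offset ((0x60 & 0x1f) << 8) + 0x02 + 1 = 3
    // out now holds "abcabcab": the 5-byte copy starts 3 bytes back and
    // overlaps itself, LZ77-style.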
src/java/voldemort/store/compress/lzf/LZFEncoder.java (53 lines changed)
@@ -1,12 +1,15 @@
-/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
- * file except in compliance with the License. You may obtain a copy of the License at
- *
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
* http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under
- * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
- * OF ANY KIND, either express or implied. See the License for the specific language
- * governing permissions and limitations under the License.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
*/
package voldemort.store.compress.lzf;
@@ -14,35 +17,35 @@
import java.io.IOException;
/**
- * Encoder that handles splitting of input into chunks to encode,
- * calls {@link ChunkEncoder} to compress individual chunks and
- * combines resulting chunks into contiguous output byte array.
+ * Encoder that handles splitting of input into chunks to encode, calls
+ * {@link ChunkEncoder} to compress individual chunks and combines resulting
+ * chunks into contiguous output byte array.
*<p>
- * Code adapted from H2 project (http://www.h2database.com) Java LZF implementation
- * by Thomas (which itself was inspired by original C code by Marc A Lehmann)
+ * Code adapted from H2 project (http://www.h2database.com) Java LZF
+ * implementation by Thomas (which itself was inspired by original C code by
+ * Marc A Lehmann)
*
* @author Thomas Mueller
* @author tsaloranta@gmail.com
*/
-public class LZFEncoder
-{
+public class LZFEncoder {
+
// Static methods only, no point in instantiating
- private LZFEncoder() { }
-
+ private LZFEncoder() {}
+
/**
- * Method for compressing given input data using LZF encoding and
- * block structure (compatible with lzf command line utility).
- * Result consists of a sequence of chunks.
+ * Method for compressing given input data using LZF encoding and block
+ * structure (compatible with lzf command line utility). Result consists of
+ * a sequence of chunks.
*/
- public static byte[] encode(byte[] data) throws IOException
- {
+ public static byte[] encode(byte[] data) throws IOException {
int left = data.length;
ChunkEncoder enc = new ChunkEncoder(left);
int chunkLen = Math.min(LZFChunk.MAX_CHUNK_LEN, left);
LZFChunk first = enc.encodeChunk(data, 0, chunkLen);
left -= chunkLen;
// shortcut: if it all fit in, no need to coalesce:
- if (left < 1) {
+ if(left < 1) {
return first.getData();
}
// otherwise need to get other chunks:
@@ -58,11 +61,11 @@ private LZFEncoder() { }
resultBytes += chunk.length();
last.setNext(chunk);
last = chunk;
- } while (left > 0);
+ } while(left > 0);
// and then coalesce returns into single contiguous byte array
byte[] result = new byte[resultBytes];
int ptr = 0;
- for (; first != null; first = first.next()) {
+ for(; first != null; first = first.next()) {
ptr = first.copyTo(result, ptr);
}
return result;
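
End to end, the two public entry points round-trip exactly as LZF.process() uses them; a minimal sketch (both methods are declared to throw IOException):

    byte[] compressed = LZFEncoder.encode(input);     // 'input' is any byte[]
    byte[] restored = LZFDecoder.decode(compressed);
    // restored is byte-for-byte identical to input; inputs longer than
    // LZFChunk.MAX_CHUNK_LEN are split into chunks and coalesced on encode.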