 * ThreadLocal contains a {@link java.lang.ref.SoftReference}
 * to a {@link BufferRecycler} used to provide low-cost
 * buffer recycling for buffers we need for encoding, decoding.
 */
final protected static ThreadLocal<SoftReference<BufferRecycler>> _recyclerRef
    = new ThreadLocal<SoftReference<BufferRecycler>>();
// ...
}

// ==== compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/DataHandler.java ====
package com.ning.compress;

import java.io.IOException;

public interface DataHandler
{
    /**
     * NOTE: return value was added (from void to boolean) in 0.9.9
     *
     * @return True, if caller should process and feed more data; false if
     *   caller is not interested in more data and processing should be terminated
     *   (and {@link #allDataHandled} should be called immediately)
     */
    public boolean handleData(byte[] buffer, int offset, int len) throws IOException;

    /**
     * Method called after last call to {@link #handleData}, for successful
     * operation, if and when caller is informed about end of content.
     * Note that if an exception thrown by {@link #handleData} has caused processing
     * to be aborted, this method might not get called.
     * Implementation may choose to free resources, flush state, or perform
     * validation at this point.
     */
    public void allDataHandled() throws IOException;
}

// ==== compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/Uncompressor.java ====
package com.ning.compress;

import java.io.IOException;

/**
 * Abstract class that defines "push" style API for various uncompressors
 * (aka decompressors or decoders). Implementations are alternatives to stream
 * based uncompressors (such as {@link com.ning.compress.lzf.LZFInputStream})
 * in cases where "push" operation is important and/or blocking is not allowed;
 * for example, when handling asynchronous HTTP responses.
 *<p>
 * Note that API does not define the way that listener is attached: this is
 * typically passed through to constructor of the implementation.
 *
 * @author Tatu Saloranta (tatu.saloranta@iki.fi)
 */
public abstract class Uncompressor
{
    /**
     * Method called to feed more compressed data to be uncompressed, and
     * sent to possible listeners.
     *<p>
* NOTE: return value was added (from void to boolean) in 0.9.9
*
* @return True, if caller should process and feed more data; false if
 *   caller is not interested in more data and processing should be terminated
 *   (and {@link #complete} should be called immediately)
*/
public abstract boolean feedCompressedData(byte[] comp, int offset, int len)
throws IOException;
/**
* Method called to indicate that all data to uncompress has already been fed.
 * This typically results in the last block of data being uncompressed, and
 * results being sent to listener(s); but may also throw an exception if an
 * incomplete block was passed.
*/
public abstract void complete() throws IOException;
}
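// Illustrative usage sketch (not part of the library): wiring a DataHandler
// into an LZFUncompressor (the push-mode implementation shown later in this
// dump) and feeding it arbitrarily-sized compressed pieces; `networkChunks`
// is a hypothetical input.
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import com.ning.compress.DataHandler;
import com.ning.compress.Uncompressor;
import com.ning.compress.lzf.LZFUncompressor;

class PushUncompressExample
{
    static byte[] uncompressAll(byte[][] networkChunks) throws IOException
    {
        final ByteArrayOutputStream result = new ByteArrayOutputStream();
        DataHandler handler = new DataHandler() {
            @Override
            public boolean handleData(byte[] buffer, int offset, int len) {
                result.write(buffer, offset, len);
                return true; // keep feeding
            }
            @Override
            public void allDataHandled() { }
        };
        Uncompressor unc = new LZFUncompressor(handler);
        for (byte[] piece : networkChunks) {
            if (!unc.feedCompressedData(piece, 0, piece.length)) {
                break; // handler indicated no more data is wanted
            }
        }
        unc.complete();
        return result.toByteArray();
    }
}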
// ==== compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/UncompressorOutputStream.java ====
package com.ning.compress;
import java.io.*;
/**
 * Simple wrapper or adapter around {@link Uncompressor}, to help
* with inter-operability.
*/
public class UncompressorOutputStream extends OutputStream
{
protected final Uncompressor _uncompressor;
private byte[] _singleByte = null;
public UncompressorOutputStream(Uncompressor uncomp)
{
_uncompressor = uncomp;
}
/**
* Call to this method will result in call to
* {@link Uncompressor#complete()}, which is idempotent
* (i.e. can be called multiple times without ill effects).
*/
@Override
public void close() throws IOException {
_uncompressor.complete();
}
@Override
public void flush() { }
@Override
public void write(byte[] b) throws IOException {
_uncompressor.feedCompressedData(b, 0, b.length);
}
@Override
public void write(byte[] b, int off, int len) throws IOException {
_uncompressor.feedCompressedData(b, off, len);
}
@Override
public void write(int b) throws IOException
{
if (_singleByte == null) {
_singleByte = new byte[1];
}
_singleByte[0] = (byte) b;
_uncompressor.feedCompressedData(_singleByte, 0, 1);
}
}
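// Illustrative usage sketch: any plain OutputStream-based copy loop can now
// drive push-style uncompression; `fileName` and `handler` are hypothetical.
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import com.ning.compress.DataHandler;
import com.ning.compress.UncompressorOutputStream;
import com.ning.compress.lzf.LZFUncompressor;

class UncompressorOutputStreamExample
{
    static void pump(String fileName, DataHandler handler) throws IOException
    {
        UncompressorOutputStream out = new UncompressorOutputStream(new LZFUncompressor(handler));
        try (InputStream in = new FileInputStream(fileName)) {
            byte[] buffer = new byte[8000];
            int count;
            while ((count = in.read(buffer)) != -1) {
                out.write(buffer, 0, count); // pushes data into the Uncompressor
            }
        }
        out.close(); // triggers Uncompressor.complete()
    }
}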
// ==== compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/gzip/GZIPException.java ====
package com.ning.compress.gzip;
import com.ning.compress.CompressionFormatException;
public class GZIPException extends CompressionFormatException
{
private static final long serialVersionUID = 1L;
public GZIPException(String message) {
super(message);
}
public GZIPException(Throwable t) {
super(t);
}
public GZIPException(String message, Throwable t) {
super(message, t);
}
}
// ==== compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/gzip/GZIPRecycler.java ====
package com.ning.compress.gzip;
import java.lang.ref.SoftReference;
import java.util.zip.Deflater;
import java.util.zip.Inflater;
/**
* GZIP-codec-specific "extension" to {@link com.ning.compress.BufferRecycler},
* used for recycling expensive objects.
*
* @author Tatu Saloranta (tatu.saloranta@iki.fi)
*/
public final class GZIPRecycler
{
    final protected static ThreadLocal<SoftReference<GZIPRecycler>> _recyclerRef
        = new ThreadLocal<SoftReference<GZIPRecycler>>();
    // ...
}

// ==== compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/ChunkDecoder.java ====
package com.ning.compress.lzf;

import java.io.IOException;
import java.io.InputStream;

/**
 * Decoder that handles decoding of sequence of encoded LZF chunks,
 * combining them into a single contiguous result byte array.
 *<p>
* Note that instances have no state, so they are
* fully thread-safe and reusable.
*
* @author Tatu Saloranta (tatu.saloranta@iki.fi)
*/
public abstract class ChunkDecoder
{
protected final static byte BYTE_NULL = 0;
protected final static int HEADER_BYTES = 5;
public ChunkDecoder() { }
/*
///////////////////////////////////////////////////////////////////////
// Public API
///////////////////////////////////////////////////////////////////////
*/
/**
 * Method for decompressing a block of input data encoded in LZF
 * block structure (compatible with lzf command line utility);
 * input can consist of any number of chunks.
 * Note that input MUST consist of a sequence of one or more complete
 * chunks; partial chunks can not be handled.
*/
public final byte[] decode(final byte[] inputBuffer) throws LZFException
{
byte[] result = new byte[calculateUncompressedSize(inputBuffer, 0, inputBuffer.length)];
decode(inputBuffer, 0, inputBuffer.length, result);
return result;
}
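    // Usage sketch (illustrative; `lzfBytes` is a hypothetical variable):
    //
    //    ChunkDecoder decoder = ChunkDecoderFactory.optimalInstance();
    //    byte[] uncompressed = decoder.decode(lzfBytes);
    //
    // ChunkDecoderFactory is shown later in this dump.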
/**
 * Method for decompressing a block of input data encoded in LZF
 * block structure (compatible with lzf command line utility);
 * input can consist of any number of chunks.
 * Note that input MUST consist of a sequence of one or more complete
 * chunks; partial chunks can not be handled.
*/
public final byte[] decode(final byte[] inputBuffer, int inputPtr, int inputLen) throws LZFException
{
byte[] result = new byte[calculateUncompressedSize(inputBuffer, inputPtr, inputLen)];
decode(inputBuffer, inputPtr, inputLen, result);
return result;
}
/**
 * Method for decompressing a block of input data encoded in LZF
 * block structure (compatible with lzf command line utility);
 * input can consist of any number of chunks.
 * Note that input MUST consist of a sequence of one or more complete
 * chunks; partial chunks can not be handled.
*/
public final int decode(final byte[] inputBuffer, final byte[] targetBuffer) throws LZFException
{
return decode(inputBuffer, 0, inputBuffer.length, targetBuffer);
}
/**
 * Method for decompressing a block of input data encoded in LZF
 * block structure (compatible with lzf command line utility);
 * input can consist of any number of chunks.
 * Note that input MUST consist of a sequence of one or more complete
 * chunks; partial chunks can not be handled.
*/
public int decode(final byte[] sourceBuffer, int inPtr, int inLength,
final byte[] targetBuffer) throws LZFException
{
int outPtr = 0;
int blockNr = 0;
final int end = inPtr + inLength - 1; // -1 to offset possible end marker
while (inPtr < end) {
// let's do basic sanity checks; no point in skimping with these checks
if (sourceBuffer[inPtr] != LZFChunk.BYTE_Z || sourceBuffer[inPtr+1] != LZFChunk.BYTE_V) {
throw new LZFException("Corrupt input data, block #"+blockNr+" (at offset "+inPtr+"): did not start with 'ZV' signature bytes");
}
inPtr += 2;
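            // Chunk header layout: 'Z', 'V', <type byte>, <2-byte big-endian length>;
            // uint16() (helper presumably defined elsewhere in this class) reads
            // the big-endian length that follows the type byte.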
int type = sourceBuffer[inPtr++];
int len = uint16(sourceBuffer, inPtr);
inPtr += 2;
if (type == LZFChunk.BLOCK_TYPE_NON_COMPRESSED) { // uncompressed
if ((outPtr + len) > targetBuffer.length) {
_reportArrayOverflow(targetBuffer, outPtr, len);
}
System.arraycopy(sourceBuffer, inPtr, targetBuffer, outPtr, len);
outPtr += len;
} else { // compressed
int uncompLen = uint16(sourceBuffer, inPtr);
if ((outPtr + uncompLen) > targetBuffer.length) {
_reportArrayOverflow(targetBuffer, outPtr, uncompLen);
}
inPtr += 2;
decodeChunk(sourceBuffer, inPtr, targetBuffer, outPtr, outPtr+uncompLen);
outPtr += uncompLen;
}
inPtr += len;
++blockNr;
}
return outPtr;
}
/**
 * Main decode method for a stream. Decompressed bytes are placed in the
 * outputBuffer; inputBuffer is used as a "scratch area".
*
* @param is An input stream of LZF compressed bytes
* @param inputBuffer A byte array used as a scratch area.
* @param outputBuffer A byte array in which the result is returned
*
* @return The number of bytes placed in the outputBuffer.
*/
public abstract int decodeChunk(final InputStream is, final byte[] inputBuffer, final byte[] outputBuffer)
throws IOException;
/**
* Main decode method for individual chunks.
*/
public abstract void decodeChunk(byte[] in, int inPos, byte[] out, int outPos, int outEnd)
throws LZFException;
    /**
     * @return If positive number, number of bytes skipped; if -1, end-of-stream was
     *   reached; otherwise, amount of content decoded (using formula of
     *   <code>-(decodedAmount+1)</code>)
     */
    public abstract int skipOrDecodeChunk(final InputStream is, final byte[] inputBuffer,
            final byte[] outputBuffer, final long maxToSkip)
        throws IOException;
    // ...
}

// ==== compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/ChunkEncoder.java ====
package com.ning.compress.lzf;

import java.io.Closeable;

import com.ning.compress.BufferRecycler;

/**
 * Class that handles actual encoding of individual chunks.
 * Resulting chunks can be compressed or non-compressed; compression
 * is only used if it actually reduces chunk size (including overhead
 * of additional header bytes).
 *<p>
* Note that instances are stateful and hence
* not thread-safe; one instance is meant to be used
* for processing a sequence of chunks where total length
* is known.
*
* @author Tatu Saloranta (tatu.saloranta@iki.fi)
*/
public abstract class ChunkEncoder
implements Closeable
{
// // // Constants
    // Beyond a certain point we won't be able to compress; let's use 16 bytes as the cut-off
protected static final int MIN_BLOCK_TO_COMPRESS = 16;
protected static final int MIN_HASH_SIZE = 256;
// Not much point in bigger tables, with 8k window
protected static final int MAX_HASH_SIZE = 16384;
protected static final int MAX_OFF = 1 << 13; // 8k
protected static final int MAX_REF = (1 << 8) + (1 << 3); // 264
/**
* How many tail bytes are we willing to just copy as is, to simplify
* loop end checks? 4 is bare minimum, may be raised to 8?
*/
protected static final int TAIL_LENGTH = 4;
// // // Encoding tables etc
protected final BufferRecycler _recycler;
/**
* Hash table contains lookup based on 3-byte sequence; key is hash
* of such triplet, value is offset in buffer.
*/
protected int[] _hashTable;
protected final int _hashModulo;
/**
* Buffer in which encoded content is stored during processing
*/
protected byte[] _encodeBuffer;
/**
* Small buffer passed to LZFChunk, needed for writing chunk header
*/
protected byte[] _headerBuffer;
/**
* Uses a ThreadLocal soft-referenced BufferRecycler instance.
*
* @param totalLength Total encoded length; used for calculating size
* of hash table to use
*/
protected ChunkEncoder(int totalLength)
{
this(totalLength, BufferRecycler.instance());
}
/**
* @param totalLength Total encoded length; used for calculating size
* of hash table to use
* @param bufferRecycler Buffer recycler instance, for usages where the
* caller manages the recycler instances
*/
protected ChunkEncoder(int totalLength, BufferRecycler bufferRecycler)
{
// Need room for at most a single full chunk
int largestChunkLen = Math.min(totalLength, LZFChunk.MAX_CHUNK_LEN);
int suggestedHashLen = calcHashLen(largestChunkLen);
_recycler = bufferRecycler;
_hashTable = bufferRecycler.allocEncodingHash(suggestedHashLen);
_hashModulo = _hashTable.length - 1;
// Ok, then, what's the worst case output buffer length?
// length indicator for each 32 literals, so:
// 21-Feb-2013, tatu: Plus we want to prepend chunk header in place:
int bufferLen = largestChunkLen + ((largestChunkLen + 31) >> 5) + LZFChunk.MAX_HEADER_LEN;
_encodeBuffer = bufferRecycler.allocEncodingBuffer(bufferLen);
}
/**
     * Alternate constructor used when we want to avoid allocating an encoding
* buffer, in cases where caller wants full control over allocations.
*/
protected ChunkEncoder(int totalLength, boolean bogus)
{
this(totalLength, BufferRecycler.instance(), bogus);
}
/**
     * Alternate constructor used when we want to avoid allocating an encoding
* buffer, in cases where caller wants full control over allocations.
*/
protected ChunkEncoder(int totalLength, BufferRecycler bufferRecycler, boolean bogus)
{
int largestChunkLen = Math.max(totalLength, LZFChunk.MAX_CHUNK_LEN);
int suggestedHashLen = calcHashLen(largestChunkLen);
_recycler = bufferRecycler;
_hashTable = bufferRecycler.allocEncodingHash(suggestedHashLen);
_hashModulo = _hashTable.length - 1;
_encodeBuffer = null;
}
private static int calcHashLen(int chunkSize)
{
        // in general, try to get a hash table size of 2x the input size
chunkSize += chunkSize;
// but no larger than max size:
if (chunkSize >= MAX_HASH_SIZE) {
return MAX_HASH_SIZE;
}
// otherwise just need to round up to nearest 2x
int hashLen = MIN_HASH_SIZE;
while (hashLen < chunkSize) {
hashLen += hashLen;
}
return hashLen;
}
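    // Example: a 4000-byte chunk is doubled to 8000 and rounded up to the next
    // power of two, 8192; chunks of 8k and larger get the full 16k table.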
/*
///////////////////////////////////////////////////////////////////////
// Public API
///////////////////////////////////////////////////////////////////////
*/
/**
* Method to close once encoder is no longer in use. Note: after calling
* this method, further calls to {@link #encodeChunk} will fail
*/
@Override
public final void close()
{
byte[] buf = _encodeBuffer;
if (buf != null) {
_encodeBuffer = null;
_recycler.releaseEncodeBuffer(buf);
}
int[] ibuf = _hashTable;
if (ibuf != null) {
_hashTable = null;
_recycler.releaseEncodingHash(ibuf);
}
}
/**
* Method for compressing (or not) individual chunks
*/
public LZFChunk encodeChunk(byte[] data, int offset, int len)
{
if (len >= MIN_BLOCK_TO_COMPRESS) {
/* If we have non-trivial block, and can compress it by at least
* 2 bytes (since header is 2 bytes longer), let's compress:
*/
int compLen = tryCompress(data, offset, offset+len, _encodeBuffer, 0);
            if (compLen < (len-2)) { // yes; compresses enough, return compressed chunk
return LZFChunk.createCompressed(len, _encodeBuffer, 0, compLen);
}
}
// Otherwise leave uncompressed:
return LZFChunk.createNonCompressed(data, offset, len);
}
/**
* Method for compressing individual chunk, if (and only if) it compresses down
* to specified ratio or less.
*
* @param maxResultRatio Value between 0.05 and 1.10 to indicate maximum relative size of
* the result to use, in order to append encoded chunk
*
* @return Encoded chunk if (and only if) input compresses down to specified ratio or less;
* otherwise returns null
*/
public LZFChunk encodeChunkIfCompresses(byte[] data, int offset, int inputLen,
double maxResultRatio)
{
if (inputLen >= MIN_BLOCK_TO_COMPRESS) {
final int maxSize = (int) (maxResultRatio * inputLen + LZFChunk.HEADER_LEN_COMPRESSED + 0.5);
int compLen = tryCompress(data, offset, offset+inputLen, _encodeBuffer, 0);
if (compLen <= maxSize) {
return LZFChunk.createCompressed(inputLen, _encodeBuffer, 0, compLen);
}
}
return null;
}
/**
* Alternate chunk compression method that will append encoded chunk in
* pre-allocated buffer. Note that caller must ensure that the buffer is
* large enough to hold not just encoded result but also intermediate
* result; latter may be up to 4% larger than input; caller may use
* {@link LZFEncoder#estimateMaxWorkspaceSize(int)} to calculate
* necessary buffer size.
*
* @return Offset in output buffer after appending the encoded chunk
*/
public int appendEncodedChunk(final byte[] input, final int inputPtr, final int inputLen,
final byte[] outputBuffer, final int outputPos)
{
if (inputLen >= MIN_BLOCK_TO_COMPRESS) {
            /* If we have a non-trivial block, and can compress it by at least
             * 2 bytes (since compressed header is 2 bytes longer), let's compress:
*/
final int compStart = outputPos + LZFChunk.HEADER_LEN_COMPRESSED;
final int end = tryCompress(input, inputPtr, inputPtr+inputLen, outputBuffer, compStart);
final int uncompEnd = (outputPos + LZFChunk.HEADER_LEN_NOT_COMPRESSED) + inputLen;
if (end < uncompEnd) { // yes, compressed by at least one byte
final int compLen = end - compStart;
LZFChunk.appendCompressedHeader(inputLen, compLen, outputBuffer, outputPos);
return end;
}
}
// Otherwise append as non-compressed chunk instead (length + 5):
return LZFChunk.appendNonCompressed(input, inputPtr, inputLen, outputBuffer, outputPos);
}
/**
* Method similar to {@link #appendEncodedChunk}, but that will only append
* encoded chunk if it compresses down to specified ratio (also considering header that
* will be needed); otherwise will
 *   return -1 without appending anything.
 */
    // ...
}

// ==== compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/LZFEncoder.java ====
package com.ning.compress.lzf;

import com.ning.compress.BufferRecycler;
import com.ning.compress.lzf.util.ChunkEncoderFactory;

/**
 * Encoder that handles splitting of input into chunks to encode,
 * calls out to {@link ChunkEncoder} for encoding, and aggregates
 * chunks into a contiguous result byte array when needed.
 *
 * @author Tatu Saloranta (tatu.saloranta@iki.fi)
 */
public class LZFEncoder
{
    // Static methods only, no point in instantiating
    private LZFEncoder() { }

    /**
     * Method for compressing given input data using LZF encoding and
     * block structure (compatible with lzf command line utility).
     * Result consists of a sequence of chunks.
     *<p>
* Note that {@link ChunkEncoder} instance used is one produced by
* {@link ChunkEncoderFactory#optimalInstance}, which typically
* is "unsafe" instance if one can be used on current JVM.
*/
public static byte[] encode(byte[] data) {
return encode(data, 0, data.length);
}
/**
* Method that will use "safe" {@link ChunkEncoder}, as produced by
* {@link ChunkEncoderFactory#safeInstance}, for encoding. Safe here
* means that it does not use any non-compliant features beyond core JDK.
*/
public static byte[] safeEncode(byte[] data) {
return safeEncode(data, 0, data.length);
}
/**
* Method for compressing given input data using LZF encoding and
* block structure (compatible with lzf command line utility).
* Result consists of a sequence of chunks.
*
* Note that {@link ChunkEncoder} instance used is one produced by
* {@link ChunkEncoderFactory#optimalInstance}, which typically
* is "unsafe" instance if one can be used on current JVM.
*/
public static byte[] encode(byte[] data, int offset, int length)
{
ChunkEncoder enc = ChunkEncoderFactory.optimalInstance(length);
byte[] result = encode(enc, data, offset, length);
enc.close(); // important for buffer reuse!
return result;
}
/**
* Method that will use "safe" {@link ChunkEncoder}, as produced by
* {@link ChunkEncoderFactory#safeInstance}, for encoding. Safe here
* means that it does not use any non-compliant features beyond core JDK.
*/
public static byte[] safeEncode(byte[] data, int offset, int length)
{
ChunkEncoder enc = ChunkEncoderFactory.safeInstance(length);
byte[] result = encode(enc, data, offset, length);
enc.close();
return result;
}
/**
* Method for compressing given input data using LZF encoding and
* block structure (compatible with lzf command line utility).
* Result consists of a sequence of chunks.
*
* Note that {@link ChunkEncoder} instance used is one produced by
* {@link ChunkEncoderFactory#optimalInstance}, which typically
* is "unsafe" instance if one can be used on current JVM.
*/
public static byte[] encode(byte[] data, int offset, int length, BufferRecycler bufferRecycler)
{
ChunkEncoder enc = ChunkEncoderFactory.optimalInstance(length, bufferRecycler);
byte[] result = encode(enc, data, offset, length);
enc.close(); // important for buffer reuse!
return result;
}
/**
* Method that will use "safe" {@link ChunkEncoder}, as produced by
* {@link ChunkEncoderFactory#safeInstance}, for encoding. Safe here
* means that it does not use any non-compliant features beyond core JDK.
*/
public static byte[] safeEncode(byte[] data, int offset, int length, BufferRecycler bufferRecycler)
{
ChunkEncoder enc = ChunkEncoderFactory.safeInstance(length, bufferRecycler);
byte[] result = encode(enc, data, offset, length);
enc.close();
return result;
}
/**
* Compression method that uses specified {@link ChunkEncoder} for actual
* encoding.
*/
public static byte[] encode(ChunkEncoder enc, byte[] data, int length) {
return encode(enc, data, 0, length);
}
/**
* Method that encodes given input using provided {@link ChunkEncoder},
* and aggregating it into a single byte array and returning that.
*
* NOTE: method does NOT call {@link ChunkEncoder#close}; caller is responsible
* for doing that after it is done using the encoder.
*/
public static byte[] encode(ChunkEncoder enc, byte[] data, int offset, int length)
{
int left = length;
int chunkLen = Math.min(LZFChunk.MAX_CHUNK_LEN, left);
LZFChunk first = enc.encodeChunk(data, offset, chunkLen);
left -= chunkLen;
// shortcut: if it all fit in, no need to coalesce:
if (left < 1) {
return first.getData();
}
// otherwise need to get other chunks:
int resultBytes = first.length();
offset += chunkLen;
LZFChunk last = first;
do {
chunkLen = Math.min(left, LZFChunk.MAX_CHUNK_LEN);
LZFChunk chunk = enc.encodeChunk(data, offset, chunkLen);
offset += chunkLen;
left -= chunkLen;
resultBytes += chunk.length();
last.setNext(chunk);
last = chunk;
} while (left > 0);
// and then coalesce returns into single contiguous byte array
byte[] result = new byte[resultBytes];
int ptr = 0;
for (; first != null; first = first.next()) {
ptr = first.copyTo(result, ptr);
}
return result;
}
/*
///////////////////////////////////////////////////////////////////////
// Encoding methods, append in caller-provided buffer(s)
///////////////////////////////////////////////////////////////////////
*/
/**
* Alternate version that accepts pre-allocated output buffer.
*
* Note that {@link ChunkEncoder} instance used is one produced by
* {@link ChunkEncoderFactory#optimalNonAllocatingInstance}, which typically
* is "unsafe" instance if one can be used on current JVM.
*/
public static int appendEncoded(byte[] input, int inputPtr, int inputLength,
byte[] outputBuffer, int outputPtr) {
ChunkEncoder enc = ChunkEncoderFactory.optimalNonAllocatingInstance(inputLength);
int len = appendEncoded(enc, input, inputPtr, inputLength, outputBuffer, outputPtr);
enc.close();
return len;
}
/**
* Alternate version that accepts pre-allocated output buffer.
*
* Method that will use "safe" {@link ChunkEncoder}, as produced by
* {@link ChunkEncoderFactory#safeInstance}, for encoding. Safe here
* means that it does not use any non-compliant features beyond core JDK.
*/
public static int safeAppendEncoded(byte[] input, int inputPtr, int inputLength,
byte[] outputBuffer, int outputPtr) {
ChunkEncoder enc = ChunkEncoderFactory.safeNonAllocatingInstance(inputLength);
int len = appendEncoded(enc, input, inputPtr, inputLength, outputBuffer, outputPtr);
enc.close();
return len;
}
/**
* Alternate version that accepts pre-allocated output buffer.
*
* Note that {@link ChunkEncoder} instance used is one produced by
* {@link ChunkEncoderFactory#optimalNonAllocatingInstance}, which typically
* is "unsafe" instance if one can be used on current JVM.
*/
public static int appendEncoded(byte[] input, int inputPtr, int inputLength,
byte[] outputBuffer, int outputPtr, BufferRecycler bufferRecycler) {
ChunkEncoder enc = ChunkEncoderFactory.optimalNonAllocatingInstance(inputLength, bufferRecycler);
int len = appendEncoded(enc, input, inputPtr, inputLength, outputBuffer, outputPtr);
enc.close();
return len;
}
/**
* Alternate version that accepts pre-allocated output buffer.
*
* Method that will use "safe" {@link ChunkEncoder}, as produced by
* {@link ChunkEncoderFactory#safeInstance}, for encoding. Safe here
* means that it does not use any non-compliant features beyond core JDK.
*/
public static int safeAppendEncoded(byte[] input, int inputPtr, int inputLength,
byte[] outputBuffer, int outputPtr, BufferRecycler bufferRecycler) {
ChunkEncoder enc = ChunkEncoderFactory.safeNonAllocatingInstance(inputLength, bufferRecycler);
int len = appendEncoded(enc, input, inputPtr, inputLength, outputBuffer, outputPtr);
enc.close();
return len;
}
/**
* Alternate version that accepts pre-allocated output buffer.
*/
public static int appendEncoded(ChunkEncoder enc, byte[] input, int inputPtr, int inputLength,
byte[] outputBuffer, int outputPtr)
{
int left = inputLength;
int chunkLen = Math.min(LZFChunk.MAX_CHUNK_LEN, left);
outputPtr = enc.appendEncodedChunk(input, inputPtr, chunkLen, outputBuffer, outputPtr);
left -= chunkLen;
// shortcut: if it all fit in, no need to coalesce:
if (left < 1) {
return outputPtr;
}
// otherwise need to keep on encoding...
inputPtr += chunkLen;
do {
chunkLen = Math.min(left, LZFChunk.MAX_CHUNK_LEN);
outputPtr = enc.appendEncodedChunk(input, inputPtr, chunkLen, outputBuffer, outputPtr);
inputPtr += chunkLen;
left -= chunkLen;
} while (left > 0);
return outputPtr;
}
}
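// Minimal round-trip sketch using the static helpers above together with
// ChunkDecoder/ChunkDecoderFactory (both shown elsewhere in this dump);
// the payload contents are arbitrary.
import java.util.Arrays;
import com.ning.compress.lzf.ChunkDecoder;
import com.ning.compress.lzf.LZFEncoder;
import com.ning.compress.lzf.LZFException;
import com.ning.compress.lzf.util.ChunkDecoderFactory;

class LZFRoundTripExample
{
    public static void main(String[] args) throws LZFException
    {
        byte[] original = "example payload, example payload, example payload".getBytes();
        byte[] compressed = LZFEncoder.encode(original); // chunked LZF block format
        ChunkDecoder decoder = ChunkDecoderFactory.optimalInstance();
        byte[] restored = decoder.decode(compressed);
        if (!Arrays.equals(original, restored)) {
            throw new IllegalStateException("round-trip mismatch");
        }
    }
}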
// ==== compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/LZFException.java ====
package com.ning.compress.lzf;
import com.ning.compress.CompressionFormatException;
public class LZFException extends CompressionFormatException
{
private static final long serialVersionUID = 1L;
public LZFException(String message) {
super(message);
}
public LZFException(Throwable t) {
super(t);
}
public LZFException(String message, Throwable t) {
super(message, t);
}
}
// ==== compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/LZFInputStream.java ====
package com.ning.compress.lzf;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import com.ning.compress.BufferRecycler;
import com.ning.compress.lzf.util.ChunkDecoderFactory;
/**
* Decorator {@link InputStream} implementation used for reading compressed data
* and uncompressing it on the fly, such that reads return uncompressed
* data. Its direct counterpart is {@link LZFOutputStream}; but there is
 * also {@link LZFCompressingInputStream} which does the reverse of this class.
*
* @author Tatu Saloranta
*
* @see com.ning.compress.lzf.util.LZFFileInputStream
* @see com.ning.compress.lzf.LZFCompressingInputStream
*/
public class LZFInputStream extends InputStream
{
/**
* Underlying decoder in use.
*/
protected final ChunkDecoder _decoder;
/**
* Object that handles details of buffer recycling
*/
protected final BufferRecycler _recycler;
/**
* stream to be decompressed
*/
protected final InputStream _inputStream;
/**
* Flag that indicates if we have already called 'inputStream.close()'
* (to avoid calling it multiple times)
*/
protected boolean _inputStreamClosed;
/**
* Flag that indicates whether we force full reads (reading of as many
* bytes as requested), or 'optimal' reads (up to as many as available,
* but at least one). Default is false, meaning that 'optimal' read
* is used.
*/
protected boolean _cfgFullReads = false;
/**
* the current buffer of compressed bytes (from which to decode)
* */
protected byte[] _inputBuffer;
/**
* the buffer of uncompressed bytes from which content is read
* */
protected byte[] _decodedBytes;
/**
* The current position (next char to output) in the uncompressed bytes buffer.
*/
protected int _bufferPosition = 0;
/**
* Length of the current uncompressed bytes buffer
*/
protected int _bufferLength = 0;
/*
///////////////////////////////////////////////////////////////////////
// Construction
///////////////////////////////////////////////////////////////////////
*/
public LZFInputStream(final InputStream inputStream) throws IOException
{
this(inputStream, false);
}
public LZFInputStream(final ChunkDecoder decoder, final InputStream in)
throws IOException
{
this(decoder, in, BufferRecycler.instance(), false);
}
/**
* @param in Underlying input stream to use
* @param fullReads Whether {@link #read(byte[])} should try to read exactly
* as many bytes as requested (true); or just however many happen to be
* available (false)
*/
public LZFInputStream(final InputStream in, boolean fullReads) throws IOException
{
this(ChunkDecoderFactory.optimalInstance(), in, BufferRecycler.instance(), fullReads);
}
public LZFInputStream(final ChunkDecoder decoder, final InputStream in, boolean fullReads)
throws IOException
{
this(decoder, in, BufferRecycler.instance(), fullReads);
}
public LZFInputStream(final InputStream inputStream, final BufferRecycler bufferRecycler) throws IOException
{
this(inputStream, bufferRecycler, false);
}
/**
* @param in Underlying input stream to use
* @param fullReads Whether {@link #read(byte[])} should try to read exactly
* as many bytes as requested (true); or just however many happen to be
* available (false)
* @param bufferRecycler Buffer recycler instance, for usages where the
* caller manages the recycler instances
*/
public LZFInputStream(final InputStream in, final BufferRecycler bufferRecycler, boolean fullReads) throws IOException
{
this(ChunkDecoderFactory.optimalInstance(), in, bufferRecycler, fullReads);
}
public LZFInputStream(final ChunkDecoder decoder, final InputStream in, final BufferRecycler bufferRecycler, boolean fullReads)
throws IOException
{
super();
_decoder = decoder;
_recycler = bufferRecycler;
_inputStream = in;
_inputStreamClosed = false;
_cfgFullReads = fullReads;
_inputBuffer = bufferRecycler.allocInputBuffer(LZFChunk.MAX_CHUNK_LEN);
_decodedBytes = bufferRecycler.allocDecodeBuffer(LZFChunk.MAX_CHUNK_LEN);
}
/**
 * Method that can be used to define whether reads should be "full" or
* "optimal": former means that full compressed blocks are read right
* away as needed, optimal that only smaller chunks are read at a time,
* more being read as needed.
*/
public void setUseFullReads(boolean b) {
_cfgFullReads = b;
}
/*
///////////////////////////////////////////////////////////////////////
// InputStream impl
///////////////////////////////////////////////////////////////////////
*/
/**
* Method is overridden to report number of bytes that can now be read
* from decoded data buffer, without reading bytes from the underlying
* stream.
* Never throws an exception; returns number of bytes available without
* further reads from underlying source; -1 if stream has been closed, or
* 0 if an actual read (and possible blocking) is needed to find out.
*/
@Override
public int available()
{
if (_inputStreamClosed) { // javadocs suggest 0 for closed as well (not -1)
return 0;
}
int left = (_bufferLength - _bufferPosition);
return (left <= 0) ? 0 : left;
}
@Override
public int read() throws IOException
{
if (!readyBuffer()) {
return -1;
}
return _decodedBytes[_bufferPosition++] & 255;
}
@Override
public int read(final byte[] buffer) throws IOException
{
return read(buffer, 0, buffer.length);
}
@Override
public int read(final byte[] buffer, int offset, int length) throws IOException
{
if (length < 1) {
return 0;
}
if (!readyBuffer()) {
return -1;
}
// First let's read however much data we happen to have...
int chunkLength = Math.min(_bufferLength - _bufferPosition, length);
System.arraycopy(_decodedBytes, _bufferPosition, buffer, offset, chunkLength);
_bufferPosition += chunkLength;
if (chunkLength == length || !_cfgFullReads) {
return chunkLength;
}
// Need more data, then
int totalRead = chunkLength;
do {
offset += chunkLength;
if (!readyBuffer()) {
break;
}
chunkLength = Math.min(_bufferLength - _bufferPosition, (length - totalRead));
System.arraycopy(_decodedBytes, _bufferPosition, buffer, offset, chunkLength);
_bufferPosition += chunkLength;
totalRead += chunkLength;
} while (totalRead < length);
return totalRead;
}
@Override
public void close() throws IOException
{
_bufferPosition = _bufferLength = 0;
byte[] buf = _inputBuffer;
if (buf != null) {
_inputBuffer = null;
_recycler.releaseInputBuffer(buf);
}
buf = _decodedBytes;
if (buf != null) {
_decodedBytes = null;
_recycler.releaseDecodeBuffer(buf);
}
if (!_inputStreamClosed) {
_inputStreamClosed = true;
_inputStream.close();
}
}
/**
* Overridden to implement efficient skipping by skipping full chunks whenever
* possible.
*/
@Override
public long skip(long n) throws IOException
{
if (_inputStreamClosed) {
return -1;
}
if (n <= 0L) {
return n;
}
long skipped;
// if any left to skip, just return that for simplicity
if (_bufferPosition < _bufferLength) {
int left = (_bufferLength - _bufferPosition);
if (n <= left) { // small skip, fulfilled from what we already got
_bufferPosition += (int) n;
return n;
}
_bufferPosition = _bufferLength;
skipped = left;
n -= left;
} else {
skipped = 0L;
}
// and then full-chunk skipping, if possible
while (true) {
int amount = _decoder.skipOrDecodeChunk(_inputStream, _inputBuffer, _decodedBytes, n);
if (amount >= 0) { // successful skipping of the chunk
skipped += amount;
n -= amount;
if (n <= 0L) {
return skipped;
}
continue;
}
if (amount == -1) { // EOF
close();
return skipped;
}
// decoded buffer-full, more than max skip
_bufferLength = -(amount+1);
skipped += n;
_bufferPosition = (int) n;
return skipped;
}
}
/*
///////////////////////////////////////////////////////////////////////
// Extended public API
///////////////////////////////////////////////////////////////////////
*/
/**
* Method that can be used to find underlying {@link InputStream} that
* we read from to get LZF encoded data to decode.
* Will never return null; although underlying stream may be closed
* (if this stream has been closed).
*/
public InputStream getUnderlyingInputStream() {
return _inputStream;
}
/**
* Method that can be called to discard any already buffered input, read
* from input source.
* Specialized method that only makes sense if the underlying {@link InputStream}
* can be repositioned reliably.
*/
public void discardBuffered()
{
_bufferPosition = _bufferLength = 0;
}
/**
* Convenience method that will read and uncompress all data available,
* and write it using given {@link OutputStream}. This avoids having to
* make an intermediate copy of uncompressed data which would be needed
* when doing the same manually.
*
* @param out OutputStream to use for writing content
*
* @return Number of bytes written (uncompressed)
*/
public int readAndWrite(OutputStream out) throws IOException
{
int total = 0;
while (readyBuffer()) {
int avail = _bufferLength - _bufferPosition;
out.write(_decodedBytes, _bufferPosition, avail);
_bufferPosition += avail; // to ensure it looks like we consumed it all
total += avail;
}
return total;
}
/*
///////////////////////////////////////////////////////////////////////
// Internal methods
///////////////////////////////////////////////////////////////////////
*/
/**
* Fill the uncompressed bytes buffer by reading the underlying inputStream.
*
* @throws IOException
*
* @return True if there is now at least one byte to read in the buffer; false
* if there is no more content to read
*/
protected boolean readyBuffer() throws IOException
{
if (_bufferPosition < _bufferLength) {
return true;
}
if (_inputStreamClosed) {
return false;
}
_bufferLength = _decoder.decodeChunk(_inputStream, _inputBuffer, _decodedBytes);
if (_bufferLength < 0) {
close();
return false;
}
_bufferPosition = 0;
return (_bufferPosition < _bufferLength);
}
}
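// Stream-style usage sketch: wrapping an arbitrary InputStream that produces
// LZF data; `fileName` is hypothetical. readAndWrite() (defined above) avoids
// an intermediate copy of the uncompressed content.
import java.io.ByteArrayOutputStream;
import java.io.FileInputStream;
import java.io.IOException;
import com.ning.compress.lzf.LZFInputStream;

class LZFReadExample
{
    static byte[] readAll(String fileName) throws IOException
    {
        try (LZFInputStream in = new LZFInputStream(new FileInputStream(fileName))) {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            in.readAndWrite(bytes);
            return bytes.toByteArray();
        }
    }
}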
// ==== compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/LZFOutputStream.java ====
package com.ning.compress.lzf;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileChannel.MapMode;
import java.nio.channels.WritableByteChannel;
import com.ning.compress.BufferRecycler;
import com.ning.compress.lzf.util.ChunkEncoderFactory;
/**
* Decorator {@link OutputStream} implementation that will compress
* output using LZF compression algorithm, given uncompressed input
* to write. Its counterpart is {@link LZFInputStream}; although
* in some ways {@link LZFCompressingInputStream} can be seen
* as the opposite.
*
* @author jon hartlaub
* @author Tatu Saloranta
*
* @see LZFInputStream
* @see LZFCompressingInputStream
*/
public class LZFOutputStream extends FilterOutputStream implements WritableByteChannel
{
private static final int DEFAULT_OUTPUT_BUFFER_SIZE = LZFChunk.MAX_CHUNK_LEN;
private final ChunkEncoder _encoder;
private final BufferRecycler _recycler;
protected byte[] _outputBuffer;
protected int _position = 0;
/**
* Configuration setting that governs whether basic 'flush()' should
* first complete a block or not.
*
* Default value is 'true'
*/
protected boolean _cfgFinishBlockOnFlush = true;
/**
* Flag that indicates if we have already called '_outputStream.close()'
* (to avoid calling it multiple times)
*/
protected boolean _outputStreamClosed;
/*
///////////////////////////////////////////////////////////////////////
// Construction, configuration
///////////////////////////////////////////////////////////////////////
*/
public LZFOutputStream(final OutputStream outputStream)
{
this(ChunkEncoderFactory.optimalInstance(DEFAULT_OUTPUT_BUFFER_SIZE), outputStream);
}
public LZFOutputStream(final ChunkEncoder encoder, final OutputStream outputStream)
{
this(encoder, outputStream, DEFAULT_OUTPUT_BUFFER_SIZE, encoder._recycler);
}
public LZFOutputStream(final OutputStream outputStream, final BufferRecycler bufferRecycler)
{
this(ChunkEncoderFactory.optimalInstance(bufferRecycler), outputStream, bufferRecycler);
}
public LZFOutputStream(final ChunkEncoder encoder, final OutputStream outputStream, final BufferRecycler bufferRecycler)
{
this(encoder, outputStream, DEFAULT_OUTPUT_BUFFER_SIZE, bufferRecycler);
}
public LZFOutputStream(final ChunkEncoder encoder, final OutputStream outputStream,
final int bufferSize, BufferRecycler bufferRecycler)
{
super(outputStream);
_encoder = encoder;
if (bufferRecycler==null) {
bufferRecycler = _encoder._recycler;
}
_recycler = bufferRecycler;
_outputBuffer = bufferRecycler.allocOutputBuffer(bufferSize);
_outputStreamClosed = false;
}
/**
* Method for defining whether call to {@link #flush} will also complete
* current block (similar to calling {@link #finishBlock()}) or not.
*/
public LZFOutputStream setFinishBlockOnFlush(boolean b) {
_cfgFinishBlockOnFlush = b;
return this;
}
/*
///////////////////////////////////////////////////////////////////////
// OutputStream impl
///////////////////////////////////////////////////////////////////////
*/
@Override
public void write(final int singleByte) throws IOException
{
checkNotClosed();
if (_position >= _outputBuffer.length) {
writeCompressedBlock();
}
_outputBuffer[_position++] = (byte) singleByte;
}
@Override
public void write(final byte[] buffer, int offset, int length) throws IOException
{
checkNotClosed();
final int BUFFER_LEN = _outputBuffer.length;
// simple case first: empty _outputBuffer and "big" input buffer: write first full blocks, if any, without copying
while (_position == 0 && length >= BUFFER_LEN) {
_encoder.encodeAndWriteChunk(buffer, offset, BUFFER_LEN, out);
offset += BUFFER_LEN;
length -= BUFFER_LEN;
}
        // next simple case: buffering only (for trivially short writes)
int free = BUFFER_LEN - _position;
if (free > length) {
System.arraycopy(buffer, offset, _outputBuffer, _position, length);
_position += length;
return;
}
// otherwise, copy whatever we can, flush
System.arraycopy(buffer, offset, _outputBuffer, _position, free);
offset += free;
length -= free;
_position += free;
writeCompressedBlock();
// then write intermediate full blocks, if any, without copying:
while (length >= BUFFER_LEN) {
_encoder.encodeAndWriteChunk(buffer, offset, BUFFER_LEN, out);
offset += BUFFER_LEN;
length -= BUFFER_LEN;
}
// and finally, copy leftovers in buffer, if any
if (length > 0) {
System.arraycopy(buffer, offset, _outputBuffer, 0, length);
}
_position = length;
}
public void write(final InputStream in) throws IOException {
writeCompressedBlock(); // will flush _outputBuffer
int read;
while ((read = in.read(_outputBuffer)) >= 0) {
_position = read;
writeCompressedBlock();
}
}
public void write(final FileChannel in) throws IOException {
MappedByteBuffer src = in.map(MapMode.READ_ONLY, 0, in.size());
write(src);
}
@Override
public synchronized int write(final ByteBuffer src) throws IOException {
int r = src.remaining();
if (r <= 0) {
return r;
}
writeCompressedBlock(); // will flush _outputBuffer
if (src.hasArray()) {
// direct compression from backing array
write(src.array(), src.arrayOffset(), src.limit() - src.arrayOffset());
} else {
// need to copy to heap array first
while (src.hasRemaining()) {
int toRead = Math.min(src.remaining(), _outputBuffer.length);
src.get(_outputBuffer, 0, toRead);
_position = toRead;
writeCompressedBlock();
}
}
return r;
}
@Override
public void flush() throws IOException
{
checkNotClosed();
if (_cfgFinishBlockOnFlush && _position > 0) {
writeCompressedBlock();
}
super.flush();
}
@Override
public boolean isOpen() {
return ! _outputStreamClosed;
}
@Override
public void close() throws IOException
{
if (!_outputStreamClosed) {
if (_position > 0) {
writeCompressedBlock();
}
super.close(); // will flush beforehand
_encoder.close();
_outputStreamClosed = true;
byte[] buf = _outputBuffer;
if (buf != null) {
_outputBuffer = null;
_recycler.releaseOutputBuffer(buf);
}
}
}
/*
///////////////////////////////////////////////////////////////////////
// Additional public methods
///////////////////////////////////////////////////////////////////////
*/
/**
* Method that can be used to find underlying {@link OutputStream} that
 * we write LZF encoded data into, after compressing it.
* Will never return null; although underlying stream may be closed
* (if this stream has been closed).
*/
public OutputStream getUnderlyingOutputStream() {
return out;
}
/**
* Accessor for checking whether call to "flush()" will first finish the
* current block or not.
*/
public boolean getFinishBlockOnFlush() {
return _cfgFinishBlockOnFlush;
}
/**
* Method that can be used to force completion of the current block,
* which means that all buffered data will be compressed into an
* LZF block. This typically results in lower compression ratio
* as larger blocks compress better; but may be necessary for
* network connections to ensure timely sending of data.
*/
public LZFOutputStream finishBlock() throws IOException
{
checkNotClosed();
if (_position > 0) {
writeCompressedBlock();
}
return this;
}
/*
///////////////////////////////////////////////////////////////////////
// Internal methods
///////////////////////////////////////////////////////////////////////
*/
/**
* Compress and write the current block to the OutputStream
*/
protected void writeCompressedBlock() throws IOException
{
int left = _position;
_position = 0;
int offset = 0;
while (left > 0) {
int chunkLen = Math.min(LZFChunk.MAX_CHUNK_LEN, left);
_encoder.encodeAndWriteChunk(_outputBuffer, offset, chunkLen, out);
offset += chunkLen;
left -= chunkLen;
}
}
protected void checkNotClosed() throws IOException
{
if (_outputStreamClosed) {
throw new IOException(getClass().getName()+" already closed");
}
}
}
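// Compression counterpart sketch: bytes written to the stream come out as LZF
// chunks on the wrapped OutputStream; `fileName` is hypothetical.
import java.io.FileOutputStream;
import java.io.IOException;
import com.ning.compress.lzf.LZFOutputStream;

class LZFWriteExample
{
    static void writeAll(String fileName, byte[] data) throws IOException
    {
        try (LZFOutputStream out = new LZFOutputStream(new FileOutputStream(fileName))) {
            out.write(data); // buffered and written as 64k-max LZF chunks
        }
    }
}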
// ==== compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/LZFUncompressor.java ====
package com.ning.compress.lzf;
import java.io.IOException;
import com.ning.compress.BufferRecycler;
import com.ning.compress.DataHandler;
import com.ning.compress.Uncompressor;
import com.ning.compress.lzf.util.ChunkDecoderFactory;
/**
* {@link com.ning.compress.Uncompressor} implementation for uncompressing
* LZF encoded data in "push" mode, in which input is not
* read using {@link java.io.InputStream} but rather pushed to
* uncompressor in variable length chunks.
*/
public class LZFUncompressor extends Uncompressor
{
/*
///////////////////////////////////////////////////////////////////////
// State constants
///////////////////////////////////////////////////////////////////////
*/
/**
* State in which a new block or end-of-stream is expected.
*/
protected final static int STATE_INITIAL = 0;
protected final static int STATE_HEADER_Z_GOTTEN = 1;
protected final static int STATE_HEADER_ZV_GOTTEN = 2;
protected final static int STATE_HEADER_COMPRESSED_0 = 3;
protected final static int STATE_HEADER_COMPRESSED_1 = 4;
protected final static int STATE_HEADER_COMPRESSED_2 = 5;
protected final static int STATE_HEADER_COMPRESSED_3 = 6;
protected final static int STATE_HEADER_COMPRESSED_BUFFERING = 7;
protected final static int STATE_HEADER_UNCOMPRESSED_0 = 8;
protected final static int STATE_HEADER_UNCOMPRESSED_1 = 9;
protected final static int STATE_HEADER_UNCOMPRESSED_STREAMING = 10;
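    // These states track how many bytes of a chunk header ('Z', 'V', type,
    // 2-byte length, plus 2-byte uncompressed length for compressed chunks)
    // have been seen so far, since feedCompressedData() may deliver input
    // fragmented at arbitrary points.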
/*
///////////////////////////////////////////////////////////////////////
// Configuration, helper objects
///////////////////////////////////////////////////////////////////////
*/
/**
* Handler that will receive uncompressed data.
*/
protected final DataHandler _handler;
/**
* Underlying decompressor we use for chunk decompression.
*/
protected final ChunkDecoder _decoder;
protected final BufferRecycler _recycler;
/*
///////////////////////////////////////////////////////////////////////
// Decoder state
///////////////////////////////////////////////////////////////////////
*/
/**
* Current decoding state, which determines meaning of following byte(s).
*/
protected int _state = STATE_INITIAL;
/**
* Flag set if {@link DataHandler} indicates that processing should be
* terminated.
*/
protected boolean _terminated;
/**
* Number of bytes in current, compressed block
*/
protected int _compressedLength;
/**
* Number of bytes from current block, either after uncompressing data
* (for compressed blocks), or included in stream (for uncompressed).
*/
protected int _uncompressedLength;
/**
* Buffer in which compressed input is buffered if necessary, to get
* full chunks for decoding.
*/
protected byte[] _inputBuffer;
/**
     * Buffer used for data uncompressed from <code>_inputBuffer</code>.
     */
    protected byte[] _decodeBuffer;
    // ...
}

// ==== compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/impl/UnsafeChunkDecoder.java ====
package com.ning.compress.lzf.impl;

import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Field;

import sun.misc.Unsafe;

import com.ning.compress.lzf.ChunkDecoder;
import com.ning.compress.lzf.LZFChunk;
import com.ning.compress.lzf.LZFException;

/**
 * Highly optimized {@link ChunkDecoder} implementation that uses
 * Sun JDK's <code>sun.misc.Unsafe</code> class.
 *<p>
* Credits for the idea go to Dain Sundstrom, who kindly suggested this use,
* and is all-around great source for optimization tips and tricks.
* Big thanks also to LZ4-java developers, whose stellar performance made
* me go back and see what more I can do to optimize this code!
*/
@SuppressWarnings("restriction")
public class UnsafeChunkDecoder extends ChunkDecoder
{
private static final Unsafe unsafe;
static {
try {
Field theUnsafe = Unsafe.class.getDeclaredField("theUnsafe");
theUnsafe.setAccessible(true);
unsafe = (Unsafe) theUnsafe.get(null);
}
catch (Exception e) {
throw new RuntimeException(e);
}
}
private static final long BYTE_ARRAY_OFFSET = unsafe.arrayBaseOffset(byte[].class);
// private static final long SHORT_ARRAY_OFFSET = unsafe.arrayBaseOffset(short[].class);
// private static final long SHORT_ARRAY_STRIDE = unsafe.arrayIndexScale(short[].class);
public UnsafeChunkDecoder() { }
@Override
public final int decodeChunk(final InputStream is, final byte[] inputBuffer, final byte[] outputBuffer)
throws IOException
{
/* note: we do NOT read more than 5 bytes because otherwise might need to shuffle bytes
* for output buffer (could perhaps optimize in future?)
*/
int bytesRead = readHeader(is, inputBuffer);
if ((bytesRead < HEADER_BYTES)
|| inputBuffer[0] != LZFChunk.BYTE_Z || inputBuffer[1] != LZFChunk.BYTE_V) {
if (bytesRead == 0) { // probably fine, clean EOF
return -1;
}
_reportCorruptHeader();
}
int type = inputBuffer[2];
int compLen = uint16(inputBuffer, 3);
if (type == LZFChunk.BLOCK_TYPE_NON_COMPRESSED) { // uncompressed
readFully(is, false, outputBuffer, 0, compLen);
return compLen;
}
// compressed
readFully(is, true, inputBuffer, 0, 2+compLen); // first 2 bytes are uncompressed length
int uncompLen = uint16(inputBuffer, 0);
decodeChunk(inputBuffer, 2, outputBuffer, 0, uncompLen);
return uncompLen;
}
@Override
public final void decodeChunk(byte[] in, int inPos, byte[] out, int outPos, int outEnd)
throws LZFException
{
// We need to take care of end condition, leave last 32 bytes out
final int outputEnd8 = outEnd - 8;
final int outputEnd32 = outEnd - 32;
main_loop:
do {
int ctrl = in[inPos++] & 255;
while (ctrl < LZFChunk.MAX_LITERAL) { // literal run(s)
if (outPos > outputEnd32) {
System.arraycopy(in, inPos, out, outPos, ctrl+1);
} else {
copyUpTo32(in, inPos, out, outPos, ctrl);
}
++ctrl;
inPos += ctrl;
outPos += ctrl;
if (outPos >= outEnd) {
break main_loop;
}
ctrl = in[inPos++] & 255;
}
// back reference
int len = ctrl >> 5;
ctrl = -((ctrl & 0x1f) << 8) - 1;
// short back reference? 2 bytes; run lengths of 2 - 8 bytes
if (len < 7) {
ctrl -= in[inPos++] & 255;
if (ctrl < -7 && outPos < outputEnd8) { // non-overlapping? can use efficient bulk copy
final long rawOffset = BYTE_ARRAY_OFFSET + outPos;
unsafe.putLong(out, rawOffset, unsafe.getLong(out, rawOffset + ctrl));
// moveLong(out, outPos, outEnd, ctrl);
outPos += len+2;
continue;
}
// otherwise, byte-by-byte
outPos = copyOverlappingShort(out, outPos, ctrl, len);
continue;
}
// long back reference: 3 bytes, length of up to 264 bytes
len = (in[inPos++] & 255) + 9;
ctrl -= in[inPos++] & 255;
            // First: overlapping case can't use default handling, off line.
if ((ctrl > -9) || (outPos > outputEnd32)) {
outPos = copyOverlappingLong(out, outPos, ctrl, len-9);
continue;
}
// but non-overlapping is simple
if (len <= 32) {
copyUpTo32(out, outPos+ctrl, outPos, len-1);
outPos += len;
continue;
}
copyLong(out, outPos+ctrl, outPos, len, outputEnd32);
outPos += len;
} while (outPos < outEnd);
// sanity check to guard against corrupt data:
if (outPos != outEnd) {
throw new LZFException("Corrupt data: overrun in decompress, input offset "+inPos+", output offset "+outPos);
}
}
@Override
public int skipOrDecodeChunk(final InputStream is, final byte[] inputBuffer,
final byte[] outputBuffer, final long maxToSkip)
throws IOException
{
int bytesRead = readHeader(is, inputBuffer);
if ((bytesRead < HEADER_BYTES)
|| inputBuffer[0] != LZFChunk.BYTE_Z || inputBuffer[1] != LZFChunk.BYTE_V) {
if (bytesRead == 0) { // probably fine, clean EOF
return -1;
}
_reportCorruptHeader();
}
int type = inputBuffer[2];
int compLen = uint16(inputBuffer, 3);
if (type == LZFChunk.BLOCK_TYPE_NON_COMPRESSED) { // uncompressed, simple
if (compLen <= maxToSkip) {
skipFully(is, compLen);
return compLen;
}
readFully(is, false, outputBuffer, 0, compLen);
return -(compLen+1);
}
// compressed: need 2 more bytes to know uncompressed length...
readFully(is, true, inputBuffer, 0, 2);
int uncompLen = uint16(inputBuffer, 0);
// can we just skip it wholesale?
if (uncompLen <= maxToSkip) { // awesome: skip N physical compressed bytes, which mean M logical (uncomp) bytes
skipFully(is, compLen);
return uncompLen;
}
// otherwise, read and uncompress the chunk normally
readFully(is, true, inputBuffer, 2, compLen); // first 2 bytes are uncompressed length
decodeChunk(inputBuffer, 2, outputBuffer, 0, uncompLen);
return -(uncompLen+1);
}
/*
///////////////////////////////////////////////////////////////////////
// Internal methods
///////////////////////////////////////////////////////////////////////
*/
private final int copyOverlappingShort(final byte[] out, int outPos, final int offset, int len)
{
out[outPos] = out[outPos++ + offset];
out[outPos] = out[outPos++ + offset];
switch (len) {
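        // NOTE: intentional fall-through below; each case copies one more byte.
        // Byte-at-a-time copying is required since source and target ranges
        // may overlap when the back-reference offset is shorter than the length.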
case 6:
out[outPos] = out[outPos++ + offset];
case 5:
out[outPos] = out[outPos++ + offset];
case 4:
out[outPos] = out[outPos++ + offset];
case 3:
out[outPos] = out[outPos++ + offset];
case 2:
out[outPos] = out[outPos++ + offset];
case 1:
out[outPos] = out[outPos++ + offset];
}
return outPos;
}
private final static int copyOverlappingLong(final byte[] out, int outPos, final int offset, int len)
{
// otherwise manual copy: so first just copy 9 bytes we know are needed
out[outPos] = out[outPos++ + offset];
out[outPos] = out[outPos++ + offset];
out[outPos] = out[outPos++ + offset];
out[outPos] = out[outPos++ + offset];
out[outPos] = out[outPos++ + offset];
out[outPos] = out[outPos++ + offset];
out[outPos] = out[outPos++ + offset];
out[outPos] = out[outPos++ + offset];
out[outPos] = out[outPos++ + offset];
// then loop
// Odd: after extensive profiling, looks like magic number
// for unrolling is 4: with 8 performance is worse (even
// bit less than with no unrolling).
len += outPos;
final int end = len - 3;
while (outPos < end) {
out[outPos] = out[outPos++ + offset];
out[outPos] = out[outPos++ + offset];
out[outPos] = out[outPos++ + offset];
out[outPos] = out[outPos++ + offset];
}
switch (len - outPos) {
case 3:
out[outPos] = out[outPos++ + offset];
case 2:
out[outPos] = out[outPos++ + offset];
case 1:
out[outPos] = out[outPos++ + offset];
}
return outPos;
}
private final static void copyUpTo32(byte[] buffer, int inputIndex, int outputIndex, int lengthMinusOne)
{
long inPtr = BYTE_ARRAY_OFFSET + inputIndex;
long outPtr = BYTE_ARRAY_OFFSET + outputIndex;
unsafe.putLong(buffer, outPtr, unsafe.getLong(buffer, inPtr));
if (lengthMinusOne > 7) {
inPtr += 8;
outPtr += 8;
unsafe.putLong(buffer, outPtr, unsafe.getLong(buffer, inPtr));
if (lengthMinusOne > 15) {
inPtr += 8;
outPtr += 8;
unsafe.putLong(buffer, outPtr, unsafe.getLong(buffer, inPtr));
if (lengthMinusOne > 23) {
inPtr += 8;
outPtr += 8;
unsafe.putLong(buffer, outPtr, unsafe.getLong(buffer, inPtr));
}
}
}
}
private final static void copyUpTo32(byte[] in, int inputIndex, byte[] out, int outputIndex, int lengthMinusOne)
{
long inPtr = BYTE_ARRAY_OFFSET + inputIndex;
long outPtr = BYTE_ARRAY_OFFSET + outputIndex;
unsafe.putLong(out, outPtr, unsafe.getLong(in, inPtr));
if (lengthMinusOne > 7) {
inPtr += 8;
outPtr += 8;
unsafe.putLong(out, outPtr, unsafe.getLong(in, inPtr));
if (lengthMinusOne > 15) {
inPtr += 8;
outPtr += 8;
unsafe.putLong(out, outPtr, unsafe.getLong(in, inPtr));
if (lengthMinusOne > 23) {
inPtr += 8;
outPtr += 8;
unsafe.putLong(out, outPtr, unsafe.getLong(in, inPtr));
}
}
}
}
private final static void copyLong(byte[] buffer, int inputIndex, int outputIndex, int length,
int outputEnd8)
{
if ((outputIndex + length) > outputEnd8) {
copyLongTail(buffer, inputIndex,outputIndex, length);
return;
}
long inPtr = BYTE_ARRAY_OFFSET + inputIndex;
long outPtr = BYTE_ARRAY_OFFSET + outputIndex;
while (length >= 8) {
unsafe.putLong(buffer, outPtr, unsafe.getLong(buffer, inPtr));
inPtr += 8;
outPtr += 8;
length -= 8;
}
if (length > 4) {
unsafe.putLong(buffer, outPtr, unsafe.getLong(buffer, inPtr));
} else if (length > 0) {
unsafe.putInt(buffer, outPtr, unsafe.getInt(buffer, inPtr));
}
}
private final static void copyLongTail(byte[] buffer, int inputIndex, int outputIndex, int length)
{
for (final int inEnd = inputIndex + length; inputIndex < inEnd; ) {
buffer[outputIndex++] = buffer[inputIndex++];
}
}
}
// ==== compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/impl/UnsafeChunkEncoder.java ====
package com.ning.compress.lzf.impl;
import com.ning.compress.BufferRecycler;
import java.lang.reflect.Field;
import sun.misc.Unsafe;
import com.ning.compress.lzf.ChunkEncoder;
import com.ning.compress.lzf.LZFChunk;
/**
* {@link ChunkEncoder} implementation that handles actual encoding of individual chunks,
 * using Sun's <code>sun.misc.Unsafe</code> functionality.
 */
public abstract class UnsafeChunkEncoder
    extends ChunkEncoder
{
    // ...
}

// ==== compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/parallel/PLZFOutputStream.java ====
package com.ning.compress.lzf.parallel;

import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.channels.WritableByteChannel;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import com.ning.compress.lzf.LZFChunk;

/**
 * Decorator {@link OutputStream} implementation that will compress output
 * using LZF compression algorithm, given uncompressed input to write.
 *<p>
* This class uses a parallel implementation to make use of all available cores,
* modulo system load.
*
* @author Tatu Saloranta
* @author Cédrik Lime
*
* @see com.ning.compress.lzf.LZFInputStream
* @see com.ning.compress.lzf.LZFCompressingInputStream
* @see com.ning.compress.lzf.LZFOutputStream
*/
public class PLZFOutputStream extends FilterOutputStream implements WritableByteChannel
{
private static final int DEFAULT_OUTPUT_BUFFER_SIZE = LZFChunk.MAX_CHUNK_LEN;
protected byte[] _outputBuffer;
protected int _position = 0;
/**
* Flag that indicates if we have already called '_outputStream.close()'
* (to avoid calling it multiple times)
*/
protected boolean _outputStreamClosed;
private BlockManager blockManager;
private final ExecutorService compressExecutor;
private final ExecutorService writeExecutor;
volatile Exception writeException = null;
/*
///////////////////////////////////////////////////////////////////////
// Construction, configuration
///////////////////////////////////////////////////////////////////////
*/
public PLZFOutputStream(final OutputStream outputStream) {
this(outputStream, DEFAULT_OUTPUT_BUFFER_SIZE, getNThreads());
}
protected PLZFOutputStream(final OutputStream outputStream, int nThreads) {
this(outputStream, DEFAULT_OUTPUT_BUFFER_SIZE, nThreads);
}
protected PLZFOutputStream(final OutputStream outputStream, final int bufferSize, int nThreads) {
super(outputStream);
_outputStreamClosed = false;
        compressExecutor = new ThreadPoolExecutor(nThreads, nThreads, 60L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>());
        // ...
    }
}

// ==== compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/parallel/package-info.java ====
/**
 * The main abstraction to use is {@link com.ning.compress.lzf.parallel.PLZFOutputStream},
 * which orchestrates operation of multi-thread compression.
*/
package com.ning.compress.lzf.parallel;
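// Usage sketch for the parallel stream (the constructor shown above picks a
// default thread count); `fileName` is hypothetical.
import java.io.FileOutputStream;
import java.io.IOException;
import com.ning.compress.lzf.parallel.PLZFOutputStream;

class PLZFWriteExample
{
    static void compressTo(String fileName, byte[] data) throws IOException
    {
        try (PLZFOutputStream out = new PLZFOutputStream(new FileOutputStream(fileName))) {
            out.write(data); // chunks are compressed concurrently on worker threads
        }
    }
}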
// ==== compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/util/ChunkDecoderFactory.java ====
package com.ning.compress.lzf.util;
import com.ning.compress.lzf.ChunkDecoder;
import com.ning.compress.lzf.impl.VanillaChunkDecoder;
import com.ning.compress.lzf.impl.UnsafeChunkDecoder;
/**
* Simple helper class used for loading
* {@link ChunkDecoder} implementations, based on criteria
* such as "fastest available".
*
* Yes, it looks butt-ugly, but does the job. Nonetheless, if anyone
* has lipstick for this pig, let me know.
*/
public class ChunkDecoderFactory
{
private final static ChunkDecoderFactory _instance;
static {
        Class<?> impl = null;
try {
// first, try loading optimal one, which uses Sun JDK Unsafe...
            impl = (Class<?>) Class.forName(UnsafeChunkDecoder.class.getName());
} catch (Throwable t) { }
if (impl == null) {
impl = VanillaChunkDecoder.class;
}
_instance = new ChunkDecoderFactory(impl);
}
    private final Class<? extends ChunkDecoder> _implClass;
    @SuppressWarnings("unchecked")
    private ChunkDecoderFactory(Class<?> imp)
    {
        _implClass = (Class<? extends ChunkDecoder>) imp;
}
/*
///////////////////////////////////////////////////////////////////////
// Public API
///////////////////////////////////////////////////////////////////////
*/
/**
* Method to use for getting decoder instance that uses the most optimal
* available methods for underlying data access. It should be safe to call
* this method as implementations are dynamically loaded; however, on some
* non-standard platforms it may be necessary to either directly load
* instances, or use {@link #safeInstance()}.
*/
public static ChunkDecoder optimalInstance() {
try {
return _instance._implClass.newInstance();
} catch (Exception e) {
throw new IllegalStateException("Failed to load a ChunkDecoder instance ("+e.getClass().getName()+"): "
+e.getMessage(), e);
}
}
/**
* Method that can be used to ensure that a "safe" decoder instance is loaded.
* Safe here means that it should work on any and all Java platforms.
*/
public static ChunkDecoder safeInstance() {
// this will always succeed loading; no need to use dynamic class loading or instantiation
return new VanillaChunkDecoder();
}
}
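For orientation, a minimal usage sketch of this factory follows; the sketch's own class and method names are illustrative, not part of the library.

import java.io.IOException;
import com.ning.compress.lzf.ChunkDecoder;
import com.ning.compress.lzf.util.ChunkDecoderFactory;

public class DecoderFactoryExample {
    // Sketch: obtain the fastest available decoder once, then reuse it.
    static byte[] decode(byte[] compressed) throws IOException {
        ChunkDecoder decoder = ChunkDecoderFactory.optimalInstance();
        return decoder.decode(compressed);
    }
}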
compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/util/ChunkEncoderFactory.java 0000664 0000000 0000000 00000014704 12373557273 0032327 0 ustar 00root root 0000000 0000000 package com.ning.compress.lzf.util;
import com.ning.compress.BufferRecycler;
import com.ning.compress.lzf.ChunkEncoder;
import com.ning.compress.lzf.LZFChunk;
import com.ning.compress.lzf.impl.UnsafeChunkEncoders;
import com.ning.compress.lzf.impl.VanillaChunkEncoder;
/**
* Simple helper class used for loading
* {@link ChunkEncoder} implementations, based on criteria
* such as "fastest available" or "safe to run anywhere".
*/
public class ChunkEncoderFactory
{
/*
///////////////////////////////////////////////////////////////////////
// Public API
///////////////////////////////////////////////////////////////////////
*/
/**
 * Factory method for constructing an encoder that uses the fastest available
 * implementation, but that is always passed its output buffer externally
 * (falling back to the "safe" non-allocating variant if the optimal one
 * cannot be loaded).
 *
 * Uses a ThreadLocal soft-referenced BufferRecycler instance.
*/
public static ChunkEncoder optimalNonAllocatingInstance(int totalLength) {
try {
return UnsafeChunkEncoders.createNonAllocatingEncoder(totalLength);
} catch (Exception e) {
return safeNonAllocatingInstance(totalLength);
}
}
/**
 * Factory method for constructing a "safe" encoder: one that should work on
 * any Java platform (no reliance on sun.misc.Unsafe).
 *
 * Uses a ThreadLocal soft-referenced BufferRecycler instance.
*
* @param totalLength Expected total length of content to compress; only matters
* for content that is smaller than maximum chunk size (64k), to optimize
* encoding hash tables
*/
public static ChunkEncoder safeInstance(int totalLength) {
return new VanillaChunkEncoder(totalLength);
}
/**
* Factory method for constructing encoder that is always passed buffer
* externally, so that it will not (nor need) allocate encoding buffer.
* Uses a ThreadLocal soft-referenced BufferRecycler instance.
*/
public static ChunkEncoder safeNonAllocatingInstance(int totalLength) {
return VanillaChunkEncoder.nonAllocatingEncoder(totalLength);
}
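These factory methods are typically combined with LZFEncoder's append-style API; a minimal sketch under that assumption follows (the sketch's own names are illustrative, not part of the library).

import java.util.Arrays;
import com.ning.compress.lzf.ChunkEncoder;
import com.ning.compress.lzf.LZFEncoder;
import com.ning.compress.lzf.util.ChunkEncoderFactory;

public class EncoderFactoryExample {
    // Sketch: encode into a caller-sized workspace, then trim to actual length.
    static byte[] encode(byte[] source) {
        ChunkEncoder encoder = ChunkEncoderFactory.optimalInstance(source.length);
        byte[] workspace = new byte[LZFEncoder.estimateMaxWorkspaceSize(source.length)];
        int end = LZFEncoder.appendEncoded(encoder, source, 0, source.length, workspace, 0);
        return Arrays.copyOf(workspace, end);
    }
}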
/**
 * Helper class that allows use of LZF compression even if a library requires
 * use of {@link FileInputStream}.
 *
 * Note that use of this class is not recommended unless you absolutely must
 * use a {@link FileInputStream} instance; otherwise basic {@link LZFInputStream}
 * (which uses aggregation for underlying streams) is more appropriate.
*
*
* Implementation note: much of the code is just copied from {@link LZFInputStream},
* so care must be taken to keep implementations in sync if there are fixes.
*/
public class LZFFileInputStream
extends FileInputStream
{
/**
* Underlying decoder in use.
*/
protected final ChunkDecoder _decompressor;
/**
* Object that handles details of buffer recycling
*/
protected final BufferRecycler _recycler;
/**
* Flag that indicates if we have already called 'inputStream.close()'
* (to avoid calling it multiple times)
*/
protected boolean _inputStreamClosed;
/**
* Flag that indicates whether we force full reads (reading of as many
* bytes as requested), or 'optimal' reads (up to as many as available,
* but at least one). Default is false, meaning that 'optimal' read
* is used.
*/
protected boolean _cfgFullReads = false;
/**
* the current buffer of compressed bytes (from which to decode)
* */
protected byte[] _inputBuffer;
/**
* the buffer of uncompressed bytes from which content is read
* */
protected byte[] _decodedBytes;
/**
 * The current position (next byte to output) in the uncompressed bytes buffer.
* */
protected int _bufferPosition = 0;
/**
* Length of the current uncompressed bytes buffer
* */
protected int _bufferLength = 0;
/**
* Wrapper object we use to allow decoder to read directly from the
* stream, without ending in infinite loop...
*/
protected final Wrapper _wrapper;
/*
///////////////////////////////////////////////////////////////////////
// Construction, configuration
///////////////////////////////////////////////////////////////////////
*/
public LZFFileInputStream(File file) throws FileNotFoundException {
this(file, ChunkDecoderFactory.optimalInstance(), BufferRecycler.instance());
}
public LZFFileInputStream(FileDescriptor fdObj) {
this(fdObj, ChunkDecoderFactory.optimalInstance(), BufferRecycler.instance());
}
public LZFFileInputStream(String name) throws FileNotFoundException {
this(name, ChunkDecoderFactory.optimalInstance(), BufferRecycler.instance());
}
public LZFFileInputStream(File file, ChunkDecoder decompressor) throws FileNotFoundException
{
this(file, decompressor, BufferRecycler.instance());
}
public LZFFileInputStream(FileDescriptor fdObj, ChunkDecoder decompressor)
{
this(fdObj, decompressor, BufferRecycler.instance());
}
public LZFFileInputStream(String name, ChunkDecoder decompressor) throws FileNotFoundException
{
this(name, decompressor, BufferRecycler.instance());
}
public LZFFileInputStream(File file, ChunkDecoder decompressor, BufferRecycler bufferRecycler) throws FileNotFoundException
{
super(file);
_decompressor = decompressor;
_recycler = bufferRecycler;
_inputStreamClosed = false;
_inputBuffer = bufferRecycler.allocInputBuffer(LZFChunk.MAX_CHUNK_LEN);
_decodedBytes = bufferRecycler.allocDecodeBuffer(LZFChunk.MAX_CHUNK_LEN);
_wrapper = new Wrapper();
}
public LZFFileInputStream(FileDescriptor fdObj, ChunkDecoder decompressor, BufferRecycler bufferRecycler)
{
super(fdObj);
_decompressor = decompressor;
_recycler = bufferRecycler;
_inputStreamClosed = false;
_inputBuffer = bufferRecycler.allocInputBuffer(LZFChunk.MAX_CHUNK_LEN);
_decodedBytes = bufferRecycler.allocDecodeBuffer(LZFChunk.MAX_CHUNK_LEN);
_wrapper = new Wrapper();
}
public LZFFileInputStream(String name, ChunkDecoder decompressor, BufferRecycler bufferRecycler) throws FileNotFoundException
{
super(name);
_decompressor = decompressor;
_recycler = bufferRecycler;
_inputStreamClosed = false;
_inputBuffer = bufferRecycler.allocInputBuffer(LZFChunk.MAX_CHUNK_LEN);
_decodedBytes = bufferRecycler.allocDecodeBuffer(LZFChunk.MAX_CHUNK_LEN);
_wrapper = new Wrapper();
}
/**
 * Method that can be used to define whether reads should be "full" or
* "optimal": former means that full compressed blocks are read right
* away as needed, optimal that only smaller chunks are read at a time,
* more being read as needed.
*/
public void setUseFullReads(boolean b) {
_cfgFullReads = b;
}
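A short sketch of the effect (the helper class itself is illustrative): with full reads enabled, a block read keeps decoding chunks until the requested length is satisfied or end-of-content is reached.

import java.io.IOException;
import com.ning.compress.lzf.util.LZFFileInputStream;

public class FullReadExample {
    // Sketch: force read(buf, 0, buf.length) to return buf.length bytes
    // unless end-of-content is reached first.
    static int readFully(LZFFileInputStream in, byte[] buf) throws IOException {
        in.setUseFullReads(true);
        return in.read(buf, 0, buf.length);
    }
}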
/*
///////////////////////////////////////////////////////////////////////
// FileInputStream overrides
///////////////////////////////////////////////////////////////////////
*/
@Override
public int available()
{
if (_inputStreamClosed) { // javadocs suggest 0 for closed as well (not -1)
return 0;
}
int left = (_bufferLength - _bufferPosition);
return (left <= 0) ? 0 : left;
}
@Override
public void close() throws IOException
{
_bufferPosition = _bufferLength = 0;
byte[] buf = _inputBuffer;
if (buf != null) {
_inputBuffer = null;
_recycler.releaseInputBuffer(buf);
}
buf = _decodedBytes;
if (buf != null) {
_decodedBytes = null;
_recycler.releaseDecodeBuffer(buf);
}
if (!_inputStreamClosed) {
_inputStreamClosed = true;
super.close();
}
}
// fine as is: don't override
// public FileChannel getChannel();
// final, can't override:
//public FileDescriptor getFD();
@Override
public int read() throws IOException
{
if (!readyBuffer()) {
return -1;
}
return _decodedBytes[_bufferPosition++] & 255;
}
@Override
public int read(byte[] b) throws IOException
{
return read(b, 0, b.length);
}
@Override
public int read(byte[] buffer, int offset, int length) throws IOException
{
if (!readyBuffer()) {
return -1;
}
if (length < 1) {
return 0;
}
// First let's read however much data we happen to have...
int chunkLength = Math.min(_bufferLength - _bufferPosition, length);
System.arraycopy(_decodedBytes, _bufferPosition, buffer, offset, chunkLength);
_bufferPosition += chunkLength;
if (chunkLength == length || !_cfgFullReads) {
return chunkLength;
}
// Need more data, then
int totalRead = chunkLength;
do {
offset += chunkLength;
if (!readyBuffer()) {
break;
}
chunkLength = Math.min(_bufferLength - _bufferPosition, (length - totalRead));
System.arraycopy(_decodedBytes, _bufferPosition, buffer, offset, chunkLength);
_bufferPosition += chunkLength;
totalRead += chunkLength;
} while (totalRead < length);
return totalRead;
}
/**
* Overridden to just skip at most a single chunk at a time
*/
/*
@Override
public long skip(long n) throws IOException
{
if (!readyBuffer()) {
return -1L;
}
int left = (_bufferLength - _bufferPosition);
// either way, just skip whatever we have decoded
if (left > n) {
left = (int) n;
}
_bufferPosition += left;
return left;
}
*/
/**
* Overridden to implement efficient skipping by skipping full chunks whenever
* possible.
*/
@Override
public long skip(long n) throws IOException
{
if (_inputStreamClosed) {
return -1;
}
if (n <= 0L) {
return n;
}
long skipped;
// if any left to skip, just return that for simplicity
if (_bufferPosition < _bufferLength) {
int left = (_bufferLength - _bufferPosition);
if (n <= left) { // small skip, fulfilled from what we already got
_bufferPosition += (int) n;
return n;
}
_bufferPosition = _bufferLength;
skipped = left;
n -= left;
} else {
skipped = 0L;
}
// and then full-chunk skipping, if possible
while (true) {
int amount = _decompressor.skipOrDecodeChunk(_wrapper, _inputBuffer, _decodedBytes, n);
if (amount >= 0) { // successful skipping of the chunk
skipped += amount;
n -= amount;
if (n <= 0L) {
return skipped;
}
continue;
}
if (amount == -1) { // EOF
close();
return skipped;
}
// decoded buffer-full, more than max skip
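            // (skipOrDecodeChunk signals this by returning -(decodedLength+1);
            // inverted below to recover the decoded chunk length)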
_bufferLength = -(amount+1);
skipped += n;
_bufferPosition = (int) n;
return skipped;
}
}
/*
///////////////////////////////////////////////////////////////////////
// Extended public API
///////////////////////////////////////////////////////////////////////
*/
/**
* Convenience method that will read and uncompress all data available,
* and write it using given {@link OutputStream}. This avoids having to
* make an intermediate copy of uncompressed data which would be needed
* when doing the same manually.
*
* @param out OutputStream to use for writing content
*
* @return Number of bytes written (uncompressed)
*/
public int readAndWrite(OutputStream out) throws IOException
{
int total = 0;
while (readyBuffer()) {
int avail = _bufferLength - _bufferPosition;
out.write(_decodedBytes, _bufferPosition, avail);
_bufferPosition += avail; // to ensure it looks like we consumed it all
total += avail;
}
return total;
}
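A hedged sketch of this convenience method in use (file paths are placeholders; the sketch's class name is illustrative):

import java.io.FileOutputStream;
import java.io.IOException;
import com.ning.compress.lzf.util.LZFFileInputStream;

public class ReadAndWriteExample {
    // Sketch: stream-decompress one LZF file into a plain file, no extra copy.
    static int unpack() throws IOException {
        LZFFileInputStream in = new LZFFileInputStream("data.lzf");
        FileOutputStream out = new FileOutputStream("data.raw");
        try {
            return in.readAndWrite(out); // returns number of uncompressed bytes
        } finally {
            in.close(); // safe: close() is guarded against repeat calls
            out.close();
        }
    }
}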
/*
///////////////////////////////////////////////////////////////////////
// Internal methods
///////////////////////////////////////////////////////////////////////
*/
/**
* Fill the uncompressed bytes buffer by reading the underlying inputStream.
* @throws IOException
*/
protected boolean readyBuffer() throws IOException
{
if (_inputStreamClosed) {
throw new IOException("Input stream closed");
}
if (_bufferPosition < _bufferLength) {
return true;
}
_bufferLength = _decompressor.decodeChunk(_wrapper, _inputBuffer, _decodedBytes);
if (_bufferLength < 0) {
close();
return false;
}
_bufferPosition = 0;
return (_bufferPosition < _bufferLength);
}
protected final int readRaw(byte[] buffer, int offset, int length) throws IOException {
return super.read(buffer, offset, length);
}
protected final long skipRaw(long amount) throws IOException {
return super.skip(amount);
}
/*
///////////////////////////////////////////////////////////////////////
// Helper class(es)
///////////////////////////////////////////////////////////////////////
*/
/**
* This simple wrapper is needed to re-route read calls so that they will
* use "raw" reads
*/
private final class Wrapper extends InputStream
{
@Override
public void close() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public int read() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public int read(byte[] buffer, int offset, int length) throws IOException {
return readRaw(buffer, offset, length);
}
@Override
public int read(byte[] buffer) throws IOException {
return readRaw(buffer, 0, buffer.length);
}
@Override
public long skip(long n) throws IOException {
return skipRaw(n);
}
}
}
compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/util/LZFFileOutputStream.java 0000664 0000000 0000000 00000031600 12373557273 0032251 0 ustar 00root root 0000000 0000000 package com.ning.compress.lzf.util;
import java.io.File;
import java.io.FileDescriptor;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileChannel.MapMode;
import java.nio.channels.WritableByteChannel;
import com.ning.compress.BufferRecycler;
import com.ning.compress.lzf.ChunkEncoder;
import com.ning.compress.lzf.LZFChunk;
import com.ning.compress.lzf.LZFOutputStream;
/**
* Helper class that allows use of LZF compression even if a library requires
* use of {@link FileOutputStream}.
*
* Note that use of this class is not recommended unless you absolutely must
* use a {@link FileOutputStream} instance; otherwise basic {@link LZFOutputStream}
 * (which uses aggregation for underlying streams) is more appropriate.
*
* Implementation note: much of the code is just copied from {@link LZFOutputStream},
* so care must be taken to keep implementations in sync if there are fixes.
*/
public class LZFFileOutputStream extends FileOutputStream implements WritableByteChannel
{
private static final int OUTPUT_BUFFER_SIZE = LZFChunk.MAX_CHUNK_LEN;
private final ChunkEncoder _encoder;
private final BufferRecycler _recycler;
protected byte[] _outputBuffer;
protected int _position = 0;
/**
* Configuration setting that governs whether basic 'flush()' should
* first complete a block or not.
*
* Default value is 'true'.
*/
protected boolean _cfgFinishBlockOnFlush = true;
/**
* Flag that indicates if we have already called '_outputStream.close()'
* (to avoid calling it multiple times)
*/
protected boolean _outputStreamClosed;
/**
 * Wrapper object we use to allow encoder to write directly to the
* stream, without ending in infinite loop...
*/
private final Wrapper _wrapper;
/*
///////////////////////////////////////////////////////////////////////
// Construction, configuration
///////////////////////////////////////////////////////////////////////
*/
public LZFFileOutputStream(File file) throws FileNotFoundException {
this(ChunkEncoderFactory.optimalInstance(OUTPUT_BUFFER_SIZE), file);
}
public LZFFileOutputStream(File file, boolean append) throws FileNotFoundException {
this(ChunkEncoderFactory.optimalInstance(OUTPUT_BUFFER_SIZE), file, append);
}
public LZFFileOutputStream(FileDescriptor fdObj) {
this(ChunkEncoderFactory.optimalInstance(OUTPUT_BUFFER_SIZE), fdObj);
}
public LZFFileOutputStream(String name) throws FileNotFoundException {
this(ChunkEncoderFactory.optimalInstance(OUTPUT_BUFFER_SIZE), name);
}
public LZFFileOutputStream(String name, boolean append) throws FileNotFoundException {
this(ChunkEncoderFactory.optimalInstance(OUTPUT_BUFFER_SIZE), name, append);
}
public LZFFileOutputStream(ChunkEncoder encoder, File file) throws FileNotFoundException {
this(encoder, file, encoder.getBufferRecycler());
}
public LZFFileOutputStream(ChunkEncoder encoder, File file, boolean append) throws FileNotFoundException {
this(encoder, file, append, encoder.getBufferRecycler());
}
public LZFFileOutputStream(ChunkEncoder encoder, FileDescriptor fdObj) {
this(encoder, fdObj, encoder.getBufferRecycler());
}
public LZFFileOutputStream(ChunkEncoder encoder, String name) throws FileNotFoundException {
this(encoder, name, encoder.getBufferRecycler());
}
public LZFFileOutputStream(ChunkEncoder encoder, String name, boolean append) throws FileNotFoundException {
this(encoder, name, append, encoder.getBufferRecycler());
}
public LZFFileOutputStream(ChunkEncoder encoder, File file, BufferRecycler bufferRecycler) throws FileNotFoundException {
super(file);
_encoder = encoder;
if (bufferRecycler==null) {
bufferRecycler = encoder.getBufferRecycler();
}
_recycler = bufferRecycler;
_outputBuffer = bufferRecycler.allocOutputBuffer(OUTPUT_BUFFER_SIZE);
_wrapper = new Wrapper();
}
public LZFFileOutputStream(ChunkEncoder encoder, File file, boolean append, BufferRecycler bufferRecycler) throws FileNotFoundException {
super(file, append);
_encoder = encoder;
_recycler = bufferRecycler;
_outputBuffer = bufferRecycler.allocOutputBuffer(OUTPUT_BUFFER_SIZE);
_wrapper = new Wrapper();
}
public LZFFileOutputStream(ChunkEncoder encoder, FileDescriptor fdObj, BufferRecycler bufferRecycler) {
super(fdObj);
_encoder = encoder;
_recycler = bufferRecycler;
_outputBuffer = bufferRecycler.allocOutputBuffer(OUTPUT_BUFFER_SIZE);
_wrapper = new Wrapper();
}
public LZFFileOutputStream(ChunkEncoder encoder, String name, BufferRecycler bufferRecycler) throws FileNotFoundException {
super(name);
_encoder = encoder;
_recycler = bufferRecycler;
_outputBuffer = bufferRecycler.allocOutputBuffer(OUTPUT_BUFFER_SIZE);
_wrapper = new Wrapper();
}
public LZFFileOutputStream(ChunkEncoder encoder, String name, boolean append, BufferRecycler bufferRecycler) throws FileNotFoundException {
super(name, append);
_encoder = encoder;
_recycler = bufferRecycler;
_outputBuffer = bufferRecycler.allocOutputBuffer(OUTPUT_BUFFER_SIZE);
_wrapper = new Wrapper();
}
/**
* Method for defining whether call to {@link #flush} will also complete
* current block (similar to calling {@link #finishBlock()}) or not.
*/
public LZFFileOutputStream setFinishBlockOnFlush(boolean b) {
_cfgFinishBlockOnFlush = b;
return this;
}
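A sketch of the trade-off (the payload and path are placeholders): leaving the flag enabled favors timeliness, while disabling it favors compression ratio, since blocks stay larger.

import java.io.IOException;
import com.ning.compress.lzf.util.LZFFileOutputStream;

public class FlushConfigExample {
    // Sketch: with block-finish-on-flush disabled, flush() only pushes through
    // already-completed LZF blocks; buffered input keeps accumulating.
    static void writeLoosely(byte[] payload) throws IOException {
        LZFFileOutputStream out =
            new LZFFileOutputStream("data.lzf").setFinishBlockOnFlush(false); // placeholder path
        out.write(payload);
        out.flush(); // does not force a (small) block boundary here
        out.close(); // close() still compresses and writes whatever remains
    }
}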
/*
///////////////////////////////////////////////////////////////////////
// FileOutputStream overrides
///////////////////////////////////////////////////////////////////////
*/
@Override
public boolean isOpen() {
return ! _outputStreamClosed;
}
@Override
public void close() throws IOException
{
if (!_outputStreamClosed) {
if (_position > 0) {
writeCompressedBlock();
}
super.flush();
super.close();
_outputStreamClosed = true;
_encoder.close();
byte[] buf = _outputBuffer;
if (buf != null) {
_outputBuffer = null;
_recycler.releaseOutputBuffer(buf);
}
}
}
@Override
public void flush() throws IOException
{
checkNotClosed();
if (_cfgFinishBlockOnFlush && _position > 0) {
writeCompressedBlock();
}
super.flush();
}
// fine as is: don't override
// public FileChannel getChannel();
// final, can't override:
// public FileDescriptor getFD();
@Override
public void write(byte[] b) throws IOException
{
write(b, 0, b.length);
}
@Override
public void write(byte[] buffer, int offset, int length) throws IOException
{
checkNotClosed();
final int BUFFER_LEN = _outputBuffer.length;
// simple case first: empty _outputBuffer and "big" input buffer: write first full blocks, if any, without copying
while (_position == 0 && length >= BUFFER_LEN) {
_encoder.encodeAndWriteChunk(buffer, offset, BUFFER_LEN, _wrapper);
offset += BUFFER_LEN;
length -= BUFFER_LEN;
}
        // next simple case: buffering only (for trivially short writes)
int free = BUFFER_LEN - _position;
if (free > length) {
System.arraycopy(buffer, offset, _outputBuffer, _position, length);
_position += length;
return;
}
// otherwise, copy whatever we can, flush
System.arraycopy(buffer, offset, _outputBuffer, _position, free);
offset += free;
length -= free;
_position += free;
writeCompressedBlock();
// then write intermediate full blocks, if any, without copying:
while (length >= BUFFER_LEN) {
_encoder.encodeAndWriteChunk(buffer, offset, BUFFER_LEN, _wrapper);
offset += BUFFER_LEN;
length -= BUFFER_LEN;
}
// and finally, copy leftovers in buffer, if any
if (length > 0) {
System.arraycopy(buffer, offset, _outputBuffer, 0, length);
}
_position = length;
}
@Override
public void write(int b) throws IOException
{
checkNotClosed();
if (_position >= _outputBuffer.length) {
writeCompressedBlock();
}
_outputBuffer[_position++] = (byte) b;
}
public void write(final InputStream in) throws IOException {
writeCompressedBlock(); // will flush _outputBuffer
int read;
while ((read = in.read(_outputBuffer)) >= 0) {
_position = read;
writeCompressedBlock();
}
}
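A minimal sketch of this bulk method (paths are illustrative): the input stream is drained in output-buffer-sized (64k) slices, each compressed as its own block.

import java.io.FileInputStream;
import java.io.IOException;
import com.ning.compress.lzf.util.LZFFileOutputStream;

public class BulkCompressExample {
    // Sketch: compress a whole file by handing its stream to write(InputStream).
    static void pack() throws IOException {
        FileInputStream raw = new FileInputStream("data.raw");
        LZFFileOutputStream out = new LZFFileOutputStream("data.lzf");
        out.write(raw);
        out.close();
        raw.close();
    }
}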
/*
///////////////////////////////////////////////////////////////////////
// WritableByteChannel implementation
///////////////////////////////////////////////////////////////////////
*/
/* 26-Nov-2013, tatu: Why is this synchronized? Pretty much nothing else is,
* so why this method?
*/
@Override
public synchronized int write(final ByteBuffer src) throws IOException {
int r = src.remaining();
if (r <= 0) {
return r;
}
writeCompressedBlock(); // will flush _outputBuffer
        if (src.hasArray()) {
            // direct compression from backing array; honor current position
            write(src.array(), src.arrayOffset() + src.position(), src.remaining());
            src.position(src.limit());
} else {
// need to copy to heap array first
while (src.hasRemaining()) {
int toRead = Math.min(src.remaining(), _outputBuffer.length);
src.get(_outputBuffer, 0, toRead);
_position = toRead;
writeCompressedBlock();
}
}
return r;
}
public void write(final FileChannel in) throws IOException {
MappedByteBuffer src = in.map(MapMode.READ_ONLY, 0, in.size());
write(src);
}
/*
///////////////////////////////////////////////////////////////////////
// Additional public methods
///////////////////////////////////////////////////////////////////////
*/
/**
* Accessor for checking whether call to "flush()" will first finish the
* current block or not
*/
public boolean getFinishBlockOnFlush() {
return _cfgFinishBlockOnFlush;
}
/**
* Method that can be used to force completion of the current block,
* which means that all buffered data will be compressed into an
* LZF block. This typically results in lower compression ratio
* as larger blocks compress better; but may be necessary for
* network connections to ensure timely sending of data.
*/
public LZFFileOutputStream finishBlock() throws IOException
{
checkNotClosed();
if (_position > 0) {
writeCompressedBlock();
}
return this;
}
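For example (a sketch; 'out' and 'msg' are placeholders), a sender that needs each message decodable as soon as it is flushed can finish the block explicitly:

import java.io.IOException;
import com.ning.compress.lzf.util.LZFFileOutputStream;

public class FinishBlockExample {
    // Sketch: complete the current block so buffered bytes can reach the
    // receiver promptly, at some cost in compression ratio (smaller blocks).
    static void sendNow(LZFFileOutputStream out, byte[] msg) throws IOException {
        out.write(msg);
        out.finishBlock();
        out.flush();
    }
}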
/*
///////////////////////////////////////////////////////////////////////
// Internal methods
///////////////////////////////////////////////////////////////////////
*/
/**
* Compress and write the current block to the OutputStream
*/
protected void writeCompressedBlock() throws IOException
{
int left = _position;
_position = 0;
int offset = 0;
while (left > 0) {
int chunkLen = Math.min(LZFChunk.MAX_CHUNK_LEN, left);
_encoder.encodeAndWriteChunk(_outputBuffer, offset, chunkLen, _wrapper);
offset += chunkLen;
left -= chunkLen;
}
}
protected void rawWrite(byte[] buffer, int offset, int length) throws IOException
{
super.write(buffer, offset, length);
}
protected void checkNotClosed() throws IOException
{
if (_outputStreamClosed) {
throw new IOException(getClass().getName()+" already closed");
}
}
/*
///////////////////////////////////////////////////////////////////////
// Helper class(es)
///////////////////////////////////////////////////////////////////////
*/
/**
 * This simple wrapper is needed to re-route write calls so that they will
 * use "raw" writes.
*/
private final class Wrapper extends OutputStream
{
@Override
public void write(int arg0) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public void write(byte[] buffer, int offset, int length) throws IOException
{
rawWrite(buffer, offset, length);
}
}
}
compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/util/package-info.java 0000664 0000000 0000000 00000000144 12373557273 0030744 0 ustar 00root root 0000000 0000000 /**
Package that contains helper classes used by the LZF codec.
*/
package com.ning.compress.lzf.util;
compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/package-info.java 0000664 0000000 0000000 00000000206 12373557273 0027173 0 ustar 00root root 0000000 0000000 /**
Package that contains the part of the public API that is shared between
all the different compression codecs.
*/
package com.ning.compress;
compress-compress-lzf-1.0.3/src/main/resources/ 0000775 0000000 0000000 00000000000 12373557273 0021533 5 ustar 00root root 0000000 0000000 compress-compress-lzf-1.0.3/src/main/resources/META-INF/ 0000775 0000000 0000000 00000000000 12373557273 0022673 5 ustar 00root root 0000000 0000000 compress-compress-lzf-1.0.3/src/main/resources/META-INF/LICENSE 0000664 0000000 0000000 00000000457 12373557273 0023706 0 ustar 00root root 0000000 0000000 This copy of Compress-LZF library is licensed under the
Apache (Software) License, version 2.0 ("the License").
See the License for details about distribution rights, and the
specific rights regarding derivate works.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
compress-compress-lzf-1.0.3/src/main/resources/META-INF/NOTICE 0000664 0000000 0000000 00000001447 12373557273 0023605 0 ustar 00root root 0000000 0000000 # Compress LZF
This library contains an efficient implementation of the LZF compression format,
as well as additional helper classes that build on the JDK-provided gzip (deflate)
codec.
## Licensing
Library is licensed under Apache License 2.0, as per accompanying LICENSE file.
## Credit
Library has been written by Tatu Saloranta (tatu.saloranta@iki.fi).
It was started at Ning, Inc., as part of an official Open Source process used by
the platform backend, but after the initial versions it has been developed outside
of Ning by the supporting community.
Other contributors include:
* Jon Hartlaub (first versions of streaming reader/writer; unit tests)
* Cedrik Lime: parallel LZF implementation
Various community members have contributed bug reports and suggested minor
fixes; these can be found in the file "VERSION.txt" in SCM.
compress-compress-lzf-1.0.3/src/test/ 0000775 0000000 0000000 00000000000 12373557273 0017554 5 ustar 00root root 0000000 0000000 compress-compress-lzf-1.0.3/src/test/java/ 0000775 0000000 0000000 00000000000 12373557273 0020475 5 ustar 00root root 0000000 0000000 compress-compress-lzf-1.0.3/src/test/java/com/ 0000775 0000000 0000000 00000000000 12373557273 0021253 5 ustar 00root root 0000000 0000000 compress-compress-lzf-1.0.3/src/test/java/com/ning/ 0000775 0000000 0000000 00000000000 12373557273 0022206 5 ustar 00root root 0000000 0000000 compress-compress-lzf-1.0.3/src/test/java/com/ning/compress/ 0000775 0000000 0000000 00000000000 12373557273 0024041 5 ustar 00root root 0000000 0000000 compress-compress-lzf-1.0.3/src/test/java/com/ning/compress/BaseForTests.java 0000664 0000000 0000000 00000004424 12373557273 0027254 0 ustar 00root root 0000000 0000000 package com.ning.compress;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Random;
import com.ning.compress.lzf.LZFDecoder;
import com.ning.compress.lzf.LZFEncoder;
import com.ning.compress.lzf.LZFException;
public class BaseForTests
{
private final static byte[] ABCD = new byte[] { 'a', 'b', 'c', 'd' };
protected byte[] constructFluff(int length)
{
Random rnd = new Random(length);
ByteArrayOutputStream bytes = new ByteArrayOutputStream(length + 100);
while (bytes.size() < length) {
int num = rnd.nextInt();
switch (num & 3) {
case 0:
try {
bytes.write(ABCD);
} catch (IOException e) {
throw new RuntimeException(e);
}
break;
case 1:
bytes.write(num);
break;
default:
bytes.write((num >> 3) & 0x7);
break;
}
}
return bytes.toByteArray();
}
protected byte[] constructUncompressable(int length)
{
byte[] result = new byte[length];
Random rnd = new Random(length);
        // SecureRandom is "more random", but not reproducible, so use default instead:
// SecureRandom.getInstance("SHA1PRNG").nextBytes(result);
rnd.nextBytes(result);
return result;
}
protected byte[] readAll(InputStream in) throws IOException
{
ByteArrayOutputStream bytes = new ByteArrayOutputStream(1024);
byte[] buf = new byte[1024];
int count;
while ((count = in.read(buf)) > 0) {
bytes.write(buf, 0, count);
}
in.close();
return bytes.toByteArray();
}
protected byte[] compress(byte[] input) {
return LZFEncoder.encode(input);
}
protected byte[] compress(byte[] input, int offset, int len) {
return LZFEncoder.encode(input, offset, len);
}
protected byte[] uncompress(byte[] input) throws LZFException {
return LZFDecoder.safeDecode(input);
}
protected byte[] uncompress(byte[] input, int offset, int len) throws LZFException {
return LZFDecoder.safeDecode(input, offset, len);
}
}
compress-compress-lzf-1.0.3/src/test/java/com/ning/compress/gzip/ 0000775 0000000 0000000 00000000000 12373557273 0025012 5 ustar 00root root 0000000 0000000 compress-compress-lzf-1.0.3/src/test/java/com/ning/compress/gzip/TestGzipStreams.java 0000664 0000000 0000000 00000003772 12373557273 0030776 0 ustar 00root root 0000000 0000000 package com.ning.compress.gzip;
import java.io.*;
import java.util.zip.*;
import org.junit.Assert;
import org.testng.annotations.Test;
import com.ning.compress.BaseForTests;
public class TestGzipStreams extends BaseForTests
{
private final static String INPUT_STR = "Some somewhat short text string -- but enough repetition to overcome shortness of input";
private final static byte[] INPUT_BYTES;
static {
try {
INPUT_BYTES = INPUT_STR.getBytes("UTF-8");
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@Test
public void testReusableInputStreams() throws IOException
{
// Create known good gzip via JDK
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
GZIPOutputStream comp = new GZIPOutputStream(bytes);
comp.write(INPUT_BYTES);
comp.close();
// then decode with 'our' thing, twice:
byte[] raw = bytes.toByteArray();
OptimizedGZIPInputStream re = new OptimizedGZIPInputStream(new ByteArrayInputStream(raw));
byte[] b = _readAll(re);
Assert.assertArrayEquals(INPUT_BYTES, b);
}
@Test
public void testReusableOutputStreams() throws IOException
{
// first use custom stream
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
OptimizedGZIPOutputStream re = new OptimizedGZIPOutputStream(bytes);
re.write(INPUT_BYTES);
re.close();
byte[] raw = bytes.toByteArray();
byte[] b = _readAll(new GZIPInputStream(new ByteArrayInputStream(raw)));
Assert.assertArrayEquals(INPUT_BYTES, b);
}
private byte[] _readAll(InputStream in) throws IOException
{
byte[] buffer = new byte[1000];
ByteArrayOutputStream bytes = new ByteArrayOutputStream(1000);
int count;
while ((count = in.read(buffer)) > 0) {
bytes.write(buffer, 0, count);
}
in.close();
return bytes.toByteArray();
}
}
compress-compress-lzf-1.0.3/src/test/java/com/ning/compress/gzip/TestGzipUncompressor.java 0000664 0000000 0000000 00000007645 12373557273 0032062 0 ustar 00root root 0000000 0000000 package com.ning.compress.gzip;
import java.io.*;
import java.util.Random;
import org.junit.Assert;
import org.testng.annotations.Test;
import com.ning.compress.BaseForTests;
import com.ning.compress.DataHandler;
import com.ning.compress.UncompressorOutputStream;
public class TestGzipUncompressor extends BaseForTests
{
@Test
public void testSimpleSmall1by1() throws IOException
{
byte[] fluff = constructFluff(4000);
byte[] comp = gzipAll(fluff);
Collector co = new Collector();
GZIPUncompressor uncomp = new GZIPUncompressor(co);
for (int i = 0, end = comp.length; i < end; ++i) {
uncomp.feedCompressedData(comp, i, 1);
}
uncomp.complete();
byte[] result = co.getBytes();
Assert.assertArrayEquals(fluff, result);
}
@Test
public void testSimpleSmallAsChunk() throws IOException
{
byte[] fluff = constructFluff(4000);
byte[] comp = gzipAll(fluff);
// and then uncompress, first byte by bytes
Collector co = new Collector();
GZIPUncompressor uncomp = new GZIPUncompressor(co);
uncomp.feedCompressedData(comp, 0, comp.length);
uncomp.complete();
byte[] result = co.getBytes();
Assert.assertArrayEquals(fluff, result);
}
@Test
public void testSimpleBiggerVarLength() throws IOException
{
byte[] fluff = constructFluff(190000);
byte[] comp = gzipAll(fluff);
// and then uncompress with arbitrary-sized blocks...
Random rnd = new Random(123);
Collector co = new Collector();
GZIPUncompressor uncomp = new GZIPUncompressor(co);
for (int i = 0, end = comp.length; i < end; ) {
int size = Math.min(end-i, 1+rnd.nextInt(7));
uncomp.feedCompressedData(comp, i, size);
i += size;
}
uncomp.complete();
byte[] result = co.getBytes();
Assert.assertArrayEquals(fluff, result);
}
@Test
public void testSimpleBiggerOneChunk() throws IOException
{
byte[] fluff = constructFluff(275000);
byte[] comp = gzipAll(fluff);
// and then uncompress in one chunk
Collector co = new Collector();
GZIPUncompressor uncomp = new GZIPUncompressor(co);
uncomp.feedCompressedData(comp, 0, comp.length);
uncomp.complete();
byte[] result = co.getBytes();
Assert.assertArrayEquals(fluff, result);
}
@Test
public void testSimpleBiggerAsStream() throws IOException
{
byte[] fluff = constructFluff(277000);
byte[] comp = gzipAll(fluff);
Collector co = new Collector();
UncompressorOutputStream out = new UncompressorOutputStream(new GZIPUncompressor(co));
out.write(comp, 0, comp.length);
out.close();
byte[] result = co.getBytes();
Assert.assertArrayEquals(fluff, result);
}
/*
///////////////////////////////////////////////////////////////////////
// Helper methods
///////////////////////////////////////////////////////////////////////
*/
private byte[] gzipAll(byte[] input) throws IOException
{
        ByteArrayOutputStream bytes = new ByteArrayOutputStream(16 + (input.length >> 2));
OptimizedGZIPOutputStream gz = new OptimizedGZIPOutputStream(bytes);
gz.write(input);
gz.close();
return bytes.toByteArray();
}
private final static class Collector implements DataHandler
{
private final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
@Override
public boolean handleData(byte[] buffer, int offset, int len) throws IOException {
bytes.write(buffer, offset, len);
return true;
}
@Override
public void allDataHandled() throws IOException { }
public byte[] getBytes() { return bytes.toByteArray(); }
}
}
compress-compress-lzf-1.0.3/src/test/java/com/ning/compress/lzf/ 0000775 0000000 0000000 00000000000 12373557273 0024634 5 ustar 00root root 0000000 0000000 compress-compress-lzf-1.0.3/src/test/java/com/ning/compress/lzf/TestLZFCompressingInputStream.java 0000664 0000000 0000000 00000002461 12373557273 0033403 0 ustar 00root root 0000000 0000000 package com.ning.compress.lzf;
import java.io.*;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.ning.compress.BaseForTests;
public class TestLZFCompressingInputStream extends BaseForTests
{
@Test
public void testSimpleCompression() throws IOException
{
// produce multiple chunks, about 3 here:
byte[] source = constructFluff(140000);
LZFCompressingInputStream compIn = new LZFCompressingInputStream(new ByteArrayInputStream(source));
byte[] comp = readAll(compIn);
byte[] uncomp = uncompress(comp);
Assert.assertEquals(uncomp, source);
// and then check that size is about same as with static methods
byte[] comp2 = compress(source);
Assert.assertEquals(comp2.length, comp.length);
}
@Test
public void testSimpleNonCompressed() throws IOException
{
// produce two chunks as well
byte[] source = this.constructUncompressable(89000);
LZFCompressingInputStream compIn = new LZFCompressingInputStream(new ByteArrayInputStream(source));
byte[] comp = readAll(compIn);
// 2 non-compressed chunks with headers:
Assert.assertEquals(comp.length, 89000 + 5 + 5);
byte[] uncomp = uncompress(comp);
Assert.assertEquals(uncomp, source);
}
}
compress-compress-lzf-1.0.3/src/test/java/com/ning/compress/lzf/TestLZFDecoder.java 0000664 0000000 0000000 00000006263 12373557273 0030267 0 ustar 00root root 0000000 0000000 package com.ning.compress.lzf;
import java.io.*;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.ning.compress.BaseForTests;
import com.ning.compress.lzf.util.ChunkDecoderFactory;
public class TestLZFDecoder extends BaseForTests
{
@Test
public void testSimple() throws IOException {
_testSimple(ChunkDecoderFactory.safeInstance());
_testSimple(ChunkDecoderFactory.optimalInstance());
}
@Test
public void testLonger() throws IOException {
_testLonger(ChunkDecoderFactory.safeInstance());
_testLonger(ChunkDecoderFactory.optimalInstance());
}
@Test
public void testChunks() throws IOException {
_testChunks(ChunkDecoderFactory.safeInstance());
_testChunks(ChunkDecoderFactory.optimalInstance());
}
/*
///////////////////////////////////////////////////////////////////////
// Second-level test methods
///////////////////////////////////////////////////////////////////////
*/
private void _testSimple(ChunkDecoder decoder) throws IOException
{
byte[] orig = "Another trivial test".getBytes("UTF-8");
byte[] compressed = compress(orig);
byte[] result = decoder.decode(compressed);
Assert.assertEquals(result, orig);
// also, ensure that offset, length are passed
byte[] compressed2 = new byte[compressed.length + 4];
System.arraycopy(compressed, 0, compressed2, 2, compressed.length);
result = decoder.decode(compressed2, 2, compressed.length);
Assert.assertEquals(result, orig);
// two ways to do that as well:
result = LZFDecoder.decode(compressed2, 2, compressed.length);
Assert.assertEquals(result, orig);
}
private void _testLonger(ChunkDecoder decoder) throws IOException
{
byte[] orig = this.constructFluff(250000); // 250k
byte[] compressed = compress(orig);
byte[] result = decoder.decode(compressed);
Assert.assertEquals(result, orig);
// also, ensure that offset, length are passed
byte[] compressed2 = new byte[compressed.length + 4];
System.arraycopy(compressed, 0, compressed2, 2, compressed.length);
result = decoder.decode(compressed2, 2, compressed.length);
Assert.assertEquals(result, orig);
// two ways to do that as well:
result = LZFDecoder.decode(compressed2, 2, compressed.length);
Assert.assertEquals(result, orig);
}
private void _testChunks(ChunkDecoder decoder) throws IOException
{
byte[] orig1 = "Another trivial test".getBytes("UTF-8");
byte[] orig2 = " with some of repepepepepetitition too!".getBytes("UTF-8");
ByteArrayOutputStream out = new ByteArrayOutputStream();
out.write(orig1);
out.write(orig2);
byte[] orig = out.toByteArray();
byte[] compressed1 = compress(orig1);
byte[] compressed2 = compress(orig2);
out = new ByteArrayOutputStream();
out.write(compressed1);
out.write(compressed2);
byte[] compressed = out.toByteArray();
byte[] result = decoder.decode(compressed);
Assert.assertEquals(result, orig);
}
}
compress-compress-lzf-1.0.3/src/test/java/com/ning/compress/lzf/TestLZFEncoder.java 0000664 0000000 0000000 00000013620 12373557273 0030274 0 ustar 00root root 0000000 0000000 package com.ning.compress.lzf;
import java.io.*;
import java.util.Arrays;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.ning.compress.BaseForTests;
import com.ning.compress.lzf.util.ChunkEncoderFactory;
public class TestLZFEncoder extends BaseForTests
{
@Test
public void testSizeEstimate()
{
int max = LZFEncoder.estimateMaxWorkspaceSize(10000);
// somewhere between 103 and 105%
if (max < 10300 || max > 10500) {
Assert.fail("Expected ratio to be 1010 <= x <= 1050, was: "+max);
}
}
@Test
public void testCompressableChunksSingle() throws Exception
{
byte[] source = constructFluff(55000);
_testCompressableChunksSingle(source, ChunkEncoderFactory.safeInstance());
_testCompressableChunksSingle(source, ChunkEncoderFactory.optimalInstance());
}
private void _testCompressableChunksSingle(byte[] source, ChunkEncoder encoder) throws Exception
{
byte[] buffer = new byte[LZFEncoder.estimateMaxWorkspaceSize(source.length)];
int compLen = LZFEncoder.appendEncoded(encoder, source, 0, source.length, buffer, 0);
// and make sure we get identical compression
byte[] bufferAsBlock = Arrays.copyOf(buffer, compLen);
byte[] asBlockStd = LZFEncoder.encode(source);
Assert.assertEquals(compLen, asBlockStd.length);
Assert.assertEquals(bufferAsBlock, asBlockStd);
// then uncompress, verify
byte[] uncomp = uncompress(buffer, 0, compLen);
Assert.assertEquals(uncomp.length, source.length);
Assert.assertEquals(uncomp, source);
}
@Test
public void testCompressableChunksMulti() throws Exception
{
        // let's do a bit over 256k, to get multiple chunks
byte[] source = constructFluff(4 * 0xFFFF + 4000);
_testCompressableChunksMulti(source, ChunkEncoderFactory.safeInstance());
_testCompressableChunksMulti(source, ChunkEncoderFactory.optimalInstance());
}
private void _testCompressableChunksMulti(byte[] source, ChunkEncoder encoder) throws Exception
{
byte[] buffer = new byte[LZFEncoder.estimateMaxWorkspaceSize(source.length)];
int compLen = LZFEncoder.appendEncoded(encoder, source, 0, source.length, buffer, 0);
// and make sure we get identical compression
byte[] bufferAsBlock = Arrays.copyOf(buffer, compLen);
byte[] asBlockStd = LZFEncoder.encode(encoder, source, 0, source.length);
Assert.assertEquals(compLen, asBlockStd.length);
Assert.assertEquals(bufferAsBlock, asBlockStd);
// then uncompress, verify
byte[] uncomp = uncompress(buffer, 0, compLen);
Assert.assertEquals(uncomp.length, source.length);
Assert.assertEquals(uncomp, source);
}
@Test
public void testNonCompressableChunksSingle() throws Exception
{
byte[] source = constructUncompressable(4000);
_testNonCompressableChunksSingle(source, ChunkEncoderFactory.safeInstance());
_testNonCompressableChunksSingle(source, ChunkEncoderFactory.optimalInstance());
}
private void _testNonCompressableChunksSingle(byte[] source, ChunkEncoder encoder) throws Exception
{
byte[] buffer = new byte[LZFEncoder.estimateMaxWorkspaceSize(source.length)];
int compLen = LZFEncoder.appendEncoded(source, 0, source.length, buffer, 0);
// and make sure we get identical compression
byte[] bufferAsBlock = Arrays.copyOf(buffer, compLen);
byte[] asBlockStd = LZFEncoder.encode(encoder, source, 0, source.length);
Assert.assertEquals(compLen, asBlockStd.length);
Assert.assertEquals(bufferAsBlock, asBlockStd);
// then uncompress, verify
byte[] uncomp = uncompress(buffer, 0, compLen);
Assert.assertEquals(uncomp.length, source.length);
Assert.assertEquals(uncomp, source);
}
@Test
public void testConditionalCompression() throws Exception
{
final byte[] input = constructFluff(52000);
_testConditionalCompression(ChunkEncoderFactory.safeInstance(), input);
_testConditionalCompression(ChunkEncoderFactory.optimalInstance(), input);
}
private void _testConditionalCompression(ChunkEncoder enc, final byte[] input) throws IOException
{
// double-check expected compression ratio
byte[] comp = enc.encodeChunk(input, 0, input.length).getData();
int pct = (int) (100.0 * comp.length / input.length);
// happens to compress to about 61%, good
Assert.assertEquals(pct, 61);
// should be ok if we only require down to 70% compression
byte[] buf = new byte[60000];
int offset = enc.appendEncodedIfCompresses(input, 0.70, 0, input.length, buf, 0);
Assert.assertEquals(offset, comp.length);
// but not to 60%
offset = enc.appendEncodedIfCompresses(input, 0.60, 0, input.length, buf, 0);
Assert.assertEquals(offset, -1);
// // // Second part: OutputStream alternatives
ByteArrayOutputStream bytes = new ByteArrayOutputStream(60000);
Assert.assertTrue(enc.encodeAndWriteChunkIfCompresses(input, 0, input.length, bytes, 0.70));
Assert.assertEquals(comp.length, bytes.size());
byte[] output = bytes.toByteArray();
Assert.assertEquals(output, comp);
bytes = new ByteArrayOutputStream(60000);
Assert.assertFalse(enc.encodeAndWriteChunkIfCompresses(input, 0, input.length, bytes, 0.60));
Assert.assertEquals(0, bytes.size());
// // // Third part: chunk creation
LZFChunk chunk = enc.encodeChunkIfCompresses(input, 0, input.length, 0.70);
Assert.assertNotNull(chunk);
Assert.assertEquals(chunk.length(), comp.length);
Assert.assertEquals(chunk.getData(), comp);
chunk = enc.encodeChunkIfCompresses(input, 0, input.length, 0.60);
Assert.assertNull(chunk);
}
}
compress-compress-lzf-1.0.3/src/test/java/com/ning/compress/lzf/TestLZFInputStream.java 0000664 0000000 0000000 00000021241 12373557273 0031166 0 ustar 00root root 0000000 0000000 package com.ning.compress.lzf;
import java.io.*;
import java.util.Random;
import java.security.SecureRandom;
import org.testng.Assert;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.Test;
import com.ning.compress.BaseForTests;
public class TestLZFInputStream extends BaseForTests
{
private static int BUFFER_SIZE = LZFChunk.MAX_CHUNK_LEN * 64;
private byte[] nonEncodableBytesToWrite = new byte[BUFFER_SIZE];
private byte[] bytesToWrite = new byte[BUFFER_SIZE];
private byte[] nonCompressableBytes;
private int compressableInputLength = BUFFER_SIZE;
private byte[] compressedBytes;
@BeforeTest(alwaysRun = true)
public void setUp() throws Exception
{
SecureRandom.getInstance("SHA1PRNG").nextBytes(nonEncodableBytesToWrite);
String phrase = "all work and no play make Jack a dull boy";
byte[] bytes = phrase.getBytes();
int cursor = 0;
while(cursor <= bytesToWrite.length) {
System.arraycopy(bytes, 0, bytesToWrite, cursor, (bytes.length+cursor < bytesToWrite.length)?bytes.length:bytesToWrite.length-cursor);
cursor += bytes.length;
}
ByteArrayOutputStream nonCompressed = new ByteArrayOutputStream();
OutputStream os = new LZFOutputStream(nonCompressed);
os.write(nonEncodableBytesToWrite);
os.close();
nonCompressableBytes = nonCompressed.toByteArray();
ByteArrayOutputStream compressed = new ByteArrayOutputStream();
os = new LZFOutputStream(compressed);
os.write(bytesToWrite);
os.close();
compressedBytes = compressed.toByteArray();
}
@Test
public void testDecompressNonEncodableReadByte() throws IOException {
doDecompressReadByte(nonCompressableBytes, nonEncodableBytesToWrite);
}
@Test
public void testDecompressNonEncodableReadBlock() throws IOException {
doDecompressReadBlock(nonCompressableBytes, nonEncodableBytesToWrite);
}
@Test
public void testDecompressEncodableReadByte() throws IOException {
doDecompressReadByte(compressedBytes, bytesToWrite);
}
@Test
public void testDecompressEncodableReadBlock() throws IOException {
doDecompressReadBlock(compressedBytes, bytesToWrite);
}
@Test
public void testRead0() throws IOException
{
ByteArrayInputStream bis = new ByteArrayInputStream(compressedBytes);
InputStream is = new LZFInputStream(bis);
Assert.assertEquals(0, is.available());
byte[] buffer = new byte[65536+23];
int val = is.read(buffer, 0, 0);
// read of 0 or less should return a 0-byte read.
Assert.assertEquals(0, val);
val = is.read(buffer, 0, -1);
Assert.assertEquals(0, val);
// close should work.
is.close();
}
@Test
public void testAvailable() throws IOException
{
ByteArrayInputStream bis = new ByteArrayInputStream(compressedBytes);
LZFInputStream is = new LZFInputStream(bis);
Assert.assertSame(is.getUnderlyingInputStream(), bis);
Assert.assertEquals(0, is.available());
        // read one byte; should decode a bunch more and make it available
is.read();
int total = 1; // since we read one byte already
Assert.assertEquals(is.available(), 65534);
// and after we skip through all of it, end with -1 for EOF
long count;
while ((count = is.skip(16384L)) > 0L) {
total += (int) count;
}
// nothing more available; but we haven't yet closed so:
Assert.assertEquals(is.available(), 0);
// and then we close it:
is.close();
Assert.assertEquals(is.available(), 0);
Assert.assertEquals(total, compressableInputLength);
}
@Test void testIncrementalWithFullReads() throws IOException {
doTestIncremental(true);
}
@Test void testIncrementalWithMinimalReads() throws IOException {
doTestIncremental(false);
}
@Test
public void testReadAndWrite() throws Exception
{
byte[] fluff = constructFluff(132000);
byte[] comp = LZFEncoder.encode(fluff);
ByteArrayOutputStream bytes = new ByteArrayOutputStream(fluff.length);
LZFInputStream in = new LZFInputStream(new ByteArrayInputStream(comp));
in.readAndWrite(bytes);
in.close();
byte[] actual = bytes.toByteArray();
Assert.assertEquals(actual.length, fluff.length);
Assert.assertEquals(actual, fluff);
}
// Mostly for [Issue#19]
@Test
public void testLongSkips() throws Exception
{
// 64k per block, 200k gives 3 full, one small
byte[] fluff = constructFluff(200000);
byte[] comp = LZFEncoder.encode(fluff);
// we get about 200k, maybe byte or two more, so:
final int LENGTH = fluff.length;
LZFInputStream in = new LZFInputStream(new ByteArrayInputStream(comp));
// read one byte for fun
Assert.assertEquals(in.read(), fluff[0] & 0xFF);
// then skip all but one
long amt = in.skip(LENGTH-2);
Assert.assertEquals(amt, (long) (LENGTH-2));
Assert.assertEquals(in.read(), fluff[LENGTH-1] & 0xFF);
Assert.assertEquals(in.read(), -1);
in.close();
}
/*
///////////////////////////////////////////////////////////////////
// Helper methods
///////////////////////////////////////////////////////////////////
*/
/**
* Test that creates a longer piece of content, compresses it, and reads
* back in arbitrary small reads.
*/
private void doTestIncremental(boolean fullReads) throws IOException
{
// first need to compress something...
String[] words = new String[] { "what", "ever", "some", "other", "words", "too" };
StringBuilder sb = new StringBuilder(258000);
Random rnd = new Random(123);
while (sb.length() < 256000) {
int i = (rnd.nextInt() & 31);
if (i < words.length) {
sb.append(words[i]);
} else {
sb.append(i);
}
}
byte[] uncomp = sb.toString().getBytes("UTF-8");
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
LZFOutputStream lzOut = new LZFOutputStream(bytes);
lzOut.write(uncomp);
lzOut.close();
byte[] comp = bytes.toByteArray();
// read back, in chunks
bytes = new ByteArrayOutputStream(uncomp.length);
byte[] buffer = new byte[500];
LZFInputStream lzIn = new LZFInputStream(new ByteArrayInputStream(comp), fullReads);
int pos = 0;
while (true) {
int len = 1 + ((rnd.nextInt() & 0x7FFFFFFF) % buffer.length);
int offset = buffer.length - len;
int count = lzIn.read(buffer, offset, len);
if (count < 0) {
break;
}
if (count > len) {
Assert.fail("Requested "+len+" bytes (offset "+offset+", array length "+buffer.length+"), got "+count);
}
pos += count;
// with full reads, ought to get full results
if (count != len) {
if (fullReads) {
// Except at the end, with last incomplete chunk
if (pos != uncomp.length) {
Assert.fail("Got partial read (when requested full read!), position "+pos+" (of full "+uncomp.length+")");
}
}
}
bytes.write(buffer, offset, count);
}
byte[] result = bytes.toByteArray();
Assert.assertEquals(result.length, uncomp.length);
Assert.assertEquals(result, uncomp);
lzIn.close();
}
private void doDecompressReadByte(byte[] bytes, byte[] reference) throws IOException
{
ByteArrayInputStream bis = new ByteArrayInputStream(bytes);
InputStream is = new LZFInputStream(bis);
int i = 0;
int testVal = 0;
while((testVal=is.read()) != -1) {
int rVal = ((int)reference[i]) & 255;
Assert.assertEquals(rVal, testVal);
++i;
}
is.close();
}
private void doDecompressReadBlock(byte[] bytes, byte[] reference) throws IOException
{
ByteArrayInputStream bis = new ByteArrayInputStream(bytes);
int outputBytes = 0;
InputStream is = new LZFInputStream(bis);
int val;
byte[] buffer = new byte[65536+23];
while((val=is.read(buffer)) != -1) {
for(int i = 0; i < val; i++) {
byte testVal = buffer[i];
Assert.assertTrue(testVal == reference[outputBytes]);
++outputBytes;
}
}
Assert.assertTrue(outputBytes == reference.length);
is.close();
}
}
compress-compress-lzf-1.0.3/src/test/java/com/ning/compress/lzf/TestLZFOutputStream.java 0000664 0000000 0000000 00000010265 12373557273 0031373 0 ustar 00root root 0000000 0000000 package com.ning.compress.lzf;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.OutputStream;
import org.testng.Assert;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.Test;
import com.ning.compress.BaseForTests;
public class TestLZFOutputStream extends BaseForTests
{
private static int BUFFER_SIZE = LZFChunk.MAX_CHUNK_LEN * 64;
private byte[] nonEncodableBytesToWrite;
private byte[] bytesToWrite;
@BeforeTest(alwaysRun = true)
public void setUp() throws Exception
{
nonEncodableBytesToWrite = constructUncompressable(BUFFER_SIZE);
String phrase = "all work and no play make Jack a dull boy";
bytesToWrite = new byte[BUFFER_SIZE];
byte[] bytes = phrase.getBytes();
int cursor = 0;
while(cursor <= bytesToWrite.length) {
System.arraycopy(bytes, 0, bytesToWrite, cursor, (bytes.length+cursor < bytesToWrite.length)?bytes.length:bytesToWrite.length-cursor);
cursor += bytes.length;
}
}
@Test
public void testUnencodable() throws Exception
{
ByteArrayOutputStream bos = new ByteArrayOutputStream();
OutputStream os = new LZFOutputStream(bos);
os.write(nonEncodableBytesToWrite);
os.close();
Assert.assertTrue(bos.toByteArray().length > nonEncodableBytesToWrite.length);
verifyOutputStream(bos, nonEncodableBytesToWrite);
}
@Test
public void testStreaming() throws Exception
{
ByteArrayOutputStream bos = new ByteArrayOutputStream();
OutputStream os = new LZFOutputStream(bos);
os.write(bytesToWrite);
os.close();
int len = bos.toByteArray().length;
int max = bytesToWrite.length/2;
if (len <= 10 || len >= max) {
Assert.fail("Sanity check: should have 10 < len < "+max+"; len = "+len);
}
verifyOutputStream(bos, bytesToWrite);
}
@Test
public void testSingleByte() throws Exception
{
ByteArrayOutputStream bos = new ByteArrayOutputStream();
OutputStream os = new LZFOutputStream(bos);
int idx = 0;
for(; idx < BUFFER_SIZE; idx++) {
os.write(bytesToWrite[idx]);
if(idx % 1023 == 0 && idx > BUFFER_SIZE/2) {
os.flush();
}
}
os.close();
int len = bos.toByteArray().length;
int max = bytesToWrite.length/2;
if (len <= 10 || len >= max) {
Assert.fail("Sanity check: should have 10 < len < "+max+"; len = "+len);
}
verifyOutputStream(bos, bytesToWrite);
}
@Test
public void testPartialBuffer() throws Exception
{
int offset = 255;
int len = 1<<17;
ByteArrayOutputStream bos = new ByteArrayOutputStream();
OutputStream os = new LZFOutputStream(bos);
os.write(bytesToWrite, offset, len);
os.close();
Assert.assertTrue(bos.toByteArray().length > 10);
Assert.assertTrue(bos.toByteArray().length < bytesToWrite.length*.5);
int bytesToCopy = Math.min(len, bytesToWrite.length);
byte[] compareBytes = new byte[bytesToCopy];
System.arraycopy(bytesToWrite, offset, compareBytes, 0, bytesToCopy);
verifyOutputStream(bos, compareBytes);
}
@Test
public void testEmptyBuffer() throws Exception
{
byte[] input = new byte[0];
ByteArrayOutputStream bos = new ByteArrayOutputStream();
OutputStream os = new LZFOutputStream(bos);
os.write(input);
os.close();
int len = bos.toByteArray().length;
if (len != 0) {
Assert.fail("Sanity check: should have len == 0; len = "+len);
}
verifyOutputStream(bos, input);
}
private void verifyOutputStream(ByteArrayOutputStream bos, byte[] reference) throws Exception
{
ByteArrayInputStream bis = new ByteArrayInputStream(bos.toByteArray());
LZFInputStream lzfi = new LZFInputStream(bis);
int val =0;
int idx = 0;
while((val = lzfi.read()) != -1) {
int refVal = ((int)reference[idx++]) & 255;
Assert.assertEquals(refVal, val);
}
lzfi.close();
}
}
compress-compress-lzf-1.0.3/src/test/java/com/ning/compress/lzf/TestLZFRoundTrip.java 0000664 0000000 0000000 00000015111 12373557273 0030640 0 ustar 00root root 0000000 0000000 package com.ning.compress.lzf;
import java.io.*;
import java.util.Arrays;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.ning.compress.BaseForTests;
import com.ning.compress.lzf.LZFChunk;
import com.ning.compress.lzf.impl.UnsafeChunkDecoder;
import com.ning.compress.lzf.impl.VanillaChunkDecoder;
import com.ning.compress.lzf.util.ChunkEncoderFactory;
public class TestLZFRoundTrip
{
private final static String[] FILES = {
"/shakespeare.tar",
"/shakespeare/hamlet.xml",
"/shakespeare/macbeth.xml",
"/shakespeare/play.dtd",
"/shakespeare/r_and_j.xml"
,"/binary/help.bin"
,"/binary/word.doc"
};
@Test
public void testVanillaCodec() throws Exception
{
_testUsingBlock(new VanillaChunkDecoder());
_testUsingReader(new VanillaChunkDecoder());
}
@Test
public void testUnsafeCodec() throws IOException
{
_testUsingBlock(new UnsafeChunkDecoder());
_testUsingReader(new UnsafeChunkDecoder());
}
@Test
public void testLZFCompressionOnTestFiles() throws IOException {
for (int i = 0; i < 100; i++) {
testLZFCompressionOnDir(new File("src/test/resources/shakespeare"));
}
}
private void testLZFCompressionOnDir(File dir) throws IOException
{
File[] files = dir.listFiles();
for (int i = 0; i < files.length; i++) {
File file = files[i];
if (!file.isDirectory()) {
testLZFCompressionOnFile(file);
} else {
testLZFCompressionOnDir(file);
}
}
}
private void testLZFCompressionOnFile(File file) throws IOException
{
final ChunkDecoder decoder = new UnsafeChunkDecoder();
        // use a temp file rather than a hardcoded /tmp path, for portability
        File compressedFile = File.createTempFile("test", ".lzf");
        compressedFile.deleteOnExit();
InputStream in = new BufferedInputStream(new FileInputStream(file));
OutputStream out = new LZFOutputStream(new BufferedOutputStream(
new FileOutputStream(compressedFile)));
byte[] buf = new byte[64 * 1024];
int len;
while ((len = in.read(buf, 0, buf.length)) >= 0) {
out.write(buf, 0, len);
}
in.close();
out.close();
// decompress and verify bytes haven't changed
in = new BufferedInputStream(new FileInputStream(file));
DataInputStream compressedIn = new DataInputStream(new LZFInputStream(decoder,
new FileInputStream(compressedFile), false));
while ((len = in.read(buf, 0, buf.length)) >= 0) {
byte[] buf2 = new byte[len];
compressedIn.readFully(buf2, 0, len);
byte[] trimmedBuf = new byte[len];
System.arraycopy(buf, 0, trimmedBuf, 0, len);
Assert.assertEquals(trimmedBuf, buf2);
}
Assert.assertEquals(-1, compressedIn.read());
in.close();
compressedIn.close();
}
@Test
public void testHashCollision() throws IOException
{
        // This test generates a hash collision: [0,1,153,64] hashes the same as [1,153,64,64],
        // and then leverages the bug s/inPos/0/ to corrupt the array.
        // The first array inserts a reference for this hash at offset 6; the hash table is
        // then reused and still claims a match at position 6. At position 7 the encoder
        // finds a sequence with the same hash, so it inserts a bogus back-reference.
final byte[] b1 = new byte[] {0,1,2,3,4,(byte)153,64,64,64,9,9,9,9,9,9,9,9,9,9};
final byte[] b2 = new byte[] {1,(byte)153,0,0,0,0,(byte)153,64,64,64,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
final int off = 6;
ChunkEncoder encoder = ChunkEncoderFactory.safeInstance();
ChunkDecoder decoder = new VanillaChunkDecoder();
_testCollision(encoder, decoder, b1, 0, b1.length);
_testCollision(encoder, decoder, b2, off, b2.length - off);
encoder = ChunkEncoderFactory.optimalInstance();
decoder = new UnsafeChunkDecoder();
_testCollision(encoder, decoder, b1, 0, b1.length);
_testCollision(encoder, decoder, b2, off, b2.length - off);
}
private void _testCollision(ChunkEncoder encoder, ChunkDecoder decoder, byte[] bytes, int offset, int length) throws IOException
{
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
byte[] expected = new byte[length];
byte[] buffer = new byte[LZFChunk.MAX_CHUNK_LEN];
byte[] output = new byte[length];
System.arraycopy(bytes, offset, expected, 0, length);
encoder.encodeAndWriteChunk(bytes, offset, length, outputStream);
InputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray());
Assert.assertEquals(decoder.decodeChunk(inputStream, buffer, output), length);
Assert.assertEquals(expected, output);
}
/*
///////////////////////////////////////////////////////////////////////
// Helper method
///////////////////////////////////////////////////////////////////////
*/
protected void _testUsingBlock(ChunkDecoder decoder) throws IOException
{
for (String name : FILES) {
byte[] data = readResource(name);
byte[] lzf = LZFEncoder.encode(data);
byte[] decoded = decoder.decode(lzf);
Assert.assertEquals(decoded.length, data.length);
Assert.assertEquals(decoded, data,
String.format("File '%s', %d->%d bytes", name, data.length, lzf.length));
}
}
protected void _testUsingReader(ChunkDecoder decoder) throws IOException
{
for (String name : FILES) {
byte[] data = readResource(name);
byte[] lzf = LZFEncoder.encode(data);
LZFInputStream comp = new LZFInputStream(decoder, new ByteArrayInputStream(lzf), false);
byte[] decoded = readAll(comp);
Assert.assertEquals(decoded.length, data.length);
Assert.assertEquals(decoded, data);
}
}
protected byte[] readResource(String name) throws IOException
{
return readAll(getClass().getResourceAsStream(name));
}
protected byte[] readAll(InputStream in) throws IOException
{
Assert.assertNotNull(in);
byte[] buffer = new byte[4000];
int count;
ByteArrayOutputStream bytes = new ByteArrayOutputStream(4000);
while ((count = in.read(buffer)) > 0) {
bytes.write(buffer, 0, count);
}
in.close();
return bytes.toByteArray();
}
}
compress-compress-lzf-1.0.3/src/test/java/com/ning/compress/lzf/TestLZFUncompressor.java
package com.ning.compress.lzf;
import java.io.*;
import java.util.Random;
import org.junit.Assert;
import org.testng.annotations.Test;
import com.ning.compress.BaseForTests;
import com.ning.compress.DataHandler;
import com.ning.compress.UncompressorOutputStream;
public class TestLZFUncompressor extends BaseForTests
{
@Test
public void testSimpleSmall1by1() throws IOException
{
byte[] fluff = constructFluff(4000);
byte[] comp = LZFEncoder.encode(fluff);
Collector co = new Collector();
LZFUncompressor uncomp = new LZFUncompressor(co);
for (int i = 0, end = comp.length; i < end; ++i) {
uncomp.feedCompressedData(comp, i, 1);
}
uncomp.complete();
byte[] result = co.getBytes();
Assert.assertArrayEquals(fluff, result);
}
@Test
public void testSimpleSmallAsChunk() throws IOException
{
byte[] fluff = constructFluff(4000);
byte[] comp = LZFEncoder.encode(fluff);
// and then uncompress, first byte by bytes
Collector co = new Collector();
LZFUncompressor uncomp = new LZFUncompressor(co);
uncomp.feedCompressedData(comp, 0, comp.length);
uncomp.complete();
byte[] result = co.getBytes();
Assert.assertArrayEquals(fluff, result);
}
@Test
public void testSimpleBiggerVarLength() throws IOException
{
byte[] fluff = constructFluff(190000);
byte[] comp = LZFEncoder.encode(fluff);
// and then uncompress with arbitrary-sized blocks...
Random rnd = new Random(123);
Collector co = new Collector();
LZFUncompressor uncomp = new LZFUncompressor(co);
for (int i = 0, end = comp.length; i < end; ) {
int size = Math.min(end-i, 1+rnd.nextInt(7));
uncomp.feedCompressedData(comp, i, size);
i += size;
}
uncomp.complete();
byte[] result = co.getBytes();
Assert.assertArrayEquals(fluff, result);
}
@Test
public void testSimpleBiggerOneChunk() throws IOException
{
byte[] fluff = constructFluff(275000);
byte[] comp = LZFEncoder.encode(fluff);
// and then uncompress in one chunk
Collector co = new Collector();
LZFUncompressor uncomp = new LZFUncompressor(co);
uncomp.feedCompressedData(comp, 0, comp.length);
uncomp.complete();
byte[] result = co.getBytes();
Assert.assertArrayEquals(fluff, result);
}
@Test
public void testSimpleBiggerAsStream() throws IOException
{
byte[] fluff = constructFluff(277000);
byte[] comp = LZFEncoder.encode(fluff);
Collector co = new Collector();
UncompressorOutputStream out = new UncompressorOutputStream(new LZFUncompressor(co));
out.write(comp, 0, comp.length);
out.close();
byte[] result = co.getBytes();
Assert.assertArrayEquals(fluff, result);
}
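    /**
     * Simple {@link DataHandler} implementation that buffers all uncompressed
     * bytes pushed to it, so tests can verify the round-tripped content.
     */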
private final static class Collector implements DataHandler
{
private final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
@Override
public boolean handleData(byte[] buffer, int offset, int len) throws IOException {
bytes.write(buffer, offset, len);
return true;
}
@Override
public void allDataHandled() throws IOException { }
public byte[] getBytes() { return bytes.toByteArray(); }
}
}
compress-compress-lzf-1.0.3/src/test/java/com/ning/compress/lzf/util/TestFileStreams.java
package com.ning.compress.lzf.util;
import java.io.*;
import org.testng.annotations.Test;
import org.testng.Assert;
import com.ning.compress.BaseForTests;
public class TestFileStreams extends BaseForTests
{
@Test
public void testStreams() throws Exception
{
File f = File.createTempFile("lzf-test", ".lzf");
f.deleteOnExit();
// First, write encoded stuff (won't compress, but produces something)
byte[] input = "Whatever stuff...".getBytes("UTF-8");
LZFFileOutputStream out = new LZFFileOutputStream(f);
out.write(input);
out.close();
long len = f.length();
// happens to be 22; 17 bytes uncompressed, with 5 byte header
Assert.assertEquals(len, 22L);
LZFFileInputStream in = new LZFFileInputStream(f);
for (int i = 0; i < input.length; ++i) {
Assert.assertEquals(in.read(), input[i] & 0xFF);
}
Assert.assertEquals(in.read(), -1);
in.close();
}
@Test
public void testReadAndWrite() throws Exception
{
File f = File.createTempFile("lzf-test", ".lzf");
f.deleteOnExit();
byte[] fluff = constructFluff(132000);
LZFFileOutputStream fout = new LZFFileOutputStream(f);
fout.write(fluff);
fout.close();
LZFFileInputStream in = new LZFFileInputStream(f);
ByteArrayOutputStream bytes = new ByteArrayOutputStream(fluff.length);
in.readAndWrite(bytes);
in.close();
byte[] actual = bytes.toByteArray();
Assert.assertEquals(actual.length, fluff.length);
Assert.assertEquals(actual, fluff);
}
}
compress-compress-lzf-1.0.3/src/test/java/perf/ManualCompressComparison.java
package perf;
import java.io.*;
import java.util.zip.DeflaterOutputStream;
import com.ning.compress.gzip.OptimizedGZIPOutputStream;
import com.ning.compress.lzf.*;
/**
 * Simple manual performance micro-benchmark that compares compression speeds
 * of GZIP codecs (Ning's optimized stream vs the JDK's); LZF compression cases
 * exist in the test loop but are currently commented out.
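 *<p>
 * Hedged usage sketch (class path and file names are placeholders, not from
 * the original source):
 *<pre>
 *   java -cp target/classes:target/test-classes perf.ManualCompressComparison \
 *       /path/to/file1 /path/to/file2
 *</pre>
 * Each file is read fully into memory; the benchmark then loops indefinitely,
 * alternating codecs and printing per-round and averaged timings until killed.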
*/
public class ManualCompressComparison
{
protected int size = 0;
protected int totalSize;
protected int REPS;
// 10 megs per cycle
protected final static int PER_ITERATION_LENGTH = 10 * 1000 * 1000;
private ManualCompressComparison(int totalSize)
{
this.totalSize = totalSize;
}
private void test(String[] names, byte[][] docs, int workSize) throws Exception
{
final int DOC_COUNT = docs.length;
        final byte[] WORKSPACE = new byte[workSize];
// Let's try to guestimate suitable size... to get to 10 megs to process
// but, with more docs, give more time
REPS = Math.max(1, (int) ((double) (PER_ITERATION_LENGTH * Math.sqrt(DOC_COUNT)) / (double) totalSize));
// final int TYPES = 1;
final int TYPES = 2;
final int WARMUP_ROUNDS = 5;
int roundTotal = 0;
int roundsDone = 0;
final long[][] times = new long[DOC_COUNT][];
for (int i = 0; i < times.length; ++i) {
times[i] = new long[TYPES];
}
System.out.printf("Read %d bytes to compress, uncompress; will do %d repetitions, %.1f MB per round\n",
totalSize, REPS, (REPS * totalSize) / 1000000.0);
// But first, validate!
_preValidate(docs);
int[] msecs = new int[DOC_COUNT];
for (;; ++roundTotal) {
try { Thread.sleep(100L); } catch (InterruptedException ie) { }
int round = (roundTotal % TYPES);
String msg;
long msec;
switch (round) {
case 0:
msg = "GZIP compress/stream/NING";
msec = testGzipCompressNing(docs, msecs);
break;
case 1:
msg = "GZIP compress/stream/JDK";
msec = testGzipCompressJDK(docs, msecs);
break;
/*
case 0:
msg = "LZF-Unsafe compress/block";
msec = testLZFUnsafeCompress(docs, WORKSPACE, msecs);
break;
case 0:
msg = "LZF compress/block";
msec = testLZFSafeCompress(REPS, docs, WORKSPACE, msecs);
roundDone = true;
break;
case 1:
msg = "LZF compress/stream";
msec = testLZFUnsafeCompressStream(docs, msecs);
break;
*/
default:
throw new Error();
}
boolean roundDone = (round == 1);
// skip first 5 rounds to let results stabilize
if (roundsDone >= WARMUP_ROUNDS) {
for (int i = 0; i < DOC_COUNT; ++i) {
times[i][round] += msecs[i];
}
}
System.out.printf("Test '%s' [%d bytes] -> %d msecs\n", msg, size, msec);
if (roundDone) {
++roundsDone;
if ((roundsDone % 3) == 0 && roundsDone > WARMUP_ROUNDS) {
_printResults((roundsDone - WARMUP_ROUNDS), names, times);
}
}
if ((roundTotal % 17) == 0) {
System.out.println("[GC]");
Thread.sleep(100L);
System.gc();
Thread.sleep(100L);
}
}
}
protected void _printResults(int rounds, String[] names, long[][] timeSets)
{
System.out.printf("Averages after %d rounds:", rounds);
double den = (double) rounds;
double[] totals = null;
for (int file = 0; file < names.length; ++file) {
System.out.printf(" %s(", names[file]);
long[] times = timeSets[file];
if (totals == null) {
totals = new double[times.length];
}
for (int i = 0; i < times.length; ++i){
if (i > 0) {
System.out.print('/');
}
double msecs = times[i] / den;
System.out.printf("%.1f", msecs);
totals[i] += msecs;
}
System.out.printf(")");
}
System.out.println();
// then totals
System.out.printf(" for a total of: ");
// first, msecs
for (int i = 0; i < totals.length; ++i) {
if (i > 0) {
System.out.print('/');
}
double msecs = totals[i];
System.out.printf("%.1f", msecs);
}
System.out.print(" msecs; ");
// then throughput
for (int i = 0; i < totals.length; ++i) {
if (i > 0) {
System.out.print('/');
}
double msecs = totals[i];
double bytes = (REPS * totalSize);
// msecs-to-seconds, x1000; bytes to megabytes, /1M
System.out.printf("%.1f", (bytes / msecs) / 1000.0);
}
System.out.println(" MB/s");
}
protected void _preValidate(byte[][] inputs) throws LZFException
{
int index = 0;
for (byte[] input : inputs) {
++index;
byte[] encoded1 = LZFEncoder.encode(input);
byte[] encoded2 = LZFEncoder.safeEncode(input);
if (encoded1.length == encoded2.length) {
for (int i = 0, len = encoded1.length; i < len; ++i) {
if (encoded1[i] != encoded2[i]) {
throw new IllegalStateException("Compressed contents of entry "+index+"/"+input.length+" differ at "+i+"/"+len);
}
}
} else {
// Actually, let's allow some slack...
int diff = Math.abs(encoded1.length - encoded2.length);
// 1/256 seems fine (but at least 16)
int maxDiff = Math.max(16, encoded1.length >> 8);
if (diff > maxDiff) {
throw new IllegalStateException("Compressed contents of entry "+index+"/"+input.length+" differ by more than "+maxDiff+" bytes: expected "+encoded1.length+", got "+encoded2.length);
}
                System.err.printf("WARN: sizes differ slightly, %d vs %d (old/new)\n", encoded1.length, encoded2.length);
}
// uncompress too
byte[] output1 = LZFDecoder.decode(encoded1);
byte[] output2 = LZFDecoder.decode(encoded2);
if (output1.length != output2.length) {
throw new IllegalStateException("Uncompressed contents of entry "+index+"/"+input.length+" differ!");
}
for (int i = 0, len = output1.length; i < len; ++i) {
if (output1[i] != output2[i]) {
throw new IllegalStateException("Uncompressed contents of entry "+index+"/"+input.length+" differ at "+i+"/"+len);
}
}
}
}
protected final long testLZFSafeCompress(byte[][] inputs,
final byte[] WORKSPACE, int[] msecs) throws Exception
{
size = 0;
final long mainStart = System.currentTimeMillis();
for (int i = 0, len = inputs.length; i < len; ++i) {
final long start = System.currentTimeMillis();
int reps = REPS;
int bytes = 0;
while (--reps >= 0) {
final byte[] input = inputs[i];
bytes = LZFEncoder.safeAppendEncoded(input, 0, input.length, WORKSPACE, 0);
}
size += bytes;
msecs[i] = (int) (System.currentTimeMillis() - start);
}
return System.currentTimeMillis() - mainStart;
}
protected final long testLZFUnsafeCompress(byte[][] inputs,
final byte[] WORKSPACE, int[] msecs) throws Exception
{
size = 0;
final long mainStart = System.currentTimeMillis();
for (int i = 0, len = inputs.length; i < len; ++i) {
final long start = System.currentTimeMillis();
int reps = REPS;
int bytes = 0;
while (--reps >= 0) {
final byte[] input = inputs[i];
bytes = LZFEncoder.appendEncoded(input, 0, input.length, WORKSPACE, 0);
}
size += bytes;
msecs[i] = (int) (System.currentTimeMillis() - start);
}
return System.currentTimeMillis() - mainStart;
}
protected final long testLZFUnsafeCompressStream(byte[][] inputs, int[] msecs)
throws Exception
{
ByteArrayOutputStream bytes = new ByteArrayOutputStream(8000);
size = 0;
final long mainStart = System.currentTimeMillis();
for (int i = 0, len = inputs.length; i < len; ++i) {
bytes.reset();
final long start = System.currentTimeMillis();
int reps = REPS;
while (--reps >= 0) {
bytes.reset();
LZFOutputStream out = new LZFOutputStream(bytes);
out.write(inputs[i]);
out.close();
}
size += bytes.size();
msecs[i] = (int) (System.currentTimeMillis() - start);
}
return System.currentTimeMillis() - mainStart;
}
protected final long testGzipCompressNing(byte[][] inputs, int[] msecs) throws IOException
{
ByteArrayOutputStream bytes = new ByteArrayOutputStream(8000);
size = 0;
final long mainStart = System.currentTimeMillis();
for (int i = 0, len = inputs.length; i < len; ++i) {
bytes.reset();
final long start = System.currentTimeMillis();
int reps = REPS;
while (--reps >= 0) {
bytes.reset();
OptimizedGZIPOutputStream out = new OptimizedGZIPOutputStream(bytes);
out.write(inputs[i]);
out.close();
}
size += bytes.size();
msecs[i] = (int) (System.currentTimeMillis() - start);
}
return System.currentTimeMillis() - mainStart;
}
protected final long testGzipCompressJDK(byte[][] inputs, int[] msecs) throws IOException
{
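        // NOTE: DeflaterOutputStream produces a raw zlib/deflate stream without
        // GZIP framing (header and CRC trailer), so the JDK side here is
        // measured without that overhead.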
ByteArrayOutputStream bytes = new ByteArrayOutputStream(8000);
size = 0;
final long mainStart = System.currentTimeMillis();
for (int i = 0, len = inputs.length; i < len; ++i) {
bytes.reset();
final long start = System.currentTimeMillis();
int reps = REPS;
while (--reps >= 0) {
bytes.reset();
DeflaterOutputStream out = new DeflaterOutputStream(bytes);
out.write(inputs[i]);
out.close();
}
size += bytes.size();
msecs[i] = (int) (System.currentTimeMillis() - start);
}
return System.currentTimeMillis() - mainStart;
}
public static void main(String[] args) throws Exception
{
if (args.length < 1) {
System.err.println("Usage: java ... [file1] ... [fileN]");
System.exit(1);
}
byte[][] data = new byte[args.length][];
String[] names = new String[args.length];
int totalSize = 0;
int maxSize = 0;
for (int i = 0; i < args.length; ++i) {
File f = new File(args[i]);
names[i] = f.getName();
ByteArrayOutputStream bytes = new ByteArrayOutputStream((int) f.length());
byte[] buffer = new byte[4000];
int count;
FileInputStream in = new FileInputStream(f);
while ((count = in.read(buffer)) > 0) {
bytes.write(buffer, 0, count);
}
in.close();
data[i] = bytes.toByteArray();
final int len = data[i].length;
totalSize += len;
maxSize = Math.max(maxSize, LZFEncoder.estimateMaxWorkspaceSize(len));
}
new ManualCompressComparison(totalSize).test(names, data, maxSize);
}
protected final static class BogusOutputStream extends OutputStream
{
protected int _bytes;
@Override public void write(byte[] buf) { write(buf, 0, buf.length); }
@Override public void write(byte[] buf, int offset, int len) {
_bytes += len;
}
@Override
public void write(int b) throws IOException {
_bytes++;
}
public int length() { return _bytes; }
public void reset() { _bytes = 0; }
}
}
compress-compress-lzf-1.0.3/src/test/java/perf/ManualSkipComparison.java
package perf;
import java.io.*;
import com.ning.compress.lzf.*;
import com.ning.compress.lzf.util.LZFFileInputStream;
import com.ning.compress.lzf.util.LZFFileOutputStream;
/**
* Micro-benchmark for testing performance of skip alternatives.
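 *<p>
 * The two alternatives compared are {@code LZFInputStream.skip()} (wrapping a
 * plain FileInputStream; the "old" case) and {@code LZFFileInputStream.skip()}
 * (the "NEW" case); see testSkip() below.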
*/
public class ManualSkipComparison
{
private int size = 0;
private void test(File file, int origSize) throws Exception
{
// Let's try to guestimate suitable size... to get to 50 megs to process
final int REPS = (int) ((double) (50 * 1000 * 1000) / (double) file.length());
System.out.printf("Skipping %d bytes of compressed data, %d reps.\n",
file.length(), REPS);
int i = 0;
while (true) {
try { Thread.sleep(100L); } catch (InterruptedException ie) { }
int round = (i++ % 2);
String msg;
boolean lf = (round == 0);
long msecs;
switch (round) {
case 0:
msg = "LZF skip/old";
msecs = testSkip(REPS, file, false);
break;
case 1:
msg = "LZF skip/NEW";
msecs = testSkip(REPS, file, true);
break;
default:
throw new Error();
}
if (lf) {
System.out.println();
}
System.out.println("Test '"+msg+"' ["+size+" bytes] -> "+msecs+" msecs");
if (size != origSize) { // sanity check
throw new Error("Wrong skip count!!!");
}
}
}
private final long testSkip(int REPS, File file, boolean newSkip) throws Exception
{
long start = System.currentTimeMillis();
long len = -1L;
// final byte[] buffer = new byte[16000];
while (--REPS >= 0) {
InputStream in = newSkip ? new LZFFileInputStream(file)
: new LZFInputStream(new FileInputStream(file));
len = 0;
long skipped;
while ((skipped = in.skip(Integer.MAX_VALUE)) >= 0L) {
len += skipped;
}
in.close();
}
size = (int) len;
return System.currentTimeMillis() - start;
}
public static void main(String[] args) throws Exception
{
if (args.length != 1) {
System.err.println("Usage: java ... [file]");
System.exit(1);
}
File in = new File(args[0]);
System.out.printf("Reading input, %d bytes...\n", in.length());
File out = File.createTempFile("skip-perf", ".lzf");
System.out.printf("(writing as file '%s')\n", out.getPath());
byte[] buffer = new byte[4000];
int count;
FileInputStream ins = new FileInputStream(in);
LZFFileOutputStream outs = new LZFFileOutputStream(out);
while ((count = ins.read(buffer)) > 0) {
outs.write(buffer, 0, count);
}
ins.close();
outs.close();
System.out.printf("Compressed as file '%s', %d bytes\n", out.getPath(), out.length());
new ManualSkipComparison().test(out, (int) in.length());
}
}
compress-compress-lzf-1.0.3/src/test/java/perf/ManualUncompressComparison.java
package perf;
import java.io.*;
import com.ning.compress.lzf.*;
import com.ning.compress.lzf.util.ChunkDecoderFactory;
/**
 * Simple manual performance micro-benchmark that compares decompression
 * speeds of the safe and unsafe ("optimal") LZF decoder implementations.
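 *<p>
 * Decoder variants come from {@code ChunkDecoderFactory}: {@code safeInstance()}
 * returns the pure-Java decoder, while {@code optimalInstance()} may return a
 * {@code sun.misc.Unsafe} based one where available.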
*/
public class ManualUncompressComparison
{
protected int size = 0;
protected byte[] _lzfEncoded;
private void test(byte[] input) throws Exception
{
_lzfEncoded = LZFEncoder.encode(input);
// Let's try to guestimate suitable size... to get to 20 megs to process
final int REPS = Math.max(1, (int) ((double) (20 * 1000 * 1000) / (double) input.length));
// final int TYPES = 1;
final int TYPES = 2;
final int WARMUP_ROUNDS = 5;
int i = 0;
int roundsDone = 0;
final long[] times = new long[TYPES];
System.out.println("Read "+input.length+" bytes to compress, uncompress; will do "+REPS+" repetitions");
// But first, validate!
_preValidate(_lzfEncoded);
while (true) {
try { Thread.sleep(100L); } catch (InterruptedException ie) { }
int round = (i++ % TYPES);
String msg;
boolean lf = (round == 0);
long msecs;
switch (round) {
case 0:
msg = "LZF decompress/block/safe";
msecs = testLZFDecompress(REPS, _lzfEncoded, ChunkDecoderFactory.safeInstance());
break;
case 1:
msg = "LZF decompress/block/UNSAFE";
msecs = testLZFDecompress(REPS, _lzfEncoded, ChunkDecoderFactory.optimalInstance());
break;
case 2:
msg = "LZF decompress/stream";
msecs = testLZFDecompressStream(REPS, _lzfEncoded);
break;
default:
throw new Error();
}
// skip first 5 rounds to let results stabilize
if (roundsDone >= WARMUP_ROUNDS) {
times[round] += msecs;
}
System.out.printf("Test '%s' [%d bytes] -> %d msecs\n", msg, size, msecs);
if (lf) {
++roundsDone;
if ((roundsDone % 3) == 0 && roundsDone > WARMUP_ROUNDS) {
double den = (double) (roundsDone - WARMUP_ROUNDS);
if (times.length == 1) {
System.out.printf("Averages after %d rounds: %.1f msecs\n",
(int) den, times[0] / den);
} else {
System.out.printf("Averages after %d rounds (safe / UNSAFE): %.1f / %.1f msecs\n",
(int) den,
times[0] / den, times[1] / den);
}
System.out.println();
}
}
if ((i % 17) == 0) {
System.out.println("[GC]");
Thread.sleep(100L);
System.gc();
Thread.sleep(100L);
}
}
}
protected void _preValidate(byte[] compressed) throws LZFException
{
byte[] decoded1 = LZFDecoder.decode(compressed);
byte[] decoded2 = LZFDecoder.safeDecode(compressed);
if (decoded1.length == decoded2.length) {
for (int i = 0, len = decoded1.length; i < len; ++i) {
if (decoded1[i] != decoded2[i]) {
throw new IllegalStateException("Uncompressed contents differ at "+i+"/"+len);
}
}
} else {
throw new IllegalStateException("Uncompressed content lengths diff: expected "+decoded1.length+", got "+decoded2.length);
}
}
protected final long testLZFDecompress(int REPS, byte[] encoded, ChunkDecoder decoder) throws Exception
{
size = encoded.length;
long start = System.currentTimeMillis();
byte[] uncomp = null;
while (--REPS >= 0) {
uncomp = decoder.decode(encoded);
}
size = uncomp.length;
return System.currentTimeMillis() - start;
}
protected final long testLZFDecompressStream(int REPS, byte[] encoded) throws Exception
{
final byte[] buffer = new byte[8000];
size = 0;
long start = System.currentTimeMillis();
while (--REPS >= 0) {
int total = 0;
LZFInputStream in = new LZFInputStream(new ByteArrayInputStream(encoded));
int count;
while ((count = in.read(buffer)) > 0) {
total += count;
}
size = total;
in.close();
}
return System.currentTimeMillis() - start;
}
public static void main(String[] args) throws Exception
{
if (args.length != 1) {
System.err.println("Usage: java ... [file]");
System.exit(1);
}
File f = new File(args[0]);
ByteArrayOutputStream bytes = new ByteArrayOutputStream((int) f.length());
byte[] buffer = new byte[4000];
int count;
FileInputStream in = new FileInputStream(f);
while ((count = in.read(buffer)) > 0) {
bytes.write(buffer, 0, count);
}
in.close();
new ManualUncompressComparison().test(bytes.toByteArray());
}
}
compress-compress-lzf-1.0.3/src/test/java/perf/ManualUnsafePerf.java
package perf;
import java.lang.reflect.Field;
import sun.misc.Unsafe;
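/**
 * Manual micro-benchmark comparing plain JDK byte-to-char decoding against a
 * {@code sun.misc.Unsafe} based variant. Note that the Unsafe-specific decode
 * body is currently commented out, so both rounds effectively time plain
 * array access.
 */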
@SuppressWarnings("restriction")
public class ManualUnsafePerf
{
protected static final Unsafe unsafe;
static {
try {
Field theUnsafe = Unsafe.class.getDeclaredField("theUnsafe");
theUnsafe.setAccessible(true);
unsafe = (Unsafe) theUnsafe.get(null);
}
catch (Exception e) {
throw new RuntimeException(e);
}
}
protected static final long BYTE_ARRAY_OFFSET = unsafe.arrayBaseOffset(byte[].class);
protected static final long CHAR_ARRAY_OFFSET = unsafe.arrayBaseOffset(char[].class);
static final int INPUT_LEN = 48;
private void test() throws Exception
{
// Let's try to guestimate suitable size... to get to 10 megs to process
// but, with more docs, give more time
final int REPS = 2500 * 1000;
final int WARMUP_ROUNDS = 5;
int roundTotal = 0;
int roundsDone = 0;
final String[] names = new String[] {"Decode/JDK", "Decode/Unsafe" };
final int TYPES = names.length;
final long[] times = new long[TYPES];
StringBuilder sb = new StringBuilder();
for (int i = 0; i < INPUT_LEN; ++i) {
sb.append((char) ('A'+i));
}
byte[] INPUT = new byte[INPUT_LEN + 8];
{
byte[] b = sb.toString().getBytes("UTF-8");
System.arraycopy(b, 0, INPUT, 4, INPUT_LEN);
}
for (;; ++roundTotal) {
try { Thread.sleep(100L); } catch (InterruptedException ie) { }
int round = (roundTotal % TYPES);
String msg = names[round];
long msec;
switch (round) {
case 0:
msec = testDecodeJDK(REPS, INPUT, 4, INPUT_LEN);
break;
case 1:
msec = testDecodeUnsafe(REPS, INPUT, 4, INPUT_LEN);
break;
default:
throw new Error();
}
boolean roundDone = (round == 1);
// skip first 5 rounds to let results stabilize
if (roundsDone >= WARMUP_ROUNDS) {
times[round] += msec;
}
System.out.printf("Test '%s' -> %d msecs\n", msg, msec);
if (roundDone) {
++roundsDone;
if ((roundsDone % 7) == 0 && roundsDone > WARMUP_ROUNDS) {
_printResults((roundsDone - WARMUP_ROUNDS), names, times);
}
}
if ((roundTotal % 17) == 0) {
System.out.println("[GC]");
Thread.sleep(100L);
System.gc();
Thread.sleep(100L);
}
}
}
public long testDecodeJDK(int reps, byte[] input, final int offset, final int len)
{
final long mainStart = System.currentTimeMillis();
char[] result = new char[64];
while (--reps >= 0) {
for (int i = 0; i < len; ++i) {
result[i] = (char) input[offset+i];
}
}
long time = System.currentTimeMillis() - mainStart;
return time;
}
public long testDecodeUnsafe(int reps, byte[] input, final int offset, final int len)
{
final long mainStart = System.currentTimeMillis();
char[] result = new char[100];
while (--reps >= 0) {
// long inBase = BYTE_ARRAY_OFFSET + offset;
// long outBase = CHAR_ARRAY_OFFSET;
// final long inEnd = inBase + len;
for (int i = 0; i < len; ) {
                result[i] = (char) input[offset+i];
                ++i;
/*
int quad = unsafe.getInt(input, inBase);
inBase += 4;
result[i++] = (char) (quad >>> 24);
result[i++] = (char) ((quad >> 16) & 0xFF);
result[i++] = (char) ((quad >> 8) & 0xFF);
result[i++] = (char) (quad & 0xFF);
*/
/*
int q1 = ((quad >>> 24) << 16) + ((quad >> 16) & 0xFF);
unsafe.putInt(result, outBase, q1);
outBase += 4;
int q2 = (quad & 0xFFFF);
q2 = ((q2 >> 8) << 16) | (q2 & 0xFF);
unsafe.putInt(result, outBase, q2);
outBase += 4;
long l = q1;
l = (l << 32) | q2;
unsafe.putLong(result, outBase, l);
outBase += 8;
*/
}
}
long time = System.currentTimeMillis() - mainStart;
/*
String str = new String(result, 0, len);
System.out.println("("+str.length()+") '"+str+"'");
*/
return time;
}
protected void _printResults(int rounds, String[] names, long[] times)
{
System.out.printf(" Averages after %d rounds:", rounds);
double den = (double) rounds;
for (int file = 0; file < names.length; ++file) {
if (file > 0) {
System.out.print(" / ");
}
System.out.printf(" %s(", names[file]);
long time = times[file];
double msecs = time / den;
System.out.printf("%.1f)", msecs);
}
System.out.println();
}
public static void main(String[] args) throws Exception
{
new ManualUnsafePerf().test();
}
}
compress-compress-lzf-1.0.3/src/test/lzf/TestLZF.java
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
* file except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
* OF ANY KIND, either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
package lzf;
import java.io.*;
import java.util.Arrays;
import junit.framework.TestCase;
import com.ning.compress.lzf.LZFDecoder;
import com.ning.compress.lzf.LZFEncoder;
/**
* Semi-automatic non-unit test: will use all files on current working
* directory (and its subdirs) for testing that LZF encode+decode
* will correctly round-trip content.
*/
public class TestLZF extends TestCase
{
public void testWithFiles() throws Exception
{
File currDir = new File("").getAbsoluteFile();
int count = _handleFiles(currDir);
System.out.println("OK: tested with "+count+" files.");
}
private int _handleFiles(File dir) throws IOException
{
System.out.println("Testing files from dir '"+dir.getAbsolutePath()+"'...");
int count = 0;
for (File f : dir.listFiles()) {
if (f.isDirectory()) {
count += _handleFiles(f);
} else {
byte[] data = _readData(f);
byte[] enc = LZFEncoder.encode(data);
byte[] dec = LZFDecoder.decode(enc);
                assertTrue("File '"+f.getAbsolutePath()+"'", Arrays.equals(data, dec));
++count;
}
}
return count;
}
private static byte[] _readData(File in) throws IOException
{
int len = (int) in.length();
byte[] result = new byte[len];
int offset = 0;
FileInputStream fis = new FileInputStream(in);
while (len > 0) {
int count = fis.read(result, offset, len);
if (count < 0) break;
len -= count;
offset += count;
}
fis.close();
return result;
}
}
compress-compress-lzf-1.0.3/src/test/resources/binary/help.bin
[binary and text test resources omitted; the archive dump resumes mid-way through
compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/gzip/GZIPUncompressor.java
(an Uncompressor implementation), whose package declaration, imports, javadoc,
state constants and leading fields are missing here]
    /**
     * Buffer used for data uncompressed from the input buffer.
     */
protected byte[] _decodeBuffer;
/*
///////////////////////////////////////////////////////////////////////
// Decoder state
///////////////////////////////////////////////////////////////////////
*/
/**
* Current decoding state, which determines meaning of following byte(s).
*/
protected int _state = STATE_INITIAL;
/**
* Flag set if {@link DataHandler} indicates that processing should be
* terminated.
*/
protected boolean _terminated;
/**
* Header flags read from gzip header
*/
protected int _flags;
/**
* Expected CRC for header, from gzip file itself.
*/
protected int _headerCRC;
/**
* Simple counter used when skipping fixed number of bytes
*/
protected int _skippedBytes;
/**
* CRC container in trailer, should match calculated CRC over data
*/
protected int _trailerCRC;
/**
* Number of bytes that trailer indicates preceding data stream
* should have had.
*/
protected int _trailerCount;
/*
///////////////////////////////////////////////////////////////////////
// Instance creation
///////////////////////////////////////////////////////////////////////
*/
public GZIPUncompressor(DataHandler h)
{
this(h, DEFAULT_CHUNK_SIZE, BufferRecycler.instance(), GZIPRecycler.instance());
}
public GZIPUncompressor(DataHandler h, int inputChunkLength)
{
this(h, inputChunkLength, BufferRecycler.instance(), GZIPRecycler.instance());
}
public GZIPUncompressor(DataHandler h, int inputChunkLength, BufferRecycler bufferRecycler, GZIPRecycler gzipRecycler)
{
_inputChunkLength = inputChunkLength;
_handler = h;
_recycler = bufferRecycler;
_decodeBuffer = bufferRecycler.allocDecodeBuffer(DECODE_BUFFER_SIZE);
_gzipRecycler = gzipRecycler;
_inflater = gzipRecycler.allocInflater();
_crc = new CRC32();
}
/*
///////////////////////////////////////////////////////////////////////
// Uncompressor API implementation
///////////////////////////////////////////////////////////////////////
*/
@Override
public boolean feedCompressedData(byte[] comp, int offset, int len) throws IOException
{
if (_terminated) {
return false;
}
final int end = offset + len;
if (_state != STATE_BODY) {
if (_state < STATE_TRAILER_INITIAL) { // header
offset = _handleHeader(comp, offset, end);
if (offset >= end) { // not fully handled yet
return true;
}
// fall through to body
} else { // trailer
offset = _handleTrailer(comp, offset, end);
if (offset < end) { // sanity check
_throwInternal();
}
// either way, we are done
return true;
}
}
// Ok, decode...
while (true) {
// first: if input is needed, give some
if (_inflater.needsInput()) {
final int left = end-offset;
                if (left < 1) { // need input but nothing to give, leave
return true;
}
final int amount = Math.min(left, _inputChunkLength);
_inflater.setInput(comp, offset, amount);
offset += amount;
}
// and then see what we can get out if anything
while (true) {
int decoded;
try {
decoded = _inflater.inflate(_decodeBuffer);
} catch (DataFormatException e) {
throw new GZIPException("Problems inflating gzip data: "+e.getMessage(), e);
}
if (decoded == 0) {
break;
}
_crc.update(_decodeBuffer, 0, decoded);
if (!_handler.handleData(_decodeBuffer, 0, decoded)) {
_terminated = true;
return false;
}
}
if (_inflater.finished() || _inflater.needsDictionary()) {
_state = STATE_TRAILER_INITIAL;
// also: push back some of data that is buffered
int remains = _inflater.getRemaining();
if (remains > 0) {
offset -= remains;
}
break;
}
}
// finally; handle trailer if we got this far
offset = _handleTrailer(comp, offset, end);
if (offset < end) { // sanity check
_throwInternal();
}
return !_terminated;
}
@Override
public void complete() throws IOException
{
byte[] b = _decodeBuffer;
if (b != null) {
_decodeBuffer = null;
_recycler.releaseDecodeBuffer(b);
}
Inflater i = _inflater;
if (i != null) {
_inflater = null;
_gzipRecycler.releaseInflater(i);
}
// 24-May-2012, tatu: Should we call this here; or fail with exception?
_handler.allDataHandled();
if (!_terminated) {
if (_state != STATE_INITIAL) {
if (_state >= STATE_TRAILER_INITIAL) {
if (_state == STATE_BODY) {
throw new GZIPException("Invalid GZIP stream: end-of-input in the middle of compressed data");
}
throw new GZIPException("Invalid GZIP stream: end-of-input in the trailer (state: "+_state+")");
}
throw new GZIPException("Invalid GZIP stream: end-of-input in header (state: "+_state+")");
}
}
}
/*
///////////////////////////////////////////////////////////////////////
// Helper methods, header/trailer
///////////////////////////////////////////////////////////////////////
*/
protected final boolean _hasFlag(int flag) {
return (_flags & flag) == flag;
}
private final int _handleHeader(byte[] comp, int offset, final int end) throws IOException
{
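        // GZIP header layout (RFC 1952): 2-byte magic 0x1f 0x8b, 1-byte
        // compression method (8 = deflate), 1-byte flags, 6 bytes of
        // MTIME/XFL/OS; then, depending on flag bits, optional "extra",
        // zero-terminated name and comment sections, and a 2-byte header CRC.
        // The state machine below consumes the header incrementally, since it
        // may arrive split across multiple feedCompressedData() calls.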
main_loop:
while (offset < end) {
byte b = comp[offset++];
_crc.update(b);
switch (_state) {
case STATE_INITIAL:
if (b != GZIP_MAGIC_0) {
_reportBadHeader(comp, offset, end, 0);
}
if (offset >= end) {
_state = STATE_HEADER_SIG1;
break;
}
b = comp[offset++];
_crc.update(b);
// fall through
case STATE_HEADER_SIG1:
if (b != GZIP_MAGIC_1) {
_reportBadHeader(comp, offset, end, 1);
}
if (offset >= end) {
_state = STATE_HEADER_COMP_TYPE;
break;
}
b = comp[offset++];
_crc.update(b);
// fall through
case STATE_HEADER_COMP_TYPE:
if (b != Deflater.DEFLATED) {
                    _reportBadHeader(comp, offset, end, 2);
}
if (offset >= end) {
_state = STATE_HEADER_FLAGS;
break;
}
b = comp[offset++];
_crc.update(b);
// fall through
case STATE_HEADER_FLAGS:
_flags = b; // should we validate these?
_skippedBytes = 0;
_state = STATE_HEADER_SKIP;
if (offset >= end) {
break;
}
b = comp[offset++];
_crc.update(b);
// fall through
case STATE_HEADER_SKIP:
while (++_skippedBytes < 6) {
if (offset >= end) {
break main_loop;
}
b = comp[offset++];
_crc.update(b);
}
if (_hasFlag(FEXTRA)) {
_state = STATE_HEADER_EXTRA0;
} else if (_hasFlag(FNAME)) {
_state = STATE_HEADER_FNAME;
} else if (_hasFlag(FCOMMENT)) {
_state = STATE_HEADER_COMMENT;
} else if (_hasFlag(FHCRC)) {
_state = STATE_HEADER_CRC0;
} else { // no extras... body, I guess?
_state = STATE_BODY;
break main_loop;
}
// let's keep things simple, do explicit re-loop to sort it out:
continue;
case STATE_HEADER_EXTRA0:
_state = STATE_HEADER_EXTRA1;
break;
case STATE_HEADER_EXTRA1:
if (_hasFlag(FNAME)) {
_state = STATE_HEADER_FNAME;
} else if (_hasFlag(FCOMMENT)) {
_state = STATE_HEADER_COMMENT;
} else if (_hasFlag(FHCRC)) {
_state = STATE_HEADER_CRC0;
} else {
_state = STATE_BODY;
break main_loop;
}
break;
case STATE_HEADER_FNAME: // skip until zero
while (b != 0) {
if (offset >= end) {
break main_loop;
}
b = comp[offset++];
_crc.update(b);
}
if (_hasFlag(FCOMMENT)) {
_state = STATE_HEADER_COMMENT;
} else if (_hasFlag(FHCRC)) {
_state = STATE_HEADER_CRC0;
} else {
_state = STATE_BODY;
break main_loop;
}
break;
case STATE_HEADER_COMMENT:
while (b != 0) {
if (offset >= end) {
break main_loop;
}
                    b = comp[offset++];
                    _crc.update(b);
}
if (_hasFlag(FHCRC)) {
_state = STATE_HEADER_CRC0;
} else {
_state = STATE_BODY;
break main_loop;
}
break;
case STATE_HEADER_CRC0:
_headerCRC = b & 0xFF;
if (offset >= end) {
_state = STATE_HEADER_CRC1;
break;
}
b = comp[offset++];
_crc.update(b);
// fall through
case STATE_HEADER_CRC1:
_headerCRC += ((b & 0xFF) << 8);
int act = (int)_crc.getValue() & 0xffff;
if (act != _headerCRC) {
throw new GZIPException("Corrupt GZIP header: header CRC 0x"
+Integer.toHexString(act)+", expected 0x "
+Integer.toHexString(_headerCRC));
}
_state = STATE_BODY;
break main_loop;
default:
_throwInternal("Unknown header state: "+_state);
}
}
if (_state == STATE_BODY) {
_crc.reset();
}
return offset;
}
private final int _handleTrailer(byte[] comp, int offset, final int end) throws IOException
{
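        // GZIP trailer layout (RFC 1952): 4-byte CRC32 of the uncompressed data,
        // then 4-byte ISIZE (uncompressed length modulo 2^32), both little-endian;
        // consumed one byte at a time so it too may span multiple feed calls.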
while (offset < end) {
byte b = comp[offset++];
switch (_state) {
case STATE_TRAILER_INITIAL:
_trailerCRC = b & 0xFF;
_state = STATE_TRAILER_CRC1;
break;
case STATE_TRAILER_CRC1:
_trailerCRC += (b & 0xFF) << 8;
_state = STATE_TRAILER_CRC2;
break;
case STATE_TRAILER_CRC2:
_trailerCRC += (b & 0xFF) << 16;
_state = STATE_TRAILER_CRC3;
break;
case STATE_TRAILER_CRC3:
_trailerCRC += (b & 0xFF) << 24;
final int actCRC = (int) _crc.getValue();
// verify CRC:
if (_trailerCRC != actCRC) {
throw new GZIPException("Corrupt block or trailer: expected CRC "
+Integer.toHexString(_trailerCRC)+", computed "+Integer.toHexString(actCRC));
}
_state = STATE_TRAILER_LEN0;
break;
case STATE_TRAILER_LEN0:
_trailerCount = b & 0xFF;
_state = STATE_TRAILER_LEN1;
break;
case STATE_TRAILER_LEN1:
_trailerCount += (b & 0xFF) << 8;
_state = STATE_TRAILER_LEN2;
break;
case STATE_TRAILER_LEN2:
_trailerCount += (b & 0xFF) << 16;
_state = STATE_TRAILER_LEN3;
break;
case STATE_TRAILER_LEN3:
_trailerCount += (b & 0xFF) << 24;
_state = STATE_INITIAL;
// Verify count...
int actCount32 = (int) _inflater.getBytesWritten();
if (actCount32 != _trailerCount) {
throw new GZIPException("Corrupt block or trailed: expected byte count "+_trailerCount+", read "+actCount32);
}
break;
default:
_throwInternal("Unknown trailer state: "+_state);
}
}
return offset;
}
/*
///////////////////////////////////////////////////////////////////////
// Helper methods, other
///////////////////////////////////////////////////////////////////////
*/
protected void _throwInternal() throws GZIPException {
throw new GZIPException("Internal error");
}
protected void _throwInternal(String msg) throws GZIPException {
throw new GZIPException("Internal error: "+msg);
}
protected void _reportBadHeader(byte[] comp, int nextOffset, int end, int relative)
throws GZIPException
{
        // offset has already been advanced past the offending byte
        String byteStr = "0x"+Integer.toHexString(comp[nextOffset-1] & 0xFF);
        if (relative <= 1) {
            int exp = (relative == 0) ? (GZIP_MAGIC & 0xFF) : (GZIP_MAGIC >> 8);
throw new GZIPException("Bad GZIP stream: byte #"+relative+" of header not '"
+exp+"' (0x"+Integer.toHexString(exp)+") but "+byteStr);
}
        if (relative == 2) { // compression-method byte
throw new GZIPException("Bad GZIP stream: byte #2 of header invalid: type "+byteStr
+" not supported, 0x"+Integer.toHexString(Deflater.DEFLATED)
+" expected");
}
throw new GZIPException("Bad GZIP stream: byte #"+relative+" of header invalid: "+byteStr);
}
}
compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/gzip/OptimizedGZIPInputStream.java
package com.ning.compress.gzip;
import java.io.*;
import java.util.zip.*;
import com.ning.compress.BufferRecycler;
/**
* Optimized variant of {@link java.util.zip.GZIPInputStream} that
 * reuses an underlying {@link java.util.zip.Inflater} instance.
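 *<p>
 * Minimal usage sketch (file name is a placeholder, not from the original source):
 *<pre>
 *   InputStream in = new OptimizedGZIPInputStream(new FileInputStream("data.gz"));
 *   byte[] buf = new byte[4000];
 *   int count;
 *   while ((count = in.read(buf)) > 0) {
 *       // process buf[0..count-1]
 *   }
 *   in.close();
 *</pre>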
*/
public class OptimizedGZIPInputStream
extends InputStream
{
/**
     * Size of input chunks fed to the underlying {@link Inflater}.
*/
private final static int INPUT_BUFFER_SIZE = 16000;
/**
* Enumeration used for keeping track of decoding state within
* stream
*/
enum State {
GZIP_HEADER, GZIP_CONTENT, GZIP_TRAILER, GZIP_COMPLETE;
};
/*
///////////////////////////////////////////////////////////////////////
// Helper objects
///////////////////////////////////////////////////////////////////////
*/
protected Inflater _inflater;
protected final CRC32 _crc;
/**
* Object that handles details of buffer recycling
*/
protected final BufferRecycler _bufferRecycler;
protected final GZIPRecycler _gzipRecycler;
/*
///////////////////////////////////////////////////////////////////////
// State
///////////////////////////////////////////////////////////////////////
*/
protected byte[] _buffer;
protected int _bufferPtr;
protected int _bufferEnd;
/**
* Temporary buffer used for single-byte reads, skipping.
*/
protected byte[] _tmpBuffer;
/**
* Underlying input stream from which compressed data is to be
* read from.
*/
protected InputStream _rawInput;
/**
* Flag set to true during handling of header processing
*/
protected OptimizedGZIPInputStream.State _state;
/*
///////////////////////////////////////////////////////////////////////
// Construction
///////////////////////////////////////////////////////////////////////
*/
public OptimizedGZIPInputStream(InputStream in) throws IOException
{
this(in, BufferRecycler.instance(), GZIPRecycler.instance());
}
public OptimizedGZIPInputStream(InputStream in, BufferRecycler bufferRecycler, GZIPRecycler gzipRecycler) throws IOException
{
super();
_bufferRecycler = bufferRecycler;
_gzipRecycler = gzipRecycler;
_rawInput = in;
_buffer = bufferRecycler.allocInputBuffer(INPUT_BUFFER_SIZE);
_bufferPtr = _bufferEnd = 0;
_inflater = gzipRecycler.allocInflater();
_crc = new CRC32();
// And then need to process header...
_readHeader();
_state = State.GZIP_CONTENT;
_crc.reset();
// and if all is good, kick start inflater etc
if (_bufferPtr >= _bufferEnd) { // need more data
_loadMore();
}
_inflater.setInput(_buffer, _bufferPtr, _bufferEnd-_bufferPtr);
}
/*
///////////////////////////////////////////////////////////////////////
// InputStream implementation
///////////////////////////////////////////////////////////////////////
*/
@Override
public int available()
{
if (_state == State.GZIP_COMPLETE) {
return 0;
}
// not sure if this makes sense but...
return _inflater.finished() ? 0 : 1;
}
@Override
public void close() throws IOException
{
_state = State.GZIP_COMPLETE;
if (_rawInput != null) {
_rawInput.close();
_rawInput = null;
}
byte[] b = _buffer;
if (b != null) {
_buffer = null;
_bufferRecycler.releaseInputBuffer(b);
}
b = _tmpBuffer;
if (b != null) {
_tmpBuffer = null;
_bufferRecycler.releaseDecodeBuffer(b);
}
Inflater i = _inflater;
if (i != null) {
_inflater = null;
_gzipRecycler.releaseInflater(i);
}
}
@Override
public void mark(int limit) {
// not supported... but not lethal to call
}
@Override
public boolean markSupported() {
return false;
}
@Override
public final int read() throws IOException
{
byte[] tmp = _getTmpBuffer();
int count = read(tmp, 0, 1);
if (count < 0) {
return -1;
}
return tmp[0] & 0xFF;
}
@Override
public final int read(byte[] buf) throws IOException {
return read(buf, 0, buf.length);
}
@Override
public final int read(byte[] buf, int offset, int len) throws IOException
{
if (buf == null) {
throw new NullPointerException();
}
if (offset < 0 || len < 0 || len > buf.length - offset) {
throw new IndexOutOfBoundsException();
}
if (_state == State.GZIP_COMPLETE) { // closed or EOF
return -1;
}
if (len == 0) {
return 0;
}
try {
int count;
while ((count = _inflater.inflate(buf, offset, len)) == 0) {
if (_inflater.finished() || _inflater.needsDictionary()) {
_readTrailer();
_state = State.GZIP_COMPLETE;
return -1;
}
if (_inflater.needsInput()) {
_loadMore();
_inflater.setInput(_buffer, _bufferPtr, _bufferEnd-_bufferPtr);
_bufferPtr = _bufferEnd;
}
}
_crc.update(buf, offset, count);
return count;
} catch (DataFormatException e) {
String s = e.getMessage();
throw new GZIPException(s != null ? s : "Invalid ZLIB data format");
}
}
@Override
public void reset() throws IOException {
throw new IOException("mark/reset not supported");
}
@Override
public long skip(long n) throws IOException
{
if (n < 0) {
throw new IllegalArgumentException("negative skip length");
}
byte[] tmp = _getTmpBuffer();
long total = 0;
while (true) {
int max = (int) (n - total);
if (max == 0) {
break;
}
            int count = read(tmp, 0, Math.min(max, tmp.length));
            if (count < 0) { // EOF? stop skipping
                break;
            }
            total += count;
}
return total;
}
/*
///////////////////////////////////////////////////////////////////////
// Internal methods
///////////////////////////////////////////////////////////////////////
*/
protected byte[] _getTmpBuffer()
{
if (_tmpBuffer == null) {
_tmpBuffer = _bufferRecycler.allocDecodeBuffer(INPUT_BUFFER_SIZE);
}
return _tmpBuffer;
}
protected final void _readHeader() throws IOException
{
_state = State.GZIP_HEADER;
// Check header magic
int sig = _readShort();
if (sig != GZIPUncompressor.GZIP_MAGIC) {
throw new GZIPException("Not in GZIP format (got 0x"+Integer.toHexString(sig)
+", should be 0x"+Integer.toHexString(GZIPUncompressor.GZIP_MAGIC)+")");
}
// Check compression method
if (_readByte() != Deflater.DEFLATED) {
throw new GZIPException("Unsupported compression method (only support Deflate, "+Deflater.DEFLATED+")");
}
// Read flags
int flg = _readByte();
// Skip MTIME, XFL, and OS fields
_skipBytes(6);
// Skip optional extra field
if ((flg & GZIPUncompressor.FEXTRA) != 0) {
_skipBytes(_readShort());
}
// Skip optional file name
if ((flg & GZIPUncompressor.FNAME) != 0) {
while (_readByte() != 0) ;
}
// Skip optional file comment
if ((flg & GZIPUncompressor.FCOMMENT) != 0) {
while (_readByte() != 0) ;
}
// Check optional header CRC
if ((flg & GZIPUncompressor.FHCRC) != 0) {
int act = (int)_crc.getValue() & 0xffff;
int exp = _readShort();
if (act != exp) {
throw new GZIPException("Corrupt GZIP header (header CRC 0x"
+Integer.toHexString(act)+", expected 0x "
+Integer.toHexString(exp));
}
}
}
protected final void _readTrailer() throws IOException
{
int actCrc = (int) _crc.getValue();
// does Inflater have leftovers?
int remains = _inflater.getRemaining();
if (remains > 0) {
// ok, let's update ptr to indicate where we are at...
_bufferPtr = _bufferEnd - remains;
} else { // if not, just load more
_loadMore(8);
}
int expCrc = _readInt();
int expCount = _readInt();
int actCount32 = (int) _inflater.getBytesWritten();
if (actCount32 != expCount) {
throw new GZIPException("Corrupt trailer: expected byte count "+expCount+", read "+actCount32);
}
if (expCrc != actCrc) {
throw new GZIPException("Corrupt trailer: expected CRC "+Integer.toHexString(expCrc)+", computed "+Integer.toHexString(actCrc));
}
}
private final void _skipBytes(int count) throws IOException
{
while (--count >= 0) {
_readByte();
}
}
private final int _readByte() throws IOException
{
if (_bufferPtr >= _bufferEnd) {
_loadMore();
}
byte b = _buffer[_bufferPtr++];
if (_state == State.GZIP_HEADER) {
_crc.update(b);
}
return b & 0xFF;
}
private final int _readShort() throws IOException
{
// LSB... blech
return _readByte() | (_readByte() << 8);
}
private final int _readInt() throws IOException
{
// LSB... yuck
return _readByte() | (_readByte() << 8)
| (_readByte() << 16) | (_readByte() << 24);
}
private final void _loadMore() throws IOException
{
        // let's read at most 8k; Inflater has to buffer some of the data anyway
_loadMore(Math.min(_buffer.length, INPUT_BUFFER_SIZE));
}
private final void _loadMore(int max) throws IOException
{
int count = _rawInput.read(_buffer, 0, max);
if (count < 1) {
String prob = (count < 0) ?
"Unexpected end of input" : "Strange underlying stream (returned 0 bytes for read)";
throw new GZIPException(prob+" when reading "+_state);
}
_bufferPtr = 0;
_bufferEnd = count;
}
}
compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/gzip/OptimizedGZIPOutputStream.java
package com.ning.compress.gzip;
import java.io.*;
import java.util.zip.CRC32;
import java.util.zip.Deflater;
import java.util.zip.DeflaterOutputStream;
/**
* Optimized variant of {@link java.util.zip.GZIPOutputStream} that
 * reuses the underlying {@link java.util.zip.Deflater} instance.
*/
public class OptimizedGZIPOutputStream
extends OutputStream
{
/**
* GZIP header magic number; written out LSB like most everything
* else (i.e. as 0x1f 0x8b)
*/
private final static int GZIP_MAGIC = 0x8b1f;
/**
* For now, static header seems fine, since JDK default gzip writer
* does it too:
*/
final static byte[] DEFAULT_HEADER = new byte[] {
(byte) GZIP_MAGIC, // Magic number (short)
(byte)(GZIP_MAGIC >> 8), // Magic number (short)
Deflater.DEFLATED, // Compression method (CM)
0, // Flags (FLG)
0, // Modification time MTIME (int)
0, // Modification time MTIME (int)
0, // Modification time MTIME (int)
0, // Modification time MTIME (int)
0, // Extra flags (XFLG)
(byte) 0xff // Operating system (OS), UNKNOWN
};
/*
///////////////////////////////////////////////////////////////////////
// Helper objects
///////////////////////////////////////////////////////////////////////
*/
protected Deflater _deflater;
protected final GZIPRecycler _gzipRecycler;
protected final byte[] _eightByteBuffer = new byte[8];
/*
///////////////////////////////////////////////////////////////////////
// State
///////////////////////////////////////////////////////////////////////
*/
/**
* Underlying output stream that header, compressed content and
* footer go to
*/
protected OutputStream _rawOut;
// TODO: write this out, not strictly needed...
protected DeflaterOutputStream _deflaterOut;
protected CRC32 _crc;
/*
///////////////////////////////////////////////////////////////////////
// Construction
///////////////////////////////////////////////////////////////////////
*/
public OptimizedGZIPOutputStream(OutputStream out) throws IOException
{
super();
_gzipRecycler = GZIPRecycler.instance();
_rawOut = out;
// write header:
_rawOut.write(DEFAULT_HEADER);
_deflater = _gzipRecycler.allocDeflater();
_deflaterOut = new DeflaterOutputStream(_rawOut, _deflater, 4000);
_crc = new CRC32();
}
/*
///////////////////////////////////////////////////////////////////////
// OutputStream implementation
///////////////////////////////////////////////////////////////////////
*/
@Override
public void close() throws IOException
{
_deflaterOut.finish();
_deflaterOut = null;
_writeTrailer(_rawOut);
_rawOut.close();
Deflater d = _deflater;
if (d != null) {
_deflater = null;
_gzipRecycler.releaseDeflater(d);
}
}
@Override
public void flush() throws IOException {
_deflaterOut.flush();
}
@Override
public final void write(byte[] buf) throws IOException {
write(buf, 0, buf.length);
}
@Override
public final void write(int c) throws IOException {
_eightByteBuffer[0] = (byte) c;
write(_eightByteBuffer, 0, 1);
}
@Override
public void write(byte[] buf, int off, int len) throws IOException {
_deflaterOut.write(buf, off, len);
_crc.update(buf, off, len);
}
/*
///////////////////////////////////////////////////////////////////////
// Internal methods
///////////////////////////////////////////////////////////////////////
*/
private void _writeTrailer(OutputStream out) throws IOException
{
_putInt(_eightByteBuffer, 0, (int) _crc.getValue());
_putInt(_eightByteBuffer, 4, _deflater.getTotalIn());
out.write(_eightByteBuffer, 0, 8);
}
/**
* Stupid GZIP, writes stuff in wrong order (not network, but x86)
*/
private final static void _putInt(byte[] buf, int offset, int value)
{
buf[offset++] = (byte) (value);
buf[offset++] = (byte) (value >> 8);
buf[offset++] = (byte) (value >> 16);
buf[offset] = (byte) (value >> 24);
}
}
compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/gzip/package-info.java
/**
Package that contains optimized stream implementations for working
with GZIP. Internally, the efficient JDK-provided ZLIB codec is used for
actual encoding and decoding.
Code here
adds reuse of {@link java.util.zip.Deflater} and {@link java.util.zip.Inflater}
instances (and of work buffers), to specifically improve handling of relatively
short compressed data; it may also have better support for alternate
operating modes, such as the "push-style" handling needed for
non-blocking ("async") stream processing.
*/
package com.ning.compress.gzip;
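// Usage sketch (illustrative, not part of the original sources): round-trips a
// byte array through the two optimized GZIP streams of this package. The demo
// class and payload are made up; it assumes the single-argument stream
// constructors used elsewhere in this library.
import java.io.*;
import com.ning.compress.gzip.OptimizedGZIPInputStream;
import com.ning.compress.gzip.OptimizedGZIPOutputStream;

public class OptimizedGZIPRoundTripDemo {
    public static void main(String[] args) throws IOException {
        byte[] original = "short payloads benefit most from Deflater reuse".getBytes("UTF-8");
        ByteArrayOutputStream compressed = new ByteArrayOutputStream();
        OutputStream gzOut = new OptimizedGZIPOutputStream(compressed);
        gzOut.write(original);
        gzOut.close(); // finishes deflation, writes CRC32+length trailer, recycles the Deflater
        InputStream gzIn = new OptimizedGZIPInputStream(new ByteArrayInputStream(compressed.toByteArray()));
        ByteArrayOutputStream restored = new ByteArrayOutputStream();
        byte[] buf = new byte[256];
        for (int n; (n = gzIn.read(buf)) != -1; ) {
            restored.write(buf, 0, n);
        }
        gzIn.close(); // recycles the Inflater and work buffers
    }
}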
compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/ChunkDecoder.java
package com.ning.compress.lzf;
import java.io.IOException;
import java.io.InputStream;
/**
 * Decoder that handles decoding of sequence of encoded LZF chunks,
 * combining them into a single contiguous result byte array.
 */
public abstract class ChunkDecoder
{
    /**
     * Method that will either skip or decode the next chunk, depending on
     * how much of the uncompressed content the caller wants to skip.
     *
     * @return Number of bytes skipped, if chunk was skipped; or, if chunk had
     *   to be decoded into <code>outputBuffer</code>, a negative number that
     *   indicates the amount decoded (calculated as:
     *   <code>returnValue = -(decodedAmount + 2)</code>)
     */
public abstract int skipOrDecodeChunk(final InputStream is, final byte[] inputBuffer,
final byte[] outputBuffer, final long maxToSkip)
throws IOException;
/*
///////////////////////////////////////////////////////////////////////
// Public static methods
///////////////////////////////////////////////////////////////////////
*/
/**
* Helper method that will calculate total uncompressed size, for sequence of
* one or more LZF blocks stored in given byte array.
* Will do basic sanity checking, so that this method can be called to
* verify against some types of corruption.
*/
public static int calculateUncompressedSize(byte[] data, int ptr, int length) throws LZFException
{
int uncompressedSize = 0;
int blockNr = 0;
final int end = ptr + length;
while (ptr < end) {
            // can use optional end marker (single null byte at the very end)
            if ((ptr + 1) == end && data[ptr] == BYTE_NULL) {
++ptr; // so that we'll be at end
break;
}
// simpler to handle bounds checks by catching exception here...
try {
if (data[ptr] != LZFChunk.BYTE_Z || data[ptr+1] != LZFChunk.BYTE_V) {
throw new LZFException("Corrupt input data, block #"+blockNr+" (at offset "+ptr+"): did not start with 'ZV' signature bytes");
}
int type = (int) data[ptr+2];
int blockLen = uint16(data, ptr+3);
if (type == LZFChunk.BLOCK_TYPE_NON_COMPRESSED) { // uncompressed
ptr += 5;
uncompressedSize += blockLen;
} else if (type == LZFChunk.BLOCK_TYPE_COMPRESSED) { // compressed
uncompressedSize += uint16(data, ptr+5);
ptr += 7;
} else { // unknown... CRC-32 would be 2, but that's not implemented by cli tool
throw new LZFException("Corrupt input data, block #"+blockNr+" (at offset "+ptr+"): unrecognized block type "+(type & 0xFF));
}
ptr += blockLen;
} catch (ArrayIndexOutOfBoundsException e) {
throw new LZFException("Corrupt input data, block #"+blockNr+" (at offset "+ptr+"): truncated block header");
}
++blockNr;
}
// one more sanity check:
if (ptr != end) {
throw new LZFException("Corrupt input data: block #"+blockNr+" extends "+(data.length - ptr)+" beyond end of input");
}
return uncompressedSize;
}
/*
///////////////////////////////////////////////////////////////////////
// Internal methods
///////////////////////////////////////////////////////////////////////
*/
protected final static int uint16(byte[] data, int ptr) {
return ((data[ptr] & 0xFF) << 8) + (data[ptr+1] & 0xFF);
}
/**
* Helper method to forcibly load header bytes that must be read before
* chunk can be handled.
*/
protected final static int readHeader(final InputStream is, final byte[] inputBuffer)
throws IOException
{
// Ok: simple case first, where we just get all data we need
int needed = HEADER_BYTES;
int count = is.read(inputBuffer, 0, needed);
if (count == needed) {
return count;
}
if (count <= 0) {
return 0;
}
// if not, a source that trickles data (network etc); must loop
int offset = count;
needed -= count;
do {
count = is.read(inputBuffer, offset, needed);
if (count <= 0) {
break;
}
offset += count;
needed -= count;
} while (needed > 0);
return offset;
}
protected final static void readFully(InputStream is, boolean compressed,
byte[] outputBuffer, int offset, int len) throws IOException
{
int left = len;
while (left > 0) {
int count = is.read(outputBuffer, offset, left);
if (count < 0) { // EOF not allowed here
throw new LZFException("EOF in "+len+" byte ("
+(compressed ? "" : "un")+"compressed) block: could only read "
+(len-left)+" bytes");
}
offset += count;
left -= count;
}
}
protected final static void skipFully(final InputStream is, int amount) throws IOException
{
final int orig = amount;
while (amount > 0) {
long skipped = is.skip(amount);
if (skipped <= 0) {
throw new LZFException("Input problem: failed to skip "+orig+" bytes in input stream, only skipped "
+(orig-amount));
}
amount -= (int) skipped;
}
}
protected void _reportCorruptHeader() throws LZFException {
throw new LZFException("Corrupt input data, block did not start with 2 byte signature ('ZV') followed by type byte, 2-byte length)");
}
/**
* Helper method called when it is determined that the target buffer can not
* hold all data to copy or uncompress
*/
protected void _reportArrayOverflow(byte[] targetBuffer, int outPtr, int dataLen)
throws LZFException
{
throw new LZFException("Target buffer too small ("+targetBuffer.length+"): can not copy/uncompress "
+dataLen+" bytes to offset "+outPtr);
}
}
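// Usage sketch (illustrative, not part of the original sources): using the
// static helper above to pre-size the target buffer before decoding; the demo
// scaffolding (class name, payload) is made up.
import com.ning.compress.lzf.ChunkDecoder;
import com.ning.compress.lzf.LZFDecoder;
import com.ning.compress.lzf.LZFEncoder;
import com.ning.compress.lzf.LZFException;

public class UncompressedSizeDemo {
    public static void main(String[] args) throws LZFException {
        byte[] compressed = LZFEncoder.encode("abcabcabcabcabcabcabcabc".getBytes());
        // sanity-checks chunk framing, then sums up per-chunk uncompressed sizes
        int size = ChunkDecoder.calculateUncompressedSize(compressed, 0, compressed.length);
        byte[] restored = new byte[size];
        LZFDecoder.decode(compressed, 0, compressed.length, restored);
    }
}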
compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/ChunkEncoder.java
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
* file except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
* OF ANY KIND, either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
package com.ning.compress.lzf;
import java.io.Closeable;
import java.io.IOException;
import java.io.OutputStream;
import com.ning.compress.BufferRecycler;
/**
 * Class that handles actual encoding of individual chunks.
 * Resulting chunks can be compressed or non-compressed; compression
 * is only used if it actually reduces chunk size (including overhead
 * of additional header bytes).
 */
public abstract class ChunkEncoder
    implements Closeable
{
    /**
     * Method that tries to compress given chunk and append it in given buffer,
     * but only if it compresses down enough; otherwise it returns <code>-1</code>
     * without appending anything.
     *
     * @param maxResultRatio Value between 0.05 and 1.10 to indicate maximum relative size of
     *   the result to use, in order to append encoded chunk
     *
     * @return Offset after appending compressed chunk, if compression produces compact
     *   enough chunk; otherwise -1 to indicate that no compression resulted.
     */
public int appendEncodedIfCompresses(final byte[] input, double maxResultRatio,
final int inputPtr, final int inputLen,
final byte[] outputBuffer, final int outputPos)
{
if (inputLen >= MIN_BLOCK_TO_COMPRESS) {
final int compStart = outputPos + LZFChunk.HEADER_LEN_COMPRESSED;
final int end = tryCompress(input, inputPtr, inputPtr+inputLen, outputBuffer, compStart);
final int maxSize = (int) (maxResultRatio * inputLen + LZFChunk.HEADER_LEN_COMPRESSED + 0.5);
if (end <= (outputPos + maxSize)) { // yes, compressed enough, let's do this!
final int compLen = end - compStart;
LZFChunk.appendCompressedHeader(inputLen, compLen, outputBuffer, outputPos);
return end;
}
}
return -1;
}
/**
* Method for encoding individual chunk, writing it to given output stream.
*/
public void encodeAndWriteChunk(byte[] data, int offset, int len, OutputStream out)
throws IOException
{
if (len >= MIN_BLOCK_TO_COMPRESS) {
// If we have non-trivial block, and can compress it by at least
// 2 bytes (since header is 2 bytes longer), let's compress:
int compEnd = tryCompress(data, offset, offset+len, _encodeBuffer, LZFChunk.HEADER_LEN_COMPRESSED);
final int compLen = compEnd - LZFChunk.HEADER_LEN_COMPRESSED;
if (compLen < (len-2)) { // yes, compressed block is smaller (consider header is 2 bytes longer)
LZFChunk.appendCompressedHeader(len, compLen, _encodeBuffer, 0);
out.write(_encodeBuffer, 0, compEnd);
return;
}
}
// Otherwise leave uncompressed:
byte[] headerBuf = _headerBuffer;
if (headerBuf == null) {
_headerBuffer = headerBuf = new byte[LZFChunk.MAX_HEADER_LEN];
}
LZFChunk.writeNonCompressedHeader(len, out, headerBuf);
out.write(data, offset, len);
}
/**
* Method for encoding individual chunk, writing it to given output stream,
* if (and only if!) it compresses enough.
*
* @return True if compression occurred and chunk was written; false if not.
*/
public boolean encodeAndWriteChunkIfCompresses(byte[] data, int offset, int inputLen,
OutputStream out, double resultRatio)
throws IOException
{
if (inputLen >= MIN_BLOCK_TO_COMPRESS) {
int compEnd = tryCompress(data, offset, offset+inputLen, _encodeBuffer, LZFChunk.HEADER_LEN_COMPRESSED);
final int maxSize = (int) (resultRatio * inputLen + LZFChunk.HEADER_LEN_COMPRESSED + 0.5);
if (compEnd <= maxSize) { // yes, down to small enough
LZFChunk.appendCompressedHeader(inputLen, compEnd-LZFChunk.HEADER_LEN_COMPRESSED,
_encodeBuffer, 0);
out.write(_encodeBuffer, 0, compEnd);
return true;
}
}
return false;
}
public BufferRecycler getBufferRecycler() {
return _recycler;
}
/*
///////////////////////////////////////////////////////////////////////
// Abstract methods for sub-classes
///////////////////////////////////////////////////////////////////////
*/
/**
* Main workhorse method that will try to compress given chunk, and return
* end position (offset to byte after last included byte).
* Result will be "raw" encoded contents without chunk header information:
* caller is responsible for prepending header, if it chooses to use encoded
* data; it may also choose to instead create an uncompressed chunk.
*
* @return Output pointer after handling content, such that result - originalOutPost
* is the actual length of compressed chunk (without header)
*/
protected abstract int tryCompress(byte[] in, int inPos, int inEnd, byte[] out, int outPos);
/*
///////////////////////////////////////////////////////////////////////
// Shared helper methods
///////////////////////////////////////////////////////////////////////
*/
protected final int hash(int h) {
// or 184117; but this seems to give better hashing?
return ((h * 57321) >> 9) & _hashModulo;
// original lzf-c.c used this:
//return (((h ^ (h << 5)) >> (24 - HLOG) - h*5) & _hashModulo;
// but that didn't seem to provide better matches
}
}
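// Usage sketch (illustrative, not part of the original sources): writing a
// single chunk with a concrete encoder obtained from ChunkEncoderFactory
// (in com.ning.compress.lzf.util); the demo scaffolding is made up.
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import com.ning.compress.lzf.ChunkEncoder;
import com.ning.compress.lzf.util.ChunkEncoderFactory;

public class ChunkEncodeDemo {
    public static void main(String[] args) throws IOException {
        byte[] data = new byte[4000]; // all zeroes, so the chunk compresses well
        ChunkEncoder encoder = ChunkEncoderFactory.optimalInstance(data.length);
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        encoder.encodeAndWriteChunk(data, 0, data.length, out);
        encoder.close(); // releases hash table and encode buffer to the recycler
    }
}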
compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/LZF.java
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
* file except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
* OF ANY KIND, either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
package com.ning.compress.lzf;
import java.io.*;
import com.ning.compress.lzf.util.LZFFileInputStream;
import com.ning.compress.lzf.util.LZFFileOutputStream;
/**
* Simple command-line utility that can be used for testing LZF
* compression, or as rudimentary command-line tool.
* Arguments are the same as used by the "standard" lzf command line tool
*
* @author Tatu Saloranta (tatu@ning.com)
*/
public class LZF
{
public final static String SUFFIX = ".lzf";
protected void process(String[] args) throws IOException
{
if (args.length == 2) {
String oper = args[0];
boolean compress = "-c".equals(oper);
boolean toSystemOutput = !compress && "-o".equals(oper);
if (compress || toSystemOutput || "-d".equals(oper)) {
String filename = args[1];
File src = new File(filename);
if (!src.exists()) {
System.err.println("File '"+filename+"' does not exist.");
System.exit(1);
}
if (!compress && !filename.endsWith(SUFFIX)) {
System.err.println("File '"+filename+"' does end with expected suffix ('"+SUFFIX+"', won't decompress.");
System.exit(1);
}
if (compress) {
int inputLength = 0;
File resultFile = new File(filename+SUFFIX);
InputStream in = new FileInputStream(src);
OutputStream out = new LZFFileOutputStream(resultFile);
byte[] buffer = new byte[8192];
int bytesRead;
while ((bytesRead = in.read(buffer, 0, buffer.length)) != -1) {
inputLength += bytesRead;
out.write(buffer, 0, bytesRead);
}
in.close();
out.flush();
out.close();
System.out.printf("Compressed '%s' into '%s' (%d->%d bytes)\n",
src.getPath(), resultFile.getPath(),
inputLength, resultFile.length());
} else {
OutputStream out;
LZFFileInputStream in = new LZFFileInputStream(src);
File resultFile = null;
if (toSystemOutput) {
out = System.out;
} else {
resultFile = new File(filename.substring(0, filename.length() - SUFFIX.length()));
out = new FileOutputStream(resultFile);
}
int uncompLen = in.readAndWrite(out);
in.close();
out.flush();
out.close();
if (resultFile != null) {
System.out.printf("Uncompressed '%s' into '%s' (%d->%d bytes)\n",
src.getPath(), resultFile.getPath(),
src.length(), uncompLen);
}
}
return;
}
}
System.err.println("Usage: java "+getClass().getName()+" -c/-d/-o source-file");
System.err.println(" -d parameter: decompress to file");
System.err.println(" -c parameter: compress to file");
System.err.println(" -o parameter: decompress to stdout");
System.exit(1);
}
public static void main(String[] args) throws IOException {
new LZF().process(args);
}
}
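// Usage sketch (illustrative, not part of the original sources): invoking the
// command-line tool above from a shell; the jar name is an assumption based on
// this release's coordinates.
//
//   java -cp compress-lzf-1.0.3.jar com.ning.compress.lzf.LZF -c data.bin      # -> data.bin.lzf
//   java -cp compress-lzf-1.0.3.jar com.ning.compress.lzf.LZF -d data.bin.lzf  # -> data.bin
//   java -cp compress-lzf-1.0.3.jar com.ning.compress.lzf.LZF -o data.bin.lzf > data.bin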
compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/LZFChunk.java
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
* file except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
* OF ANY KIND, either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
package com.ning.compress.lzf;
import java.io.*;
/**
* Helper class used to store LZF encoded segments (compressed and non-compressed)
* that can be sequenced to produce LZF files/streams.
*
* @author Tatu Saloranta
*/
public class LZFChunk
{
/**
* Maximum length of literal run for LZF encoding.
*/
public static final int MAX_LITERAL = 1 << 5; // 32
/**
* Chunk length is limited by 2-byte length indicator, to 64k
*/
public static final int MAX_CHUNK_LEN = 0xFFFF;
/**
* Header can be either 7 bytes (compressed) or 5 bytes (uncompressed)
* long
*/
public static final int MAX_HEADER_LEN = 7;
public static final int HEADER_LEN_COMPRESSED = 7;
public static final int HEADER_LEN_NOT_COMPRESSED = 5;
public final static byte BYTE_Z = 'Z';
public final static byte BYTE_V = 'V';
public final static int BLOCK_TYPE_NON_COMPRESSED = 0;
public final static int BLOCK_TYPE_COMPRESSED = 1;
protected final byte[] _data;
protected LZFChunk _next;
private LZFChunk(byte[] data) { _data = data; }
/**
* Factory method for constructing compressed chunk
*/
public static LZFChunk createCompressed(int origLen, byte[] encData, int encPtr, int encLen)
{
byte[] result = new byte[encLen + HEADER_LEN_COMPRESSED];
result[0] = BYTE_Z;
result[1] = BYTE_V;
result[2] = BLOCK_TYPE_COMPRESSED;
result[3] = (byte) (encLen >> 8);
result[4] = (byte) encLen;
result[5] = (byte) (origLen >> 8);
result[6] = (byte) origLen;
System.arraycopy(encData, encPtr, result, HEADER_LEN_COMPRESSED, encLen);
return new LZFChunk(result);
}
public static int appendCompressedHeader(int origLen, int encLen, byte[] headerBuffer, int offset)
{
headerBuffer[offset++] = BYTE_Z;
headerBuffer[offset++] = BYTE_V;
headerBuffer[offset++] = BLOCK_TYPE_COMPRESSED;
headerBuffer[offset++] = (byte) (encLen >> 8);
headerBuffer[offset++] = (byte) encLen;
headerBuffer[offset++] = (byte) (origLen >> 8);
headerBuffer[offset++] = (byte) origLen;
return offset;
}
public static void writeCompressedHeader(int origLen, int encLen, OutputStream out, byte[] headerBuffer)
throws IOException
{
headerBuffer[0] = BYTE_Z;
headerBuffer[1] = BYTE_V;
headerBuffer[2] = BLOCK_TYPE_COMPRESSED;
headerBuffer[3] = (byte) (encLen >> 8);
headerBuffer[4] = (byte) encLen;
headerBuffer[5] = (byte) (origLen >> 8);
headerBuffer[6] = (byte) origLen;
out.write(headerBuffer, 0, HEADER_LEN_COMPRESSED);
}
/**
* Factory method for constructing compressed chunk
*/
public static LZFChunk createNonCompressed(byte[] plainData, int ptr, int len)
{
byte[] result = new byte[len + HEADER_LEN_NOT_COMPRESSED];
result[0] = BYTE_Z;
result[1] = BYTE_V;
result[2] = BLOCK_TYPE_NON_COMPRESSED;
result[3] = (byte) (len >> 8);
result[4] = (byte) len;
System.arraycopy(plainData, ptr, result, HEADER_LEN_NOT_COMPRESSED, len);
return new LZFChunk(result);
}
/**
* Method for appending specific content as non-compressed chunk, in
* given buffer.
*/
public static int appendNonCompressed(byte[] plainData, int ptr, int len,
byte[] outputBuffer, int outputPtr)
{
outputBuffer[outputPtr++] = BYTE_Z;
outputBuffer[outputPtr++] = BYTE_V;
outputBuffer[outputPtr++] = BLOCK_TYPE_NON_COMPRESSED;
outputBuffer[outputPtr++] = (byte) (len >> 8);
outputBuffer[outputPtr++] = (byte) len;
System.arraycopy(plainData, ptr, outputBuffer, outputPtr, len);
return outputPtr + len;
}
public static int appendNonCompressedHeader(int len, byte[] headerBuffer, int offset)
{
headerBuffer[offset++] = BYTE_Z;
headerBuffer[offset++] = BYTE_V;
headerBuffer[offset++] = BLOCK_TYPE_NON_COMPRESSED;
headerBuffer[offset++] = (byte) (len >> 8);
headerBuffer[offset++] = (byte) len;
return offset;
}
public static void writeNonCompressedHeader(int len, OutputStream out, byte[] headerBuffer)
throws IOException
{
headerBuffer[0] = BYTE_Z;
headerBuffer[1] = BYTE_V;
headerBuffer[2] = BLOCK_TYPE_NON_COMPRESSED;
headerBuffer[3] = (byte) (len >> 8);
headerBuffer[4] = (byte) len;
out.write(headerBuffer, 0, HEADER_LEN_NOT_COMPRESSED);
}
public void setNext(LZFChunk next) { _next = next; }
public LZFChunk next() { return _next; }
public int length() { return _data.length; }
public byte[] getData() { return _data; }
public int copyTo(byte[] dst, int ptr) {
int len = _data.length;
System.arraycopy(_data, 0, dst, ptr, len);
return ptr+len;
}
}
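// Usage sketch (illustrative, not part of the original sources): hand-assembling
// a non-compressed chunk with the static helpers above; the demo scaffolding is
// made up.
import com.ning.compress.lzf.LZFChunk;

public class ChunkLayoutDemo {
    public static void main(String[] args) {
        byte[] payload = { 1, 2, 3, 4 };
        byte[] out = new byte[LZFChunk.HEADER_LEN_NOT_COMPRESSED + payload.length];
        int end = LZFChunk.appendNonCompressed(payload, 0, payload.length, out, 0);
        // out now holds: 'Z', 'V', type 0, 2-byte big-endian length, then payload
        System.out.println("chunk length: " + end); // 9
    }
}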
compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/LZFCompressingInputStream.java
package com.ning.compress.lzf;
import java.io.IOException;
import java.io.InputStream;
import com.ning.compress.BufferRecycler;
import com.ning.compress.lzf.util.ChunkEncoderFactory;
/**
* Decorator {@link InputStream} implementation used for
* reading uncompressed data
* and compressing it on the fly, such that reads return compressed
* data.
* It is reverse of {@link LZFInputStream} (which instead uncompresses data).
*
* @author Tatu Saloranta
*
* @see com.ning.compress.lzf.LZFInputStream
*/
public class LZFCompressingInputStream extends InputStream
{
private final BufferRecycler _recycler;
private ChunkEncoder _encoder;
/**
* Stream used for reading data to be compressed
*/
protected final InputStream _inputStream;
/**
* Flag that indicates if we have already called 'inputStream.close()'
* (to avoid calling it multiple times)
*/
protected boolean _inputStreamClosed;
/**
* Flag that indicates whether we force full reads (reading of as many
* bytes as requested), or 'optimal' reads (up to as many as available,
* but at least one). Default is false, meaning that 'optimal' read
* is used.
*/
protected boolean _cfgFullReads = false;
/**
* Buffer in which uncompressed input is first read, before getting
* encoded in {@link #_encodedBytes}.
*/
protected byte[] _inputBuffer;
/**
* Buffer that contains compressed data that is returned to readers.
*/
protected byte[] _encodedBytes;
/**
     * The current position (next byte to return) in the encoded-bytes buffer.
*/
protected int _bufferPosition = 0;
/**
     * Number of valid bytes in the encoded-bytes buffer
*/
protected int _bufferLength = 0;
/**
* Number of bytes read from the underlying {@link #_inputStream}
*/
protected int _readCount = 0;
/*
///////////////////////////////////////////////////////////////////////
// Construction, configuration
///////////////////////////////////////////////////////////////////////
*/
public LZFCompressingInputStream(InputStream in)
{
this(null, in, BufferRecycler.instance());
}
public LZFCompressingInputStream(final ChunkEncoder encoder, InputStream in)
{
this(encoder, in, null);
}
public LZFCompressingInputStream(final ChunkEncoder encoder, InputStream in, BufferRecycler bufferRecycler)
{
// may be passed by caller, or could be null
_encoder = encoder;
_inputStream = in;
if (bufferRecycler==null) {
bufferRecycler = (encoder!=null) ? _encoder._recycler : BufferRecycler.instance();
}
_recycler = bufferRecycler;
_inputBuffer = bufferRecycler.allocInputBuffer(LZFChunk.MAX_CHUNK_LEN);
// let's not yet allocate encoding buffer; don't know optimal size
}
/**
* Method that can be used define whether reads should be "full" or
* "optimal": former means that full compressed blocks are read right
* away as needed, optimal that only smaller chunks are read at a time,
* more being read as needed.
*/
public void setUseFullReads(boolean b) {
_cfgFullReads = b;
}
/*
///////////////////////////////////////////////////////////////////////
// InputStream implementation
///////////////////////////////////////////////////////////////////////
*/
@Override
public int available()
{
if (_inputStreamClosed) { // javadocs suggest 0 for closed as well (not -1)
return 0;
}
int left = (_bufferLength - _bufferPosition);
return (left <= 0) ? 0 : left;
}
@Override
public int read() throws IOException
{
if (!readyBuffer()) {
return -1;
}
return _encodedBytes[_bufferPosition++] & 255;
}
@Override
public int read(final byte[] buffer) throws IOException
{
return read(buffer, 0, buffer.length);
}
@Override
public int read(final byte[] buffer, int offset, int length) throws IOException
{
if (length < 1) {
return 0;
}
if (!readyBuffer()) {
return -1;
}
// First let's read however much data we happen to have...
int chunkLength = Math.min(_bufferLength - _bufferPosition, length);
System.arraycopy(_encodedBytes, _bufferPosition, buffer, offset, chunkLength);
_bufferPosition += chunkLength;
if (chunkLength == length || !_cfgFullReads) {
return chunkLength;
}
// Need more data, then
int totalRead = chunkLength;
do {
offset += chunkLength;
if (!readyBuffer()) {
break;
}
chunkLength = Math.min(_bufferLength - _bufferPosition, (length - totalRead));
System.arraycopy(_encodedBytes, _bufferPosition, buffer, offset, chunkLength);
_bufferPosition += chunkLength;
totalRead += chunkLength;
} while (totalRead < length);
return totalRead;
}
@Override
public void close() throws IOException
{
_bufferPosition = _bufferLength = 0;
byte[] buf = _encodedBytes;
if (buf != null) {
_encodedBytes = null;
_recycler.releaseEncodeBuffer(buf);
}
if (_encoder != null) {
_encoder.close();
}
_closeInput();
}
private void _closeInput() throws IOException
{
byte[] buf = _inputBuffer;
if (buf != null) {
_inputBuffer = null;
_recycler.releaseInputBuffer(buf);
}
if (!_inputStreamClosed) {
_inputStreamClosed = true;
_inputStream.close();
}
}
/**
* Overridden to just skip at most a single chunk at a time
*/
@Override
public long skip(long n) throws IOException
{
if (_inputStreamClosed) {
return -1;
}
int left = (_bufferLength - _bufferPosition);
// if none left, must read more:
if (left <= 0) {
// otherwise must read more to skip...
int b = read();
if (b < 0) { // EOF
return -1;
}
// push it back to get accurate skip count
--_bufferPosition;
left = (_bufferLength - _bufferPosition);
}
// either way, just skip whatever we have decoded
if (left > n) {
left = (int) n;
}
_bufferPosition += left;
return left;
}
/*
///////////////////////////////////////////////////////////////////////
// Internal methods
///////////////////////////////////////////////////////////////////////
*/
/**
* Fill the uncompressed bytes buffer by reading the underlying inputStream.
* @throws IOException
*/
protected boolean readyBuffer() throws IOException
{
if (_bufferPosition < _bufferLength) {
return true;
}
if (_inputStreamClosed) {
return false;
}
// Ok: read as much as we can from input source first
int count = _inputStream.read(_inputBuffer, 0, _inputBuffer.length);
if (count < 0) { // if no input read, it's EOF
_closeInput(); // and we can close input source as well
return false;
}
int chunkLength = count;
int left = _inputBuffer.length - count;
while ((count = _inputStream.read(_inputBuffer, chunkLength, left)) > 0) {
chunkLength += count;
left -= count;
if (left < 1) {
break;
}
}
_bufferPosition = 0;
// Ok: if we don't yet have an encoder (and buffer for it), let's get one
if (_encoder == null) {
// need 7 byte header, plus regular max buffer size:
int bufferLen = chunkLength + ((chunkLength + 31) >> 5) + 7;
_encoder = ChunkEncoderFactory.optimalNonAllocatingInstance(bufferLen, _recycler);
}
if (_encodedBytes == null) {
int bufferLen = chunkLength + ((chunkLength + 31) >> 5) + 7;
_encodedBytes = _recycler.allocEncodingBuffer(bufferLen);
}
// offset of 7 so we can prepend header as necessary
int encodeEnd = _encoder.tryCompress(_inputBuffer, 0, chunkLength, _encodedBytes, 7);
// but did it compress?
if (encodeEnd < (chunkLength + 5)) { // yes! (compared to 5 byte uncomp prefix, data)
// prepend header in situ
LZFChunk.appendCompressedHeader(chunkLength, encodeEnd-7, _encodedBytes, 0);
_bufferLength = encodeEnd;
} else { // no -- so sad...
int ptr = LZFChunk.appendNonCompressedHeader(chunkLength, _encodedBytes, 0);
// TODO: figure out a way to avoid this copy; need a header
System.arraycopy(_inputBuffer, 0, _encodedBytes, ptr, chunkLength);
_bufferLength = ptr + chunkLength;
}
if (count < 0) { // did we get end-of-input?
_closeInput();
}
return true;
}
}
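// Usage sketch (illustrative, not part of the original sources): compressing on
// the read path, e.g. to send LZF over the wire without buffering the whole
// input; the demo scaffolding is made up.
import java.io.*;
import com.ning.compress.lzf.LZFCompressingInputStream;

public class CompressOnReadDemo {
    public static void main(String[] args) throws IOException {
        InputStream raw = new ByteArrayInputStream(new byte[10000]); // stands in for a file/socket
        LZFCompressingInputStream lzf = new LZFCompressingInputStream(raw);
        ByteArrayOutputStream sink = new ByteArrayOutputStream();
        byte[] buf = new byte[8192];
        for (int n; (n = lzf.read(buf)) != -1; ) {
            sink.write(buf, 0, n); // sink receives LZF chunks, not raw bytes
        }
        lzf.close(); // closes the underlying stream and returns buffers to the recycler
    }
}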
compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/LZFDecoder.java
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
* file except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
* OF ANY KIND, either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
package com.ning.compress.lzf;
import java.util.concurrent.atomic.AtomicReference;
import com.ning.compress.lzf.util.ChunkDecoderFactory;
/**
* Decoder that handles decoding of sequence of encoded LZF chunks,
* combining them into a single contiguous result byte array.
* This class has been mostly replaced by
* {@link ChunkDecoder}, although static methods are left here
* and may still be used for convenience.
* All static methods use {@link ChunkDecoderFactory#optimalInstance}
* to find actual {@link ChunkDecoder} instance to use.
*
* @author Tatu Saloranta (tatu.saloranta@iki.fi)
*
* @see com.ning.compress.lzf.ChunkDecoder
*/
public class LZFDecoder
{
/**
* Lazily initialized "fast" instance that may use sun.misc.Unsafe
* to speed up decompression
*/
    protected final static AtomicReference<ChunkDecoder> _fastDecoderRef
        = new AtomicReference<ChunkDecoder>();
    /**
     * Lazily initialized "safe" instance that does NOT use <code>sun.misc.Unsafe</code>
     * for decompression, just standard JDK functionality.
     */
    protected final static AtomicReference<ChunkDecoder> _safeDecoderRef
        = new AtomicReference<ChunkDecoder>();
    /**
     * Accessor method that can be used to obtain {@link ChunkDecoder}
     * that may use <code>sun.misc.Unsafe</code> for memory access.
     */
public static ChunkDecoder fastDecoder() {
// race conditions are ok here, we don't really mind
ChunkDecoder dec = _fastDecoderRef.get();
        if (dec == null) {
dec = ChunkDecoderFactory.optimalInstance();
_fastDecoderRef.compareAndSet(null, dec);
}
return dec;
}
/**
* Accessor method that can be used to obtain {@link ChunkDecoder}
* that only uses standard JDK access methods, and should work on
* all Java platforms and JVMs.
*/
public static ChunkDecoder safeDecoder() {
// race conditions are ok here, we don't really mind
ChunkDecoder dec = _safeDecoderRef.get();
        if (dec == null) {
dec = ChunkDecoderFactory.safeInstance();
_safeDecoderRef.compareAndSet(null, dec);
}
return dec;
}
/*
///////////////////////////////////////////////////////////////////////
// Basic API, general
///////////////////////////////////////////////////////////////////////
*/
/**
* Helper method that checks resulting size of an LZF chunk, regardless of
* whether it contains compressed or uncompressed contents.
*/
public static int calculateUncompressedSize(byte[] data, int offset, int length) throws LZFException {
        return ChunkDecoder.calculateUncompressedSize(data, offset, length);
}
/*
///////////////////////////////////////////////////////////////////////
// Basic API, fast decode methods
///////////////////////////////////////////////////////////////////////
*/
public static byte[] decode(final byte[] inputBuffer) throws LZFException {
return fastDecoder().decode(inputBuffer, 0, inputBuffer.length);
}
public static byte[] decode(final byte[] inputBuffer, int offset, int length) throws LZFException {
return fastDecoder().decode(inputBuffer, offset, length);
}
public static int decode(final byte[] inputBuffer, final byte[] targetBuffer) throws LZFException {
return fastDecoder().decode(inputBuffer, 0, inputBuffer.length, targetBuffer);
}
public static int decode(final byte[] sourceBuffer, int offset, int length, final byte[] targetBuffer)
throws LZFException {
return fastDecoder().decode(sourceBuffer, offset, length, targetBuffer);
}
/*
///////////////////////////////////////////////////////////////////////
// Basic API, "safe" decode methods
///////////////////////////////////////////////////////////////////////
*/
public static byte[] safeDecode(final byte[] inputBuffer) throws LZFException {
return safeDecoder().decode(inputBuffer, 0, inputBuffer.length);
}
public static byte[] safeDecode(final byte[] inputBuffer, int offset, int length) throws LZFException {
return safeDecoder().decode(inputBuffer, offset, length);
}
public static int safeDecode(final byte[] inputBuffer, final byte[] targetBuffer) throws LZFException {
return safeDecoder().decode(inputBuffer, 0, inputBuffer.length, targetBuffer);
}
public static int safeDecode(final byte[] sourceBuffer, int offset, int length, final byte[] targetBuffer)
throws LZFException {
return safeDecoder().decode(sourceBuffer, offset, length, targetBuffer);
}
}
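// Usage sketch (illustrative, not part of the original sources): the typical
// one-shot round trip through the static convenience API above; the demo
// scaffolding is made up.
import com.ning.compress.lzf.LZFDecoder;
import com.ning.compress.lzf.LZFEncoder;
import com.ning.compress.lzf.LZFException;

public class LZFRoundTripDemo {
    public static void main(String[] args) throws LZFException {
        byte[] original = "repeat repeat repeat repeat repeat".getBytes();
        byte[] compressed = LZFEncoder.encode(original);
        byte[] viaFast = LZFDecoder.decode(compressed);     // may use sun.misc.Unsafe
        byte[] viaSafe = LZFDecoder.safeDecode(compressed); // pure-JDK fallback
    }
}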
compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/LZFEncoder.java
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
* file except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
* OF ANY KIND, either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
package com.ning.compress.lzf;
import com.ning.compress.BufferRecycler;
import com.ning.compress.lzf.util.ChunkEncoderFactory;
/**
* Encoder that handles splitting of input into chunks to encode,
* calls {@link ChunkEncoder} to compress individual chunks and
* combines resulting chunks into contiguous output byte array.
*
* @author Tatu Saloranta
*/
public class LZFEncoder
{
/* Approximate maximum size for a full chunk, in case where it does not compress
* at all. Such chunks are converted to uncompressed chunks, but during compression
* process this amount of space is still needed.
*/
public final static int MAX_CHUNK_RESULT_SIZE = LZFChunk.MAX_HEADER_LEN + LZFChunk.MAX_CHUNK_LEN + (LZFChunk.MAX_CHUNK_LEN * 32 / 31);
// Static methods only, no point in instantiating
private LZFEncoder() { }
/*
///////////////////////////////////////////////////////////////////////
// Helper methods
///////////////////////////////////////////////////////////////////////
*/
/**
* Helper method that can be used to estimate maximum space needed to
* try compression of given amount of data. This is slightly larger
* than maximum resulting content since compressor has a choice of
* uncompressed chunks to use, but that is only done after compression
* fails to reduce size; and this temporary expansion of up to 3.3% or so
* (1 indicator for every 31 bytes of uncompressed data)
* is more than what eventual expansion would be (5 bytes header per
* each uncompressed chunk, usually 0.01%).
*/
public static int estimateMaxWorkspaceSize(int inputSize)
{
// single chunk; give a rough estimate with +5% (1 + 1/32 + 1/64)
if (inputSize <= LZFChunk.MAX_CHUNK_LEN) {
return LZFChunk.MAX_HEADER_LEN + inputSize + (inputSize >> 5) + (inputSize >> 6);
}
// one more special case, 2 chunks
inputSize -= LZFChunk.MAX_CHUNK_LEN;
        if (inputSize <= LZFChunk.MAX_CHUNK_LEN) { // uncompressed chunk has only a 5 byte header, but assume max
return MAX_CHUNK_RESULT_SIZE + inputSize + LZFChunk.MAX_HEADER_LEN;
}
// check number of chunks we should be creating (assuming use of full chunks)
int chunkCount = 1 + ((inputSize + (LZFChunk.MAX_CHUNK_LEN-1)) / LZFChunk.MAX_CHUNK_LEN);
return MAX_CHUNK_RESULT_SIZE + chunkCount * (LZFChunk.MAX_CHUNK_LEN + LZFChunk.MAX_HEADER_LEN);
}
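    // Worked example (illustrative): for a 100000 byte input, the first full
    // chunk takes MAX_CHUNK_LEN (65535) bytes, leaving 34465, which fits in a
    // second chunk; the method above therefore returns
    // MAX_CHUNK_RESULT_SIZE + 34465 + MAX_HEADER_LEN.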
/*
///////////////////////////////////////////////////////////////////////
// Encoding methods, blocks
///////////////////////////////////////////////////////////////////////
*/
    /**
     * Method for compressing given input data using LZF encoding and
     * block structure (compatible with lzf command line utility).
     * Result consists of a sequence of chunks.
     */
    public static byte[] encode(byte[] data) {
        return encode(data, 0, data.length);
    }
}
compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/LZFUncompressor.java
package com.ning.compress.lzf;
import java.io.IOException;
import com.ning.compress.BufferRecycler;
import com.ning.compress.DataHandler;
import com.ning.compress.Uncompressor;
import com.ning.compress.lzf.util.ChunkDecoderFactory;
public class LZFUncompressor extends Uncompressor
{
    /**
     * Buffer used for data uncompressed from <code>_inputBuffer</code>.
     */
protected byte[] _decodeBuffer;
/**
* Number of bytes that have been buffered in {@link #_inputBuffer} to be
* uncompressed; or copied directly from uncompressed block.
*/
protected int _bytesReadFromBlock;
/*
///////////////////////////////////////////////////////////////////////
// Instance creation
///////////////////////////////////////////////////////////////////////
*/
public LZFUncompressor(DataHandler handler) {
this(handler, ChunkDecoderFactory.optimalInstance(), BufferRecycler.instance());
}
public LZFUncompressor(DataHandler handler, BufferRecycler bufferRecycler) {
this(handler, ChunkDecoderFactory.optimalInstance(), bufferRecycler);
}
public LZFUncompressor(DataHandler handler, ChunkDecoder dec)
{
this(handler, dec, BufferRecycler.instance());
}
public LZFUncompressor(DataHandler handler, ChunkDecoder dec, BufferRecycler bufferRecycler)
{
_handler = handler;
_decoder = dec;
_recycler = bufferRecycler;
}
/*
///////////////////////////////////////////////////////////////////////
// Uncompressor API implementation
///////////////////////////////////////////////////////////////////////
*/
@Override
public boolean feedCompressedData(byte[] comp, int offset, int len) throws IOException
{
final int end = offset + len;
while (offset < end) {
byte b = comp[offset++];
switch (_state) {
case STATE_INITIAL:
if (b != LZFChunk.BYTE_Z) {
_reportBadHeader(comp, offset, len, 0);
}
if (offset >= end) {
_state = STATE_HEADER_Z_GOTTEN;
break;
}
b = comp[offset++];
// fall through
case STATE_HEADER_Z_GOTTEN:
if (b != LZFChunk.BYTE_V) {
_reportBadHeader(comp, offset, len, 1);
}
if (offset >= end) {
_state = STATE_HEADER_ZV_GOTTEN;
break;
}
b = comp[offset++];
// fall through
case STATE_HEADER_ZV_GOTTEN:
_bytesReadFromBlock = 0;
{
int type = b & 0xFF;
if (type != LZFChunk.BLOCK_TYPE_COMPRESSED) {
if (type == LZFChunk.BLOCK_TYPE_NON_COMPRESSED) {
_state = STATE_HEADER_UNCOMPRESSED_0;
continue;
}
_reportBadBlockType(comp, offset, len, type);
}
}
_state = STATE_HEADER_COMPRESSED_0;
if (offset >= end) {
break;
}
b = comp[offset++];
// fall through for compressed blocks
case STATE_HEADER_COMPRESSED_0: // first byte of compressed-length
_compressedLength = b & 0xFF;
if (offset >= end) {
_state = STATE_HEADER_COMPRESSED_1;
break;
}
b = comp[offset++];
// fall through
case STATE_HEADER_COMPRESSED_1:
_compressedLength = (_compressedLength << 8) + (b & 0xFF);
if (offset >= end) {
_state = STATE_HEADER_COMPRESSED_2;
break;
}
b = comp[offset++];
// fall through
case STATE_HEADER_COMPRESSED_2:
_uncompressedLength = b & 0xFF;
if (offset >= end) {
_state = STATE_HEADER_COMPRESSED_3;
break;
}
b = comp[offset++];
// fall through
case STATE_HEADER_COMPRESSED_3:
_uncompressedLength = (_uncompressedLength << 8) + (b & 0xFF);
_state = STATE_HEADER_COMPRESSED_BUFFERING;
if (offset >= end) {
break;
}
b = comp[offset++];
// fall through
case STATE_HEADER_COMPRESSED_BUFFERING:
offset = _handleCompressed(comp, --offset, end);
// either state changes, or we run out of input...
break;
case STATE_HEADER_UNCOMPRESSED_0:
_uncompressedLength = b & 0xFF;
if (offset >= end) {
_state = STATE_HEADER_UNCOMPRESSED_1;
break;
}
b = comp[offset++];
// fall through
case STATE_HEADER_UNCOMPRESSED_1:
_uncompressedLength = (_uncompressedLength << 8) + (b & 0xFF);
_state = STATE_HEADER_UNCOMPRESSED_STREAMING;
if (offset >= end) {
break;
}
b = comp[offset++];
// fall through
case STATE_HEADER_UNCOMPRESSED_STREAMING:
offset = _handleUncompressed(comp, --offset, end);
if (_terminated) {
break;
}
// All done?
if (_bytesReadFromBlock == _uncompressedLength) {
_state = STATE_INITIAL;
}
break;
}
}
return !_terminated;
}
@Override
public void complete() throws IOException
{
byte[] b = _inputBuffer;
if (b != null) {
_inputBuffer = null;
_recycler.releaseInputBuffer(b);
}
b = _decodeBuffer;
if (b != null) {
_decodeBuffer = null;
_recycler.releaseDecodeBuffer(b);
}
// 24-May-2012, tatu: Should we call this here; or fail with exception?
_handler.allDataHandled();
if (!_terminated) {
if (_state != STATE_INITIAL) {
if (_state == STATE_HEADER_COMPRESSED_BUFFERING) {
throw new LZFException("Incomplete compressed LZF block; only got "+_bytesReadFromBlock
+" bytes, needed "+_compressedLength);
}
if (_state == STATE_HEADER_UNCOMPRESSED_STREAMING) {
throw new LZFException("Incomplete uncompressed LZF block; only got "+_bytesReadFromBlock
+" bytes, needed "+_uncompressedLength);
}
throw new LZFException("Incomplete LZF block; decoding state = "+_state);
}
}
}
/*
///////////////////////////////////////////////////////////////////////
// Helper methods, decompression
///////////////////////////////////////////////////////////////////////
*/
private final int _handleUncompressed(byte[] comp, int offset, int end) throws IOException
{
// Simple, we just do pass through...
int amount = Math.min(end-offset, _uncompressedLength-_bytesReadFromBlock);
if (!_handler.handleData(comp, offset, amount)) {
_terminated = true;
}
_bytesReadFromBlock += amount;
return offset + amount;
}
private final int _handleCompressed(byte[] comp, int offset, int end) throws IOException
{
// One special case: if we get the whole block, can avoid buffering:
int available = end-offset;
if (_bytesReadFromBlock == 0 && available >= _compressedLength) {
_uncompress(comp, offset, _compressedLength);
offset += _compressedLength;
_state = STATE_INITIAL;
return offset;
}
// otherwise need to buffer
if (_inputBuffer == null) {
_inputBuffer = _recycler.allocInputBuffer(LZFChunk.MAX_CHUNK_LEN);
}
int amount = Math.min(available, _compressedLength - _bytesReadFromBlock);
System.arraycopy(comp, offset, _inputBuffer, _bytesReadFromBlock, amount);
offset += amount;
_bytesReadFromBlock += amount;
// Got it all?
if (_bytesReadFromBlock == _compressedLength) {
_uncompress(_inputBuffer, 0, _compressedLength);
_state = STATE_INITIAL;
}
return offset;
}
private final void _uncompress(byte[] src, int srcOffset, int len) throws IOException
{
if (_decodeBuffer == null) {
_decodeBuffer = _recycler.allocDecodeBuffer(LZFChunk.MAX_CHUNK_LEN);
}
_decoder.decodeChunk(src, srcOffset, _decodeBuffer, 0, _uncompressedLength);
_handler.handleData(_decodeBuffer, 0, _uncompressedLength);
}
/*
///////////////////////////////////////////////////////////////////////
// Helper methods, error reporting
///////////////////////////////////////////////////////////////////////
*/
protected void _reportBadHeader(byte[] comp, int nextOffset, int len, int relative)
throws IOException
{
char exp = (relative == 0) ? 'Z' : 'V';
--nextOffset;
throw new LZFException("Bad block: byte #"+relative+" of block header not '"
+exp+"' (0x"+Integer.toHexString(exp)
+") but 0x"+Integer.toHexString(comp[nextOffset] & 0xFF)
+" (at "+(nextOffset-1)+"/"+(len)+")");
}
protected void _reportBadBlockType(byte[] comp, int nextOffset, int len, int type)
throws IOException
{
throw new LZFException("Bad block: unrecognized type 0x"+Integer.toHexString(type & 0xFF)
+" (at "+(nextOffset-1)+"/"+len+")");
}
}
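// Usage sketch (illustrative, not part of the original sources): push-style
// decoding, feeding compressed bytes in arbitrary slices as a non-blocking
// transport might deliver them; the demo scaffolding is made up.
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import com.ning.compress.DataHandler;
import com.ning.compress.lzf.LZFEncoder;
import com.ning.compress.lzf.LZFUncompressor;

public class PushDecodeDemo {
    public static void main(String[] args) throws IOException {
        final ByteArrayOutputStream restored = new ByteArrayOutputStream();
        LZFUncompressor unc = new LZFUncompressor(new DataHandler() {
            @Override
            public boolean handleData(byte[] buffer, int offset, int len) {
                restored.write(buffer, offset, len);
                return true; // keep feeding
            }
            @Override
            public void allDataHandled() { }
        });
        byte[] compressed = LZFEncoder.encode(new byte[5000]);
        for (int i = 0; i < compressed.length; i += 7) { // feed 7 bytes at a time
            if (!unc.feedCompressedData(compressed, i, Math.min(7, compressed.length - i))) {
                break; // handler asked to stop
            }
        }
        unc.complete(); // releases buffers, verifies no block was left incomplete
    }
}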
compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/impl/UnsafeChunkDecoder.java
package com.ning.compress.lzf.impl;
import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Field;
import sun.misc.Unsafe;
import com.ning.compress.lzf.*;
/**
 * Highly optimized {@link ChunkDecoder} implementation that uses
 * Sun JDK's Unsafe class (which may be included by other JDKs as well;
 * IBM's apparently does).
 */
compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/impl/UnsafeChunkEncoder.java
package com.ning.compress.lzf.impl;
import java.lang.reflect.Field;
import sun.misc.Unsafe;
import com.ning.compress.BufferRecycler;
import com.ning.compress.lzf.ChunkEncoder;
import com.ning.compress.lzf.LZFChunk;
/**
 * {@link ChunkEncoder} implementation that uses <code>sun.misc.Unsafe</code>
 * functionality, which gives nice extra boost for speed.
 *
 * @author Tatu Saloranta (tatu.saloranta@iki.fi)
 */
@SuppressWarnings("restriction")
public abstract class UnsafeChunkEncoder
extends ChunkEncoder
{
// // Our Nitro Booster, mr. Unsafe!
protected static final Unsafe unsafe;
static {
try {
Field theUnsafe = Unsafe.class.getDeclaredField("theUnsafe");
theUnsafe.setAccessible(true);
unsafe = (Unsafe) theUnsafe.get(null);
}
catch (Exception e) {
throw new RuntimeException(e);
}
}
protected static final long BYTE_ARRAY_OFFSET = unsafe.arrayBaseOffset(byte[].class);
protected static final long BYTE_ARRAY_OFFSET_PLUS2 = BYTE_ARRAY_OFFSET + 2;
public UnsafeChunkEncoder(int totalLength) {
super(totalLength);
}
public UnsafeChunkEncoder(int totalLength, boolean bogus) {
super(totalLength, bogus);
}
public UnsafeChunkEncoder(int totalLength, BufferRecycler bufferRecycler) {
super(totalLength, bufferRecycler);
}
public UnsafeChunkEncoder(int totalLength, BufferRecycler bufferRecycler, boolean bogus) {
super(totalLength, bufferRecycler, bogus);
}
/*
///////////////////////////////////////////////////////////////////////
// Shared helper methods
///////////////////////////////////////////////////////////////////////
*/
protected final static int _copyPartialLiterals(byte[] in, int inPos, byte[] out, int outPos,
int literals)
{
out[outPos++] = (byte) (literals-1);
// Here use of Unsafe is clear win:
// System.arraycopy(in, inPos-literals, out, outPos, literals);
long rawInPtr = BYTE_ARRAY_OFFSET + inPos - literals;
long rawOutPtr= BYTE_ARRAY_OFFSET + outPos;
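        // Copy the literal run in 8-byte words: the fall-through cases write 3,
        // 2 or 1 full words (literals is below 32 here); the 1-7 byte remainder
        // is handled below with a single 4 or 8 byte slack write.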
switch (literals >> 3) {
case 3:
unsafe.putLong(out, rawOutPtr, unsafe.getLong(in, rawInPtr));
rawInPtr += 8;
rawOutPtr += 8;
case 2:
unsafe.putLong(out, rawOutPtr, unsafe.getLong(in, rawInPtr));
rawInPtr += 8;
rawOutPtr += 8;
case 1:
unsafe.putLong(out, rawOutPtr, unsafe.getLong(in, rawInPtr));
rawInPtr += 8;
rawOutPtr += 8;
}
int left = (literals & 7);
if (left > 4) {
unsafe.putLong(out, rawOutPtr, unsafe.getLong(in, rawInPtr));
} else {
unsafe.putInt(out, rawOutPtr, unsafe.getInt(in, rawInPtr));
}
return outPos+literals;
}
protected final static int _copyLongLiterals(byte[] in, int inPos, byte[] out, int outPos,
int literals)
{
inPos -= literals;
long rawInPtr = BYTE_ARRAY_OFFSET + inPos;
long rawOutPtr = BYTE_ARRAY_OFFSET + outPos;
while (literals >= LZFChunk.MAX_LITERAL) {
out[outPos++] = (byte) 31;
++rawOutPtr;
unsafe.putLong(out, rawOutPtr, unsafe.getLong(in, rawInPtr));
rawInPtr += 8;
rawOutPtr += 8;
unsafe.putLong(out, rawOutPtr, unsafe.getLong(in, rawInPtr));
rawInPtr += 8;
rawOutPtr += 8;
unsafe.putLong(out, rawOutPtr, unsafe.getLong(in, rawInPtr));
rawInPtr += 8;
rawOutPtr += 8;
unsafe.putLong(out, rawOutPtr, unsafe.getLong(in, rawInPtr));
rawInPtr += 8;
rawOutPtr += 8;
inPos += LZFChunk.MAX_LITERAL;
outPos += LZFChunk.MAX_LITERAL;
literals -= LZFChunk.MAX_LITERAL;
}
if (literals > 0) {
return _copyPartialLiterals(in, inPos+literals, out, outPos, literals);
}
return outPos;
}
protected final static int _copyFullLiterals(byte[] in, int inPos, byte[] out, int outPos)
{
// literals == 32
out[outPos++] = (byte) 31;
long rawInPtr = BYTE_ARRAY_OFFSET + inPos - 32;
long rawOutPtr = BYTE_ARRAY_OFFSET + outPos;
unsafe.putLong(out, rawOutPtr, unsafe.getLong(in, rawInPtr));
rawInPtr += 8;
rawOutPtr += 8;
unsafe.putLong(out, rawOutPtr, unsafe.getLong(in, rawInPtr));
rawInPtr += 8;
rawOutPtr += 8;
unsafe.putLong(out, rawOutPtr, unsafe.getLong(in, rawInPtr));
rawInPtr += 8;
rawOutPtr += 8;
unsafe.putLong(out, rawOutPtr, unsafe.getLong(in, rawInPtr));
return (outPos + 32);
}
protected final static int _handleTail(byte[] in, int inPos, int inEnd, byte[] out, int outPos,
int literals)
{
while (inPos < inEnd) {
++inPos;
++literals;
if (literals == LZFChunk.MAX_LITERAL) {
out[outPos++] = (byte) (literals-1); // <= out[outPos - literals - 1] = MAX_LITERAL_MINUS_1;
System.arraycopy(in, inPos-literals, out, outPos, literals);
outPos += literals;
literals = 0;
}
}
if (literals > 0) {
out[outPos++] = (byte) (literals - 1);
System.arraycopy(in, inPos-literals, out, outPos, literals);
outPos += literals;
}
return outPos;
}
protected final static int _findTailMatchLength(final byte[] in, int ptr1, int ptr2, final int maxPtr1)
{
final int start1 = ptr1;
while (ptr1 < maxPtr1 && in[ptr1] == in[ptr2]) {
++ptr1;
++ptr2;
}
        return ptr1 - start1 + 1;
}
}
compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/impl/UnsafeChunkEncoderBE.java
package com.ning.compress.lzf.impl;
import com.ning.compress.BufferRecycler;
import com.ning.compress.lzf.LZFChunk;
/**
* Implementation to use on Big-Endian architectures.
*/
@SuppressWarnings("restriction")
public final class UnsafeChunkEncoderBE
extends UnsafeChunkEncoder
{
public UnsafeChunkEncoderBE(int totalLength) {
super(totalLength);
}
public UnsafeChunkEncoderBE(int totalLength, boolean bogus) {
super(totalLength, bogus);
}
public UnsafeChunkEncoderBE(int totalLength, BufferRecycler bufferRecycler) {
super(totalLength, bufferRecycler);
}
public UnsafeChunkEncoderBE(int totalLength, BufferRecycler bufferRecycler, boolean bogus) {
super(totalLength, bufferRecycler, bogus);
}
@Override
protected int tryCompress(byte[] in, int inPos, int inEnd, byte[] out, int outPos)
{
final int[] hashTable = _hashTable;
int literals = 0;
inEnd -= TAIL_LENGTH;
final int firstPos = inPos; // so that we won't have back references across block boundary
int seen = _getInt(in, inPos) >> 16;
while (inPos < inEnd) {
seen = (seen << 8) + (in[inPos + 2] & 255);
int off = hash(seen);
int ref = hashTable[off];
hashTable[off] = inPos;
// First expected common case: no back-ref (for whatever reason)
if ((ref >= inPos) // can't refer forward (i.e. leftovers)
|| (ref < firstPos) // or to previous block
|| (off = inPos - ref) > MAX_OFF
|| ((seen << 8) != (_getInt(in, ref-1) << 8))) {
++inPos;
++literals;
if (literals == LZFChunk.MAX_LITERAL) {
outPos = _copyFullLiterals(in, inPos, out, outPos);
literals = 0;
}
continue;
}
// match
int maxLen = inEnd - inPos + 2;
if (maxLen > MAX_REF) {
maxLen = MAX_REF;
}
if (literals > 0) {
outPos = _copyPartialLiterals(in, inPos, out, outPos, literals);
literals = 0;
}
int len = _findMatchLength(in, ref+3, inPos+3, ref+maxLen);
--off; // was off by one earlier
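            // LZF back-reference encoding: the short form packs the length into
            // the top 3 bits and a 13-bit offset into the rest; lengths of 7 and
            // above use escape value 7 plus an extra length byte (len - 7).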
if (len < 7) {
out[outPos++] = (byte) ((off >> 8) + (len << 5));
} else {
out[outPos++] = (byte) ((off >> 8) + (7 << 5));
out[outPos++] = (byte) (len - 7);
}
out[outPos++] = (byte) off;
inPos += len;
seen = _getInt(in, inPos);
hashTable[hash(seen >> 8)] = inPos;
++inPos;
hashTable[hash(seen)] = inPos;
++inPos;
}
// try offlining the tail
return _handleTail(in, inPos, inEnd+4, out, outPos, literals);
}
private final static int _getInt(final byte[] in, final int inPos) {
return unsafe.getInt(in, BYTE_ARRAY_OFFSET + inPos);
}
/*
///////////////////////////////////////////////////////////////////////
// Methods for finding length of a back-reference
///////////////////////////////////////////////////////////////////////
*/
private final static int _findMatchLength(final byte[] in, int ptr1, int ptr2, final int maxPtr1)
{
// Expect at least 8 bytes to check for fast case; offline others
if ((ptr1 + 8) >= maxPtr1) { // rare case, offline
return _findTailMatchLength(in, ptr1, ptr2, maxPtr1);
}
// short matches common, so start with specialized comparison
// NOTE: we know that we have 4 bytes of slack before end, so this is safe:
int i1 = unsafe.getInt(in, BYTE_ARRAY_OFFSET + ptr1);
int i2 = unsafe.getInt(in, BYTE_ARRAY_OFFSET + ptr2);
if (i1 != i2) {
return 1 + _leadingBytes(i1, i2);
}
ptr1 += 4;
ptr2 += 4;
i1 = unsafe.getInt(in, BYTE_ARRAY_OFFSET + ptr1);
i2 = unsafe.getInt(in, BYTE_ARRAY_OFFSET + ptr2);
if (i1 != i2) {
return 5 + _leadingBytes(i1, i2);
}
return _findLongMatchLength(in, ptr1+4, ptr2+4, maxPtr1);
}
private final static int _findLongMatchLength(final byte[] in, int ptr1, int ptr2, final int maxPtr1)
{
final int base = ptr1 - 9;
// and then just loop with longs if we get that far
final int longEnd = maxPtr1-8;
while (ptr1 <= longEnd) {
long l1 = unsafe.getLong(in, BYTE_ARRAY_OFFSET + ptr1);
long l2 = unsafe.getLong(in, BYTE_ARRAY_OFFSET + ptr2);
if (l1 != l2) {
return ptr1 - base + _leadingBytes(l1, l2);
}
ptr1 += 8;
ptr2 += 8;
}
// or, if running out of runway, handle last bytes with loop-de-loop...
while (ptr1 < maxPtr1 && in[ptr1] == in[ptr2]) {
++ptr1;
++ptr2;
}
        return ptr1 - base;
}
/* With Big-Endian, in-memory layout is "natural", so what we consider
* leading is also leading for in-register.
*/
private final static int _leadingBytes(int i1, int i2) {
return Integer.numberOfLeadingZeros(i1 ^ i2) >> 3;
}
private final static int _leadingBytes(long l1, long l2) {
return Long.numberOfLeadingZeros(l1 ^ l2) >> 3;
}
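    /* Illustrative note (not in the original source): on big-endian hardware
     * the first differing byte in memory is also the highest-order differing
     * byte in a register, so counting whole leading zero bytes of the XOR
     * gives the matching-byte count directly. For example:
     *   i1 = 0x11223344, i2 = 0x11223399
     *   i1 ^ i2 = 0x000000DD -> numberOfLeadingZeros = 24 -> 24 >> 3 = 3
     * i.e. the first three bytes match.
     */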
}
compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/impl/UnsafeChunkEncoderLE.java
package com.ning.compress.lzf.impl;
import com.ning.compress.BufferRecycler;
import com.ning.compress.lzf.LZFChunk;
/**
* Implementation to use on Little Endian architectures.
*/
@SuppressWarnings("restriction")
public class UnsafeChunkEncoderLE
extends UnsafeChunkEncoder
{
public UnsafeChunkEncoderLE(int totalLength) {
super(totalLength);
}
public UnsafeChunkEncoderLE(int totalLength, boolean bogus) {
super(totalLength, bogus);
}
public UnsafeChunkEncoderLE(int totalLength, BufferRecycler bufferRecycler) {
super(totalLength, bufferRecycler);
}
public UnsafeChunkEncoderLE(int totalLength, BufferRecycler bufferRecycler, boolean bogus) {
super(totalLength, bufferRecycler, bogus);
}
@Override
protected int tryCompress(byte[] in, int inPos, int inEnd, byte[] out, int outPos)
{
final int[] hashTable = _hashTable;
int literals = 0;
inEnd -= TAIL_LENGTH;
final int firstPos = inPos; // so that we won't have back references across block boundary
int seen = _getInt(in, inPos) >> 16;
while (inPos < inEnd) {
seen = (seen << 8) + (in[inPos + 2] & 255);
// seen = (seen << 8) + (unsafe.getByte(in, BYTE_ARRAY_OFFSET_PLUS2 + inPos) & 0xFF);
int off = hash(seen);
int ref = hashTable[off];
hashTable[off] = inPos;
// First expected common case: no back-ref (for whatever reason)
if ((ref >= inPos) // can't refer forward (i.e. leftovers)
|| (ref < firstPos) // or to previous block
|| (off = inPos - ref) > MAX_OFF
|| ((seen << 8) != (_getInt(in, ref-1) << 8))) {
++inPos;
++literals;
if (literals == LZFChunk.MAX_LITERAL) {
outPos = _copyFullLiterals(in, inPos, out, outPos);
literals = 0;
}
continue;
}
if (literals > 0) {
outPos = _copyPartialLiterals(in, inPos, out, outPos, literals);
literals = 0;
}
// match
final int maxLen = Math.min(MAX_REF, inEnd - inPos + 2);
/*int maxLen = inEnd - inPos + 2;
if (maxLen > MAX_REF) {
maxLen = MAX_REF;
}*/
int len = _findMatchLength(in, ref+3, inPos+3, ref+maxLen);
--off; // was off by one earlier
if (len < 7) {
out[outPos++] = (byte) ((off >> 8) + (len << 5));
} else {
out[outPos++] = (byte) ((off >> 8) + (7 << 5));
out[outPos++] = (byte) (len - 7);
}
out[outPos++] = (byte) off;
inPos += len;
seen = _getInt(in, inPos);
hashTable[hash(seen >> 8)] = inPos;
++inPos;
hashTable[hash(seen)] = inPos;
++inPos;
}
// try offlining the tail
return _handleTail(in, inPos, inEnd+4, out, outPos, literals);
}
private final static int _getInt(final byte[] in, final int inPos) {
return Integer.reverseBytes(unsafe.getInt(in, BYTE_ARRAY_OFFSET + inPos));
}
/*
///////////////////////////////////////////////////////////////////////
// Methods for finding length of a back-reference
///////////////////////////////////////////////////////////////////////
*/
private final static int _findMatchLength(final byte[] in, int ptr1, int ptr2, final int maxPtr1)
{
// Expect at least 8 bytes to check for fast case; offline others
if ((ptr1 + 8) >= maxPtr1) { // rare case, offline
return _findTailMatchLength(in, ptr1, ptr2, maxPtr1);
}
// short matches common, so start with specialized comparison
// NOTE: we know that we have 4 bytes of slack before end, so this is safe:
int i1 = unsafe.getInt(in, BYTE_ARRAY_OFFSET + ptr1);
int i2 = unsafe.getInt(in, BYTE_ARRAY_OFFSET + ptr2);
if (i1 != i2) {
return 1 + _leadingBytes(i1, i2);
}
ptr1 += 4;
ptr2 += 4;
i1 = unsafe.getInt(in, BYTE_ARRAY_OFFSET + ptr1);
i2 = unsafe.getInt(in, BYTE_ARRAY_OFFSET + ptr2);
if (i1 != i2) {
return 5 + _leadingBytes(i1, i2);
}
return _findLongMatchLength(in, ptr1+4, ptr2+4, maxPtr1);
}
private final static int _findLongMatchLength(final byte[] in, int ptr1, int ptr2, final int maxPtr1)
{
final int base = ptr1 - 9;
// and then just loop with longs if we get that far
final int longEnd = maxPtr1-8;
while (ptr1 <= longEnd) {
long l1 = unsafe.getLong(in, BYTE_ARRAY_OFFSET + ptr1);
long l2 = unsafe.getLong(in, BYTE_ARRAY_OFFSET + ptr2);
if (l1 != l2) {
return ptr1 - base + _leadingBytes(l1, l2);
}
ptr1 += 8;
ptr2 += 8;
}
// or, if running out of runway, handle last bytes with loop-de-loop...
while (ptr1 < maxPtr1 && in[ptr1] == in[ptr2]) {
++ptr1;
++ptr2;
}
        return ptr1 - base; // i.e. bytes matched, measured from 'base'
}
/* With Little-Endian, in-memory layout is reverse of what we expect for
* in-register, so we either have to reverse bytes, or, simpler,
* calculate trailing zeroes instead.
*/
private final static int _leadingBytes(int i1, int i2) {
return Integer.numberOfTrailingZeros(i1 ^ i2) >> 3;
}
private final static int _leadingBytes(long l1, long l2) {
return Long.numberOfTrailingZeros(l1 ^ l2) >> 3;
}
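    /* Illustrative note (not in the original source): on little-endian
     * hardware the first byte in memory lands in the low-order register byte,
     * so a matching memory prefix shows up as trailing zero bytes of the XOR.
     * For example, registers i1 = 0x44332211 and i2 = 0x99332211 represent
     * memory bytes 11 22 33 44 and 11 22 33 99 respectively:
     *   i1 ^ i2 = 0xDD000000 -> numberOfTrailingZeros = 24 -> 24 >> 3 = 3
     * i.e. the first three bytes in memory match.
     */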
}
compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/impl/UnsafeChunkEncoders.java
/* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
* file except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
* OF ANY KIND, either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
package com.ning.compress.lzf.impl;
import com.ning.compress.BufferRecycler;
import java.nio.ByteOrder;
/**
 * Factory for {@link UnsafeChunkEncoder} instances matching the platform's
 * native byte order. Resulting chunks can be compressed or non-compressed;
 * compression is only used if it actually reduces chunk size (including the
 * overhead of additional header bytes).
 *
 * @author Tatu Saloranta (tatu.saloranta@iki.fi)
 */
public final class UnsafeChunkEncoders
{
private final static boolean LITTLE_ENDIAN = (ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN);
public static UnsafeChunkEncoder createEncoder(int totalLength) {
if (LITTLE_ENDIAN) {
return new UnsafeChunkEncoderLE(totalLength);
}
return new UnsafeChunkEncoderBE(totalLength);
}
public static UnsafeChunkEncoder createNonAllocatingEncoder(int totalLength) {
if (LITTLE_ENDIAN) {
return new UnsafeChunkEncoderLE(totalLength, false);
}
return new UnsafeChunkEncoderBE(totalLength, false);
}
public static UnsafeChunkEncoder createEncoder(int totalLength, BufferRecycler bufferRecycler) {
if (LITTLE_ENDIAN) {
return new UnsafeChunkEncoderLE(totalLength, bufferRecycler);
}
return new UnsafeChunkEncoderBE(totalLength, bufferRecycler);
}
public static UnsafeChunkEncoder createNonAllocatingEncoder(int totalLength, BufferRecycler bufferRecycler) {
if (LITTLE_ENDIAN) {
return new UnsafeChunkEncoderLE(totalLength, bufferRecycler, false);
}
return new UnsafeChunkEncoderBE(totalLength, bufferRecycler, false);
}
}
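/* Usage sketch (illustrative, not part of the original file; encodeChunk is
 * assumed here to be the inherited ChunkEncoder API for producing a single
 * LZF chunk):
 *
 *   UnsafeChunkEncoder enc = UnsafeChunkEncoders.createEncoder(data.length);
 *   LZFChunk chunk = enc.encodeChunk(data, 0, data.length);
 *
 * On little-endian hardware (x86/x64) createEncoder returns
 * UnsafeChunkEncoderLE; on big-endian hardware (e.g. SPARC, POWER) it
 * returns UnsafeChunkEncoderBE.
 */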
compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/impl/VanillaChunkDecoder.java
package com.ning.compress.lzf.impl;
import java.io.IOException;
import java.io.InputStream;
import com.ning.compress.lzf.*;
/**
* Safe {@link ChunkDecoder} implementation that can be used on any
* platform.
*/
public class VanillaChunkDecoder extends ChunkDecoder
{
public VanillaChunkDecoder() { }
@Override
public final int decodeChunk(final InputStream is, final byte[] inputBuffer, final byte[] outputBuffer)
throws IOException
{
        /* note: we do NOT read more than 5 bytes here, because otherwise we might
         * need to shuffle bytes into the output buffer (could perhaps optimize in future?)
         */
int bytesRead = readHeader(is, inputBuffer);
if ((bytesRead < HEADER_BYTES)
|| inputBuffer[0] != LZFChunk.BYTE_Z || inputBuffer[1] != LZFChunk.BYTE_V) {
if (bytesRead == 0) { // probably fine, clean EOF
return -1;
}
_reportCorruptHeader();
}
int type = inputBuffer[2];
int compLen = uint16(inputBuffer, 3);
if (type == LZFChunk.BLOCK_TYPE_NON_COMPRESSED) { // uncompressed
readFully(is, false, outputBuffer, 0, compLen);
return compLen;
}
// compressed
readFully(is, true, inputBuffer, 0, 2+compLen); // first 2 bytes are uncompressed length
int uncompLen = uint16(inputBuffer, 0);
decodeChunk(inputBuffer, 2, outputBuffer, 0, uncompLen);
return uncompLen;
}
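    /* Illustrative note (not in the original source): every LZF chunk starts
     * with a 5-byte header: 'Z', 'V', a type byte, and a 2-byte big-endian
     * length. For example the header
     *   5A 56 01 00 2A
     * announces a compressed chunk (type 1) of 0x002A = 42 compressed bytes,
     * which are preceded by a 2-byte uncompressed-length field; type 0 chunks
     * carry the payload as-is, with no extra length field.
     */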
@Override
public final void decodeChunk(byte[] in, int inPos, byte[] out, int outPos, int outEnd)
throws LZFException
{
do {
int ctrl = in[inPos++] & 255;
if (ctrl < LZFChunk.MAX_LITERAL) { // literal run
switch (ctrl) {
case 31:
out[outPos++] = in[inPos++];
case 30:
out[outPos++] = in[inPos++];
case 29:
out[outPos++] = in[inPos++];
case 28:
out[outPos++] = in[inPos++];
case 27:
out[outPos++] = in[inPos++];
case 26:
out[outPos++] = in[inPos++];
case 25:
out[outPos++] = in[inPos++];
case 24:
out[outPos++] = in[inPos++];
case 23:
out[outPos++] = in[inPos++];
case 22:
out[outPos++] = in[inPos++];
case 21:
out[outPos++] = in[inPos++];
case 20:
out[outPos++] = in[inPos++];
case 19:
out[outPos++] = in[inPos++];
case 18:
out[outPos++] = in[inPos++];
case 17:
out[outPos++] = in[inPos++];
case 16:
out[outPos++] = in[inPos++];
case 15:
out[outPos++] = in[inPos++];
case 14:
out[outPos++] = in[inPos++];
case 13:
out[outPos++] = in[inPos++];
case 12:
out[outPos++] = in[inPos++];
case 11:
out[outPos++] = in[inPos++];
case 10:
out[outPos++] = in[inPos++];
case 9:
out[outPos++] = in[inPos++];
case 8:
out[outPos++] = in[inPos++];
case 7:
out[outPos++] = in[inPos++];
case 6:
out[outPos++] = in[inPos++];
case 5:
out[outPos++] = in[inPos++];
case 4:
out[outPos++] = in[inPos++];
case 3:
out[outPos++] = in[inPos++];
case 2:
out[outPos++] = in[inPos++];
case 1:
out[outPos++] = in[inPos++];
case 0:
out[outPos++] = in[inPos++];
}
continue;
}
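            /* Illustrative note (not in the original source): the switch above
             * deliberately omits 'break' so that, e.g., ctrl = 2 falls through
             * cases 2, 1 and 0 and copies exactly ctrl + 1 = 3 literal bytes --
             * a fully unrolled copy for runs of 1..32 literals.
             */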
// back reference
int len = ctrl >> 5;
ctrl = -((ctrl & 0x1f) << 8) - 1;
if (len < 7) { // 2 bytes; length of 3 - 8 bytes
ctrl -= in[inPos++] & 255;
out[outPos] = out[outPos++ + ctrl];
out[outPos] = out[outPos++ + ctrl];
switch (len) {
case 6:
out[outPos] = out[outPos++ + ctrl];
case 5:
out[outPos] = out[outPos++ + ctrl];
case 4:
out[outPos] = out[outPos++ + ctrl];
case 3:
out[outPos] = out[outPos++ + ctrl];
case 2:
out[outPos] = out[outPos++ + ctrl];
case 1:
out[outPos] = out[outPos++ + ctrl];
}
continue;
}
// long version (3 bytes, length of up to 264 bytes)
len = in[inPos++] & 255;
ctrl -= in[inPos++] & 255;
// First: if there is no overlap, can just use arraycopy:
if ((ctrl + len) < -9) {
len += 9;
if (len <= 32) {
copyUpTo32WithSwitch(out, outPos+ctrl, out, outPos, len-1);
} else {
System.arraycopy(out, outPos+ctrl, out, outPos, len);
}
outPos += len;
continue;
}
// otherwise manual copy: so first just copy 9 bytes we know are needed
out[outPos] = out[outPos++ + ctrl];
out[outPos] = out[outPos++ + ctrl];
out[outPos] = out[outPos++ + ctrl];
out[outPos] = out[outPos++ + ctrl];
out[outPos] = out[outPos++ + ctrl];
out[outPos] = out[outPos++ + ctrl];
out[outPos] = out[outPos++ + ctrl];
out[outPos] = out[outPos++ + ctrl];
out[outPos] = out[outPos++ + ctrl];
// then loop
            // Odd: after extensive profiling, the magic number for unrolling
            // appears to be 4: with 8, performance is worse (even a bit less
            // than with no unrolling).
len += outPos;
final int end = len - 3;
while (outPos < end) {
out[outPos] = out[outPos++ + ctrl];
out[outPos] = out[outPos++ + ctrl];
out[outPos] = out[outPos++ + ctrl];
out[outPos] = out[outPos++ + ctrl];
}
switch (len - outPos) {
case 3:
out[outPos] = out[outPos++ + ctrl];
case 2:
out[outPos] = out[outPos++ + ctrl];
case 1:
out[outPos] = out[outPos++ + ctrl];
}
} while (outPos < outEnd);
// sanity check to guard against corrupt data:
if (outPos != outEnd) {
throw new LZFException("Corrupt data: overrun in decompress, input offset "+inPos+", output offset "+outPos);
}
}
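    /* Illustrative note (not in the original source): a back-reference may
     * overlap the region being written, which is how LZF encodes runs. With
     * output "ab" so far, a reference of offset 2 and length 6 must expand
     * byte-by-byte to "abababab"; System.arraycopy behaves as if it copied
     * through a temporary buffer and would not replicate the run, hence the
     * overlap check before taking the arraycopy fast path above.
     */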
@Override
public int skipOrDecodeChunk(final InputStream is, final byte[] inputBuffer,
final byte[] outputBuffer, final long maxToSkip)
throws IOException
{
int bytesRead = readHeader(is, inputBuffer);
if ((bytesRead < HEADER_BYTES)
|| inputBuffer[0] != LZFChunk.BYTE_Z || inputBuffer[1] != LZFChunk.BYTE_V) {
if (bytesRead == 0) { // probably fine, clean EOF
return -1;
}
_reportCorruptHeader();
}
int type = inputBuffer[2];
int compLen = uint16(inputBuffer, 3);
if (type == LZFChunk.BLOCK_TYPE_NON_COMPRESSED) { // uncompressed, simple
if (compLen <= maxToSkip) {
skipFully(is, compLen);
return compLen;
}
readFully(is, false, outputBuffer, 0, compLen);
return -(compLen+1);
}
// compressed: need 2 more bytes to know uncompressed length...
readFully(is, true, inputBuffer, 0, 2);
int uncompLen = uint16(inputBuffer, 0);
// can we just skip it wholesale?
if (uncompLen <= maxToSkip) { // awesome: skip N physical compressed bytes, which mean M logical (uncomp) bytes
skipFully(is, compLen);
return uncompLen;
}
// otherwise, read and uncompress the chunk normally
readFully(is, true, inputBuffer, 2, compLen); // first 2 bytes are uncompressed length
decodeChunk(inputBuffer, 2, outputBuffer, 0, uncompLen);
return -(uncompLen+1);
}
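    /* Illustrative note (not in the original source): the return value of
     * skipOrDecodeChunk encodes three outcomes:
     *   -1             clean EOF before any chunk header
     *   n >= 0         n content bytes were skipped without decoding
     *   -(n+1), n > 0  chunk exceeded maxToSkip; n bytes were decoded
     *                  into outputBuffer instead
     */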
/*
///////////////////////////////////////////////////////////////////////
// Internal methods
///////////////////////////////////////////////////////////////////////
*/
protected static final void copyUpTo32WithSwitch(byte[] in, int inPos, byte[] out, int outPos,
int lengthMinusOne)
{
switch (lengthMinusOne) {
case 31:
out[outPos++] = in[inPos++];
case 30:
out[outPos++] = in[inPos++];
case 29:
out[outPos++] = in[inPos++];
case 28:
out[outPos++] = in[inPos++];
case 27:
out[outPos++] = in[inPos++];
case 26:
out[outPos++] = in[inPos++];
case 25:
out[outPos++] = in[inPos++];
case 24:
out[outPos++] = in[inPos++];
case 23:
out[outPos++] = in[inPos++];
case 22:
out[outPos++] = in[inPos++];
case 21:
out[outPos++] = in[inPos++];
case 20:
out[outPos++] = in[inPos++];
case 19:
out[outPos++] = in[inPos++];
case 18:
out[outPos++] = in[inPos++];
case 17:
out[outPos++] = in[inPos++];
case 16:
out[outPos++] = in[inPos++];
case 15:
out[outPos++] = in[inPos++];
case 14:
out[outPos++] = in[inPos++];
case 13:
out[outPos++] = in[inPos++];
case 12:
out[outPos++] = in[inPos++];
case 11:
out[outPos++] = in[inPos++];
case 10:
out[outPos++] = in[inPos++];
case 9:
out[outPos++] = in[inPos++];
case 8:
out[outPos++] = in[inPos++];
case 7:
out[outPos++] = in[inPos++];
case 6:
out[outPos++] = in[inPos++];
case 5:
out[outPos++] = in[inPos++];
case 4:
out[outPos++] = in[inPos++];
case 3:
out[outPos++] = in[inPos++];
case 2:
out[outPos++] = in[inPos++];
case 1:
out[outPos++] = in[inPos++];
case 0:
out[outPos++] = in[inPos++];
}
}
}
compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/impl/VanillaChunkEncoder.java
package com.ning.compress.lzf.impl;
import com.ning.compress.BufferRecycler;
import com.ning.compress.lzf.ChunkEncoder;
import com.ning.compress.lzf.LZFChunk;
public class VanillaChunkEncoder
extends ChunkEncoder
{
/**
* @param totalLength Total encoded length; used for calculating size
* of hash table to use
*/
public VanillaChunkEncoder(int totalLength) {
super(totalLength);
}
    /**
     * Alternate constructor used when we want to avoid allocating the encoding
     * buffer, in cases where the caller wants full control over allocations.
     */
protected VanillaChunkEncoder(int totalLength, boolean bogus) {
super(totalLength, bogus);
}
/**
* @param totalLength Total encoded length; used for calculating size
* of hash table to use
* @param bufferRecycler The BufferRecycler instance
*/
public VanillaChunkEncoder(int totalLength, BufferRecycler bufferRecycler) {
super(totalLength, bufferRecycler);
}
    /**
     * Alternate constructor used when we want to avoid allocating the encoding
     * buffer, in cases where the caller wants full control over allocations.
     */
protected VanillaChunkEncoder(int totalLength, BufferRecycler bufferRecycler, boolean bogus) {
super(totalLength, bufferRecycler, bogus);
}
public static VanillaChunkEncoder nonAllocatingEncoder(int totalLength) {
return new VanillaChunkEncoder(totalLength, true);
}
public static VanillaChunkEncoder nonAllocatingEncoder(int totalLength, BufferRecycler bufferRecycler) {
return new VanillaChunkEncoder(totalLength, bufferRecycler, true);
}
/*
///////////////////////////////////////////////////////////////////////
// Abstract method implementations
///////////////////////////////////////////////////////////////////////
*/
    /**
     * Main workhorse method that will try to compress given chunk, and return
     * end position (offset to byte after last included byte).
     *
     * @return Output pointer after handling content, such that <code>result - originalOutPos</code>
     *    is the actual length of compressed chunk (without header)
     */
@Override
protected int tryCompress(byte[] in, int inPos, int inEnd, byte[] out, int outPos)
{
final int[] hashTable = _hashTable;
++outPos; // To leave one byte for literal-length indicator
int seen = first(in, inPos); // past 4 bytes we have seen... (last one is LSB)
int literals = 0;
inEnd -= TAIL_LENGTH;
final int firstPos = inPos; // so that we won't have back references across block boundary
while (inPos < inEnd) {
byte p2 = in[inPos + 2];
// next
seen = (seen << 8) + (p2 & 255);
int off = hash(seen);
int ref = hashTable[off];
hashTable[off] = inPos;
// First expected common case: no back-ref (for whatever reason)
if (ref >= inPos // can't refer forward (i.e. leftovers)
|| (ref < firstPos) // or to previous block
|| (off = inPos - ref) > MAX_OFF
|| in[ref+2] != p2 // must match hash
|| in[ref+1] != (byte) (seen >> 8)
|| in[ref] != (byte) (seen >> 16)) {
out[outPos++] = in[inPos++];
literals++;
if (literals == LZFChunk.MAX_LITERAL) {
out[outPos - 33] = (byte) 31; // <= out[outPos - literals - 1] = MAX_LITERAL_MINUS_1;
literals = 0;
outPos++; // To leave one byte for literal-length indicator
}
continue;
}
// match
int maxLen = inEnd - inPos + 2;
if (maxLen > MAX_REF) {
maxLen = MAX_REF;
}
if (literals == 0) {
outPos--; // We do not need literal length indicator, go back
} else {
out[outPos - literals - 1] = (byte) (literals - 1);
literals = 0;
}
int len = 3;
// find match length
while (len < maxLen && in[ref + len] == in[inPos + len]) {
len++;
}
len -= 2;
--off; // was off by one earlier
if (len < 7) {
out[outPos++] = (byte) ((off >> 8) + (len << 5));
} else {
out[outPos++] = (byte) ((off >> 8) + (7 << 5));
out[outPos++] = (byte) (len - 7);
}
out[outPos++] = (byte) off;
outPos++;
inPos += len;
seen = first(in, inPos);
seen = (seen << 8) + (in[inPos + 2] & 255);
hashTable[hash(seen)] = inPos;
++inPos;
seen = (seen << 8) + (in[inPos + 2] & 255); // hash = next(hash, in, inPos);
hashTable[hash(seen)] = inPos;
++inPos;
}
// try offlining the tail
return _handleTail(in, inPos, inEnd+4, out, outPos, literals);
}
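    /* Illustrative note (not in the original source): the low 24 bits of
     * 'seen' hold the last three input bytes, most recent in the low byte.
     * With input bytes 'a','b','c' (0x61, 0x62, 0x63) starting at inPos:
     *   first(in, inPos)          -> 0x6162     ('a','b')
     *   (seen << 8) + ('c' & 255) -> 0x616263   ('a','b','c')
     * hash(seen) then indexes the table of last positions where this 3-byte
     * sequence was observed, yielding candidate back-references.
     */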
private final int _handleTail(byte[] in, int inPos, int inEnd, byte[] out, int outPos,
int literals)
{
while (inPos < inEnd) {
out[outPos++] = in[inPos++];
literals++;
if (literals == LZFChunk.MAX_LITERAL) {
out[outPos - literals - 1] = (byte) (literals - 1);
literals = 0;
outPos++;
}
}
out[outPos - literals - 1] = (byte) (literals - 1);
if (literals == 0) {
outPos--;
}
return outPos;
}
/*
///////////////////////////////////////////////////////////////////////
// Internal methods
///////////////////////////////////////////////////////////////////////
*/
private final int first(byte[] in, int inPos) {
return (in[inPos] << 8) + (in[inPos + 1] & 0xFF);
}
}
compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/impl/package-info.java
/**
Package that contains implementation classes that are not part
of public interface of LZF codec.
*/
package com.ning.compress.lzf.impl;
compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/package-info.java
/**
Package that contains public API of the LZF codec, as well as some
of the implementation (specifically parts that are designed to be overridable).
*/
package com.ning.compress.lzf;
compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/parallel/BlockManager.java
package com.ning.compress.lzf.parallel;
import java.util.concurrent.BlockingDeque;
import java.util.concurrent.LinkedBlockingDeque;
/**
* @author Cédrik LIME
*/
class BlockManager {
    /* used as a blocking Stack (LIFO) */
    private final BlockingDeque<byte[]> blockPool;
    /**
     * This <code>flush</code> method does nothing.
     */
@Override
public void flush() throws IOException
{
checkNotClosed();
}
@Override
public boolean isOpen() {
return ! _outputStreamClosed;
}
@Override
public void close() throws IOException
{
if (!_outputStreamClosed) {
if (_position > 0) {
writeCompressedBlock();
}
byte[] buf = _outputBuffer;
if (buf != null) {
assert _position == 0;
blockManager.releaseBlockToPool(_outputBuffer);
_outputBuffer = null;
}
writeExecutor.shutdown();
try {
writeExecutor.awaitTermination(1, TimeUnit.HOURS);
                // at this point compressExecutor should have no pending tasks: clean up ThreadLocals
// we don't know how many threads; go to the max for now. This will change once we get a proper configuration bean.
int maxThreads = Runtime.getRuntime().availableProcessors();
            Collection
compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/util/ChunkEncoderFactory.java
package com.ning.compress.lzf.util;
import com.ning.compress.BufferRecycler;
import com.ning.compress.lzf.ChunkEncoder;
import com.ning.compress.lzf.LZFChunk;
import com.ning.compress.lzf.impl.UnsafeChunkEncoders;
import com.ning.compress.lzf.impl.VanillaChunkEncoder;
/**
 * Simple helper class used for loading {@link ChunkEncoder} implementations,
 * based on criteria such as "fastest available" or "safe to run anywhere".
 */
public class ChunkEncoderFactory
{
    /**
     * Convenience method, equivalent to:
     *   return optimalInstance(LZFChunk.MAX_CHUNK_LEN);
     */
public static ChunkEncoder optimalInstance() {
return optimalInstance(LZFChunk.MAX_CHUNK_LEN);
}
    /**
     * Convenience method, equivalent to:
     *   return safeInstance(LZFChunk.MAX_CHUNK_LEN);
     */
public static ChunkEncoder safeInstance() {
return safeInstance(LZFChunk.MAX_CHUNK_LEN);
}
    /**
     * Convenience method, equivalent to:
     *   return optimalInstance(LZFChunk.MAX_CHUNK_LEN, bufferRecycler);
     */
public static ChunkEncoder optimalInstance(BufferRecycler bufferRecycler) {
return optimalInstance(LZFChunk.MAX_CHUNK_LEN, bufferRecycler);
}
/**
     * Method to use for getting a compressor instance that uses the fastest
     * methods available for underlying data access. It should be safe to call
     * this method, as implementations are dynamically loaded; however, on some
     * non-standard platforms it may be necessary to either directly load
     * instances, or use {@link #safeInstance}.
*
* @param totalLength Expected total length of content to compress; only matters
* for content that is smaller than maximum chunk size (64k), to optimize
* encoding hash tables
* @param bufferRecycler The BufferRecycler instance
*/
public static ChunkEncoder optimalInstance(int totalLength, BufferRecycler bufferRecycler) {
try {
return UnsafeChunkEncoders.createEncoder(totalLength, bufferRecycler);
} catch (Exception e) {
return safeInstance(totalLength, bufferRecycler);
}
}
    /**
     * Factory method for constructing an encoder that is always passed its buffer
     * externally, so that it need not (and will not) allocate an encoding buffer.
     */
public static ChunkEncoder optimalNonAllocatingInstance(int totalLength, BufferRecycler bufferRecycler) {
try {
return UnsafeChunkEncoders.createNonAllocatingEncoder(totalLength, bufferRecycler);
} catch (Exception e) {
return safeNonAllocatingInstance(totalLength, bufferRecycler);
}
}
/**
     * Convenience method, equivalent to:
     *   return safeInstance(LZFChunk.MAX_CHUNK_LEN, bufferRecycler);
*/
public static ChunkEncoder safeInstance(BufferRecycler bufferRecycler) {
return safeInstance(LZFChunk.MAX_CHUNK_LEN, bufferRecycler);
}
/**
* Method that can be used to ensure that a "safe" compressor instance is loaded.
* Safe here means that it should work on any and all Java platforms.
*
* @param totalLength Expected total length of content to compress; only matters
* for content that is smaller than maximum chunk size (64k), to optimize
* encoding hash tables
* @param bufferRecycler The BufferRecycler instance
*/
public static ChunkEncoder safeInstance(int totalLength, BufferRecycler bufferRecycler) {
return new VanillaChunkEncoder(totalLength, bufferRecycler);
}
    /**
     * Factory method for constructing an encoder that is always passed its buffer
     * externally, so that it need not (and will not) allocate an encoding buffer.
     */
public static ChunkEncoder safeNonAllocatingInstance(int totalLength, BufferRecycler bufferRecycler) {
return VanillaChunkEncoder.nonAllocatingEncoder(totalLength, bufferRecycler);
}
}
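/* Usage sketch (illustrative, not part of the original file; assumes the
 * BufferRecycler.instance() accessor and the encodeChunk()/close() methods
 * of the ChunkEncoder API):
 *
 *   ChunkEncoder encoder = ChunkEncoderFactory.optimalInstance(BufferRecycler.instance());
 *   try {
 *       LZFChunk chunk = encoder.encodeChunk(input, 0, input.length);
 *   } finally {
 *       encoder.close(); // returns internal buffers for reuse
 *   }
 *
 * optimalInstance falls back to VanillaChunkEncoder automatically if the
 * sun.misc.Unsafe-based encoders cannot be loaded.
 */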
compress-compress-lzf-1.0.3/src/main/java/com/ning/compress/lzf/util/LZFFileInputStream.java
package com.ning.compress.lzf.util;
import java.io.*;
import com.ning.compress.BufferRecycler;
import com.ning.compress.lzf.*;
/**
* Helper class that allows use of LZF compression even if a library requires
* use of {@link FileInputStream}.
*