diff --git a/build.third_step6.gradle b/build.third_step6.gradle
index 46f7ebe3b..63e2d5fc1 100644
--- a/build.third_step6.gradle
+++ b/build.third_step6.gradle
@@ -39,7 +39,8 @@ sourceSets{
"${srcDir}/fine-redisson/src",
"${srcDir}/fine-socketio/src",
"${srcDir}/fine-itext/src",
- "${srcDir}/fine-kryo/src"
+ "${srcDir}/fine-kryo/src",
+ "${srcDir}/fine-lz4/src"
]
}
}
@@ -99,6 +100,8 @@ task copyFiles(type:Copy,dependsOn:'compileJava'){
with dataContent.call("${srcDir}/fine-jedis/resources")
with dataContent.call("${srcDir}/fine-cssparser/src")
with dataContent.call("${srcDir}/fine-kryo/src")
+ with dataContent.call("${srcDir}/fine-lz4/src")
+ with dataContent.call("${srcDir}/fine-lz4/resources")
into "${classesDir}"
}
}
diff --git a/fine-lz4/resources/com/fr/third/net/jpountz/util/darwin/x86_64/liblz4-java.dylib b/fine-lz4/resources/com/fr/third/net/jpountz/util/darwin/x86_64/liblz4-java.dylib
new file mode 100755
index 000000000..fc98306c8
Binary files /dev/null and b/fine-lz4/resources/com/fr/third/net/jpountz/util/darwin/x86_64/liblz4-java.dylib differ
diff --git a/fine-lz4/resources/com/fr/third/net/jpountz/util/linux/aarch64/liblz4-java.so b/fine-lz4/resources/com/fr/third/net/jpountz/util/linux/aarch64/liblz4-java.so
new file mode 100755
index 000000000..b6764721a
Binary files /dev/null and b/fine-lz4/resources/com/fr/third/net/jpountz/util/linux/aarch64/liblz4-java.so differ
diff --git a/fine-lz4/resources/com/fr/third/net/jpountz/util/linux/amd64/liblz4-java.so b/fine-lz4/resources/com/fr/third/net/jpountz/util/linux/amd64/liblz4-java.so
new file mode 100755
index 000000000..8ee7d308f
Binary files /dev/null and b/fine-lz4/resources/com/fr/third/net/jpountz/util/linux/amd64/liblz4-java.so differ
diff --git a/fine-lz4/resources/com/fr/third/net/jpountz/util/linux/i386/liblz4-java.so b/fine-lz4/resources/com/fr/third/net/jpountz/util/linux/i386/liblz4-java.so
new file mode 100755
index 000000000..88f27ff40
Binary files /dev/null and b/fine-lz4/resources/com/fr/third/net/jpountz/util/linux/i386/liblz4-java.so differ
diff --git a/fine-lz4/resources/com/fr/third/net/jpountz/util/linux/ppc64le/liblz4-java.so b/fine-lz4/resources/com/fr/third/net/jpountz/util/linux/ppc64le/liblz4-java.so
new file mode 100755
index 000000000..641a47885
Binary files /dev/null and b/fine-lz4/resources/com/fr/third/net/jpountz/util/linux/ppc64le/liblz4-java.so differ
diff --git a/fine-lz4/resources/com/fr/third/net/jpountz/util/linux/s390x/liblz4-java.so b/fine-lz4/resources/com/fr/third/net/jpountz/util/linux/s390x/liblz4-java.so
new file mode 100755
index 000000000..78f67eccf
Binary files /dev/null and b/fine-lz4/resources/com/fr/third/net/jpountz/util/linux/s390x/liblz4-java.so differ
diff --git a/fine-lz4/resources/com/fr/third/net/jpountz/util/win32/amd64/liblz4-java.so b/fine-lz4/resources/com/fr/third/net/jpountz/util/win32/amd64/liblz4-java.so
new file mode 100755
index 000000000..d986bc35f
Binary files /dev/null and b/fine-lz4/resources/com/fr/third/net/jpountz/util/win32/amd64/liblz4-java.so differ
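The binary files above are the per-platform JNI builds of liblz4-java shipped inside the new fine-lz4 resources tree. As an illustrative sketch only (the real lookup is done by the bundled com.fr.third.net.jpountz.util.Native class, whose source is not part of this hunk), a classpath probe for the matching library could look like this:

```java
import java.io.InputStream;
import java.util.Locale;

public class NativeResourcePath {
    /** Builds a classpath location like .../util/linux/amd64/liblz4-java.so from system properties. */
    static String resourcePath() {
        String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
        String dir;
        if (os.contains("linux")) {
            dir = "linux";
        } else if (os.contains("mac")) {
            dir = "darwin";
        } else {
            dir = "win32";
        }
        // Note: the bundled win32 binary also uses the .so suffix, matching the resource names above.
        String ext = dir.equals("darwin") ? "dylib" : "so";
        String arch = System.getProperty("os.arch");
        return "/com/fr/third/net/jpountz/util/" + dir + "/" + arch + "/liblz4-java." + ext;
    }

    public static void main(String[] args) {
        String path = resourcePath();
        try (InputStream in = NativeResourcePath.class.getResourceAsStream(path)) {
            System.out.println(path + (in != null ? " found on classpath" : " not bundled for this platform"));
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
```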
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4BlockInputStream.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4BlockInputStream.java
new file mode 100644
index 000000000..c554ce5bd
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4BlockInputStream.java
@@ -0,0 +1,301 @@
+package com.fr.third.net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import static com.fr.third.net.jpountz.lz4.LZ4BlockOutputStream.COMPRESSION_LEVEL_BASE;
+import static com.fr.third.net.jpountz.lz4.LZ4BlockOutputStream.COMPRESSION_METHOD_LZ4;
+import static com.fr.third.net.jpountz.lz4.LZ4BlockOutputStream.COMPRESSION_METHOD_RAW;
+import static com.fr.third.net.jpountz.lz4.LZ4BlockOutputStream.DEFAULT_SEED;
+import static com.fr.third.net.jpountz.lz4.LZ4BlockOutputStream.HEADER_LENGTH;
+import static com.fr.third.net.jpountz.lz4.LZ4BlockOutputStream.MAGIC;
+import static com.fr.third.net.jpountz.lz4.LZ4BlockOutputStream.MAGIC_LENGTH;
+
+import java.io.EOFException;
+import java.io.FilterInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.zip.Checksum;
+
+import com.fr.third.net.jpountz.util.SafeUtils;
+import com.fr.third.net.jpountz.xxhash.StreamingXXHash32;
+import com.fr.third.net.jpountz.xxhash.XXHash32;
+import com.fr.third.net.jpountz.xxhash.XXHashFactory;
+
+/**
+ * {@link InputStream} implementation to decode data written with
+ * {@link LZ4BlockOutputStream}. This class is not thread-safe and does not
+ * support {@link #mark(int)}/{@link #reset()}.
+ * @see LZ4BlockOutputStream
+ */
+public final class LZ4BlockInputStream extends FilterInputStream {
+
+ private final LZ4FastDecompressor decompressor;
+ private final Checksum checksum;
+ private final boolean stopOnEmptyBlock;
+ private byte[] buffer;
+ private byte[] compressedBuffer;
+ private int originalLen;
+ private int o;
+ private boolean finished;
+
+ /**
+ * Creates a new LZ4 input stream to read from the specified underlying InputStream.
+ *
+ * @param in the {@link InputStream} to poll
+ * @param decompressor the {@link LZ4FastDecompressor decompressor} instance to
+ * use
+ * @param checksum the {@link Checksum} instance to use, must be
+ * equivalent to the instance which has been used to
+ * write the stream
+ * @param stopOnEmptyBlock whether read is stopped on an empty block
+ */
+ public LZ4BlockInputStream(InputStream in, LZ4FastDecompressor decompressor, Checksum checksum, boolean stopOnEmptyBlock) {
+ super(in);
+ this.decompressor = decompressor;
+ this.checksum = checksum;
+ this.stopOnEmptyBlock = stopOnEmptyBlock;
+ this.buffer = new byte[0];
+ this.compressedBuffer = new byte[HEADER_LENGTH];
+ o = originalLen = 0;
+ finished = false;
+ }
+
+ /**
+ * Creates a new LZ4 input stream to read from the specified underlying InputStream.
+ *
+ * @param in the {@link InputStream} to poll
+ * @param decompressor the {@link LZ4FastDecompressor decompressor} instance to
+ * use
+ * @param checksum the {@link Checksum} instance to use, must be
+ * equivalent to the instance which has been used to
+ * write the stream
+ *
+ * @see #LZ4BlockInputStream(InputStream, LZ4FastDecompressor, Checksum, boolean)
+ */
+ public LZ4BlockInputStream(InputStream in, LZ4FastDecompressor decompressor, Checksum checksum) {
+ this(in, decompressor, checksum, true);
+ }
+
+ /**
+ * Creates a new LZ4 input stream to read from the specified underlying InputStream, using {@link XXHash32} for checksumming.
+ *
+ * @param in the {@link InputStream} to poll
+ * @param decompressor the {@link LZ4FastDecompressor decompressor} instance to
+ * use
+ *
+ * @see #LZ4BlockInputStream(InputStream, LZ4FastDecompressor, Checksum, boolean)
+ * @see StreamingXXHash32#asChecksum()
+ */
+ public LZ4BlockInputStream(InputStream in, LZ4FastDecompressor decompressor) {
+ this(in, decompressor, XXHashFactory.fastestInstance().newStreamingHash32(DEFAULT_SEED).asChecksum(), true);
+ }
+
+ /**
+ * Creates a new LZ4 input stream to read from the specified underlying InputStream, using {@link XXHash32} for checksumming.
+ *
+ * @param in the {@link InputStream} to poll
+ * @param stopOnEmptyBlock whether read is stopped on an empty block
+ *
+ * @see #LZ4BlockInputStream(InputStream, LZ4FastDecompressor, Checksum, boolean)
+ * @see LZ4Factory#fastestInstance()
+ * @see StreamingXXHash32#asChecksum()
+ */
+ public LZ4BlockInputStream(InputStream in, boolean stopOnEmptyBlock) {
+ this(in, LZ4Factory.fastestInstance().fastDecompressor(), XXHashFactory.fastestInstance().newStreamingHash32(DEFAULT_SEED).asChecksum(), stopOnEmptyBlock);
+ }
+
+ /**
+ * Creates a new LZ4 input stream to read from the specified underlying InputStream, using {@link XXHash32} for checksumming.
+ *
+ * @param in the {@link InputStream} to poll
+ *
+ * @see #LZ4BlockInputStream(InputStream, LZ4FastDecompressor)
+ * @see LZ4Factory#fastestInstance()
+ */
+ public LZ4BlockInputStream(InputStream in) {
+ this(in, LZ4Factory.fastestInstance().fastDecompressor());
+ }
+
+ @Override
+ public int available() throws IOException {
+ return originalLen - o;
+ }
+
+ @Override
+ public int read() throws IOException {
+ if (finished) {
+ return -1;
+ }
+ if (o == originalLen) {
+ refill();
+ }
+ if (finished) {
+ return -1;
+ }
+ return buffer[o++] & 0xFF;
+ }
+
+ @Override
+ public int read(byte[] b, int off, int len) throws IOException {
+ SafeUtils.checkRange(b, off, len);
+ if (finished) {
+ return -1;
+ }
+ if (o == originalLen) {
+ refill();
+ }
+ if (finished) {
+ return -1;
+ }
+ len = Math.min(len, originalLen - o);
+ System.arraycopy(buffer, o, b, off, len);
+ o += len;
+ return len;
+ }
+
+ @Override
+ public int read(byte[] b) throws IOException {
+ return read(b, 0, b.length);
+ }
+
+ @Override
+ public long skip(long n) throws IOException {
+ if (n <= 0 || finished) {
+ return 0;
+ }
+ if (o == originalLen) {
+ refill();
+ }
+ if (finished) {
+ return 0;
+ }
+ final int skipped = (int) Math.min(n, originalLen - o);
+ o += skipped;
+ return skipped;
+ }
+
+ private void refill() throws IOException {
+ try {
+ readFully(compressedBuffer, HEADER_LENGTH);
+ } catch (EOFException e) {
+ if (!stopOnEmptyBlock) {
+ finished = true;
+ } else {
+ throw e;
+ }
+ return;
+ }
+ for (int i = 0; i < MAGIC_LENGTH; ++i) {
+ if (compressedBuffer[i] != MAGIC[i]) {
+ throw new IOException("Stream is corrupted");
+ }
+ }
+ final int token = compressedBuffer[MAGIC_LENGTH] & 0xFF;
+ final int compressionMethod = token & 0xF0;
+ final int compressionLevel = COMPRESSION_LEVEL_BASE + (token & 0x0F);
+ if (compressionMethod != COMPRESSION_METHOD_RAW && compressionMethod != COMPRESSION_METHOD_LZ4) {
+ throw new IOException("Stream is corrupted");
+ }
+ final int compressedLen = SafeUtils.readIntLE(compressedBuffer, MAGIC_LENGTH + 1);
+ originalLen = SafeUtils.readIntLE(compressedBuffer, MAGIC_LENGTH + 5);
+ final int check = SafeUtils.readIntLE(compressedBuffer, MAGIC_LENGTH + 9);
+ assert HEADER_LENGTH == MAGIC_LENGTH + 13;
+ if (originalLen > 1 << compressionLevel
+ || originalLen < 0
+ || compressedLen < 0
+ || (originalLen == 0 && compressedLen != 0)
+ || (originalLen != 0 && compressedLen == 0)
+ || (compressionMethod == COMPRESSION_METHOD_RAW && originalLen != compressedLen)) {
+ throw new IOException("Stream is corrupted");
+ }
+ if (originalLen == 0 && compressedLen == 0) {
+ if (check != 0) {
+ throw new IOException("Stream is corrupted");
+ }
+ if (!stopOnEmptyBlock) {
+ refill();
+ } else {
+ finished = true;
+ }
+ return;
+ }
+ if (buffer.length < originalLen) {
+ buffer = new byte[Math.max(originalLen, buffer.length * 3 / 2)];
+ }
+ switch (compressionMethod) {
+ case COMPRESSION_METHOD_RAW:
+ readFully(buffer, originalLen);
+ break;
+ case COMPRESSION_METHOD_LZ4:
+ if (compressedBuffer.length < compressedLen) {
+ compressedBuffer = new byte[Math.max(compressedLen, compressedBuffer.length * 3 / 2)];
+ }
+ readFully(compressedBuffer, compressedLen);
+ try {
+ final int compressedLen2 = decompressor.decompress(compressedBuffer, 0, buffer, 0, originalLen);
+ if (compressedLen != compressedLen2) {
+ throw new IOException("Stream is corrupted");
+ }
+ } catch (LZ4Exception e) {
+ throw new IOException("Stream is corrupted", e);
+ }
+ break;
+ default:
+ throw new AssertionError();
+ }
+ checksum.reset();
+ checksum.update(buffer, 0, originalLen);
+ if ((int) checksum.getValue() != check) {
+ throw new IOException("Stream is corrupted");
+ }
+ o = 0;
+ }
+
+ private void readFully(byte[] b, int len) throws IOException {
+ int read = 0;
+ while (read < len) {
+ final int r = in.read(b, read, len - read);
+ if (r < 0) {
+ throw new EOFException("Stream ended prematurely");
+ }
+ read += r;
+ }
+ assert len == read;
+ }
+
+ @Override
+ public boolean markSupported() {
+ return false;
+ }
+
+ @SuppressWarnings("sync-override")
+ @Override
+ public void mark(int readlimit) {
+ // unsupported
+ }
+
+ @SuppressWarnings("sync-override")
+ @Override
+ public void reset() throws IOException {
+ throw new IOException("mark/reset not supported");
+ }
+
+ @Override
+ public String toString() {
+ return getClass().getSimpleName() + "(in=" + in
+ + ", decompressor=" + decompressor + ", checksum=" + checksum + ")";
+ }
+
+}
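A minimal round-trip sketch for the block streams in this file and the next one, assuming the repackaged com.fr.third.net.jpountz classes from this change are on the classpath; the no-arg constructors pick the fastest available codec, 64 KB blocks, and an xxhash32 checksum:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import com.fr.third.net.jpountz.lz4.LZ4BlockInputStream;
import com.fr.third.net.jpountz.lz4.LZ4BlockOutputStream;

public class Lz4BlockRoundTrip {
    public static void main(String[] args) throws IOException {
        byte[] original = "hello lz4 block stream".getBytes(StandardCharsets.UTF_8);

        // Compress: each flushed block is written as a "LZ4Block"-framed record.
        ByteArrayOutputStream sink = new ByteArrayOutputStream();
        try (LZ4BlockOutputStream out = new LZ4BlockOutputStream(sink)) {
            out.write(original);
        } // close() flushes the last block and writes the empty end-marker block

        // Decompress: with the default stopOnEmptyBlock=true the reader stops at that marker.
        ByteArrayInputStream source = new ByteArrayInputStream(sink.toByteArray());
        byte[] restored = new byte[original.length];
        try (LZ4BlockInputStream in = new LZ4BlockInputStream(source)) {
            int read = 0;
            while (read < restored.length) {
                int r = in.read(restored, read, restored.length - read);
                if (r < 0) {
                    break;
                }
                read += r;
            }
        }
        System.out.println(new String(restored, StandardCharsets.UTF_8));
    }
}
```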
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4BlockOutputStream.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4BlockOutputStream.java
new file mode 100644
index 000000000..8ab77beeb
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4BlockOutputStream.java
@@ -0,0 +1,279 @@
+package com.fr.third.net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.FilterOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.zip.Checksum;
+
+import com.fr.third.net.jpountz.util.SafeUtils;
+import com.fr.third.net.jpountz.xxhash.StreamingXXHash32;
+import com.fr.third.net.jpountz.xxhash.XXHashFactory;
+
+/**
+ * Streaming LZ4 (not compatible with the LZ4 Frame format).
+ * This class compresses data into fixed-size blocks of compressed data.
+ * This class uses its own format and is not compatible with the LZ4 Frame format.
+ * For interoperability with other LZ4 tools, use {@link LZ4FrameOutputStream},
+ * which is compatible with the LZ4 Frame format. This class remains for backward compatibility.
+ * @see LZ4BlockInputStream
+ * @see LZ4FrameOutputStream
+ */
+public final class LZ4BlockOutputStream extends FilterOutputStream {
+
+ static final byte[] MAGIC = new byte[] { 'L', 'Z', '4', 'B', 'l', 'o', 'c', 'k' };
+ static final int MAGIC_LENGTH = MAGIC.length;
+
+ static final int HEADER_LENGTH =
+ MAGIC_LENGTH // magic bytes
+ + 1 // token
+ + 4 // compressed length
+ + 4 // decompressed length
+ + 4; // checksum
+
+ static final int COMPRESSION_LEVEL_BASE = 10;
+ static final int MIN_BLOCK_SIZE = 64;
+ static final int MAX_BLOCK_SIZE = 1 << (COMPRESSION_LEVEL_BASE + 0x0F);
+
+ static final int COMPRESSION_METHOD_RAW = 0x10;
+ static final int COMPRESSION_METHOD_LZ4 = 0x20;
+
+ static final int DEFAULT_SEED = 0x9747b28c;
+
+ private static int compressionLevel(int blockSize) {
+ if (blockSize < MIN_BLOCK_SIZE) {
+ throw new IllegalArgumentException("blockSize must be >= " + MIN_BLOCK_SIZE + ", got " + blockSize);
+ } else if (blockSize > MAX_BLOCK_SIZE) {
+ throw new IllegalArgumentException("blockSize must be <= " + MAX_BLOCK_SIZE + ", got " + blockSize);
+ }
+ int compressionLevel = 32 - Integer.numberOfLeadingZeros(blockSize - 1); // ceil of log2
+ assert (1 << compressionLevel) >= blockSize;
+ assert blockSize * 2 > (1 << compressionLevel);
+ compressionLevel = Math.max(0, compressionLevel - COMPRESSION_LEVEL_BASE);
+ assert compressionLevel >= 0 && compressionLevel <= 0x0F;
+ return compressionLevel;
+ }
+
+ private final int blockSize;
+ private final int compressionLevel;
+ private final LZ4Compressor compressor;
+ private final Checksum checksum;
+ private final byte[] buffer;
+ private final byte[] compressedBuffer;
+ private final boolean syncFlush;
+ private boolean finished;
+ private int o;
+
+ /**
+ * Creates a new {@link OutputStream} with configurable block size. Large
+ * blocks require more memory at compression and decompression time but
+ * should improve the compression ratio.
+ *
+ * @param out the {@link OutputStream} to feed
+ * @param blockSize the maximum number of bytes to try to compress at once,
+ * must be >= 64 and <= 32 M
+ * @param compressor the {@link LZ4Compressor} instance to use to compress
+ * data
+ * @param checksum the {@link Checksum} instance to use to check data for
+ * integrity.
+ * @param syncFlush true if pending data should also be flushed on {@link #flush()}
+ */
+ public LZ4BlockOutputStream(OutputStream out, int blockSize, LZ4Compressor compressor, Checksum checksum, boolean syncFlush) {
+ super(out);
+ this.blockSize = blockSize;
+ this.compressor = compressor;
+ this.checksum = checksum;
+ this.compressionLevel = compressionLevel(blockSize);
+ this.buffer = new byte[blockSize];
+ final int compressedBlockSize = HEADER_LENGTH + compressor.maxCompressedLength(blockSize);
+ this.compressedBuffer = new byte[compressedBlockSize];
+ this.syncFlush = syncFlush;
+ o = 0;
+ finished = false;
+ System.arraycopy(MAGIC, 0, compressedBuffer, 0, MAGIC_LENGTH);
+ }
+
+ /**
+ * Creates a new instance which checks stream integrity using
+ * {@link StreamingXXHash32} and doesn't sync flush.
+ *
+ * @param out the {@link OutputStream} to feed
+ * @param blockSize the maximum number of bytes to try to compress at once,
+ * must be >= 64 and <= 32 M
+ * @param compressor the {@link LZ4Compressor} instance to use to compress
+ * data
+ *
+ * @see #LZ4BlockOutputStream(OutputStream, int, LZ4Compressor, Checksum, boolean)
+ * @see StreamingXXHash32#asChecksum()
+ */
+ public LZ4BlockOutputStream(OutputStream out, int blockSize, LZ4Compressor compressor) {
+ this(out, blockSize, compressor, XXHashFactory.fastestInstance().newStreamingHash32(DEFAULT_SEED).asChecksum(), false);
+ }
+
+ /**
+ * Creates a new instance which compresses with the standard LZ4 compression
+ * algorithm.
+ *
+ * @param out the {@link OutputStream} to feed
+ * @param blockSize the maximum number of bytes to try to compress at once,
+ * must be >= 64 and <= 32 M
+ *
+ * @see #LZ4BlockOutputStream(OutputStream, int, LZ4Compressor)
+ * @see LZ4Factory#fastCompressor()
+ */
+ public LZ4BlockOutputStream(OutputStream out, int blockSize) {
+ this(out, blockSize, LZ4Factory.fastestInstance().fastCompressor());
+ }
+
+ /**
+ * Creates a new instance which compresses into blocks of 64 KB.
+ *
+ * @param out the {@link OutputStream} to feed
+ *
+ * @see #LZ4BlockOutputStream(OutputStream, int)
+ */
+ public LZ4BlockOutputStream(OutputStream out) {
+ this(out, 1 << 16);
+ }
+
+ private void ensureNotFinished() {
+ if (finished) {
+ throw new IllegalStateException("This stream is already closed");
+ }
+ }
+
+ @Override
+ public void write(int b) throws IOException {
+ ensureNotFinished();
+ if (o == blockSize) {
+ flushBufferedData();
+ }
+ buffer[o++] = (byte) b;
+ }
+
+ @Override
+ public void write(byte[] b, int off, int len) throws IOException {
+ SafeUtils.checkRange(b, off, len);
+ ensureNotFinished();
+
+ while (o + len > blockSize) {
+ final int l = blockSize - o;
+ System.arraycopy(b, off, buffer, o, blockSize - o);
+ o = blockSize;
+ flushBufferedData();
+ off += l;
+ len -= l;
+ }
+ System.arraycopy(b, off, buffer, o, len);
+ o += len;
+ }
+
+ @Override
+ public void write(byte[] b) throws IOException {
+ ensureNotFinished();
+ write(b, 0, b.length);
+ }
+
+ @Override
+ public void close() throws IOException {
+ if (!finished) {
+ finish();
+ }
+ if (out != null) {
+ out.close();
+ out = null;
+ }
+ }
+
+ private void flushBufferedData() throws IOException {
+ if (o == 0) {
+ return;
+ }
+ checksum.reset();
+ checksum.update(buffer, 0, o);
+ final int check = (int) checksum.getValue();
+ int compressedLength = compressor.compress(buffer, 0, o, compressedBuffer, HEADER_LENGTH);
+ final int compressMethod;
+ if (compressedLength >= o) {
+ compressMethod = COMPRESSION_METHOD_RAW;
+ compressedLength = o;
+ System.arraycopy(buffer, 0, compressedBuffer, HEADER_LENGTH, o);
+ } else {
+ compressMethod = COMPRESSION_METHOD_LZ4;
+ }
+
+ compressedBuffer[MAGIC_LENGTH] = (byte) (compressMethod | compressionLevel);
+ writeIntLE(compressedLength, compressedBuffer, MAGIC_LENGTH + 1);
+ writeIntLE(o, compressedBuffer, MAGIC_LENGTH + 5);
+ writeIntLE(check, compressedBuffer, MAGIC_LENGTH + 9);
+ assert MAGIC_LENGTH + 13 == HEADER_LENGTH;
+ out.write(compressedBuffer, 0, HEADER_LENGTH + compressedLength);
+ o = 0;
+ }
+
+ /**
+ * Flushes this compressed {@link OutputStream}.
+ *
+ * If the stream has been created with syncFlush=true, pending
+ * data will be compressed and appended to the underlying {@link OutputStream}
+ * before calling {@link OutputStream#flush()} on the underlying stream.
+ * Otherwise, this method just flushes the underlying stream, so pending
+ * data might not be available for reading until {@link #finish()} or
+ * {@link #close()} is called.
+ */
+ @Override
+ public void flush() throws IOException {
+ if (out != null) {
+ if (syncFlush) {
+ flushBufferedData();
+ }
+ out.flush();
+ }
+ }
+
+ /**
+ * Same as {@link #close()} except that it doesn't close the underlying stream.
+ * This can be useful if you want to keep on using the underlying stream.
+ *
+ * @throws IOException if an I/O error occurs.
+ */
+ public void finish() throws IOException {
+ ensureNotFinished();
+ flushBufferedData();
+ compressedBuffer[MAGIC_LENGTH] = (byte) (COMPRESSION_METHOD_RAW | compressionLevel);
+ writeIntLE(0, compressedBuffer, MAGIC_LENGTH + 1);
+ writeIntLE(0, compressedBuffer, MAGIC_LENGTH + 5);
+ writeIntLE(0, compressedBuffer, MAGIC_LENGTH + 9);
+ assert MAGIC_LENGTH + 13 == HEADER_LENGTH;
+ out.write(compressedBuffer, 0, HEADER_LENGTH);
+ finished = true;
+ out.flush();
+ }
+
+ private static void writeIntLE(int i, byte[] buf, int off) {
+ buf[off++] = (byte) i;
+ buf[off++] = (byte) (i >>> 8);
+ buf[off++] = (byte) (i >>> 16);
+ buf[off++] = (byte) (i >>> 24);
+ }
+
+ @Override
+ public String toString() {
+ return getClass().getSimpleName() + "(out=" + out + ", blockSize=" + blockSize
+ + ", compressor=" + compressor + ", checksum=" + checksum + ")";
+ }
+
+}
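One point worth calling out from the javadoc above: finish() terminates the compressed section without closing the underlying stream. A short sketch of that contract (the helper name is illustrative, not part of this change):

```java
import java.io.IOException;
import java.io.OutputStream;

import com.fr.third.net.jpountz.lz4.LZ4BlockOutputStream;

public final class Lz4SectionWriter {
    /** Writes one compressed section followed by a plain trailer, reusing the same sink. */
    static void writeCompressedSection(OutputStream sink, byte[] payload, byte[] trailer) throws IOException {
        LZ4BlockOutputStream lz4 = new LZ4BlockOutputStream(sink, 1 << 16);
        lz4.write(payload);
        lz4.finish();          // flush pending block + end marker; underlying stream stays open
        sink.write(trailer);   // continue writing uncompressed data to the same stream
        sink.flush();
    }
}
```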
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4ByteBufferUtils.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4ByteBufferUtils.java
new file mode 100644
index 000000000..29769aa7e
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4ByteBufferUtils.java
@@ -0,0 +1,237 @@
+package com.fr.third.net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import static com.fr.third.net.jpountz.lz4.LZ4Constants.COPY_LENGTH;
+import static com.fr.third.net.jpountz.lz4.LZ4Constants.LAST_LITERALS;
+import static com.fr.third.net.jpountz.lz4.LZ4Constants.ML_BITS;
+import static com.fr.third.net.jpountz.lz4.LZ4Constants.ML_MASK;
+import static com.fr.third.net.jpountz.lz4.LZ4Constants.RUN_MASK;
+import static com.fr.third.net.jpountz.util.ByteBufferUtils.readByte;
+import static com.fr.third.net.jpountz.util.ByteBufferUtils.readInt;
+import static com.fr.third.net.jpountz.util.ByteBufferUtils.readLong;
+import static com.fr.third.net.jpountz.util.ByteBufferUtils.writeByte;
+import static com.fr.third.net.jpountz.util.ByteBufferUtils.writeInt;
+import static com.fr.third.net.jpountz.util.ByteBufferUtils.writeLong;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+enum LZ4ByteBufferUtils {
+ ;
+ static int hash(ByteBuffer buf, int i) {
+ return LZ4Utils.hash(readInt(buf, i));
+ }
+
+ static int hash64k(ByteBuffer buf, int i) {
+ return LZ4Utils.hash64k(readInt(buf, i));
+ }
+
+ static boolean readIntEquals(ByteBuffer buf, int i, int j) {
+ return buf.getInt(i) == buf.getInt(j);
+ }
+
+ static void safeIncrementalCopy(ByteBuffer dest, int matchOff, int dOff, int matchLen) {
+ for (int i = 0; i < matchLen; ++i) {
+ dest.put(dOff + i, dest.get(matchOff + i));
+ }
+ }
+
+ static void wildIncrementalCopy(ByteBuffer dest, int matchOff, int dOff, int matchCopyEnd) {
+ if (dOff - matchOff < 4) {
+ for (int i = 0; i < 4; ++i) {
+ writeByte(dest, dOff+i, readByte(dest, matchOff+i));
+ }
+ dOff += 4;
+ matchOff += 4;
+ int dec = 0;
+ assert dOff >= matchOff && dOff - matchOff < 8;
+ switch (dOff - matchOff) {
+ case 1:
+ matchOff -= 3;
+ break;
+ case 2:
+ matchOff -= 2;
+ break;
+ case 3:
+ matchOff -= 3;
+ dec = -1;
+ break;
+ case 5:
+ dec = 1;
+ break;
+ case 6:
+ dec = 2;
+ break;
+ case 7:
+ dec = 3;
+ break;
+ default:
+ break;
+ }
+ writeInt(dest, dOff, readInt(dest, matchOff));
+ dOff += 4;
+ matchOff -= dec;
+ } else if (dOff - matchOff < COPY_LENGTH) {
+ writeLong(dest, dOff, readLong(dest, matchOff));
+ dOff += dOff - matchOff;
+ }
+ while (dOff < matchCopyEnd) {
+ writeLong(dest, dOff, readLong(dest, matchOff));
+ dOff += 8;
+ matchOff += 8;
+ }
+ }
+
+ static int commonBytes(ByteBuffer src, int ref, int sOff, int srcLimit) {
+ int matchLen = 0;
+ while (sOff <= srcLimit - 8) {
+ if (readLong(src, sOff) == readLong(src, ref)) {
+ matchLen += 8;
+ ref += 8;
+ sOff += 8;
+ } else {
+ final int zeroBits;
+ if (src.order() == ByteOrder.BIG_ENDIAN) {
+ zeroBits = Long.numberOfLeadingZeros(readLong(src, sOff) ^ readLong(src, ref));
+ } else {
+ zeroBits = Long.numberOfTrailingZeros(readLong(src, sOff) ^ readLong(src, ref));
+ }
+ return matchLen + (zeroBits >>> 3);
+ }
+ }
+ while (sOff < srcLimit && readByte(src, ref++) == readByte(src, sOff++)) {
+ ++matchLen;
+ }
+ return matchLen;
+ }
+
+ static int commonBytesBackward(ByteBuffer b, int o1, int o2, int l1, int l2) {
+ int count = 0;
+ while (o1 > l1 && o2 > l2 && b.get(--o1) == b.get(--o2)) {
+ ++count;
+ }
+ return count;
+ }
+
+ static void safeArraycopy(ByteBuffer src, int sOff, ByteBuffer dest, int dOff, int len) {
+ for (int i = 0; i < len; ++i) {
+ dest.put(dOff + i, src.get(sOff + i));
+ }
+ }
+
+ static void wildArraycopy(ByteBuffer src, int sOff, ByteBuffer dest, int dOff, int len) {
+ assert src.order().equals(dest.order());
+ try {
+ for (int i = 0; i < len; i += 8) {
+ dest.putLong(dOff + i, src.getLong(sOff + i));
+ }
+ } catch (IndexOutOfBoundsException e) {
+ throw new LZ4Exception("Malformed input at offset " + sOff);
+ }
+ }
+
+ static int encodeSequence(ByteBuffer src, int anchor, int matchOff, int matchRef, int matchLen, ByteBuffer dest, int dOff, int destEnd) {
+ final int runLen = matchOff - anchor;
+ final int tokenOff = dOff++;
+
+ if (dOff + runLen + (2 + 1 + LAST_LITERALS) + (runLen >>> 8) > destEnd) {
+ throw new LZ4Exception("maxDestLen is too small");
+ }
+
+ int token;
+ if (runLen >= RUN_MASK) {
+ token = (byte) (RUN_MASK << ML_BITS);
+ dOff = writeLen(runLen - RUN_MASK, dest, dOff);
+ } else {
+ token = runLen << ML_BITS;
+ }
+
+ // copy literals
+ wildArraycopy(src, anchor, dest, dOff, runLen);
+ dOff += runLen;
+
+ // encode offset
+ final int matchDec = matchOff - matchRef;
+ dest.put(dOff++, (byte) matchDec);
+ dest.put(dOff++, (byte) (matchDec >>> 8));
+
+ // encode match len
+ matchLen -= 4;
+ if (dOff + (1 + LAST_LITERALS) + (matchLen >>> 8) > destEnd) {
+ throw new LZ4Exception("maxDestLen is too small");
+ }
+ if (matchLen >= ML_MASK) {
+ token |= ML_MASK;
+ dOff = writeLen(matchLen - RUN_MASK, dest, dOff);
+ } else {
+ token |= matchLen;
+ }
+
+ dest.put(tokenOff, (byte) token);
+
+ return dOff;
+ }
+
+ static int lastLiterals(ByteBuffer src, int sOff, int srcLen, ByteBuffer dest, int dOff, int destEnd) {
+ final int runLen = srcLen;
+
+ if (dOff + runLen + 1 + (runLen + 255 - RUN_MASK) / 255 > destEnd) {
+ throw new LZ4Exception();
+ }
+
+ if (runLen >= RUN_MASK) {
+ dest.put(dOff++, (byte) (RUN_MASK << ML_BITS));
+ dOff = writeLen(runLen - RUN_MASK, dest, dOff);
+ } else {
+ dest.put(dOff++, (byte) (runLen << ML_BITS));
+ }
+ // copy literals
+ safeArraycopy(src, sOff, dest, dOff, runLen);
+ dOff += runLen;
+
+ return dOff;
+ }
+
+ static int writeLen(int len, ByteBuffer dest, int dOff) {
+ while (len >= 0xFF) {
+ dest.put(dOff++, (byte) 0xFF);
+ len -= 0xFF;
+ }
+ dest.put(dOff++, (byte) len);
+ return dOff;
+ }
+
+ static class Match {
+ int start, ref, len;
+
+ void fix(int correction) {
+ start += correction;
+ ref += correction;
+ len -= correction;
+ }
+
+ int end() {
+ return start + len;
+ }
+ }
+
+ static void copyTo(Match m1, Match m2) {
+ m2.len = m1.len;
+ m2.start = m1.start;
+ m2.ref = m1.ref;
+ }
+
+}
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4Compressor.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4Compressor.java
new file mode 100644
index 000000000..2ad6724db
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4Compressor.java
@@ -0,0 +1,168 @@
+package com.fr.third.net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+/**
+ * LZ4 compressor.
+ *
+ * Instances of this class are thread-safe.
+ */
+public abstract class LZ4Compressor {
+
+ /**
+ * Returns the maximum compressed length for an input of size length.
+ *
+ * @param length the input size in bytes
+ * @return the maximum compressed length in bytes
+ */
+ @SuppressWarnings("static-method")
+ public final int maxCompressedLength(int length) {
+ return LZ4Utils.maxCompressedLength(length);
+ }
+
+ /**
+ * Compresses src[srcOff:srcOff+srcLen] into
+ * dest[destOff:destOff+maxDestLen] and returns the compressed
+ * length.
+ *
+ * This method will throw a {@link LZ4Exception} if this compressor is unable
+ * to compress the input into less than maxDestLen bytes. To
+ * prevent this exception from being thrown, you should make sure that
+ * maxDestLen >= maxCompressedLength(srcLen).
+ *
+ * @param src the source data
+ * @param srcOff the start offset in src
+ * @param srcLen the number of bytes to compress
+ * @param dest the destination buffer
+ * @param destOff the start offset in dest
+ * @param maxDestLen the maximum number of bytes to write in dest
+ * @throws LZ4Exception if maxDestLen is too small
+ * @return the compressed size
+ */
+ public abstract int compress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen);
+
+ /**
+ * Compresses src[srcOff:srcOff+srcLen] into
+ * dest[destOff:destOff+maxDestLen] and returns the compressed
+ * length.
+ *
+ * This method will throw a {@link LZ4Exception} if this compressor is unable
+ * to compress the input into less than maxDestLen bytes. To
+ * prevent this exception from being thrown, you should make sure that
+ * maxDestLen >= maxCompressedLength(srcLen).
+ *
+ * {@link ByteBuffer} positions remain unchanged.
+ *
+ * @param src the source data
+ * @param srcOff the start offset in src
+ * @param srcLen the number of bytes to compress
+ * @param dest the destination buffer
+ * @param destOff the start offset in dest
+ * @param maxDestLen the maximum number of bytes to write in dest
+ * @throws LZ4Exception if maxDestLen is too small
+ * @return the compressed size
+ */
+ public abstract int compress(ByteBuffer src, int srcOff, int srcLen, ByteBuffer dest, int destOff, int maxDestLen);
+
+ /**
+ * Convenience method, equivalent to calling
+ * {@link #compress(byte[], int, int, byte[], int, int) compress(src, srcOff, srcLen, dest, destOff, dest.length - destOff)}.
+ *
+ * @param src the source data
+ * @param srcOff the start offset in src
+ * @param srcLen the number of bytes to compress
+ * @param dest the destination buffer
+ * @param destOff the start offset in dest
+ * @throws LZ4Exception if dest is too small
+ * @return the compressed size
+ */
+ public final int compress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff) {
+ return compress(src, srcOff, srcLen, dest, destOff, dest.length - destOff);
+ }
+
+ /**
+ * Convenience method, equivalent to calling
+ * {@link #compress(byte[], int, int, byte[], int) compress(src, 0, src.length, dest, 0)}.
+ *
+ * @param src the source data
+ * @param dest the destination buffer
+ * @throws LZ4Exception if dest is too small
+ * @return the compressed size
+ */
+ public final int compress(byte[] src, byte[] dest) {
+ return compress(src, 0, src.length, dest, 0);
+ }
+
+ /**
+ * Convenience method which returns src[srcOff:srcOff+srcLen]
+ * compressed.
+ *
+ * Warning: this method has an
+ * important overhead due to the fact that it needs to allocate a buffer to
+ * compress into, and then needs to resize this buffer to the actual
+ * compressed length.
+ *
+ * Here is how this method is implemented:
+ *
+ * final int maxCompressedLength = maxCompressedLength(srcLen);
+ * final byte[] compressed = new byte[maxCompressedLength];
+ * final int compressedLength = compress(src, srcOff, srcLen, compressed, 0);
+ * return Arrays.copyOf(compressed, compressedLength);
+ *
+ * @param src the source data
+ * @param srcOff the start offset in src
+ * @param srcLen the number of bytes to compress
+ * @return the compressed data
+ */
+ public final byte[] compress(byte[] src, int srcOff, int srcLen) {
+ final int maxCompressedLength = maxCompressedLength(srcLen);
+ final byte[] compressed = new byte[maxCompressedLength];
+ final int compressedLength = compress(src, srcOff, srcLen, compressed, 0);
+ return Arrays.copyOf(compressed, compressedLength);
+ }
+
+ /**
+ * Convenience method, equivalent to calling
+ * {@link #compress(byte[], int, int) compress(src, 0, src.length)}.
+ *
+ * @param src the source data
+ * @return the compressed data
+ */
+ public final byte[] compress(byte[] src) {
+ return compress(src, 0, src.length);
+ }
+
+ /**
+ * Compresses src into dest. Calling this method
+ * will update the positions of both {@link ByteBuffer}s.
+ *
+ * @param src the source data
+ * @param dest the destination buffer
+ * @throws LZ4Exception if dest is too small
+ */
+ public final void compress(ByteBuffer src, ByteBuffer dest) {
+ final int cpLen = compress(src, src.position(), src.remaining(), dest, dest.position(), dest.remaining());
+ src.position(src.limit());
+ dest.position(dest.position() + cpLen);
+ }
+
+ @Override
+ public String toString() {
+ return getClass().getSimpleName();
+ }
+
+}
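A short sketch of the low-level compressor API above (LZ4Factory is added later in this diff): sizing the destination with maxCompressedLength() guarantees that compress() cannot throw LZ4Exception for lack of space.

```java
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

import com.fr.third.net.jpountz.lz4.LZ4Compressor;
import com.fr.third.net.jpountz.lz4.LZ4Factory;

public class Lz4CompressorDemo {
    public static void main(String[] args) {
        byte[] src = "abcabcabcabcabcabcabcabc".getBytes(StandardCharsets.UTF_8);

        LZ4Compressor compressor = LZ4Factory.fastestInstance().fastCompressor();

        // Destination sized with maxCompressedLength(), so the compressor always has room.
        byte[] dest = new byte[compressor.maxCompressedLength(src.length)];
        int compressedLength = compressor.compress(src, 0, src.length, dest, 0, dest.length);

        byte[] compressed = Arrays.copyOf(dest, compressedLength);
        System.out.println(src.length + " -> " + compressed.length + " bytes");
    }
}
```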
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4Constants.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4Constants.java
new file mode 100644
index 000000000..7295dbf80
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4Constants.java
@@ -0,0 +1,53 @@
+package com.fr.third.net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+enum LZ4Constants {
+ ;
+
+ static final int DEFAULT_COMPRESSION_LEVEL = 8+1;
+ static final int MAX_COMPRESSION_LEVEL = 16+1;
+
+ static final int MEMORY_USAGE = 14;
+ static final int NOT_COMPRESSIBLE_DETECTION_LEVEL = 6;
+
+ static final int MIN_MATCH = 4;
+
+ static final int HASH_LOG = MEMORY_USAGE - 2;
+ static final int HASH_TABLE_SIZE = 1 << HASH_LOG;
+
+ static final int SKIP_STRENGTH = Math.max(NOT_COMPRESSIBLE_DETECTION_LEVEL, 2);
+ static final int COPY_LENGTH = 8;
+ static final int LAST_LITERALS = 5;
+ static final int MF_LIMIT = COPY_LENGTH + MIN_MATCH;
+ static final int MIN_LENGTH = MF_LIMIT + 1;
+
+ static final int MAX_DISTANCE = 1 << 16;
+
+ static final int ML_BITS = 4;
+ static final int ML_MASK = (1 << ML_BITS) - 1;
+ static final int RUN_BITS = 8 - ML_BITS;
+ static final int RUN_MASK = (1 << RUN_BITS) - 1;
+
+ static final int LZ4_64K_LIMIT = (1 << 16) + (MF_LIMIT - 1);
+ static final int HASH_LOG_64K = HASH_LOG + 1;
+ static final int HASH_TABLE_SIZE_64K = 1 << HASH_LOG_64K;
+
+ static final int HASH_LOG_HC = 15;
+ static final int HASH_TABLE_SIZE_HC = 1 << HASH_LOG_HC;
+ static final int OPTIMAL_ML = ML_MASK - 1 + MIN_MATCH;
+
+}
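For reference, the block-token arithmetic that ties the block streams to these limits: LZ4BlockOutputStream.compressionLevel() stores ceil(log2(blockSize)) - 10 in the token's low nibble, and LZ4BlockInputStream.refill() accepts blocks up to 1 << (10 + nibble) bytes. A tiny standalone check (COMPRESSION_LEVEL_BASE is copied from LZ4BlockOutputStream; LZ4Constants itself is package-private):

```java
public class BlockTokenMath {
    public static void main(String[] args) {
        final int COMPRESSION_LEVEL_BASE = 10;   // same value as LZ4BlockOutputStream.COMPRESSION_LEVEL_BASE
        int blockSize = 1 << 16;                 // 64 KB, the default block size

        // ceil(log2(blockSize)), then rebased so the minimum 64-byte block maps below level 0
        int level = 32 - Integer.numberOfLeadingZeros(blockSize - 1);
        level = Math.max(0, level - COMPRESSION_LEVEL_BASE);

        System.out.println("token low nibble = " + level);                                    // 6 for 64 KB blocks
        System.out.println("max originalLen  = " + (1 << (COMPRESSION_LEVEL_BASE + level)));  // 65536
    }
}
```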
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4Decompressor.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4Decompressor.java
new file mode 100644
index 000000000..05e98c10f
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4Decompressor.java
@@ -0,0 +1,25 @@
+package com.fr.third.net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @deprecated Use {@link LZ4FastDecompressor} instead.
+ */
+@Deprecated
+public interface LZ4Decompressor {
+
+ int decompress(byte[] src, int srcOff, byte[] dest, int destOff, int destLen);
+
+}
\ No newline at end of file
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4Exception.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4Exception.java
new file mode 100644
index 000000000..78355d46f
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4Exception.java
@@ -0,0 +1,36 @@
+package com.fr.third.net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * LZ4 compression or decompression error.
+ */
+public class LZ4Exception extends RuntimeException {
+
+ private static final long serialVersionUID = 1L;
+
+ public LZ4Exception(String msg, Throwable t) {
+ super(msg, t);
+ }
+
+ public LZ4Exception(String msg) {
+ super(msg);
+ }
+
+ public LZ4Exception() {
+ super();
+ }
+
+}
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4Factory.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4Factory.java
new file mode 100644
index 000000000..c0e6df16a
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4Factory.java
@@ -0,0 +1,309 @@
+package com.fr.third.net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Field;
+import java.lang.reflect.InvocationTargetException;
+import java.util.Arrays;
+
+import com.fr.third.net.jpountz.util.Native;
+import com.fr.third.net.jpountz.util.Utils;
+
+/**
+ * Entry point for the LZ4 API.
+ *
+ * This class has 3 instances: a native instance backed by JNI bindings to the
+ * original C implementation, a safe Java instance, and an unsafe Java instance.
+ *
+ * Only the {@link #safeInstance() safe instance} is guaranteed to work on your
+ * JVM, as a consequence it is advised to use the {@link #fastestInstance()} or
+ * {@link #fastestJavaInstance()} to pull a {@link LZ4Factory} instance.
+ *
+ * All methods from this class are very costly, so you should get an instance
+ * once, and then reuse it whenever possible. This is typically done by storing
+ * a {@link LZ4Factory} instance in a static field.
+ */
+public final class LZ4Factory {
+
+ private static LZ4Factory instance(String impl) {
+ try {
+ return new LZ4Factory(impl);
+ } catch (Exception e) {
+ throw new AssertionError(e);
+ }
+ }
+
+ private static LZ4Factory NATIVE_INSTANCE,
+ JAVA_UNSAFE_INSTANCE,
+ JAVA_SAFE_INSTANCE;
+
+ /**
+ * Returns a {@link LZ4Factory} instance that returns compressors and
+ * decompressors that are native bindings to the original C library.
+ *
+ * Please note that this instance has some traps you should be aware of:
+ * Please read {@link #nativeInstance() javadocs of nativeInstance()} before
+ * using this method.
+ *
+ * @return the fastest available {@link LZ4Factory} instance
+ */
+ public static LZ4Factory fastestInstance() {
+ if (Native.isLoaded()
+ || Native.class.getClassLoader() == ClassLoader.getSystemClassLoader()) {
+ try {
+ return nativeInstance();
+ } catch (Throwable t) {
+ return fastestJavaInstance();
+ }
+ } else {
+ return fastestJavaInstance();
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ /**
+ * For current implementations, the following is true about compression level:
+ *
+ * Instances of this class are thread-safe.
+ */
+public abstract class LZ4FastDecompressor implements LZ4Decompressor {
+
+ /** Decompresses. Warning: this method has an
+ * important overhead due to the fact that it needs to allocate a buffer to
+ * decompress into. Here is how this method is implemented:
+ * Not Supported:
+ * Originally based on kafka's KafkaLZ4BlockInputStream.
+ *
+ * @see LZ4 Framing Format Spec 1.5.1
+ */
+public class LZ4FrameInputStream extends FilterInputStream {
+
+ static final String PREMATURE_EOS = "Stream ended prematurely";
+ static final String NOT_SUPPORTED = "Stream unsupported";
+ static final String BLOCK_HASH_MISMATCH = "Block checksum mismatch";
+ static final String DESCRIPTOR_HASH_MISMATCH = "Stream frame descriptor corrupted";
+ static final int MAGIC_SKIPPABLE_BASE = 0x184D2A50;
+
+ private final LZ4SafeDecompressor decompressor;
+ private final XXHash32 checksum;
+ private final byte[] headerArray = new byte[LZ4FrameOutputStream.LZ4_MAX_HEADER_LENGTH];
+ private final ByteBuffer headerBuffer = ByteBuffer.wrap(headerArray).order(ByteOrder.LITTLE_ENDIAN);
+ private byte[] compressedBuffer;
+ private ByteBuffer buffer = null;
+ private byte[] rawBuffer = null;
+ private int maxBlockSize = -1;
+ private long expectedContentSize = -1L;
+ private long totalContentSize = 0L;
+
+ private LZ4FrameOutputStream.FrameInfo frameInfo = null;
+
+ /**
+ * Creates a new {@link InputStream} that will decompress data using the fastest instances of {@link LZ4SafeDecompressor} and {@link XXHash32}.
+ *
+ * @param in the stream to decompress
+ * @throws IOException if an I/O error occurs
+ *
+ * @see #LZ4FrameInputStream(InputStream, LZ4SafeDecompressor, XXHash32)
+ * @see LZ4Factory#fastestInstance()
+ * @see XXHashFactory#fastestInstance()
+ */
+ public LZ4FrameInputStream(InputStream in) throws IOException {
+ this(in, LZ4Factory.fastestInstance().safeDecompressor(), XXHashFactory.fastestInstance().hash32());
+ }
+
+ /**
+ * Creates a new {@link InputStream} that will decompress data using the LZ4 algorithm.
+ *
+ * @param in the stream to decompress
+ * @param decompressor the decompressor to use
+ * @param checksum the hash function to use
+ * @throws IOException if an I/O error occurs
+ */
+ public LZ4FrameInputStream(InputStream in, LZ4SafeDecompressor decompressor, XXHash32 checksum) throws IOException {
+ super(in);
+ this.decompressor = decompressor;
+ this.checksum = checksum;
+ nextFrameInfo();
+ }
+
+
+
+ /**
+ * Try and load in the next valid frame info. This will skip over skippable frames.
+ * @return True if a frame was loaded. False if there are no more frames in the stream.
+ * @throws IOException On input stream read exception
+ */
+ private boolean nextFrameInfo() throws IOException {
+ while (true) {
+ int size = 0;
+ do {
+ final int mySize = in.read(readNumberBuff.array(), size, LZ4FrameOutputStream.INTEGER_BYTES - size);
+ if (mySize < 0) {
+ return false;
+ }
+ size += mySize;
+ } while (size < LZ4FrameOutputStream.INTEGER_BYTES);
+ final int magic = readNumberBuff.getInt(0);
+ if (magic == LZ4FrameOutputStream.MAGIC) {
+ readHeader();
+ return true;
+ } else if ((magic >>> 4) == (MAGIC_SKIPPABLE_BASE >>> 4)) {
+ skippableFrame();
+ } else {
+ throw new IOException(NOT_SUPPORTED);
+ }
+ }
+ }
+
+ private void skippableFrame() throws IOException {
+ int skipSize = readInt(in);
+ final byte[] skipBuffer = new byte[1 << 10];
+ while (skipSize > 0) {
+ final int mySize = in.read(skipBuffer, 0, Math.min(skipSize, skipBuffer.length));
+ if (mySize < 0) {
+ throw new IOException(PREMATURE_EOS);
+ }
+ skipSize -= mySize;
+ }
+ }
+
+ /**
+ * Reads the frame descriptor from the underlying {@link InputStream}.
+ *
+ * @throws IOException
+ */
+ private void readHeader() throws IOException {
+ headerBuffer.rewind();
+
+ final int flgRead = in.read();
+ if (flgRead < 0) {
+ throw new IOException(PREMATURE_EOS);
+ }
+ final int bdRead = in.read();
+ if (bdRead < 0) {
+ throw new IOException(PREMATURE_EOS);
+ }
+
+ final byte flgByte = (byte)(flgRead & 0xFF);
+ final LZ4FrameOutputStream.FLG flg = LZ4FrameOutputStream.FLG.fromByte(flgByte);
+ headerBuffer.put(flgByte);
+ final byte bdByte = (byte)(bdRead & 0xFF);
+ final LZ4FrameOutputStream.BD bd = LZ4FrameOutputStream.BD.fromByte(bdByte);
+ headerBuffer.put(bdByte);
+
+ this.frameInfo = new LZ4FrameOutputStream.FrameInfo(flg, bd);
+
+ if (flg.isEnabled(LZ4FrameOutputStream.FLG.Bits.CONTENT_SIZE)) {
+ expectedContentSize = readLong(in);
+ headerBuffer.putLong(expectedContentSize);
+ }
+ totalContentSize = 0L;
+
+ // check stream descriptor hash
+ final byte hash = (byte) ((checksum.hash(headerArray, 0, headerBuffer.position(), 0) >> 8) & 0xFF);
+ final int expectedHash = in.read();
+ if (expectedHash < 0) {
+ throw new IOException(PREMATURE_EOS);
+ }
+
+ if (hash != (byte)(expectedHash & 0xFF)) {
+ throw new IOException(DESCRIPTOR_HASH_MISMATCH);
+ }
+
+ maxBlockSize = frameInfo.getBD().getBlockMaximumSize();
+ compressedBuffer = new byte[maxBlockSize]; // Reused during different compressions
+ rawBuffer = new byte[maxBlockSize];
+ buffer = ByteBuffer.wrap(rawBuffer);
+ buffer.limit(0);
+ }
+
+ private final ByteBuffer readNumberBuff = ByteBuffer.allocate(LZ4FrameOutputStream.LONG_BYTES).order(ByteOrder.LITTLE_ENDIAN);
+
+ private long readLong(InputStream stream) throws IOException {
+ int offset = 0;
+ do {
+ final int mySize = stream.read(readNumberBuff.array(), offset, LZ4FrameOutputStream.LONG_BYTES - offset);
+ if (mySize < 0) {
+ throw new IOException(PREMATURE_EOS);
+ }
+ offset += mySize;
+ } while (offset < LZ4FrameOutputStream.LONG_BYTES);
+ return readNumberBuff.getLong(0);
+ }
+
+ private int readInt(InputStream stream) throws IOException {
+ int offset = 0;
+ do {
+ final int mySize = stream.read(readNumberBuff.array(), offset, LZ4FrameOutputStream.INTEGER_BYTES - offset);
+ if (mySize < 0) {
+ throw new IOException(PREMATURE_EOS);
+ }
+ offset += mySize;
+ } while (offset < LZ4FrameOutputStream.INTEGER_BYTES);
+ return readNumberBuff.getInt(0);
+ }
+
+ /**
+ * Decompress (if necessary) buffered data, optionally computes and validates a XXHash32 checksum, and writes the
+ * result to a buffer.
+ *
+ * @throws IOException
+ */
+ private void readBlock() throws IOException {
+ int blockSize = readInt(in);
+ final boolean compressed = (blockSize & LZ4FrameOutputStream.LZ4_FRAME_INCOMPRESSIBLE_MASK) == 0;
+ blockSize &= ~LZ4FrameOutputStream.LZ4_FRAME_INCOMPRESSIBLE_MASK;
+
+ // Check for EndMark
+ if (blockSize == 0) {
+ if (frameInfo.isEnabled(LZ4FrameOutputStream.FLG.Bits.CONTENT_CHECKSUM)) {
+ final int contentChecksum = readInt(in);
+ if (contentChecksum != frameInfo.currentStreamHash()) {
+ throw new IOException("Content checksum mismatch");
+ }
+ }
+ if (frameInfo.isEnabled(LZ4FrameOutputStream.FLG.Bits.CONTENT_SIZE) && expectedContentSize != totalContentSize) {
+ throw new IOException("Size check mismatch");
+ }
+ frameInfo.finish();
+ return;
+ }
+
+ final byte[] tmpBuffer; // Use a temporary buffer, potentially one used for compression
+ if (compressed) {
+ tmpBuffer = compressedBuffer;
+ } else {
+ tmpBuffer = rawBuffer;
+ }
+ if (blockSize > maxBlockSize) {
+ throw new IOException(String.format(Locale.ROOT, "Block size %s exceeded max: %s", blockSize, maxBlockSize));
+ }
+
+ int offset = 0;
+ while (offset < blockSize) {
+ final int lastRead = in.read(tmpBuffer, offset, blockSize - offset);
+ if (lastRead < 0) {
+ throw new IOException(PREMATURE_EOS);
+ }
+ offset += lastRead;
+ }
+
+ // verify block checksum
+ if (frameInfo.isEnabled(LZ4FrameOutputStream.FLG.Bits.BLOCK_CHECKSUM)) {
+ final int hashCheck = readInt(in);
+ if (hashCheck != checksum.hash(tmpBuffer, 0, blockSize, 0)) {
+ throw new IOException(BLOCK_HASH_MISMATCH);
+ }
+ }
+
+ final int currentBufferSize;
+ if (compressed) {
+ try {
+ currentBufferSize = decompressor.decompress(tmpBuffer, 0, blockSize, rawBuffer, 0, rawBuffer.length);
+ } catch (LZ4Exception e) {
+ throw new IOException(e);
+ }
+ } else {
+ currentBufferSize = blockSize;
+ }
+ if (frameInfo.isEnabled(LZ4FrameOutputStream.FLG.Bits.CONTENT_CHECKSUM)) {
+ frameInfo.updateStreamHash(rawBuffer, 0, currentBufferSize);
+ }
+ totalContentSize += currentBufferSize;
+ buffer.limit(currentBufferSize);
+ buffer.rewind();
+ }
+
+ @Override
+ public int read() throws IOException {
+ while (buffer.remaining() == 0) {
+ if (frameInfo.isFinished()) {
+ if (!nextFrameInfo()) {
+ return -1;
+ }
+ }
+ readBlock();
+ }
+ return (int)buffer.get() & 0xFF;
+ }
+
+ @Override
+ public int read(byte[] b, int off, int len) throws IOException {
+ if ((off < 0) || (len < 0) || (off + len > b.length)) {
+ throw new IndexOutOfBoundsException();
+ }
+ while (buffer.remaining() == 0) {
+ if (frameInfo.isFinished()) {
+ if (!nextFrameInfo()) {
+ return -1;
+ }
+ }
+ readBlock();
+ }
+ len = Math.min(len, buffer.remaining());
+ buffer.get(b, off, len);
+ return len;
+ }
+
+ @Override
+ public long skip(long n) throws IOException {
+ if (n <= 0) {
+ return 0;
+ }
+ while (buffer.remaining() == 0) {
+ if (frameInfo.isFinished()) {
+ if (!nextFrameInfo()) {
+ return 0;
+ }
+ }
+ readBlock();
+ }
+ n = Math.min(n, buffer.remaining());
+ buffer.position(buffer.position() + (int)n);
+ return n;
+ }
+
+ @Override
+ public int available() throws IOException {
+ return buffer.remaining();
+ }
+
+ @Override
+ public void close() throws IOException {
+ super.close();
+ }
+
+ @Override
+ public synchronized void mark(int readlimit) {
+ throw new UnsupportedOperationException("mark not supported");
+ }
+
+ @Override
+ public synchronized void reset() throws IOException {
+ throw new UnsupportedOperationException("reset not supported");
+ }
+
+ @Override
+ public boolean markSupported() {
+ return false;
+ }
+
+}
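A sketch pairing the frame reader above with LZ4FrameOutputStream (whose diff follows). Since this is the standard LZ4 Frame format, the resulting data.lz4 (file name illustrative) should also be readable by other LZ4 frame tools:

```java
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import com.fr.third.net.jpountz.lz4.LZ4FrameInputStream;
import com.fr.third.net.jpountz.lz4.LZ4FrameOutputStream;

public class Lz4FrameRoundTrip {
    public static void main(String[] args) throws IOException {
        // Write one frame with the default 4 MB block size and the block-independence flag.
        try (LZ4FrameOutputStream out = new LZ4FrameOutputStream(new FileOutputStream("data.lz4"))) {
            out.write("frame format payload".getBytes(StandardCharsets.UTF_8));
        }

        // Read it back; the reader transparently skips skippable frames.
        byte[] chunk = new byte[8192];
        try (LZ4FrameInputStream in = new LZ4FrameInputStream(new FileInputStream("data.lz4"))) {
            int n;
            while ((n = in.read(chunk, 0, chunk.length)) >= 0) {
                System.out.write(chunk, 0, n);
            }
        }
        System.out.println();
    }
}
```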
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4FrameOutputStream.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4FrameOutputStream.java
new file mode 100644
index 000000000..a1ec09add
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4FrameOutputStream.java
@@ -0,0 +1,434 @@
+package com.fr.third.net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import com.fr.third.net.jpountz.xxhash.StreamingXXHash32;
+import com.fr.third.net.jpountz.xxhash.XXHash32;
+import com.fr.third.net.jpountz.xxhash.XXHashFactory;
+
+import java.io.FilterOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.util.Arrays;
+import java.util.BitSet;
+import java.util.Locale;
+
+/**
+ * Implementation of the v1.5.1 LZ4 Frame format. This class is NOT thread safe.
+ *
+ * Not Supported:
+ * Originally based on kafka's KafkaLZ4BlockOutputStream.
+ *
+ * @see LZ4 Framing Format Spec 1.5.1
+ */
+public class LZ4FrameOutputStream extends FilterOutputStream {
+
+ static final int INTEGER_BYTES = Integer.SIZE >>> 3; // or Integer.BYTES in Java 1.8
+ static final int LONG_BYTES = Long.SIZE >>> 3; // or Long.BYTES in Java 1.8
+
+ static final int MAGIC = 0x184D2204;
+ static final int LZ4_MAX_HEADER_LENGTH =
+ 4 + // magic
+ 1 + // FLG
+ 1 + // BD
+ 8 + // Content Size
+ 1; // HC
+ static final int LZ4_FRAME_INCOMPRESSIBLE_MASK = 0x80000000;
+ static final FLG.Bits[] DEFAULT_FEATURES = new FLG.Bits[]{FLG.Bits.BLOCK_INDEPENDENCE};
+
+ static final String CLOSED_STREAM = "The stream is already closed";
+
+ public static enum BLOCKSIZE {
+ SIZE_64KB(4), SIZE_256KB(5), SIZE_1MB(6), SIZE_4MB(7);
+ private final int indicator;
+ BLOCKSIZE(int indicator) {
+ this.indicator = indicator;
+ }
+ public int getIndicator() {
+ return this.indicator;
+ }
+ public static BLOCKSIZE valueOf(int indicator) {
+ switch(indicator) {
+ case 7: return SIZE_4MB;
+ case 6: return SIZE_1MB;
+ case 5: return SIZE_256KB;
+ case 4: return SIZE_64KB;
+ default: throw new IllegalArgumentException(String.format(Locale.ROOT, "Block size must be 4-7. Cannot use value of [%d]", indicator));
+ }
+ }
+ }
+
+ private final LZ4Compressor compressor;
+ private final XXHash32 checksum;
+ private final ByteBuffer buffer; // Buffer for uncompressed input data
+ private final byte[] compressedBuffer; // Only allocated once so it can be reused
+ private final int maxBlockSize;
+ private final long knownSize;
+ private final ByteBuffer intLEBuffer = ByteBuffer.allocate(INTEGER_BYTES).order(ByteOrder.LITTLE_ENDIAN);
+
+ private FrameInfo frameInfo = null;
+
+
+ /**
+ * Creates a new {@link OutputStream} that will compress data of unknown size using the LZ4 algorithm.
+ *
+ * @param out the output stream to compress
+ * @param blockSize the BLOCKSIZE to use
+ * @param bits a set of features to use
+ * @throws IOException if an I/O error occurs
+ *
+ * @see #LZ4FrameOutputStream(OutputStream, BLOCKSIZE, long, FLG.Bits...)
+ */
+ public LZ4FrameOutputStream(OutputStream out, BLOCKSIZE blockSize, FLG.Bits... bits) throws IOException {
+ this(out, blockSize, -1L, bits);
+ }
+
+ /**
+ * Creates a new {@link OutputStream} that will compress data using the fastest instances of {@link LZ4Compressor} and {@link XXHash32}.
+ *
+ * @param out the output stream to compress
+ * @param blockSize the BLOCKSIZE to use
+ * @param knownSize the size of the uncompressed data. A value less than zero means unknown.
+ * @param bits a set of features to use
+ * @throws IOException if an I/O error occurs
+ */
+ public LZ4FrameOutputStream(OutputStream out, BLOCKSIZE blockSize, long knownSize, FLG.Bits... bits) throws IOException {
+ super(out);
+ compressor = LZ4Factory.fastestInstance().fastCompressor();
+ checksum = XXHashFactory.fastestInstance().hash32();
+ frameInfo = new FrameInfo(new FLG(FLG.DEFAULT_VERSION, bits), new BD(blockSize));
+ maxBlockSize = frameInfo.getBD().getBlockMaximumSize();
+ buffer = ByteBuffer.allocate(maxBlockSize).order(ByteOrder.LITTLE_ENDIAN);
+ compressedBuffer = new byte[compressor.maxCompressedLength(maxBlockSize)];
+ if (frameInfo.getFLG().isEnabled(FLG.Bits.CONTENT_SIZE) && knownSize < 0) {
+ throw new IllegalArgumentException("Known size must be greater than zero in order to use the known size feature");
+ }
+ this.knownSize = knownSize;
+ writeHeader();
+ }
+
+ /**
+ * Creates a new {@link OutputStream} that will compress data using the LZ4 algorithm. The block independence flag is set, and none of the other flags are set.
+ *
+ * @param out The stream to compress
+ * @param blockSize the BLOCKSIZE to use
+ * @throws IOException if an I/O error occurs
+ *
+ * @see #LZ4FrameOutputStream(OutputStream, BLOCKSIZE, FLG.Bits...)
+ */
+ public LZ4FrameOutputStream(OutputStream out, BLOCKSIZE blockSize) throws IOException {
+ this(out, blockSize, DEFAULT_FEATURES);
+ }
+
+ /**
+ * Creates a new {@link OutputStream} that will compress data using the LZ4 algorithm with 4-MB blocks.
+ *
+ * @param out the output stream to compress
+ * @throws IOException if an I/O error occurs
+ *
+ * @see #LZ4FrameOutputStream(OutputStream, BLOCKSIZE)
+ */
+ public LZ4FrameOutputStream(OutputStream out) throws IOException {
+ this(out, BLOCKSIZE.SIZE_4MB);
+ }
+
+ /**
+ * Writes the magic number and frame descriptor to the underlying {@link OutputStream}.
+ *
+   * @throws IOException if an I/O error occurs while writing the frame header
+ */
+ private void writeHeader() throws IOException {
+ final ByteBuffer headerBuffer = ByteBuffer.allocate(LZ4_MAX_HEADER_LENGTH).order(ByteOrder.LITTLE_ENDIAN);
+ headerBuffer.putInt(MAGIC);
+ headerBuffer.put(frameInfo.getFLG().toByte());
+ headerBuffer.put(frameInfo.getBD().toByte());
+ if (frameInfo.isEnabled(FLG.Bits.CONTENT_SIZE)) {
+ headerBuffer.putLong(knownSize);
+ }
+ // compute checksum on all descriptor fields
+ final int hash = (checksum.hash(headerBuffer.array(), INTEGER_BYTES, headerBuffer.position() - INTEGER_BYTES, 0) >> 8) & 0xFF;
+ headerBuffer.put((byte) hash);
+ // write out frame descriptor
+ out.write(headerBuffer.array(), 0, headerBuffer.position());
+ }
+
+ /**
+ * Compresses buffered data, optionally computes an XXHash32 checksum, and writes the result to the underlying
+ * {@link OutputStream}.
+ *
+   * @throws IOException if an I/O error occurs while writing the block
+ */
+ private void writeBlock() throws IOException {
+ if (buffer.position() == 0) {
+ return;
+ }
+ // Make sure there's no stale data
+ Arrays.fill(compressedBuffer, (byte) 0);
+
+ int compressedLength = compressor.compress(buffer.array(), 0, buffer.position(), compressedBuffer, 0);
+ final byte[] bufferToWrite;
+ final int compressMethod;
+
+ // Store block uncompressed if compressed length is greater (incompressible)
+ if (compressedLength >= buffer.position()) {
+ compressedLength = buffer.position();
+ bufferToWrite = Arrays.copyOf(buffer.array(), compressedLength);
+ compressMethod = LZ4_FRAME_INCOMPRESSIBLE_MASK;
+ } else {
+ bufferToWrite = compressedBuffer;
+ compressMethod = 0;
+ }
+
+ // Write content
+ intLEBuffer.putInt(0, compressedLength | compressMethod);
+ out.write(intLEBuffer.array());
+ out.write(bufferToWrite, 0, compressedLength);
+
+ // Calculate and write block checksum
+ if (frameInfo.isEnabled(FLG.Bits.BLOCK_CHECKSUM)) {
+ intLEBuffer.putInt(0, checksum.hash(bufferToWrite, 0, compressedLength, 0));
+ out.write(intLEBuffer.array());
+ }
+ buffer.rewind();
+ }
+
+ /**
+ * Similar to the {@link #writeBlock()} method. Writes a 0-length block (without block checksum) to signal the end
+ * of the block stream.
+ *
+   * @throws IOException if an I/O error occurs while writing the end mark
+ */
+ private void writeEndMark() throws IOException {
+ intLEBuffer.putInt(0, 0);
+ out.write(intLEBuffer.array());
+ if (frameInfo.isEnabled(FLG.Bits.CONTENT_CHECKSUM)) {
+ intLEBuffer.putInt(0, frameInfo.currentStreamHash());
+ out.write(intLEBuffer.array());
+ }
+ frameInfo.finish();
+ }
+
+ @Override
+ public void write(int b) throws IOException {
+ ensureNotFinished();
+ if (buffer.position() == maxBlockSize) {
+ writeBlock();
+ }
+ buffer.put((byte) b);
+
+ if (frameInfo.isEnabled(FLG.Bits.CONTENT_CHECKSUM)) {
+ frameInfo.updateStreamHash(new byte[]{(byte) b}, 0, 1);
+ }
+ }
+
+ @Override
+ public void write(byte[] b, int off, int len) throws IOException {
+ if ((off < 0) || (len < 0) || (off + len > b.length)) {
+ throw new IndexOutOfBoundsException();
+ }
+ ensureNotFinished();
+
+    // while the remaining input is larger than the space left in the buffer, fill it and flush a block
+ while (len > buffer.remaining()) {
+ int sizeWritten = buffer.remaining();
+ // fill remaining space in buffer
+ buffer.put(b, off, sizeWritten);
+ if (frameInfo.isEnabled(FLG.Bits.CONTENT_CHECKSUM)) {
+ frameInfo.updateStreamHash(b, off, sizeWritten);
+ }
+ writeBlock();
+ // compute new offset and length
+ off += sizeWritten;
+ len -= sizeWritten;
+ }
+ buffer.put(b, off, len);
+
+ if (frameInfo.isEnabled(FLG.Bits.CONTENT_CHECKSUM)) {
+ frameInfo.updateStreamHash(b, off, len);
+ }
+ }
+
+ @Override
+ public void flush() throws IOException {
+ if (!frameInfo.isFinished()) {
+ writeBlock();
+ }
+ super.flush();
+ }
+
+ /**
+ * A simple state check to ensure the stream is still open.
+ */
+ private void ensureNotFinished() {
+ if (frameInfo.isFinished()) {
+ throw new IllegalStateException(CLOSED_STREAM);
+ }
+ }
+
+ @Override
+ public void close() throws IOException {
+ if (!frameInfo.isFinished()) {
+ flush();
+ writeEndMark();
+ }
+ super.close();
+ }
+
+ public static class FLG {
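+    // FLG byte layout (frame descriptor): bits 7-6 version, bit 5 block independence,
+    // bit 4 block checksum, bit 3 content size, bit 2 content checksum, bits 1-0 reserved.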
+ private static final int DEFAULT_VERSION = 1;
+
+ private final BitSet bitSet;
+ private final int version;
+
+ public enum Bits {
+ RESERVED_0(0),
+ RESERVED_1(1),
+ CONTENT_CHECKSUM(2),
+ CONTENT_SIZE(3),
+ BLOCK_CHECKSUM(4),
+ BLOCK_INDEPENDENCE(5);
+
+ private final int position;
+ Bits(int position) {
+ this.position = position;
+ }
+ }
+
+ public FLG(int version, Bits... bits) {
+ this.bitSet = new BitSet(8);
+ this.version = version;
+ if (bits != null) {
+ for (Bits bit : bits) {
+ bitSet.set(bit.position);
+ }
+ }
+ validate();
+ }
+
+ private FLG(int version, byte b) {
+ this.bitSet = BitSet.valueOf(new byte[]{b});
+ this.version = version;
+ validate();
+ }
+
+ public static FLG fromByte(byte flg) {
+ final byte versionMask = (byte)(flg & (3 << 6));
+ return new FLG(versionMask >>> 6, (byte) (flg ^ versionMask));
+ }
+
+ public byte toByte() {
+ return (byte)(bitSet.toByteArray()[0] | ((version & 3) << 6));
+ }
+
+ private void validate() {
+ if (bitSet.get(Bits.RESERVED_0.position)) {
+ throw new RuntimeException("Reserved0 field must be 0");
+ }
+ if (bitSet.get(Bits.RESERVED_1.position)) {
+ throw new RuntimeException("Reserved1 field must be 0");
+ }
+ if (!bitSet.get(Bits.BLOCK_INDEPENDENCE.position)) {
+ throw new RuntimeException("Dependent block stream is unsupported (BLOCK_INDEPENDENCE must be set)");
+ }
+ if (version != DEFAULT_VERSION) {
+ throw new RuntimeException(String.format(Locale.ROOT, "Version %d is unsupported", version));
+ }
+ }
+
+ public boolean isEnabled(Bits bit) {
+ return bitSet.get(bit.position);
+ }
+
+ public int getVersion() {
+ return version;
+ }
+ }
+
+ public static class BD {
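+    // BD byte layout: bit 7 reserved, bits 6-4 block maximum size indicator (4-7), bits 3-0 reserved;
+    // RESERVED_MASK = 0x8F therefore flags any reserved bit that is set.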
+ private static final int RESERVED_MASK = 0x8F;
+
+ private final BLOCKSIZE blockSizeValue;
+
+ private BD(BLOCKSIZE blockSizeValue) {
+ this.blockSizeValue = blockSizeValue;
+ }
+
+ public static BD fromByte(byte bd) {
+ int blockMaximumSize = (bd >>> 4) & 7;
+ if ((bd & RESERVED_MASK) > 0) {
+ throw new RuntimeException("Reserved fields must be 0");
+ }
+
+ return new BD(BLOCKSIZE.valueOf(blockMaximumSize));
+ }
+
+    // block maximum size = 2^(2 * indicator + 8): 4 -> 64 KB, 5 -> 256 KB, 6 -> 1 MB, 7 -> 4 MB
+ public int getBlockMaximumSize() {
+ return 1 << ((2 * blockSizeValue.getIndicator()) + 8);
+ }
+
+ public byte toByte() {
+ return (byte) ((blockSizeValue.getIndicator() & 7) << 4);
+ }
+ }
+
+ static class FrameInfo {
+ private final FLG flg;
+ private final BD bd;
+ private final StreamingXXHash32 streamHash;
+ private boolean finished = false;
+
+ public FrameInfo(FLG flg, BD bd) {
+ this.flg = flg;
+ this.bd = bd;
+ this.streamHash = flg.isEnabled(FLG.Bits.CONTENT_CHECKSUM) ? XXHashFactory.fastestInstance().newStreamingHash32(0) : null;
+ }
+
+ public boolean isEnabled(FLG.Bits bit) {
+ return flg.isEnabled(bit);
+ }
+
+ public FLG getFLG() {
+ return this.flg;
+ }
+
+ public BD getBD() {
+ return this.bd;
+ }
+
+ public void updateStreamHash(byte[] buff, int off, int len) {
+ this.streamHash.update(buff, off, len);
+ }
+
+ public int currentStreamHash() {
+ return this.streamHash.getValue();
+ }
+
+ public void finish() {
+ this.finished = true;
+ }
+
+ public boolean isFinished() {
+ return this.finished;
+ }
+ }
+}
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4HCJNICompressor.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4HCJNICompressor.java
new file mode 100644
index 000000000..30f098c94
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4HCJNICompressor.java
@@ -0,0 +1,86 @@
+package com.fr.third.net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.nio.ByteBuffer;
+
+import com.fr.third.net.jpountz.util.ByteBufferUtils;
+import com.fr.third.net.jpountz.util.SafeUtils;
+
+/**
+ * High compression {@link LZ4Compressor}s implemented with JNI bindings to the
+ * original C implementation of LZ4.
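+ * <p>
+ * Instances are normally obtained through {@link LZ4Factory} rather than constructed directly.
+ * Illustrative sketch, assuming the standard lz4-java factory API:
+ * <pre>{@code
+ * LZ4Compressor hc = LZ4Factory.fastestInstance().highCompressor(9); // higher level = better ratio, slower
+ * }</pre>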
+ */
+final class LZ4HCJNICompressor extends LZ4Compressor {
+
+ public static final LZ4HCJNICompressor INSTANCE = new LZ4HCJNICompressor();
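+  // Lazily initialized pure-Java fallback used when a ByteBuffer is neither array-backed nor
+  // direct and so cannot be passed through JNI; the unsynchronized initialization is a benign race.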
+ private static LZ4Compressor SAFE_INSTANCE;
+
+ private final int compressionLevel;
+
+ LZ4HCJNICompressor() { this(LZ4Constants.DEFAULT_COMPRESSION_LEVEL); }
+ LZ4HCJNICompressor(int compressionLevel) {
+ this.compressionLevel = compressionLevel;
+ }
+
+ @Override
+ public int compress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen) {
+ SafeUtils.checkRange(src, srcOff, srcLen);
+ SafeUtils.checkRange(dest, destOff, maxDestLen);
+ final int result = LZ4JNI.LZ4_compressHC(src, null, srcOff, srcLen, dest, null, destOff, maxDestLen, compressionLevel);
+ if (result <= 0) {
+ throw new LZ4Exception();
+ }
+ return result;
+ }
+
+ @Override
+ public int compress(ByteBuffer src, int srcOff, int srcLen, ByteBuffer dest, int destOff, int maxDestLen) {
+ ByteBufferUtils.checkNotReadOnly(dest);
+ ByteBufferUtils.checkRange(src, srcOff, srcLen);
+ ByteBufferUtils.checkRange(dest, destOff, maxDestLen);
+
+ if ((src.hasArray() || src.isDirect()) && (dest.hasArray() || dest.isDirect())) {
+ byte[] srcArr = null, destArr = null;
+ ByteBuffer srcBuf = null, destBuf = null;
+ if (src.hasArray()) {
+ srcArr = src.array();
+ srcOff += src.arrayOffset();
+ } else {
+ assert src.isDirect();
+ srcBuf = src;
+ }
+ if (dest.hasArray()) {
+ destArr = dest.array();
+ destOff += dest.arrayOffset();
+ } else {
+ assert dest.isDirect();
+ destBuf = dest;
+ }
+
+ final int result = LZ4JNI.LZ4_compressHC(srcArr, srcBuf, srcOff, srcLen, destArr, destBuf, destOff, maxDestLen, compressionLevel);
+ if (result <= 0) {
+ throw new LZ4Exception();
+ }
+ return result;
+ } else {
+ LZ4Compressor safeInstance = SAFE_INSTANCE;
+ if (safeInstance == null) {
+ safeInstance = SAFE_INSTANCE = LZ4Factory.safeInstance().highCompressor(compressionLevel);
+ }
+ return safeInstance.compress(src, srcOff, srcLen, dest, destOff, maxDestLen);
+ }
+ }
+}
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4HCJavaSafeCompressor.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4HCJavaSafeCompressor.java
new file mode 100644
index 000000000..7147358f0
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4HCJavaSafeCompressor.java
@@ -0,0 +1,550 @@
+// Auto-generated: DO NOT EDIT
+
+package com.fr.third.net.jpountz.lz4;
+
+import static com.fr.third.net.jpountz.lz4.LZ4Constants.*;
+import static com.fr.third.net.jpountz.lz4.LZ4Utils.*;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+import com.fr.third.net.jpountz.lz4.LZ4Utils.Match;
+import com.fr.third.net.jpountz.util.ByteBufferUtils;
+import com.fr.third.net.jpountz.util.SafeUtils;
+
+/**
+ * High compression compressor.
+ */
+final class LZ4HCJavaSafeCompressor extends LZ4Compressor {
+
+ public static final LZ4Compressor INSTANCE = new LZ4HCJavaSafeCompressor();
+
+ private final int maxAttempts;
+ final int compressionLevel;
+
+ LZ4HCJavaSafeCompressor() { this(DEFAULT_COMPRESSION_LEVEL); }
+ LZ4HCJavaSafeCompressor(int compressionLevel) {
+ this.maxAttempts = 1<<(compressionLevel-1);
+ this.compressionLevel = compressionLevel;
+ }
+
+ private class HashTable {
+ static final int MASK = MAX_DISTANCE - 1;
+ int nextToUpdate;
+ private final int base;
+ private final int[] hashTable;
+ private final short[] chainTable;
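+    // hashTable maps the hash of four input bytes to the most recent position that produced it;
+    // chainTable stores, per position modulo MAX_DISTANCE, the backward delta to the previous
+    // position with the same hash, forming the chain walked by the insertAndFind* methods.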
+
+ HashTable(int base) {
+ this.base = base;
+ nextToUpdate = base;
+ hashTable = new int[HASH_TABLE_SIZE_HC];
+ Arrays.fill(hashTable, -1);
+ chainTable = new short[MAX_DISTANCE];
+ }
+
+ private int hashPointer(byte[] bytes, int off) {
+ final int v = SafeUtils.readInt(bytes, off);
+ return hashPointer(v);
+ }
+
+ private int hashPointer(ByteBuffer bytes, int off) {
+ final int v = ByteBufferUtils.readInt(bytes, off);
+ return hashPointer(v);
+ }
+
+ private int hashPointer(int v) {
+ final int h = hashHC(v);
+ return hashTable[h];
+ }
+
+ private int next(int off) {
+ return off - (chainTable[off & MASK] & 0xFFFF);
+ }
+
+ private void addHash(byte[] bytes, int off) {
+ final int v = SafeUtils.readInt(bytes, off);
+ addHash(v, off);
+ }
+
+ private void addHash(ByteBuffer bytes, int off) {
+ final int v = ByteBufferUtils.readInt(bytes, off);
+ addHash(v, off);
+ }
+
+ private void addHash(int v, int off) {
+ final int h = hashHC(v);
+ int delta = off - hashTable[h];
+ assert delta > 0 : delta;
+ if (delta >= MAX_DISTANCE) {
+ delta = MAX_DISTANCE - 1;
+ }
+ chainTable[off & MASK] = (short) delta;
+ hashTable[h] = off;
+ }
+
+ void insert(int off, byte[] bytes) {
+ for (; nextToUpdate < off; ++nextToUpdate) {
+ addHash(bytes, nextToUpdate);
+ }
+ }
+
+ void insert(int off, ByteBuffer bytes) {
+ for (; nextToUpdate < off; ++nextToUpdate) {
+ addHash(bytes, nextToUpdate);
+ }
+ }
+
+
+
+ boolean insertAndFindBestMatch(byte[] buf, int off, int matchLimit, Match match) {
+ match.start = off;
+ match.len = 0;
+ int delta = 0;
+ int repl = 0;
+
+ insert(off, buf);
+
+ int ref = hashPointer(buf, off);
+
+ if (ref >= off - 4 && ref <= off && ref >= base) { // potential repetition
+ if (LZ4SafeUtils.readIntEquals(buf, ref, off)) { // confirmed
+ delta = off - ref;
+ repl = match.len = MIN_MATCH + LZ4SafeUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit);
+ match.ref = ref;
+ }
+ ref = next(ref);
+ }
+
+ for (int i = 0; i < maxAttempts; ++i) {
+ if (ref < Math.max(base, off - MAX_DISTANCE + 1) || ref > off) {
+ break;
+ }
+ if (LZ4SafeUtils.readIntEquals(buf, ref, off)) {
+ final int matchLen = MIN_MATCH + LZ4SafeUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit);
+ if (matchLen > match.len) {
+ match.ref = ref;
+ match.len = matchLen;
+ }
+ }
+ ref = next(ref);
+ }
+
+ if (repl != 0) {
+ int ptr = off;
+ final int end = off + repl - (MIN_MATCH - 1);
+ while (ptr < end - delta) {
+ chainTable[ptr & MASK] = (short) delta; // pre load
+ ++ptr;
+ }
+ do {
+ chainTable[ptr & MASK] = (short) delta;
+ hashTable[hashHC(SafeUtils.readInt(buf, ptr))] = ptr;
+ ++ptr;
+ } while (ptr < end);
+ nextToUpdate = end;
+ }
+
+ return match.len != 0;
+ }
+
+ boolean insertAndFindWiderMatch(byte[] buf, int off, int startLimit, int matchLimit, int minLen, Match match) {
+ match.len = minLen;
+
+ insert(off, buf);
+
+ final int delta = off - startLimit;
+ int ref = hashPointer(buf, off);
+ for (int i = 0; i < maxAttempts; ++i) {
+ if (ref < Math.max(base, off - MAX_DISTANCE + 1) || ref > off) {
+ break;
+ }
+ if (LZ4SafeUtils.readIntEquals(buf, ref, off)) {
+          final int matchLenForward = MIN_MATCH + LZ4SafeUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit);
+ final int matchLenBackward = LZ4SafeUtils.commonBytesBackward(buf, ref, off, base, startLimit);
+ final int matchLen = matchLenBackward + matchLenForward;
+ if (matchLen > match.len) {
+ match.len = matchLen;
+ match.ref = ref - matchLenBackward;
+ match.start = off - matchLenBackward;
+ }
+ }
+ ref = next(ref);
+ }
+
+ return match.len > minLen;
+ }
+
+
+ boolean insertAndFindBestMatch(ByteBuffer buf, int off, int matchLimit, Match match) {
+ match.start = off;
+ match.len = 0;
+ int delta = 0;
+ int repl = 0;
+
+ insert(off, buf);
+
+ int ref = hashPointer(buf, off);
+
+ if (ref >= off - 4 && ref <= off && ref >= base) { // potential repetition
+ if (LZ4ByteBufferUtils.readIntEquals(buf, ref, off)) { // confirmed
+ delta = off - ref;
+ repl = match.len = MIN_MATCH + LZ4ByteBufferUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit);
+ match.ref = ref;
+ }
+ ref = next(ref);
+ }
+
+ for (int i = 0; i < maxAttempts; ++i) {
+ if (ref < Math.max(base, off - MAX_DISTANCE + 1) || ref > off) {
+ break;
+ }
+ if (LZ4ByteBufferUtils.readIntEquals(buf, ref, off)) {
+ final int matchLen = MIN_MATCH + LZ4ByteBufferUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit);
+ if (matchLen > match.len) {
+ match.ref = ref;
+ match.len = matchLen;
+ }
+ }
+ ref = next(ref);
+ }
+
+ if (repl != 0) {
+ int ptr = off;
+ final int end = off + repl - (MIN_MATCH - 1);
+ while (ptr < end - delta) {
+ chainTable[ptr & MASK] = (short) delta; // pre load
+ ++ptr;
+ }
+ do {
+ chainTable[ptr & MASK] = (short) delta;
+ hashTable[hashHC(ByteBufferUtils.readInt(buf, ptr))] = ptr;
+ ++ptr;
+ } while (ptr < end);
+ nextToUpdate = end;
+ }
+
+ return match.len != 0;
+ }
+
+ boolean insertAndFindWiderMatch(ByteBuffer buf, int off, int startLimit, int matchLimit, int minLen, Match match) {
+ match.len = minLen;
+
+ insert(off, buf);
+
+ final int delta = off - startLimit;
+ int ref = hashPointer(buf, off);
+ for (int i = 0; i < maxAttempts; ++i) {
+ if (ref < Math.max(base, off - MAX_DISTANCE + 1) || ref > off) {
+ break;
+ }
+ if (LZ4ByteBufferUtils.readIntEquals(buf, ref, off)) {
+          final int matchLenForward = MIN_MATCH + LZ4ByteBufferUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit);
+ final int matchLenBackward = LZ4ByteBufferUtils.commonBytesBackward(buf, ref, off, base, startLimit);
+ final int matchLen = matchLenBackward + matchLenForward;
+ if (matchLen > match.len) {
+ match.len = matchLen;
+ match.ref = ref - matchLenBackward;
+ match.start = off - matchLenBackward;
+ }
+ }
+ ref = next(ref);
+ }
+
+ return match.len > minLen;
+ }
+
+
+ }
+
+ @Override
+ public int compress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen) {
+
+ SafeUtils.checkRange(src, srcOff, srcLen);
+ SafeUtils.checkRange(dest, destOff, maxDestLen);
+
+ final int srcEnd = srcOff + srcLen;
+ final int destEnd = destOff + maxDestLen;
+ final int mfLimit = srcEnd - MF_LIMIT;
+ final int matchLimit = srcEnd - LAST_LITERALS;
+
+ int sOff = srcOff;
+ int dOff = destOff;
+ int anchor = sOff++;
+
+ final HashTable ht = new HashTable(srcOff);
+ final Match match0 = new Match();
+ final Match match1 = new Match();
+ final Match match2 = new Match();
+ final Match match3 = new Match();
+
+ main:
+ while (sOff < mfLimit) {
+ if (!ht.insertAndFindBestMatch(src, sOff, matchLimit, match1)) {
+ ++sOff;
+ continue;
+ }
+
+ // saved, in case we would skip too much
+ copyTo(match1, match0);
+
+ search2:
+ while (true) {
+ assert match1.start >= anchor;
+ if (match1.end() >= mfLimit
+ || !ht.insertAndFindWiderMatch(src, match1.end() - 2, match1.start + 1, matchLimit, match1.len, match2)) {
+ // no better match
+ dOff = LZ4SafeUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd);
+ anchor = sOff = match1.end();
+ continue main;
+ }
+
+ if (match0.start < match1.start) {
+ if (match2.start < match1.start + match0.len) { // empirical
+ copyTo(match0, match1);
+ }
+ }
+ assert match2.start > match1.start;
+
+ if (match2.start - match1.start < 3) { // First Match too small : removed
+ copyTo(match2, match1);
+ continue search2;
+ }
+
+ search3:
+ while (true) {
+ if (match2.start - match1.start < OPTIMAL_ML) {
+ int newMatchLen = match1.len;
+ if (newMatchLen > OPTIMAL_ML) {
+ newMatchLen = OPTIMAL_ML;
+ }
+ if (match1.start + newMatchLen > match2.end() - MIN_MATCH) {
+ newMatchLen = match2.start - match1.start + match2.len - MIN_MATCH;
+ }
+ final int correction = newMatchLen - (match2.start - match1.start);
+ if (correction > 0) {
+ match2.fix(correction);
+ }
+ }
+
+ if (match2.start + match2.len >= mfLimit
+ || !ht.insertAndFindWiderMatch(src, match2.end() - 3, match2.start, matchLimit, match2.len, match3)) {
+ // no better match -> 2 sequences to encode
+ if (match2.start < match1.end()) {
+ match1.len = match2.start - match1.start;
+ }
+ // encode seq 1
+ dOff = LZ4SafeUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd);
+ anchor = sOff = match1.end();
+ // encode seq 2
+ dOff = LZ4SafeUtils.encodeSequence(src, anchor, match2.start, match2.ref, match2.len, dest, dOff, destEnd);
+ anchor = sOff = match2.end();
+ continue main;
+ }
+
+ if (match3.start < match1.end() + 3) { // Not enough space for match 2 : remove it
+            if (match3.start >= match1.end()) { // can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1
+ if (match2.start < match1.end()) {
+ final int correction = match1.end() - match2.start;
+ match2.fix(correction);
+ if (match2.len < MIN_MATCH) {
+ copyTo(match3, match2);
+ }
+ }
+
+ dOff = LZ4SafeUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd);
+ anchor = sOff = match1.end();
+
+ copyTo(match3, match1);
+ copyTo(match2, match0);
+
+ continue search2;
+ }
+
+ copyTo(match3, match2);
+ continue search3;
+ }
+
+ // OK, now we have 3 ascending matches; let's write at least the first one
+ if (match2.start < match1.end()) {
+ if (match2.start - match1.start < ML_MASK) {
+ if (match1.len > OPTIMAL_ML) {
+ match1.len = OPTIMAL_ML;
+ }
+ if (match1.end() > match2.end() - MIN_MATCH) {
+ match1.len = match2.end() - match1.start - MIN_MATCH;
+ }
+ final int correction = match1.end() - match2.start;
+ match2.fix(correction);
+ } else {
+ match1.len = match2.start - match1.start;
+ }
+ }
+
+ dOff = LZ4SafeUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd);
+ anchor = sOff = match1.end();
+
+ copyTo(match2, match1);
+ copyTo(match3, match2);
+
+ continue search3;
+ }
+
+ }
+
+ }
+
+ dOff = LZ4SafeUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd);
+ return dOff - destOff;
+ }
+
+
+ @Override
+ public int compress(ByteBuffer src, int srcOff, int srcLen, ByteBuffer dest, int destOff, int maxDestLen) {
+
+ if (src.hasArray() && dest.hasArray()) {
+ return compress(src.array(), srcOff + src.arrayOffset(), srcLen, dest.array(), destOff + dest.arrayOffset(), maxDestLen);
+ }
+ src = ByteBufferUtils.inNativeByteOrder(src);
+ dest = ByteBufferUtils.inNativeByteOrder(dest);
+
+ ByteBufferUtils.checkRange(src, srcOff, srcLen);
+ ByteBufferUtils.checkRange(dest, destOff, maxDestLen);
+
+ final int srcEnd = srcOff + srcLen;
+ final int destEnd = destOff + maxDestLen;
+ final int mfLimit = srcEnd - MF_LIMIT;
+ final int matchLimit = srcEnd - LAST_LITERALS;
+
+ int sOff = srcOff;
+ int dOff = destOff;
+ int anchor = sOff++;
+
+ final HashTable ht = new HashTable(srcOff);
+ final Match match0 = new Match();
+ final Match match1 = new Match();
+ final Match match2 = new Match();
+ final Match match3 = new Match();
+
+ main:
+ while (sOff < mfLimit) {
+ if (!ht.insertAndFindBestMatch(src, sOff, matchLimit, match1)) {
+ ++sOff;
+ continue;
+ }
+
+ // saved, in case we would skip too much
+ copyTo(match1, match0);
+
+ search2:
+ while (true) {
+ assert match1.start >= anchor;
+ if (match1.end() >= mfLimit
+ || !ht.insertAndFindWiderMatch(src, match1.end() - 2, match1.start + 1, matchLimit, match1.len, match2)) {
+ // no better match
+ dOff = LZ4ByteBufferUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd);
+ anchor = sOff = match1.end();
+ continue main;
+ }
+
+ if (match0.start < match1.start) {
+ if (match2.start < match1.start + match0.len) { // empirical
+ copyTo(match0, match1);
+ }
+ }
+ assert match2.start > match1.start;
+
+ if (match2.start - match1.start < 3) { // First Match too small : removed
+ copyTo(match2, match1);
+ continue search2;
+ }
+
+ search3:
+ while (true) {
+ if (match2.start - match1.start < OPTIMAL_ML) {
+ int newMatchLen = match1.len;
+ if (newMatchLen > OPTIMAL_ML) {
+ newMatchLen = OPTIMAL_ML;
+ }
+ if (match1.start + newMatchLen > match2.end() - MIN_MATCH) {
+ newMatchLen = match2.start - match1.start + match2.len - MIN_MATCH;
+ }
+ final int correction = newMatchLen - (match2.start - match1.start);
+ if (correction > 0) {
+ match2.fix(correction);
+ }
+ }
+
+ if (match2.start + match2.len >= mfLimit
+ || !ht.insertAndFindWiderMatch(src, match2.end() - 3, match2.start, matchLimit, match2.len, match3)) {
+ // no better match -> 2 sequences to encode
+ if (match2.start < match1.end()) {
+ match1.len = match2.start - match1.start;
+ }
+ // encode seq 1
+ dOff = LZ4ByteBufferUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd);
+ anchor = sOff = match1.end();
+ // encode seq 2
+ dOff = LZ4ByteBufferUtils.encodeSequence(src, anchor, match2.start, match2.ref, match2.len, dest, dOff, destEnd);
+ anchor = sOff = match2.end();
+ continue main;
+ }
+
+ if (match3.start < match1.end() + 3) { // Not enough space for match 2 : remove it
+            if (match3.start >= match1.end()) { // can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1
+ if (match2.start < match1.end()) {
+ final int correction = match1.end() - match2.start;
+ match2.fix(correction);
+ if (match2.len < MIN_MATCH) {
+ copyTo(match3, match2);
+ }
+ }
+
+ dOff = LZ4ByteBufferUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd);
+ anchor = sOff = match1.end();
+
+ copyTo(match3, match1);
+ copyTo(match2, match0);
+
+ continue search2;
+ }
+
+ copyTo(match3, match2);
+ continue search3;
+ }
+
+ // OK, now we have 3 ascending matches; let's write at least the first one
+ if (match2.start < match1.end()) {
+ if (match2.start - match1.start < ML_MASK) {
+ if (match1.len > OPTIMAL_ML) {
+ match1.len = OPTIMAL_ML;
+ }
+ if (match1.end() > match2.end() - MIN_MATCH) {
+ match1.len = match2.end() - match1.start - MIN_MATCH;
+ }
+ final int correction = match1.end() - match2.start;
+ match2.fix(correction);
+ } else {
+ match1.len = match2.start - match1.start;
+ }
+ }
+
+ dOff = LZ4ByteBufferUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd);
+ anchor = sOff = match1.end();
+
+ copyTo(match2, match1);
+ copyTo(match3, match2);
+
+ continue search3;
+ }
+
+ }
+
+ }
+
+ dOff = LZ4ByteBufferUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd);
+ return dOff - destOff;
+ }
+
+
+}
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4HCJavaUnsafeCompressor.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4HCJavaUnsafeCompressor.java
new file mode 100644
index 000000000..e4b445e09
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4HCJavaUnsafeCompressor.java
@@ -0,0 +1,550 @@
+// Auto-generated: DO NOT EDIT
+
+package com.fr.third.net.jpountz.lz4;
+
+import static com.fr.third.net.jpountz.lz4.LZ4Constants.*;
+import static com.fr.third.net.jpountz.lz4.LZ4Utils.*;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+import com.fr.third.net.jpountz.lz4.LZ4Utils.Match;
+import com.fr.third.net.jpountz.util.ByteBufferUtils;
+import com.fr.third.net.jpountz.util.UnsafeUtils;
+
+/**
+ * High compression compressor.
+ */
+final class LZ4HCJavaUnsafeCompressor extends LZ4Compressor {
+
+ public static final LZ4Compressor INSTANCE = new LZ4HCJavaUnsafeCompressor();
+
+ private final int maxAttempts;
+ final int compressionLevel;
+
+ LZ4HCJavaUnsafeCompressor() { this(DEFAULT_COMPRESSION_LEVEL); }
+ LZ4HCJavaUnsafeCompressor(int compressionLevel) {
+ this.maxAttempts = 1<<(compressionLevel-1);
+ this.compressionLevel = compressionLevel;
+ }
+
+ private class HashTable {
+ static final int MASK = MAX_DISTANCE - 1;
+ int nextToUpdate;
+ private final int base;
+ private final int[] hashTable;
+ private final short[] chainTable;
+
+ HashTable(int base) {
+ this.base = base;
+ nextToUpdate = base;
+ hashTable = new int[HASH_TABLE_SIZE_HC];
+ Arrays.fill(hashTable, -1);
+ chainTable = new short[MAX_DISTANCE];
+ }
+
+ private int hashPointer(byte[] bytes, int off) {
+ final int v = UnsafeUtils.readInt(bytes, off);
+ return hashPointer(v);
+ }
+
+ private int hashPointer(ByteBuffer bytes, int off) {
+ final int v = ByteBufferUtils.readInt(bytes, off);
+ return hashPointer(v);
+ }
+
+ private int hashPointer(int v) {
+ final int h = hashHC(v);
+ return hashTable[h];
+ }
+
+ private int next(int off) {
+ return off - (chainTable[off & MASK] & 0xFFFF);
+ }
+
+ private void addHash(byte[] bytes, int off) {
+ final int v = UnsafeUtils.readInt(bytes, off);
+ addHash(v, off);
+ }
+
+ private void addHash(ByteBuffer bytes, int off) {
+ final int v = ByteBufferUtils.readInt(bytes, off);
+ addHash(v, off);
+ }
+
+ private void addHash(int v, int off) {
+ final int h = hashHC(v);
+ int delta = off - hashTable[h];
+ assert delta > 0 : delta;
+ if (delta >= MAX_DISTANCE) {
+ delta = MAX_DISTANCE - 1;
+ }
+ chainTable[off & MASK] = (short) delta;
+ hashTable[h] = off;
+ }
+
+ void insert(int off, byte[] bytes) {
+ for (; nextToUpdate < off; ++nextToUpdate) {
+ addHash(bytes, nextToUpdate);
+ }
+ }
+
+ void insert(int off, ByteBuffer bytes) {
+ for (; nextToUpdate < off; ++nextToUpdate) {
+ addHash(bytes, nextToUpdate);
+ }
+ }
+
+
+
+ boolean insertAndFindBestMatch(byte[] buf, int off, int matchLimit, Match match) {
+ match.start = off;
+ match.len = 0;
+ int delta = 0;
+ int repl = 0;
+
+ insert(off, buf);
+
+ int ref = hashPointer(buf, off);
+
+ if (ref >= off - 4 && ref <= off && ref >= base) { // potential repetition
+ if (LZ4UnsafeUtils.readIntEquals(buf, ref, off)) { // confirmed
+ delta = off - ref;
+ repl = match.len = MIN_MATCH + LZ4UnsafeUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit);
+ match.ref = ref;
+ }
+ ref = next(ref);
+ }
+
+ for (int i = 0; i < maxAttempts; ++i) {
+ if (ref < Math.max(base, off - MAX_DISTANCE + 1) || ref > off) {
+ break;
+ }
+ if (LZ4UnsafeUtils.readIntEquals(buf, ref, off)) {
+ final int matchLen = MIN_MATCH + LZ4UnsafeUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit);
+ if (matchLen > match.len) {
+ match.ref = ref;
+ match.len = matchLen;
+ }
+ }
+ ref = next(ref);
+ }
+
+ if (repl != 0) {
+ int ptr = off;
+ final int end = off + repl - (MIN_MATCH - 1);
+ while (ptr < end - delta) {
+ chainTable[ptr & MASK] = (short) delta; // pre load
+ ++ptr;
+ }
+ do {
+ chainTable[ptr & MASK] = (short) delta;
+ hashTable[hashHC(UnsafeUtils.readInt(buf, ptr))] = ptr;
+ ++ptr;
+ } while (ptr < end);
+ nextToUpdate = end;
+ }
+
+ return match.len != 0;
+ }
+
+ boolean insertAndFindWiderMatch(byte[] buf, int off, int startLimit, int matchLimit, int minLen, Match match) {
+ match.len = minLen;
+
+ insert(off, buf);
+
+ final int delta = off - startLimit;
+ int ref = hashPointer(buf, off);
+ for (int i = 0; i < maxAttempts; ++i) {
+ if (ref < Math.max(base, off - MAX_DISTANCE + 1) || ref > off) {
+ break;
+ }
+ if (LZ4UnsafeUtils.readIntEquals(buf, ref, off)) {
+          final int matchLenForward = MIN_MATCH + LZ4UnsafeUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit);
+ final int matchLenBackward = LZ4UnsafeUtils.commonBytesBackward(buf, ref, off, base, startLimit);
+ final int matchLen = matchLenBackward + matchLenForward;
+ if (matchLen > match.len) {
+ match.len = matchLen;
+ match.ref = ref - matchLenBackward;
+ match.start = off - matchLenBackward;
+ }
+ }
+ ref = next(ref);
+ }
+
+ return match.len > minLen;
+ }
+
+
+ boolean insertAndFindBestMatch(ByteBuffer buf, int off, int matchLimit, Match match) {
+ match.start = off;
+ match.len = 0;
+ int delta = 0;
+ int repl = 0;
+
+ insert(off, buf);
+
+ int ref = hashPointer(buf, off);
+
+ if (ref >= off - 4 && ref <= off && ref >= base) { // potential repetition
+ if (LZ4ByteBufferUtils.readIntEquals(buf, ref, off)) { // confirmed
+ delta = off - ref;
+ repl = match.len = MIN_MATCH + LZ4ByteBufferUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit);
+ match.ref = ref;
+ }
+ ref = next(ref);
+ }
+
+ for (int i = 0; i < maxAttempts; ++i) {
+ if (ref < Math.max(base, off - MAX_DISTANCE + 1) || ref > off) {
+ break;
+ }
+ if (LZ4ByteBufferUtils.readIntEquals(buf, ref, off)) {
+ final int matchLen = MIN_MATCH + LZ4ByteBufferUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit);
+ if (matchLen > match.len) {
+ match.ref = ref;
+ match.len = matchLen;
+ }
+ }
+ ref = next(ref);
+ }
+
+ if (repl != 0) {
+ int ptr = off;
+ final int end = off + repl - (MIN_MATCH - 1);
+ while (ptr < end - delta) {
+ chainTable[ptr & MASK] = (short) delta; // pre load
+ ++ptr;
+ }
+ do {
+ chainTable[ptr & MASK] = (short) delta;
+ hashTable[hashHC(ByteBufferUtils.readInt(buf, ptr))] = ptr;
+ ++ptr;
+ } while (ptr < end);
+ nextToUpdate = end;
+ }
+
+ return match.len != 0;
+ }
+
+ boolean insertAndFindWiderMatch(ByteBuffer buf, int off, int startLimit, int matchLimit, int minLen, Match match) {
+ match.len = minLen;
+
+ insert(off, buf);
+
+ final int delta = off - startLimit;
+ int ref = hashPointer(buf, off);
+ for (int i = 0; i < maxAttempts; ++i) {
+ if (ref < Math.max(base, off - MAX_DISTANCE + 1) || ref > off) {
+ break;
+ }
+ if (LZ4ByteBufferUtils.readIntEquals(buf, ref, off)) {
+          final int matchLenForward = MIN_MATCH + LZ4ByteBufferUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit);
+ final int matchLenBackward = LZ4ByteBufferUtils.commonBytesBackward(buf, ref, off, base, startLimit);
+ final int matchLen = matchLenBackward + matchLenForward;
+ if (matchLen > match.len) {
+ match.len = matchLen;
+ match.ref = ref - matchLenBackward;
+ match.start = off - matchLenBackward;
+ }
+ }
+ ref = next(ref);
+ }
+
+ return match.len > minLen;
+ }
+
+
+ }
+
+ @Override
+ public int compress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen) {
+
+ UnsafeUtils.checkRange(src, srcOff, srcLen);
+ UnsafeUtils.checkRange(dest, destOff, maxDestLen);
+
+ final int srcEnd = srcOff + srcLen;
+ final int destEnd = destOff + maxDestLen;
+ final int mfLimit = srcEnd - MF_LIMIT;
+ final int matchLimit = srcEnd - LAST_LITERALS;
+
+ int sOff = srcOff;
+ int dOff = destOff;
+ int anchor = sOff++;
+
+ final HashTable ht = new HashTable(srcOff);
+ final Match match0 = new Match();
+ final Match match1 = new Match();
+ final Match match2 = new Match();
+ final Match match3 = new Match();
+
+ main:
+ while (sOff < mfLimit) {
+ if (!ht.insertAndFindBestMatch(src, sOff, matchLimit, match1)) {
+ ++sOff;
+ continue;
+ }
+
+ // saved, in case we would skip too much
+ copyTo(match1, match0);
+
+ search2:
+ while (true) {
+ assert match1.start >= anchor;
+ if (match1.end() >= mfLimit
+ || !ht.insertAndFindWiderMatch(src, match1.end() - 2, match1.start + 1, matchLimit, match1.len, match2)) {
+ // no better match
+ dOff = LZ4UnsafeUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd);
+ anchor = sOff = match1.end();
+ continue main;
+ }
+
+ if (match0.start < match1.start) {
+ if (match2.start < match1.start + match0.len) { // empirical
+ copyTo(match0, match1);
+ }
+ }
+ assert match2.start > match1.start;
+
+ if (match2.start - match1.start < 3) { // First Match too small : removed
+ copyTo(match2, match1);
+ continue search2;
+ }
+
+ search3:
+ while (true) {
+ if (match2.start - match1.start < OPTIMAL_ML) {
+ int newMatchLen = match1.len;
+ if (newMatchLen > OPTIMAL_ML) {
+ newMatchLen = OPTIMAL_ML;
+ }
+ if (match1.start + newMatchLen > match2.end() - MIN_MATCH) {
+ newMatchLen = match2.start - match1.start + match2.len - MIN_MATCH;
+ }
+ final int correction = newMatchLen - (match2.start - match1.start);
+ if (correction > 0) {
+ match2.fix(correction);
+ }
+ }
+
+ if (match2.start + match2.len >= mfLimit
+ || !ht.insertAndFindWiderMatch(src, match2.end() - 3, match2.start, matchLimit, match2.len, match3)) {
+ // no better match -> 2 sequences to encode
+ if (match2.start < match1.end()) {
+ match1.len = match2.start - match1.start;
+ }
+ // encode seq 1
+ dOff = LZ4UnsafeUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd);
+ anchor = sOff = match1.end();
+ // encode seq 2
+ dOff = LZ4UnsafeUtils.encodeSequence(src, anchor, match2.start, match2.ref, match2.len, dest, dOff, destEnd);
+ anchor = sOff = match2.end();
+ continue main;
+ }
+
+ if (match3.start < match1.end() + 3) { // Not enough space for match 2 : remove it
+            if (match3.start >= match1.end()) { // can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1
+ if (match2.start < match1.end()) {
+ final int correction = match1.end() - match2.start;
+ match2.fix(correction);
+ if (match2.len < MIN_MATCH) {
+ copyTo(match3, match2);
+ }
+ }
+
+ dOff = LZ4UnsafeUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd);
+ anchor = sOff = match1.end();
+
+ copyTo(match3, match1);
+ copyTo(match2, match0);
+
+ continue search2;
+ }
+
+ copyTo(match3, match2);
+ continue search3;
+ }
+
+ // OK, now we have 3 ascending matches; let's write at least the first one
+ if (match2.start < match1.end()) {
+ if (match2.start - match1.start < ML_MASK) {
+ if (match1.len > OPTIMAL_ML) {
+ match1.len = OPTIMAL_ML;
+ }
+ if (match1.end() > match2.end() - MIN_MATCH) {
+ match1.len = match2.end() - match1.start - MIN_MATCH;
+ }
+ final int correction = match1.end() - match2.start;
+ match2.fix(correction);
+ } else {
+ match1.len = match2.start - match1.start;
+ }
+ }
+
+ dOff = LZ4UnsafeUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd);
+ anchor = sOff = match1.end();
+
+ copyTo(match2, match1);
+ copyTo(match3, match2);
+
+ continue search3;
+ }
+
+ }
+
+ }
+
+ dOff = LZ4UnsafeUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd);
+ return dOff - destOff;
+ }
+
+
+ @Override
+ public int compress(ByteBuffer src, int srcOff, int srcLen, ByteBuffer dest, int destOff, int maxDestLen) {
+
+ if (src.hasArray() && dest.hasArray()) {
+ return compress(src.array(), srcOff + src.arrayOffset(), srcLen, dest.array(), destOff + dest.arrayOffset(), maxDestLen);
+ }
+ src = ByteBufferUtils.inNativeByteOrder(src);
+ dest = ByteBufferUtils.inNativeByteOrder(dest);
+
+ ByteBufferUtils.checkRange(src, srcOff, srcLen);
+ ByteBufferUtils.checkRange(dest, destOff, maxDestLen);
+
+ final int srcEnd = srcOff + srcLen;
+ final int destEnd = destOff + maxDestLen;
+ final int mfLimit = srcEnd - MF_LIMIT;
+ final int matchLimit = srcEnd - LAST_LITERALS;
+
+ int sOff = srcOff;
+ int dOff = destOff;
+ int anchor = sOff++;
+
+ final HashTable ht = new HashTable(srcOff);
+ final Match match0 = new Match();
+ final Match match1 = new Match();
+ final Match match2 = new Match();
+ final Match match3 = new Match();
+
+ main:
+ while (sOff < mfLimit) {
+ if (!ht.insertAndFindBestMatch(src, sOff, matchLimit, match1)) {
+ ++sOff;
+ continue;
+ }
+
+ // saved, in case we would skip too much
+ copyTo(match1, match0);
+
+ search2:
+ while (true) {
+ assert match1.start >= anchor;
+ if (match1.end() >= mfLimit
+ || !ht.insertAndFindWiderMatch(src, match1.end() - 2, match1.start + 1, matchLimit, match1.len, match2)) {
+ // no better match
+ dOff = LZ4ByteBufferUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd);
+ anchor = sOff = match1.end();
+ continue main;
+ }
+
+ if (match0.start < match1.start) {
+ if (match2.start < match1.start + match0.len) { // empirical
+ copyTo(match0, match1);
+ }
+ }
+ assert match2.start > match1.start;
+
+ if (match2.start - match1.start < 3) { // First Match too small : removed
+ copyTo(match2, match1);
+ continue search2;
+ }
+
+ search3:
+ while (true) {
+ if (match2.start - match1.start < OPTIMAL_ML) {
+ int newMatchLen = match1.len;
+ if (newMatchLen > OPTIMAL_ML) {
+ newMatchLen = OPTIMAL_ML;
+ }
+ if (match1.start + newMatchLen > match2.end() - MIN_MATCH) {
+ newMatchLen = match2.start - match1.start + match2.len - MIN_MATCH;
+ }
+ final int correction = newMatchLen - (match2.start - match1.start);
+ if (correction > 0) {
+ match2.fix(correction);
+ }
+ }
+
+ if (match2.start + match2.len >= mfLimit
+ || !ht.insertAndFindWiderMatch(src, match2.end() - 3, match2.start, matchLimit, match2.len, match3)) {
+ // no better match -> 2 sequences to encode
+ if (match2.start < match1.end()) {
+ match1.len = match2.start - match1.start;
+ }
+ // encode seq 1
+ dOff = LZ4ByteBufferUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd);
+ anchor = sOff = match1.end();
+ // encode seq 2
+ dOff = LZ4ByteBufferUtils.encodeSequence(src, anchor, match2.start, match2.ref, match2.len, dest, dOff, destEnd);
+ anchor = sOff = match2.end();
+ continue main;
+ }
+
+ if (match3.start < match1.end() + 3) { // Not enough space for match 2 : remove it
+            if (match3.start >= match1.end()) { // can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1
+ if (match2.start < match1.end()) {
+ final int correction = match1.end() - match2.start;
+ match2.fix(correction);
+ if (match2.len < MIN_MATCH) {
+ copyTo(match3, match2);
+ }
+ }
+
+ dOff = LZ4ByteBufferUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd);
+ anchor = sOff = match1.end();
+
+ copyTo(match3, match1);
+ copyTo(match2, match0);
+
+ continue search2;
+ }
+
+ copyTo(match3, match2);
+ continue search3;
+ }
+
+ // OK, now we have 3 ascending matches; let's write at least the first one
+ if (match2.start < match1.end()) {
+ if (match2.start - match1.start < ML_MASK) {
+ if (match1.len > OPTIMAL_ML) {
+ match1.len = OPTIMAL_ML;
+ }
+ if (match1.end() > match2.end() - MIN_MATCH) {
+ match1.len = match2.end() - match1.start - MIN_MATCH;
+ }
+ final int correction = match1.end() - match2.start;
+ match2.fix(correction);
+ } else {
+ match1.len = match2.start - match1.start;
+ }
+ }
+
+ dOff = LZ4ByteBufferUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd);
+ anchor = sOff = match1.end();
+
+ copyTo(match2, match1);
+ copyTo(match3, match2);
+
+ continue search3;
+ }
+
+ }
+
+ }
+
+ dOff = LZ4ByteBufferUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd);
+ return dOff - destOff;
+ }
+
+
+}
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JNI.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JNI.java
new file mode 100644
index 000000000..2c036d0ba
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JNI.java
@@ -0,0 +1,41 @@
+package com.fr.third.net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.nio.ByteBuffer;
+
+import com.fr.third.net.jpountz.util.Native;
+
+
+/**
+ * JNI bindings to the original C implementation of LZ4.
+ */
+enum LZ4JNI {
+ ;
+
+ static {
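+    // Load the platform-specific liblz4-java native library, then let the native side
+    // initialize itself before any of the LZ4_* entry points below are called.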
+ Native.load();
+ init();
+ }
+
+ static native void init();
+ static native int LZ4_compress_limitedOutput(byte[] srcArray, ByteBuffer srcBuffer, int srcOff, int srcLen, byte[] destArray, ByteBuffer destBuffer, int destOff, int maxDestLen);
+ static native int LZ4_compressHC(byte[] srcArray, ByteBuffer srcBuffer, int srcOff, int srcLen, byte[] destArray, ByteBuffer destBuffer, int destOff, int maxDestLen, int compressionLevel);
+ static native int LZ4_decompress_fast(byte[] srcArray, ByteBuffer srcBuffer, int srcOff, byte[] destArray, ByteBuffer destBuffer, int destOff, int destLen);
+ static native int LZ4_decompress_safe(byte[] srcArray, ByteBuffer srcBuffer, int srcOff, int srcLen, byte[] destArray, ByteBuffer destBuffer, int destOff, int maxDestLen);
+ static native int LZ4_compressBound(int len);
+
+}
+
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JNICompressor.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JNICompressor.java
new file mode 100644
index 000000000..4afda18b6
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JNICompressor.java
@@ -0,0 +1,80 @@
+package com.fr.third.net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import static com.fr.third.net.jpountz.util.ByteBufferUtils.checkNotReadOnly;
+import static com.fr.third.net.jpountz.util.ByteBufferUtils.checkRange;
+import static com.fr.third.net.jpountz.util.SafeUtils.checkRange;
+
+import java.nio.ByteBuffer;
+
+/**
+ * Fast {@link LZ4Compressor}s implemented with JNI bindings to the original C
+ * implementation of LZ4.
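+ * <p>
+ * A typical call sequence, shown as an illustrative sketch (the factory lookup is the
+ * standard lz4-java API):
+ * <pre>{@code
+ * LZ4Compressor c = LZ4Factory.fastestInstance().fastCompressor();
+ * byte[] dest = new byte[c.maxCompressedLength(src.length)];
+ * int compressedLen = c.compress(src, 0, src.length, dest, 0, dest.length);
+ * }</pre>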
+ */
+final class LZ4JNICompressor extends LZ4Compressor {
+
+ public static final LZ4Compressor INSTANCE = new LZ4JNICompressor();
+ private static LZ4Compressor SAFE_INSTANCE;
+
+ @Override
+ public int compress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen) {
+ checkRange(src, srcOff, srcLen);
+ checkRange(dest, destOff, maxDestLen);
+ final int result = LZ4JNI.LZ4_compress_limitedOutput(src, null, srcOff, srcLen, dest, null, destOff, maxDestLen);
+ if (result <= 0) {
+ throw new LZ4Exception("maxDestLen is too small");
+ }
+ return result;
+ }
+
+ @Override
+ public int compress(ByteBuffer src, int srcOff, int srcLen, ByteBuffer dest, int destOff, int maxDestLen) {
+ checkNotReadOnly(dest);
+ checkRange(src, srcOff, srcLen);
+ checkRange(dest, destOff, maxDestLen);
+
+ if ((src.hasArray() || src.isDirect()) && (dest.hasArray() || dest.isDirect())) {
+ byte[] srcArr = null, destArr = null;
+ ByteBuffer srcBuf = null, destBuf = null;
+ if (src.hasArray()) {
+ srcArr = src.array();
+ srcOff += src.arrayOffset();
+ } else {
+ assert src.isDirect();
+ srcBuf = src;
+ }
+ if (dest.hasArray()) {
+ destArr = dest.array();
+ destOff += dest.arrayOffset();
+ } else {
+ assert dest.isDirect();
+ destBuf = dest;
+ }
+
+ final int result = LZ4JNI.LZ4_compress_limitedOutput(srcArr, srcBuf, srcOff, srcLen, destArr, destBuf, destOff, maxDestLen);
+ if (result <= 0) {
+ throw new LZ4Exception("maxDestLen is too small");
+ }
+ return result;
+ } else {
+ LZ4Compressor safeInstance = SAFE_INSTANCE;
+ if (safeInstance == null) {
+ safeInstance = SAFE_INSTANCE = LZ4Factory.safeInstance().fastCompressor();
+ }
+ return safeInstance.compress(src, srcOff, srcLen, dest, destOff, maxDestLen);
+ }
+ }
+}
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JNIFastDecompressor.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JNIFastDecompressor.java
new file mode 100644
index 000000000..132637b90
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JNIFastDecompressor.java
@@ -0,0 +1,82 @@
+package com.fr.third.net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+import java.nio.ByteBuffer;
+
+import com.fr.third.net.jpountz.util.ByteBufferUtils;
+import com.fr.third.net.jpountz.util.SafeUtils;
+
+
+/**
+ * {@link LZ4FastDecompressor} implemented with JNI bindings to the original C
+ * implementation of LZ4.
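+ * <p>
+ * The exact decompressed length must be known up front; the return value is the number of
+ * compressed bytes that were read. Illustrative sketch (factory lookup as in the standard
+ * lz4-java API):
+ * <pre>{@code
+ * LZ4FastDecompressor d = LZ4Factory.fastestInstance().fastDecompressor();
+ * byte[] restored = new byte[originalLength];
+ * int compressedBytesRead = d.decompress(compressed, 0, restored, 0, originalLength);
+ * }</pre>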
+ */
+final class LZ4JNIFastDecompressor extends LZ4FastDecompressor {
+
+ public static final LZ4JNIFastDecompressor INSTANCE = new LZ4JNIFastDecompressor();
+ private static LZ4FastDecompressor SAFE_INSTANCE;
+
+ @Override
+ public final int decompress(byte[] src, int srcOff, byte[] dest, int destOff, int destLen) {
+ SafeUtils.checkRange(src, srcOff);
+ SafeUtils.checkRange(dest, destOff, destLen);
+ final int result = LZ4JNI.LZ4_decompress_fast(src, null, srcOff, dest, null, destOff, destLen);
+ if (result < 0) {
+ throw new LZ4Exception("Error decoding offset " + (srcOff - result) + " of input buffer");
+ }
+ return result;
+ }
+
+ @Override
+ public int decompress(ByteBuffer src, int srcOff, ByteBuffer dest, int destOff, int destLen) {
+ ByteBufferUtils.checkNotReadOnly(dest);
+ ByteBufferUtils.checkRange(src, srcOff);
+ ByteBufferUtils.checkRange(dest, destOff, destLen);
+
+ if ((src.hasArray() || src.isDirect()) && (dest.hasArray() || dest.isDirect())) {
+ byte[] srcArr = null, destArr = null;
+ ByteBuffer srcBuf = null, destBuf = null;
+ if (src.hasArray()) {
+ srcArr = src.array();
+ srcOff += src.arrayOffset();
+ } else {
+ assert src.isDirect();
+ srcBuf = src;
+ }
+ if (dest.hasArray()) {
+ destArr = dest.array();
+ destOff += dest.arrayOffset();
+ } else {
+ assert dest.isDirect();
+ destBuf = dest;
+ }
+
+ final int result = LZ4JNI.LZ4_decompress_fast(srcArr, srcBuf, srcOff, destArr, destBuf, destOff, destLen);
+ if (result < 0) {
+ throw new LZ4Exception("Error decoding offset " + (srcOff - result) + " of input buffer");
+ }
+ return result;
+ } else {
+ LZ4FastDecompressor safeInstance = SAFE_INSTANCE;
+ if (safeInstance == null) {
+ safeInstance = SAFE_INSTANCE = LZ4Factory.safeInstance().fastDecompressor();
+ }
+ return safeInstance.decompress(src, srcOff, dest, destOff, destLen);
+ }
+ }
+
+}
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JNISafeDecompressor.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JNISafeDecompressor.java
new file mode 100644
index 000000000..1dfac61a9
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JNISafeDecompressor.java
@@ -0,0 +1,81 @@
+package com.fr.third.net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+import java.nio.ByteBuffer;
+
+import com.fr.third.net.jpountz.util.ByteBufferUtils;
+import com.fr.third.net.jpountz.util.SafeUtils;
+
+/**
+ * {@link LZ4SafeDecompressor} implemented with JNI bindings to the original C
+ * implementation of LZ4.
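+ * <p>
+ * Unlike the fast decompressor, only an upper bound on the decompressed size is required; the
+ * return value is the actual decompressed length. Illustrative sketch:
+ * <pre>{@code
+ * LZ4SafeDecompressor d = LZ4Factory.fastestInstance().safeDecompressor();
+ * byte[] restored = new byte[maxDecompressedLength];
+ * int decompressedLen = d.decompress(compressed, 0, compressed.length, restored, 0, restored.length);
+ * }</pre>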
+ */
+final class LZ4JNISafeDecompressor extends LZ4SafeDecompressor {
+
+ public static final LZ4JNISafeDecompressor INSTANCE = new LZ4JNISafeDecompressor();
+ private static LZ4SafeDecompressor SAFE_INSTANCE;
+
+ @Override
+ public final int decompress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen) {
+ SafeUtils.checkRange(src, srcOff, srcLen);
+ SafeUtils.checkRange(dest, destOff, maxDestLen);
+ final int result = LZ4JNI.LZ4_decompress_safe(src, null, srcOff, srcLen, dest, null, destOff, maxDestLen);
+ if (result < 0) {
+ throw new LZ4Exception("Error decoding offset " + (srcOff - result) + " of input buffer");
+ }
+ return result;
+ }
+
+ @Override
+ public int decompress(ByteBuffer src, int srcOff, int srcLen, ByteBuffer dest, int destOff, int maxDestLen) {
+ ByteBufferUtils.checkNotReadOnly(dest);
+ ByteBufferUtils.checkRange(src, srcOff, srcLen);
+ ByteBufferUtils.checkRange(dest, destOff, maxDestLen);
+
+ if ((src.hasArray() || src.isDirect()) && (dest.hasArray() || dest.isDirect())) {
+ byte[] srcArr = null, destArr = null;
+ ByteBuffer srcBuf = null, destBuf = null;
+ if (src.hasArray()) {
+ srcArr = src.array();
+ srcOff += src.arrayOffset();
+ } else {
+ assert src.isDirect();
+ srcBuf = src;
+ }
+ if (dest.hasArray()) {
+ destArr = dest.array();
+ destOff += dest.arrayOffset();
+ } else {
+ assert dest.isDirect();
+ destBuf = dest;
+ }
+
+ final int result = LZ4JNI.LZ4_decompress_safe(srcArr, srcBuf, srcOff, srcLen, destArr, destBuf, destOff, maxDestLen);
+ if (result < 0) {
+ throw new LZ4Exception("Error decoding offset " + (srcOff - result) + " of input buffer");
+ }
+ return result;
+ } else {
+ LZ4SafeDecompressor safeInstance = SAFE_INSTANCE;
+ if (safeInstance == null) {
+ safeInstance = SAFE_INSTANCE = LZ4Factory.safeInstance().safeDecompressor();
+ }
+ return safeInstance.decompress(src, srcOff, srcLen, dest, destOff, maxDestLen);
+ }
+ }
+
+}
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JavaSafeCompressor.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JavaSafeCompressor.java
new file mode 100644
index 000000000..6970034be
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JavaSafeCompressor.java
@@ -0,0 +1,511 @@
+// Auto-generated: DO NOT EDIT
+
+package com.fr.third.net.jpountz.lz4;
+
+import static com.fr.third.net.jpountz.lz4.LZ4Constants.*;
+import static com.fr.third.net.jpountz.lz4.LZ4Utils.*;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+import com.fr.third.net.jpountz.util.ByteBufferUtils;
+import com.fr.third.net.jpountz.util.SafeUtils;
+
+/**
+ * Compressor.
+ */
+final class LZ4JavaSafeCompressor extends LZ4Compressor {
+
+ public static final LZ4Compressor INSTANCE = new LZ4JavaSafeCompressor();
+
+ static int compress64k(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int destEnd) {
+ final int srcEnd = srcOff + srcLen;
+ final int srcLimit = srcEnd - LAST_LITERALS;
+ final int mflimit = srcEnd - MF_LIMIT;
+
+ int sOff = srcOff, dOff = destOff;
+
+ int anchor = sOff;
+
+ if (srcLen >= MIN_LENGTH) {
+
+ final short[] hashTable = new short[HASH_TABLE_SIZE_64K];
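+      // With srcLen below the 64 KB limit, every position relative to srcOff fits in 16 bits,
+      // so a compact short[] table of positions is sufficient for match finding.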
+
+ ++sOff;
+
+ main:
+ while (true) {
+
+ // find a match
+ int forwardOff = sOff;
+
+ int ref;
+ int step = 1;
+ int searchMatchNb = 1 << SKIP_STRENGTH;
+ do {
+ sOff = forwardOff;
+ forwardOff += step;
+ step = searchMatchNb++ >>> SKIP_STRENGTH;
+
+ if (forwardOff > mflimit) {
+ break main;
+ }
+
+ final int h = hash64k(SafeUtils.readInt(src, sOff));
+ ref = srcOff + SafeUtils.readShort(hashTable, h);
+ SafeUtils.writeShort(hashTable, h, sOff - srcOff);
+ } while (!LZ4SafeUtils.readIntEquals(src, ref, sOff));
+
+ // catch up
+ final int excess = LZ4SafeUtils.commonBytesBackward(src, ref, sOff, srcOff, anchor);
+ sOff -= excess;
+ ref -= excess;
+
+ // sequence == refsequence
+ final int runLen = sOff - anchor;
+
+ // encode literal length
+ int tokenOff = dOff++;
+
+ if (dOff + runLen + (2 + 1 + LAST_LITERALS) + (runLen >>> 8) > destEnd) {
+ throw new LZ4Exception("maxDestLen is too small");
+ }
+
+ if (runLen >= RUN_MASK) {
+ SafeUtils.writeByte(dest, tokenOff, RUN_MASK << ML_BITS);
+ dOff = LZ4SafeUtils.writeLen(runLen - RUN_MASK, dest, dOff);
+ } else {
+ SafeUtils.writeByte(dest, tokenOff, runLen << ML_BITS);
+ }
+
+ // copy literals
+ LZ4SafeUtils.wildArraycopy(src, anchor, dest, dOff, runLen);
+ dOff += runLen;
+
+ while (true) {
+ // encode offset
+ SafeUtils.writeShortLE(dest, dOff, (short) (sOff - ref));
+ dOff += 2;
+
+ // count nb matches
+ sOff += MIN_MATCH;
+ ref += MIN_MATCH;
+ final int matchLen = LZ4SafeUtils.commonBytes(src, ref, sOff, srcLimit);
+ if (dOff + (1 + LAST_LITERALS) + (matchLen >>> 8) > destEnd) {
+ throw new LZ4Exception("maxDestLen is too small");
+ }
+ sOff += matchLen;
+
+ // encode match len
+ if (matchLen >= ML_MASK) {
+ SafeUtils.writeByte(dest, tokenOff, SafeUtils.readByte(dest, tokenOff) | ML_MASK);
+ dOff = LZ4SafeUtils.writeLen(matchLen - ML_MASK, dest, dOff);
+ } else {
+ SafeUtils.writeByte(dest, tokenOff, SafeUtils.readByte(dest, tokenOff) | matchLen);
+ }
+
+ // test end of chunk
+ if (sOff > mflimit) {
+ anchor = sOff;
+ break main;
+ }
+
+ // fill table
+ SafeUtils.writeShort(hashTable, hash64k(SafeUtils.readInt(src, sOff - 2)), sOff - 2 - srcOff);
+
+ // test next position
+ final int h = hash64k(SafeUtils.readInt(src, sOff));
+ ref = srcOff + SafeUtils.readShort(hashTable, h);
+ SafeUtils.writeShort(hashTable, h, sOff - srcOff);
+
+ if (!LZ4SafeUtils.readIntEquals(src, sOff, ref)) {
+ break;
+ }
+
+ tokenOff = dOff++;
+ SafeUtils.writeByte(dest, tokenOff, 0);
+ }
+
+ // prepare next loop
+ anchor = sOff++;
+ }
+ }
+
+ dOff = LZ4SafeUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd);
+ return dOff - destOff;
+ }
+
+ @Override
+ public int compress(byte[] src, final int srcOff, int srcLen, byte[] dest, final int destOff, int maxDestLen) {
+
+ SafeUtils.checkRange(src, srcOff, srcLen);
+ SafeUtils.checkRange(dest, destOff, maxDestLen);
+ final int destEnd = destOff + maxDestLen;
+
+ if (srcLen < LZ4_64K_LIMIT) {
+ return compress64k(src, srcOff, srcLen, dest, destOff, destEnd);
+ }
+
+ final int srcEnd = srcOff + srcLen;
+ final int srcLimit = srcEnd - LAST_LITERALS;
+ final int mflimit = srcEnd - MF_LIMIT;
+
+ int sOff = srcOff, dOff = destOff;
+ int anchor = sOff++;
+
+ final int[] hashTable = new int[HASH_TABLE_SIZE];
+ Arrays.fill(hashTable, anchor);
+
+ main:
+ while (true) {
+
+ // find a match
+ int forwardOff = sOff;
+
+ int ref;
+ int step = 1;
+ int searchMatchNb = 1 << SKIP_STRENGTH;
+ int back;
+ do {
+ sOff = forwardOff;
+ forwardOff += step;
+ step = searchMatchNb++ >>> SKIP_STRENGTH;
+
+ if (forwardOff > mflimit) {
+ break main;
+ }
+
+ final int h = hash(SafeUtils.readInt(src, sOff));
+ ref = SafeUtils.readInt(hashTable, h);
+ back = sOff - ref;
+ SafeUtils.writeInt(hashTable, h, sOff);
+ } while (back >= MAX_DISTANCE || !LZ4SafeUtils.readIntEquals(src, ref, sOff));
+
+
+ final int excess = LZ4SafeUtils.commonBytesBackward(src, ref, sOff, srcOff, anchor);
+ sOff -= excess;
+ ref -= excess;
+
+ // sequence == refsequence
+ final int runLen = sOff - anchor;
+
+ // encode literal length
+ int tokenOff = dOff++;
+
+ if (dOff + runLen + (2 + 1 + LAST_LITERALS) + (runLen >>> 8) > destEnd) {
+ throw new LZ4Exception("maxDestLen is too small");
+ }
+
+ if (runLen >= RUN_MASK) {
+ SafeUtils.writeByte(dest, tokenOff, RUN_MASK << ML_BITS);
+ dOff = LZ4SafeUtils.writeLen(runLen - RUN_MASK, dest, dOff);
+ } else {
+ SafeUtils.writeByte(dest, tokenOff, runLen << ML_BITS);
+ }
+
+ // copy literals
+ LZ4SafeUtils.wildArraycopy(src, anchor, dest, dOff, runLen);
+ dOff += runLen;
+
+ while (true) {
+ // encode offset
+ SafeUtils.writeShortLE(dest, dOff, back);
+ dOff += 2;
+
+ // count nb matches
+ sOff += MIN_MATCH;
+ final int matchLen = LZ4SafeUtils.commonBytes(src, ref + MIN_MATCH, sOff, srcLimit);
+ if (dOff + (1 + LAST_LITERALS) + (matchLen >>> 8) > destEnd) {
+ throw new LZ4Exception("maxDestLen is too small");
+ }
+ sOff += matchLen;
+
+ // encode match len
+ if (matchLen >= ML_MASK) {
+ SafeUtils.writeByte(dest, tokenOff, SafeUtils.readByte(dest, tokenOff) | ML_MASK);
+ dOff = LZ4SafeUtils.writeLen(matchLen - ML_MASK, dest, dOff);
+ } else {
+ SafeUtils.writeByte(dest, tokenOff, SafeUtils.readByte(dest, tokenOff) | matchLen);
+ }
+
+ // test end of chunk
+ if (sOff > mflimit) {
+ anchor = sOff;
+ break main;
+ }
+
+ // fill table
+ SafeUtils.writeInt(hashTable, hash(SafeUtils.readInt(src, sOff - 2)), sOff - 2);
+
+ // test next position
+ final int h = hash(SafeUtils.readInt(src, sOff));
+ ref = SafeUtils.readInt(hashTable, h);
+ SafeUtils.writeInt(hashTable, h, sOff);
+ back = sOff - ref;
+
+ if (back >= MAX_DISTANCE || !LZ4SafeUtils.readIntEquals(src, ref, sOff)) {
+ break;
+ }
+
+ tokenOff = dOff++;
+ SafeUtils.writeByte(dest, tokenOff, 0);
+ }
+
+ // prepare next loop
+ anchor = sOff++;
+ }
+
+ dOff = LZ4SafeUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd);
+ return dOff - destOff;
+ }
+
+
+ static int compress64k(ByteBuffer src, int srcOff, int srcLen, ByteBuffer dest, int destOff, int destEnd) {
+ final int srcEnd = srcOff + srcLen;
+ final int srcLimit = srcEnd - LAST_LITERALS;
+ final int mflimit = srcEnd - MF_LIMIT;
+
+ int sOff = srcOff, dOff = destOff;
+
+ int anchor = sOff;
+
+ if (srcLen >= MIN_LENGTH) {
+
+ final short[] hashTable = new short[HASH_TABLE_SIZE_64K];
+
+ ++sOff;
+
+ main:
+ while (true) {
+
+ // find a match
+ int forwardOff = sOff;
+
+ int ref;
+ int step = 1;
+ int searchMatchNb = 1 << SKIP_STRENGTH;
+ do {
+ sOff = forwardOff;
+ forwardOff += step;
+ step = searchMatchNb++ >>> SKIP_STRENGTH;
+
+ if (forwardOff > mflimit) {
+ break main;
+ }
+
+ final int h = hash64k(ByteBufferUtils.readInt(src, sOff));
+ ref = srcOff + SafeUtils.readShort(hashTable, h);
+ SafeUtils.writeShort(hashTable, h, sOff - srcOff);
+ } while (!LZ4ByteBufferUtils.readIntEquals(src, ref, sOff));
+
+ // catch up
+ final int excess = LZ4ByteBufferUtils.commonBytesBackward(src, ref, sOff, srcOff, anchor);
+ sOff -= excess;
+ ref -= excess;
+
+ // sequence == refsequence
+ final int runLen = sOff - anchor;
+
+ // encode literal length
+ int tokenOff = dOff++;
+
+ if (dOff + runLen + (2 + 1 + LAST_LITERALS) + (runLen >>> 8) > destEnd) {
+ throw new LZ4Exception("maxDestLen is too small");
+ }
+
+ if (runLen >= RUN_MASK) {
+ ByteBufferUtils.writeByte(dest, tokenOff, RUN_MASK << ML_BITS);
+ dOff = LZ4ByteBufferUtils.writeLen(runLen - RUN_MASK, dest, dOff);
+ } else {
+ ByteBufferUtils.writeByte(dest, tokenOff, runLen << ML_BITS);
+ }
+
+ // copy literals
+ LZ4ByteBufferUtils.wildArraycopy(src, anchor, dest, dOff, runLen);
+ dOff += runLen;
+
+ while (true) {
+ // encode offset
+ ByteBufferUtils.writeShortLE(dest, dOff, (short) (sOff - ref));
+ dOff += 2;
+
+ // count nb matches
+ sOff += MIN_MATCH;
+ ref += MIN_MATCH;
+ final int matchLen = LZ4ByteBufferUtils.commonBytes(src, ref, sOff, srcLimit);
+ if (dOff + (1 + LAST_LITERALS) + (matchLen >>> 8) > destEnd) {
+ throw new LZ4Exception("maxDestLen is too small");
+ }
+ sOff += matchLen;
+
+ // encode match len
+ if (matchLen >= ML_MASK) {
+ ByteBufferUtils.writeByte(dest, tokenOff, ByteBufferUtils.readByte(dest, tokenOff) | ML_MASK);
+ dOff = LZ4ByteBufferUtils.writeLen(matchLen - ML_MASK, dest, dOff);
+ } else {
+ ByteBufferUtils.writeByte(dest, tokenOff, ByteBufferUtils.readByte(dest, tokenOff) | matchLen);
+ }
+
+ // test end of chunk
+ if (sOff > mflimit) {
+ anchor = sOff;
+ break main;
+ }
+
+ // fill table
+ SafeUtils.writeShort(hashTable, hash64k(ByteBufferUtils.readInt(src, sOff - 2)), sOff - 2 - srcOff);
+
+ // test next position
+ final int h = hash64k(ByteBufferUtils.readInt(src, sOff));
+ ref = srcOff + SafeUtils.readShort(hashTable, h);
+ SafeUtils.writeShort(hashTable, h, sOff - srcOff);
+
+ if (!LZ4ByteBufferUtils.readIntEquals(src, sOff, ref)) {
+ break;
+ }
+
+ tokenOff = dOff++;
+ ByteBufferUtils.writeByte(dest, tokenOff, 0);
+ }
+
+ // prepare next loop
+ anchor = sOff++;
+ }
+ }
+
+ dOff = LZ4ByteBufferUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd);
+ return dOff - destOff;
+ }
+
+ @Override
+ public int compress(ByteBuffer src, final int srcOff, int srcLen, ByteBuffer dest, final int destOff, int maxDestLen) {
+
+ if (src.hasArray() && dest.hasArray()) {
+ return compress(src.array(), srcOff + src.arrayOffset(), srcLen, dest.array(), destOff + dest.arrayOffset(), maxDestLen);
+ }
+ src = ByteBufferUtils.inNativeByteOrder(src);
+ dest = ByteBufferUtils.inNativeByteOrder(dest);
+
+ ByteBufferUtils.checkRange(src, srcOff, srcLen);
+ ByteBufferUtils.checkRange(dest, destOff, maxDestLen);
+ final int destEnd = destOff + maxDestLen;
+
+ if (srcLen < LZ4_64K_LIMIT) {
+ return compress64k(src, srcOff, srcLen, dest, destOff, destEnd);
+ }
+
+ final int srcEnd = srcOff + srcLen;
+ final int srcLimit = srcEnd - LAST_LITERALS;
+ final int mflimit = srcEnd - MF_LIMIT;
+
+ int sOff = srcOff, dOff = destOff;
+ int anchor = sOff++;
+
+ final int[] hashTable = new int[HASH_TABLE_SIZE];
+ Arrays.fill(hashTable, anchor);
+
+ main:
+ while (true) {
+
+ // find a match
+ int forwardOff = sOff;
+
+ int ref;
+ int step = 1;
+ int searchMatchNb = 1 << SKIP_STRENGTH;
+ int back;
+ do {
+ sOff = forwardOff;
+ forwardOff += step;
+ step = searchMatchNb++ >>> SKIP_STRENGTH;
+
+ if (forwardOff > mflimit) {
+ break main;
+ }
+
+ final int h = hash(ByteBufferUtils.readInt(src, sOff));
+ ref = SafeUtils.readInt(hashTable, h);
+ back = sOff - ref;
+ SafeUtils.writeInt(hashTable, h, sOff);
+ } while (back >= MAX_DISTANCE || !LZ4ByteBufferUtils.readIntEquals(src, ref, sOff));
+
+
+ final int excess = LZ4ByteBufferUtils.commonBytesBackward(src, ref, sOff, srcOff, anchor);
+ sOff -= excess;
+ ref -= excess;
+
+ // sequence == refsequence
+ final int runLen = sOff - anchor;
+
+ // encode literal length
+ int tokenOff = dOff++;
+
+ if (dOff + runLen + (2 + 1 + LAST_LITERALS) + (runLen >>> 8) > destEnd) {
+ throw new LZ4Exception("maxDestLen is too small");
+ }
+
+ if (runLen >= RUN_MASK) {
+ ByteBufferUtils.writeByte(dest, tokenOff, RUN_MASK << ML_BITS);
+ dOff = LZ4ByteBufferUtils.writeLen(runLen - RUN_MASK, dest, dOff);
+ } else {
+ ByteBufferUtils.writeByte(dest, tokenOff, runLen << ML_BITS);
+ }
+
+ // copy literals
+ LZ4ByteBufferUtils.wildArraycopy(src, anchor, dest, dOff, runLen);
+ dOff += runLen;
+
+ while (true) {
+ // encode offset
+ ByteBufferUtils.writeShortLE(dest, dOff, back);
+ dOff += 2;
+
+ // count nb matches
+ sOff += MIN_MATCH;
+ final int matchLen = LZ4ByteBufferUtils.commonBytes(src, ref + MIN_MATCH, sOff, srcLimit);
+ if (dOff + (1 + LAST_LITERALS) + (matchLen >>> 8) > destEnd) {
+ throw new LZ4Exception("maxDestLen is too small");
+ }
+ sOff += matchLen;
+
+ // encode match len
+ if (matchLen >= ML_MASK) {
+ ByteBufferUtils.writeByte(dest, tokenOff, ByteBufferUtils.readByte(dest, tokenOff) | ML_MASK);
+ dOff = LZ4ByteBufferUtils.writeLen(matchLen - ML_MASK, dest, dOff);
+ } else {
+ ByteBufferUtils.writeByte(dest, tokenOff, ByteBufferUtils.readByte(dest, tokenOff) | matchLen);
+ }
+
+ // test end of chunk
+ if (sOff > mflimit) {
+ anchor = sOff;
+ break main;
+ }
+
+ // fill table
+ SafeUtils.writeInt(hashTable, hash(ByteBufferUtils.readInt(src, sOff - 2)), sOff - 2);
+
+ // test next position
+ final int h = hash(ByteBufferUtils.readInt(src, sOff));
+ ref = SafeUtils.readInt(hashTable, h);
+ SafeUtils.writeInt(hashTable, h, sOff);
+ back = sOff - ref;
+
+ if (back >= MAX_DISTANCE || !LZ4ByteBufferUtils.readIntEquals(src, ref, sOff)) {
+ break;
+ }
+
+ tokenOff = dOff++;
+ ByteBufferUtils.writeByte(dest, tokenOff, 0);
+ }
+
+ // prepare next loop
+ anchor = sOff++;
+ }
+
+ dOff = LZ4ByteBufferUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd);
+ return dOff - destOff;
+ }
+
+
+}
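LZ4JavaSafeCompressor is the bounds-checked pure-Java path: as the checks above show, it throws LZ4Exception("maxDestLen is too small") whenever the destination window cannot hold the next literal run or match. A short sizing sketch follows, assuming LZ4Factory.safeInstance() and LZ4Compressor.maxCompressedLength() behave as in upstream lz4-java; it is an illustration, not part of this change.

import com.fr.third.net.jpountz.lz4.LZ4Compressor;
import com.fr.third.net.jpountz.lz4.LZ4Exception;
import com.fr.third.net.jpountz.lz4.LZ4Factory;

public class SafeCompressorSizingSketch {

    static byte[] compressWithWorstCaseBuffer(byte[] src) {
        // Explicitly select the implementation defined above (no JNI, no sun.misc.Unsafe).
        LZ4Compressor compressor = LZ4Factory.safeInstance().fastCompressor();

        // maxCompressedLength covers the worst case (incompressible input grows slightly),
        // so the "maxDestLen is too small" path cannot trigger here.
        byte[] dest = new byte[compressor.maxCompressedLength(src.length)];
        int len = compressor.compress(src, 0, src.length, dest, 0, dest.length);

        byte[] exact = new byte[len];
        System.arraycopy(dest, 0, exact, 0, len);
        return exact;
    }

    static int compressIntoFixedBuffer(byte[] src, byte[] dest) {
        try {
            return LZ4Factory.safeInstance().fastCompressor().compress(src, 0, src.length, dest, 0, dest.length);
        } catch (LZ4Exception e) {
            return -1; // dest was smaller than the compressed form of src
        }
    }
}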
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JavaSafeFastDecompressor.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JavaSafeFastDecompressor.java
new file mode 100644
index 000000000..5a28e39d7
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JavaSafeFastDecompressor.java
@@ -0,0 +1,205 @@
+// Auto-generated: DO NOT EDIT
+
+package com.fr.third.net.jpountz.lz4;
+
+import static com.fr.third.net.jpountz.lz4.LZ4Constants.*;
+
+import java.nio.ByteBuffer;
+
+import com.fr.third.net.jpountz.util.ByteBufferUtils;
+import com.fr.third.net.jpountz.util.SafeUtils;
+
+/**
+ * Decompressor.
+ */
+final class LZ4JavaSafeFastDecompressor extends LZ4FastDecompressor {
+
+ public static final LZ4FastDecompressor INSTANCE = new LZ4JavaSafeFastDecompressor();
+
+ @Override
+ public int decompress(byte[] src, final int srcOff, byte[] dest, final int destOff, int destLen) {
+
+
+ SafeUtils.checkRange(src, srcOff);
+ SafeUtils.checkRange(dest, destOff, destLen);
+
+ if (destLen == 0) {
+ if (SafeUtils.readByte(src, srcOff) != 0) {
+ throw new LZ4Exception("Malformed input at " + srcOff);
+ }
+ return 1;
+ }
+
+
+ final int destEnd = destOff + destLen;
+
+ int sOff = srcOff;
+ int dOff = destOff;
+
+ while (true) {
+ final int token = SafeUtils.readByte(src, sOff) & 0xFF;
+ ++sOff;
+
+ // literals
+ int literalLen = token >>> ML_BITS;
+ if (literalLen == RUN_MASK) {
+ byte len = (byte) 0xFF;
+ while ((len = SafeUtils.readByte(src, sOff++)) == (byte) 0xFF) {
+ literalLen += 0xFF;
+ }
+ literalLen += len & 0xFF;
+ }
+
+ final int literalCopyEnd = dOff + literalLen;
+
+ if (literalCopyEnd > destEnd - COPY_LENGTH) {
+ if (literalCopyEnd != destEnd) {
+ throw new LZ4Exception("Malformed input at " + sOff);
+
+ } else {
+ LZ4SafeUtils.safeArraycopy(src, sOff, dest, dOff, literalLen);
+ sOff += literalLen;
+ dOff = literalCopyEnd;
+ break; // EOF
+ }
+ }
+
+ LZ4SafeUtils.wildArraycopy(src, sOff, dest, dOff, literalLen);
+ sOff += literalLen;
+ dOff = literalCopyEnd;
+
+ // matches
+ final int matchDec = SafeUtils.readShortLE(src, sOff);
+ sOff += 2;
+ int matchOff = dOff - matchDec;
+
+ if (matchOff < destOff) {
+ throw new LZ4Exception("Malformed input at " + sOff);
+ }
+
+ int matchLen = token & ML_MASK;
+ if (matchLen == ML_MASK) {
+ byte len = (byte) 0xFF;
+ while ((len = SafeUtils.readByte(src, sOff++)) == (byte) 0xFF) {
+ matchLen += 0xFF;
+ }
+ matchLen += len & 0xFF;
+ }
+ matchLen += MIN_MATCH;
+
+ final int matchCopyEnd = dOff + matchLen;
+
+ if (matchCopyEnd > destEnd - COPY_LENGTH) {
+ if (matchCopyEnd > destEnd) {
+ throw new LZ4Exception("Malformed input at " + sOff);
+ }
+ LZ4SafeUtils.safeIncrementalCopy(dest, matchOff, dOff, matchLen);
+ } else {
+ LZ4SafeUtils.wildIncrementalCopy(dest, matchOff, dOff, matchCopyEnd);
+ }
+ dOff = matchCopyEnd;
+ }
+
+
+ return sOff - srcOff;
+
+ }
+
+ @Override
+ public int decompress(ByteBuffer src, final int srcOff, ByteBuffer dest, final int destOff, int destLen) {
+
+ if (src.hasArray() && dest.hasArray()) {
+ return decompress(src.array(), srcOff + src.arrayOffset(), dest.array(), destOff + dest.arrayOffset(), destLen);
+ }
+ src = ByteBufferUtils.inNativeByteOrder(src);
+ dest = ByteBufferUtils.inNativeByteOrder(dest);
+
+
+ ByteBufferUtils.checkRange(src, srcOff);
+ ByteBufferUtils.checkRange(dest, destOff, destLen);
+
+ if (destLen == 0) {
+ if (ByteBufferUtils.readByte(src, srcOff) != 0) {
+ throw new LZ4Exception("Malformed input at " + srcOff);
+ }
+ return 1;
+ }
+
+
+ final int destEnd = destOff + destLen;
+
+ int sOff = srcOff;
+ int dOff = destOff;
+
+ while (true) {
+ final int token = ByteBufferUtils.readByte(src, sOff) & 0xFF;
+ ++sOff;
+
+ // literals
+ int literalLen = token >>> ML_BITS;
+ if (literalLen == RUN_MASK) {
+ byte len = (byte) 0xFF;
+ while ((len = ByteBufferUtils.readByte(src, sOff++)) == (byte) 0xFF) {
+ literalLen += 0xFF;
+ }
+ literalLen += len & 0xFF;
+ }
+
+ final int literalCopyEnd = dOff + literalLen;
+
+ if (literalCopyEnd > destEnd - COPY_LENGTH) {
+ if (literalCopyEnd != destEnd) {
+ throw new LZ4Exception("Malformed input at " + sOff);
+
+ } else {
+ LZ4ByteBufferUtils.safeArraycopy(src, sOff, dest, dOff, literalLen);
+ sOff += literalLen;
+ dOff = literalCopyEnd;
+ break; // EOF
+ }
+ }
+
+ LZ4ByteBufferUtils.wildArraycopy(src, sOff, dest, dOff, literalLen);
+ sOff += literalLen;
+ dOff = literalCopyEnd;
+
+ // matches
+ final int matchDec = ByteBufferUtils.readShortLE(src, sOff);
+ sOff += 2;
+ int matchOff = dOff - matchDec;
+
+ if (matchOff < destOff) {
+ throw new LZ4Exception("Malformed input at " + sOff);
+ }
+
+ int matchLen = token & ML_MASK;
+ if (matchLen == ML_MASK) {
+ byte len = (byte) 0xFF;
+ while ((len = ByteBufferUtils.readByte(src, sOff++)) == (byte) 0xFF) {
+ matchLen += 0xFF;
+ }
+ matchLen += len & 0xFF;
+ }
+ matchLen += MIN_MATCH;
+
+ final int matchCopyEnd = dOff + matchLen;
+
+ if (matchCopyEnd > destEnd - COPY_LENGTH) {
+ if (matchCopyEnd > destEnd) {
+ throw new LZ4Exception("Malformed input at " + sOff);
+ }
+ LZ4ByteBufferUtils.safeIncrementalCopy(dest, matchOff, dOff, matchLen);
+ } else {
+ LZ4ByteBufferUtils.wildIncrementalCopy(dest, matchOff, dOff, matchCopyEnd);
+ }
+ dOff = matchCopyEnd;
+ }
+
+
+ return sOff - srcOff;
+
+ }
+
+
+}
+
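The fast decompressor above is driven by the exact decompressed length (destLen) and returns the number of compressed bytes it consumed, so the caller must have stored the original size alongside the block. A hedged usage sketch, under the same assumption that the factory API mirrors upstream lz4-java:

import com.fr.third.net.jpountz.lz4.LZ4Factory;
import com.fr.third.net.jpountz.lz4.LZ4FastDecompressor;

public class FastDecompressSketch {

    /**
     * @param compressed  an LZ4 block
     * @param originalLen the decompressed size, recorded by the writer next to the block
     */
    static byte[] decompress(byte[] compressed, int originalLen) {
        LZ4FastDecompressor decompressor = LZ4Factory.fastestInstance().fastDecompressor();
        byte[] restored = new byte[originalLen];
        // Return value is the number of compressed bytes read; handy when blocks are concatenated.
        int compressedBytesRead = decompressor.decompress(compressed, 0, restored, 0, originalLen);
        assert compressedBytesRead <= compressed.length;
        return restored;
    }
}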
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JavaSafeSafeDecompressor.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JavaSafeSafeDecompressor.java
new file mode 100644
index 000000000..21b427c40
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JavaSafeSafeDecompressor.java
@@ -0,0 +1,213 @@
+// Auto-generated: DO NOT EDIT
+
+package com.fr.third.net.jpountz.lz4;
+
+import static com.fr.third.net.jpountz.lz4.LZ4Constants.*;
+
+import java.nio.ByteBuffer;
+
+import com.fr.third.net.jpountz.util.ByteBufferUtils;
+import com.fr.third.net.jpountz.util.SafeUtils;
+
+/**
+ * Decompressor.
+ */
+final class LZ4JavaSafeSafeDecompressor extends LZ4SafeDecompressor {
+
+ public static final LZ4SafeDecompressor INSTANCE = new LZ4JavaSafeSafeDecompressor();
+
+ @Override
+ public int decompress(byte[] src, final int srcOff, final int srcLen, byte[] dest, final int destOff, int destLen) {
+
+
+ SafeUtils.checkRange(src, srcOff, srcLen);
+ SafeUtils.checkRange(dest, destOff, destLen);
+
+ if (destLen == 0) {
+ if (srcLen != 1 || SafeUtils.readByte(src, srcOff) != 0) {
+ throw new LZ4Exception("Output buffer too small");
+ }
+ return 0;
+ }
+
+ final int srcEnd = srcOff + srcLen;
+
+
+ final int destEnd = destOff + destLen;
+
+ int sOff = srcOff;
+ int dOff = destOff;
+
+ while (true) {
+ final int token = SafeUtils.readByte(src, sOff) & 0xFF;
+ ++sOff;
+
+ // literals
+ int literalLen = token >>> ML_BITS;
+ if (literalLen == RUN_MASK) {
+ byte len = (byte) 0xFF;
+ while (sOff < srcEnd && (len = SafeUtils.readByte(src, sOff++)) == (byte) 0xFF) {
+ literalLen += 0xFF;
+ }
+ literalLen += len & 0xFF;
+ }
+
+ final int literalCopyEnd = dOff + literalLen;
+
+ if (literalCopyEnd > destEnd - COPY_LENGTH || sOff + literalLen > srcEnd - COPY_LENGTH) {
+ if (literalCopyEnd > destEnd) {
+ throw new LZ4Exception();
+ } else if (sOff + literalLen != srcEnd) {
+ throw new LZ4Exception("Malformed input at " + sOff);
+
+ } else {
+ LZ4SafeUtils.safeArraycopy(src, sOff, dest, dOff, literalLen);
+ sOff += literalLen;
+ dOff = literalCopyEnd;
+ break; // EOF
+ }
+ }
+
+ LZ4SafeUtils.wildArraycopy(src, sOff, dest, dOff, literalLen);
+ sOff += literalLen;
+ dOff = literalCopyEnd;
+
+ // matches
+ final int matchDec = SafeUtils.readShortLE(src, sOff);
+ sOff += 2;
+ int matchOff = dOff - matchDec;
+
+ if (matchOff < destOff) {
+ throw new LZ4Exception("Malformed input at " + sOff);
+ }
+
+ int matchLen = token & ML_MASK;
+ if (matchLen == ML_MASK) {
+ byte len = (byte) 0xFF;
+ while (sOff < srcEnd && (len = SafeUtils.readByte(src, sOff++)) == (byte) 0xFF) {
+ matchLen += 0xFF;
+ }
+ matchLen += len & 0xFF;
+ }
+ matchLen += MIN_MATCH;
+
+ final int matchCopyEnd = dOff + matchLen;
+
+ if (matchCopyEnd > destEnd - COPY_LENGTH) {
+ if (matchCopyEnd > destEnd) {
+ throw new LZ4Exception("Malformed input at " + sOff);
+ }
+ LZ4SafeUtils.safeIncrementalCopy(dest, matchOff, dOff, matchLen);
+ } else {
+ LZ4SafeUtils.wildIncrementalCopy(dest, matchOff, dOff, matchCopyEnd);
+ }
+ dOff = matchCopyEnd;
+ }
+
+
+ return dOff - destOff;
+
+ }
+
+ @Override
+ public int decompress(ByteBuffer src, final int srcOff, final int srcLen, ByteBuffer dest, final int destOff, int destLen) {
+
+ if (src.hasArray() && dest.hasArray()) {
+ return decompress(src.array(), srcOff + src.arrayOffset(), srcLen, dest.array(), destOff + dest.arrayOffset(), destLen);
+ }
+ src = ByteBufferUtils.inNativeByteOrder(src);
+ dest = ByteBufferUtils.inNativeByteOrder(dest);
+
+
+ ByteBufferUtils.checkRange(src, srcOff, srcLen);
+ ByteBufferUtils.checkRange(dest, destOff, destLen);
+
+ if (destLen == 0) {
+ if (srcLen != 1 || ByteBufferUtils.readByte(src, srcOff) != 0) {
+ throw new LZ4Exception("Output buffer too small");
+ }
+ return 0;
+ }
+
+ final int srcEnd = srcOff + srcLen;
+
+
+ final int destEnd = destOff + destLen;
+
+ int sOff = srcOff;
+ int dOff = destOff;
+
+ while (true) {
+ final int token = ByteBufferUtils.readByte(src, sOff) & 0xFF;
+ ++sOff;
+
+ // literals
+ int literalLen = token >>> ML_BITS;
+ if (literalLen == RUN_MASK) {
+ byte len = (byte) 0xFF;
+ while (sOff < srcEnd && (len = ByteBufferUtils.readByte(src, sOff++)) == (byte) 0xFF) {
+ literalLen += 0xFF;
+ }
+ literalLen += len & 0xFF;
+ }
+
+ final int literalCopyEnd = dOff + literalLen;
+
+ if (literalCopyEnd > destEnd - COPY_LENGTH || sOff + literalLen > srcEnd - COPY_LENGTH) {
+ if (literalCopyEnd > destEnd) {
+ throw new LZ4Exception();
+ } else if (sOff + literalLen != srcEnd) {
+ throw new LZ4Exception("Malformed input at " + sOff);
+
+ } else {
+ LZ4ByteBufferUtils.safeArraycopy(src, sOff, dest, dOff, literalLen);
+ sOff += literalLen;
+ dOff = literalCopyEnd;
+ break; // EOF
+ }
+ }
+
+ LZ4ByteBufferUtils.wildArraycopy(src, sOff, dest, dOff, literalLen);
+ sOff += literalLen;
+ dOff = literalCopyEnd;
+
+ // matches
+ final int matchDec = ByteBufferUtils.readShortLE(src, sOff);
+ sOff += 2;
+ int matchOff = dOff - matchDec;
+
+ if (matchOff < destOff) {
+ throw new LZ4Exception("Malformed input at " + sOff);
+ }
+
+ int matchLen = token & ML_MASK;
+ if (matchLen == ML_MASK) {
+ byte len = (byte) 0xFF;
+ while (sOff < srcEnd && (len = ByteBufferUtils.readByte(src, sOff++)) == (byte) 0xFF) {
+ matchLen += 0xFF;
+ }
+ matchLen += len & 0xFF;
+ }
+ matchLen += MIN_MATCH;
+
+ final int matchCopyEnd = dOff + matchLen;
+
+ if (matchCopyEnd > destEnd - COPY_LENGTH) {
+ if (matchCopyEnd > destEnd) {
+ throw new LZ4Exception("Malformed input at " + sOff);
+ }
+ LZ4ByteBufferUtils.safeIncrementalCopy(dest, matchOff, dOff, matchLen);
+ } else {
+ LZ4ByteBufferUtils.wildIncrementalCopy(dest, matchOff, dOff, matchCopyEnd);
+ }
+ dOff = matchCopyEnd;
+ }
+
+
+ return dOff - destOff;
+
+ }
+
+
+}
+
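Unlike the fast decompressor, the safe variants above are bounded by srcLen and report the decompressed length, which suits formats that do not record the original size. The sketch below relies on the same upstream-factory assumption; the grow-and-retry heuristic is purely illustrative and is not something the library provides.

import com.fr.third.net.jpountz.lz4.LZ4Exception;
import com.fr.third.net.jpountz.lz4.LZ4Factory;
import com.fr.third.net.jpountz.lz4.LZ4SafeDecompressor;

public class SafeDecompressSketch {

    static byte[] decompressUnknownSize(byte[] compressed, int compressedLen) {
        LZ4SafeDecompressor decompressor = LZ4Factory.fastestInstance().safeDecompressor();
        int guess = Math.max(64, compressedLen * 3); // hypothetical starting estimate
        while (true) {
            byte[] dest = new byte[guess];
            try {
                int len = decompressor.decompress(compressed, 0, compressedLen, dest, 0, dest.length);
                byte[] exact = new byte[len];
                System.arraycopy(dest, 0, exact, 0, len);
                return exact;
            } catch (LZ4Exception e) {
                // Thrown both for malformed input and for a destination that is too small;
                // real code should cap the retries rather than grow forever.
                guess *= 2;
                if (guess > 1 << 26) {
                    throw e;
                }
            }
        }
    }
}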
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JavaUnsafeCompressor.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JavaUnsafeCompressor.java
new file mode 100644
index 000000000..d81f2f2f9
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JavaUnsafeCompressor.java
@@ -0,0 +1,511 @@
+// Auto-generated: DO NOT EDIT
+
+package com.fr.third.net.jpountz.lz4;
+
+import static com.fr.third.net.jpountz.lz4.LZ4Constants.*;
+import static com.fr.third.net.jpountz.lz4.LZ4Utils.*;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+import com.fr.third.net.jpountz.util.ByteBufferUtils;
+import com.fr.third.net.jpountz.util.UnsafeUtils;
+
+/**
+ * Compressor.
+ */
+final class LZ4JavaUnsafeCompressor extends LZ4Compressor {
+
+ public static final LZ4Compressor INSTANCE = new LZ4JavaUnsafeCompressor();
+
+ static int compress64k(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int destEnd) {
+ final int srcEnd = srcOff + srcLen;
+ final int srcLimit = srcEnd - LAST_LITERALS;
+ final int mflimit = srcEnd - MF_LIMIT;
+
+ int sOff = srcOff, dOff = destOff;
+
+ int anchor = sOff;
+
+ if (srcLen >= MIN_LENGTH) {
+
+ final short[] hashTable = new short[HASH_TABLE_SIZE_64K];
+
+ ++sOff;
+
+ main:
+ while (true) {
+
+ // find a match
+ int forwardOff = sOff;
+
+ int ref;
+ int step = 1;
+ int searchMatchNb = 1 << SKIP_STRENGTH;
+ do {
+ sOff = forwardOff;
+ forwardOff += step;
+ step = searchMatchNb++ >>> SKIP_STRENGTH;
+
+ if (forwardOff > mflimit) {
+ break main;
+ }
+
+ final int h = hash64k(UnsafeUtils.readInt(src, sOff));
+ ref = srcOff + UnsafeUtils.readShort(hashTable, h);
+ UnsafeUtils.writeShort(hashTable, h, sOff - srcOff);
+ } while (!LZ4UnsafeUtils.readIntEquals(src, ref, sOff));
+
+ // catch up
+ final int excess = LZ4UnsafeUtils.commonBytesBackward(src, ref, sOff, srcOff, anchor);
+ sOff -= excess;
+ ref -= excess;
+
+ // sequence == refsequence
+ final int runLen = sOff - anchor;
+
+ // encode literal length
+ int tokenOff = dOff++;
+
+ if (dOff + runLen + (2 + 1 + LAST_LITERALS) + (runLen >>> 8) > destEnd) {
+ throw new LZ4Exception("maxDestLen is too small");
+ }
+
+ if (runLen >= RUN_MASK) {
+ UnsafeUtils.writeByte(dest, tokenOff, RUN_MASK << ML_BITS);
+ dOff = LZ4UnsafeUtils.writeLen(runLen - RUN_MASK, dest, dOff);
+ } else {
+ UnsafeUtils.writeByte(dest, tokenOff, runLen << ML_BITS);
+ }
+
+ // copy literals
+ LZ4UnsafeUtils.wildArraycopy(src, anchor, dest, dOff, runLen);
+ dOff += runLen;
+
+ while (true) {
+ // encode offset
+ UnsafeUtils.writeShortLE(dest, dOff, (short) (sOff - ref));
+ dOff += 2;
+
+ // count nb matches
+ sOff += MIN_MATCH;
+ ref += MIN_MATCH;
+ final int matchLen = LZ4UnsafeUtils.commonBytes(src, ref, sOff, srcLimit);
+ if (dOff + (1 + LAST_LITERALS) + (matchLen >>> 8) > destEnd) {
+ throw new LZ4Exception("maxDestLen is too small");
+ }
+ sOff += matchLen;
+
+ // encode match len
+ if (matchLen >= ML_MASK) {
+ UnsafeUtils.writeByte(dest, tokenOff, UnsafeUtils.readByte(dest, tokenOff) | ML_MASK);
+ dOff = LZ4UnsafeUtils.writeLen(matchLen - ML_MASK, dest, dOff);
+ } else {
+ UnsafeUtils.writeByte(dest, tokenOff, UnsafeUtils.readByte(dest, tokenOff) | matchLen);
+ }
+
+ // test end of chunk
+ if (sOff > mflimit) {
+ anchor = sOff;
+ break main;
+ }
+
+ // fill table
+ UnsafeUtils.writeShort(hashTable, hash64k(UnsafeUtils.readInt(src, sOff - 2)), sOff - 2 - srcOff);
+
+ // test next position
+ final int h = hash64k(UnsafeUtils.readInt(src, sOff));
+ ref = srcOff + UnsafeUtils.readShort(hashTable, h);
+ UnsafeUtils.writeShort(hashTable, h, sOff - srcOff);
+
+ if (!LZ4UnsafeUtils.readIntEquals(src, sOff, ref)) {
+ break;
+ }
+
+ tokenOff = dOff++;
+ UnsafeUtils.writeByte(dest, tokenOff, 0);
+ }
+
+ // prepare next loop
+ anchor = sOff++;
+ }
+ }
+
+ dOff = LZ4UnsafeUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd);
+ return dOff - destOff;
+ }
+
+ @Override
+ public int compress(byte[] src, final int srcOff, int srcLen, byte[] dest, final int destOff, int maxDestLen) {
+
+ UnsafeUtils.checkRange(src, srcOff, srcLen);
+ UnsafeUtils.checkRange(dest, destOff, maxDestLen);
+ final int destEnd = destOff + maxDestLen;
+
+ if (srcLen < LZ4_64K_LIMIT) {
+ return compress64k(src, srcOff, srcLen, dest, destOff, destEnd);
+ }
+
+ final int srcEnd = srcOff + srcLen;
+ final int srcLimit = srcEnd - LAST_LITERALS;
+ final int mflimit = srcEnd - MF_LIMIT;
+
+ int sOff = srcOff, dOff = destOff;
+ int anchor = sOff++;
+
+ final int[] hashTable = new int[HASH_TABLE_SIZE];
+ Arrays.fill(hashTable, anchor);
+
+ main:
+ while (true) {
+
+ // find a match
+ int forwardOff = sOff;
+
+ int ref;
+ int step = 1;
+ int searchMatchNb = 1 << SKIP_STRENGTH;
+ int back;
+ do {
+ sOff = forwardOff;
+ forwardOff += step;
+ step = searchMatchNb++ >>> SKIP_STRENGTH;
+
+ if (forwardOff > mflimit) {
+ break main;
+ }
+
+ final int h = hash(UnsafeUtils.readInt(src, sOff));
+ ref = UnsafeUtils.readInt(hashTable, h);
+ back = sOff - ref;
+ UnsafeUtils.writeInt(hashTable, h, sOff);
+ } while (back >= MAX_DISTANCE || !LZ4UnsafeUtils.readIntEquals(src, ref, sOff));
+
+
+ final int excess = LZ4UnsafeUtils.commonBytesBackward(src, ref, sOff, srcOff, anchor);
+ sOff -= excess;
+ ref -= excess;
+
+ // sequence == refsequence
+ final int runLen = sOff - anchor;
+
+ // encode literal length
+ int tokenOff = dOff++;
+
+ if (dOff + runLen + (2 + 1 + LAST_LITERALS) + (runLen >>> 8) > destEnd) {
+ throw new LZ4Exception("maxDestLen is too small");
+ }
+
+ if (runLen >= RUN_MASK) {
+ UnsafeUtils.writeByte(dest, tokenOff, RUN_MASK << ML_BITS);
+ dOff = LZ4UnsafeUtils.writeLen(runLen - RUN_MASK, dest, dOff);
+ } else {
+ UnsafeUtils.writeByte(dest, tokenOff, runLen << ML_BITS);
+ }
+
+ // copy literals
+ LZ4UnsafeUtils.wildArraycopy(src, anchor, dest, dOff, runLen);
+ dOff += runLen;
+
+ while (true) {
+ // encode offset
+ UnsafeUtils.writeShortLE(dest, dOff, back);
+ dOff += 2;
+
+ // count nb matches
+ sOff += MIN_MATCH;
+ final int matchLen = LZ4UnsafeUtils.commonBytes(src, ref + MIN_MATCH, sOff, srcLimit);
+ if (dOff + (1 + LAST_LITERALS) + (matchLen >>> 8) > destEnd) {
+ throw new LZ4Exception("maxDestLen is too small");
+ }
+ sOff += matchLen;
+
+ // encode match len
+ if (matchLen >= ML_MASK) {
+ UnsafeUtils.writeByte(dest, tokenOff, UnsafeUtils.readByte(dest, tokenOff) | ML_MASK);
+ dOff = LZ4UnsafeUtils.writeLen(matchLen - ML_MASK, dest, dOff);
+ } else {
+ UnsafeUtils.writeByte(dest, tokenOff, UnsafeUtils.readByte(dest, tokenOff) | matchLen);
+ }
+
+ // test end of chunk
+ if (sOff > mflimit) {
+ anchor = sOff;
+ break main;
+ }
+
+ // fill table
+ UnsafeUtils.writeInt(hashTable, hash(UnsafeUtils.readInt(src, sOff - 2)), sOff - 2);
+
+ // test next position
+ final int h = hash(UnsafeUtils.readInt(src, sOff));
+ ref = UnsafeUtils.readInt(hashTable, h);
+ UnsafeUtils.writeInt(hashTable, h, sOff);
+ back = sOff - ref;
+
+ if (back >= MAX_DISTANCE || !LZ4UnsafeUtils.readIntEquals(src, ref, sOff)) {
+ break;
+ }
+
+ tokenOff = dOff++;
+ UnsafeUtils.writeByte(dest, tokenOff, 0);
+ }
+
+ // prepare next loop
+ anchor = sOff++;
+ }
+
+ dOff = LZ4UnsafeUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd);
+ return dOff - destOff;
+ }
+
+
+ static int compress64k(ByteBuffer src, int srcOff, int srcLen, ByteBuffer dest, int destOff, int destEnd) {
+ final int srcEnd = srcOff + srcLen;
+ final int srcLimit = srcEnd - LAST_LITERALS;
+ final int mflimit = srcEnd - MF_LIMIT;
+
+ int sOff = srcOff, dOff = destOff;
+
+ int anchor = sOff;
+
+ if (srcLen >= MIN_LENGTH) {
+
+ final short[] hashTable = new short[HASH_TABLE_SIZE_64K];
+
+ ++sOff;
+
+ main:
+ while (true) {
+
+ // find a match
+ int forwardOff = sOff;
+
+ int ref;
+ int step = 1;
+ int searchMatchNb = 1 << SKIP_STRENGTH;
+ do {
+ sOff = forwardOff;
+ forwardOff += step;
+ step = searchMatchNb++ >>> SKIP_STRENGTH;
+
+ if (forwardOff > mflimit) {
+ break main;
+ }
+
+ final int h = hash64k(ByteBufferUtils.readInt(src, sOff));
+ ref = srcOff + UnsafeUtils.readShort(hashTable, h);
+ UnsafeUtils.writeShort(hashTable, h, sOff - srcOff);
+ } while (!LZ4ByteBufferUtils.readIntEquals(src, ref, sOff));
+
+ // catch up
+ final int excess = LZ4ByteBufferUtils.commonBytesBackward(src, ref, sOff, srcOff, anchor);
+ sOff -= excess;
+ ref -= excess;
+
+ // sequence == refsequence
+ final int runLen = sOff - anchor;
+
+ // encode literal length
+ int tokenOff = dOff++;
+
+ if (dOff + runLen + (2 + 1 + LAST_LITERALS) + (runLen >>> 8) > destEnd) {
+ throw new LZ4Exception("maxDestLen is too small");
+ }
+
+ if (runLen >= RUN_MASK) {
+ ByteBufferUtils.writeByte(dest, tokenOff, RUN_MASK << ML_BITS);
+ dOff = LZ4ByteBufferUtils.writeLen(runLen - RUN_MASK, dest, dOff);
+ } else {
+ ByteBufferUtils.writeByte(dest, tokenOff, runLen << ML_BITS);
+ }
+
+ // copy literals
+ LZ4ByteBufferUtils.wildArraycopy(src, anchor, dest, dOff, runLen);
+ dOff += runLen;
+
+ while (true) {
+ // encode offset
+ ByteBufferUtils.writeShortLE(dest, dOff, (short) (sOff - ref));
+ dOff += 2;
+
+ // count nb matches
+ sOff += MIN_MATCH;
+ ref += MIN_MATCH;
+ final int matchLen = LZ4ByteBufferUtils.commonBytes(src, ref, sOff, srcLimit);
+ if (dOff + (1 + LAST_LITERALS) + (matchLen >>> 8) > destEnd) {
+ throw new LZ4Exception("maxDestLen is too small");
+ }
+ sOff += matchLen;
+
+ // encode match len
+ if (matchLen >= ML_MASK) {
+ ByteBufferUtils.writeByte(dest, tokenOff, ByteBufferUtils.readByte(dest, tokenOff) | ML_MASK);
+ dOff = LZ4ByteBufferUtils.writeLen(matchLen - ML_MASK, dest, dOff);
+ } else {
+ ByteBufferUtils.writeByte(dest, tokenOff, ByteBufferUtils.readByte(dest, tokenOff) | matchLen);
+ }
+
+ // test end of chunk
+ if (sOff > mflimit) {
+ anchor = sOff;
+ break main;
+ }
+
+ // fill table
+ UnsafeUtils.writeShort(hashTable, hash64k(ByteBufferUtils.readInt(src, sOff - 2)), sOff - 2 - srcOff);
+
+ // test next position
+ final int h = hash64k(ByteBufferUtils.readInt(src, sOff));
+ ref = srcOff + UnsafeUtils.readShort(hashTable, h);
+ UnsafeUtils.writeShort(hashTable, h, sOff - srcOff);
+
+ if (!LZ4ByteBufferUtils.readIntEquals(src, sOff, ref)) {
+ break;
+ }
+
+ tokenOff = dOff++;
+ ByteBufferUtils.writeByte(dest, tokenOff, 0);
+ }
+
+ // prepare next loop
+ anchor = sOff++;
+ }
+ }
+
+ dOff = LZ4ByteBufferUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd);
+ return dOff - destOff;
+ }
+
+ @Override
+ public int compress(ByteBuffer src, final int srcOff, int srcLen, ByteBuffer dest, final int destOff, int maxDestLen) {
+
+ if (src.hasArray() && dest.hasArray()) {
+ return compress(src.array(), srcOff + src.arrayOffset(), srcLen, dest.array(), destOff + dest.arrayOffset(), maxDestLen);
+ }
+ src = ByteBufferUtils.inNativeByteOrder(src);
+ dest = ByteBufferUtils.inNativeByteOrder(dest);
+
+ ByteBufferUtils.checkRange(src, srcOff, srcLen);
+ ByteBufferUtils.checkRange(dest, destOff, maxDestLen);
+ final int destEnd = destOff + maxDestLen;
+
+ if (srcLen < LZ4_64K_LIMIT) {
+ return compress64k(src, srcOff, srcLen, dest, destOff, destEnd);
+ }
+
+ final int srcEnd = srcOff + srcLen;
+ final int srcLimit = srcEnd - LAST_LITERALS;
+ final int mflimit = srcEnd - MF_LIMIT;
+
+ int sOff = srcOff, dOff = destOff;
+ int anchor = sOff++;
+
+ final int[] hashTable = new int[HASH_TABLE_SIZE];
+ Arrays.fill(hashTable, anchor);
+
+ main:
+ while (true) {
+
+ // find a match
+ int forwardOff = sOff;
+
+ int ref;
+ int step = 1;
+ int searchMatchNb = 1 << SKIP_STRENGTH;
+ int back;
+ do {
+ sOff = forwardOff;
+ forwardOff += step;
+ step = searchMatchNb++ >>> SKIP_STRENGTH;
+
+ if (forwardOff > mflimit) {
+ break main;
+ }
+
+ final int h = hash(ByteBufferUtils.readInt(src, sOff));
+ ref = UnsafeUtils.readInt(hashTable, h);
+ back = sOff - ref;
+ UnsafeUtils.writeInt(hashTable, h, sOff);
+ } while (back >= MAX_DISTANCE || !LZ4ByteBufferUtils.readIntEquals(src, ref, sOff));
+
+
+ final int excess = LZ4ByteBufferUtils.commonBytesBackward(src, ref, sOff, srcOff, anchor);
+ sOff -= excess;
+ ref -= excess;
+
+ // sequence == refsequence
+ final int runLen = sOff - anchor;
+
+ // encode literal length
+ int tokenOff = dOff++;
+
+ if (dOff + runLen + (2 + 1 + LAST_LITERALS) + (runLen >>> 8) > destEnd) {
+ throw new LZ4Exception("maxDestLen is too small");
+ }
+
+ if (runLen >= RUN_MASK) {
+ ByteBufferUtils.writeByte(dest, tokenOff, RUN_MASK << ML_BITS);
+ dOff = LZ4ByteBufferUtils.writeLen(runLen - RUN_MASK, dest, dOff);
+ } else {
+ ByteBufferUtils.writeByte(dest, tokenOff, runLen << ML_BITS);
+ }
+
+ // copy literals
+ LZ4ByteBufferUtils.wildArraycopy(src, anchor, dest, dOff, runLen);
+ dOff += runLen;
+
+ while (true) {
+ // encode offset
+ ByteBufferUtils.writeShortLE(dest, dOff, back);
+ dOff += 2;
+
+ // count nb matches
+ sOff += MIN_MATCH;
+ final int matchLen = LZ4ByteBufferUtils.commonBytes(src, ref + MIN_MATCH, sOff, srcLimit);
+ if (dOff + (1 + LAST_LITERALS) + (matchLen >>> 8) > destEnd) {
+ throw new LZ4Exception("maxDestLen is too small");
+ }
+ sOff += matchLen;
+
+ // encode match len
+ if (matchLen >= ML_MASK) {
+ ByteBufferUtils.writeByte(dest, tokenOff, ByteBufferUtils.readByte(dest, tokenOff) | ML_MASK);
+ dOff = LZ4ByteBufferUtils.writeLen(matchLen - ML_MASK, dest, dOff);
+ } else {
+ ByteBufferUtils.writeByte(dest, tokenOff, ByteBufferUtils.readByte(dest, tokenOff) | matchLen);
+ }
+
+ // test end of chunk
+ if (sOff > mflimit) {
+ anchor = sOff;
+ break main;
+ }
+
+ // fill table
+ UnsafeUtils.writeInt(hashTable, hash(ByteBufferUtils.readInt(src, sOff - 2)), sOff - 2);
+
+ // test next position
+ final int h = hash(ByteBufferUtils.readInt(src, sOff));
+ ref = UnsafeUtils.readInt(hashTable, h);
+ UnsafeUtils.writeInt(hashTable, h, sOff);
+ back = sOff - ref;
+
+ if (back >= MAX_DISTANCE || !LZ4ByteBufferUtils.readIntEquals(src, ref, sOff)) {
+ break;
+ }
+
+ tokenOff = dOff++;
+ ByteBufferUtils.writeByte(dest, tokenOff, 0);
+ }
+
+ // prepare next loop
+ anchor = sOff++;
+ }
+
+ dOff = LZ4ByteBufferUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd);
+ return dOff - destOff;
+ }
+
+
+}
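Every compressor and decompressor in this change carries parallel byte[] and ByteBuffer code paths: heap buffers are unwrapped to their backing arrays (the hasArray() branch at the top of compress), while direct buffers go through ByteBufferUtils/LZ4ByteBufferUtils. A direct-buffer sketch, again assuming the upstream lz4-java factory API under the repackaged names:

import java.nio.ByteBuffer;

import com.fr.third.net.jpountz.lz4.LZ4Compressor;
import com.fr.third.net.jpountz.lz4.LZ4Factory;
import com.fr.third.net.jpountz.lz4.LZ4SafeDecompressor;

public class DirectBufferSketch {
    public static void main(String[] args) {
        LZ4Factory factory = LZ4Factory.fastestJavaInstance();
        LZ4Compressor compressor = factory.fastCompressor();
        LZ4SafeDecompressor decompressor = factory.safeDecompressor();

        ByteBuffer src = ByteBuffer.allocateDirect(1 << 16);
        for (int i = 0; i < src.capacity(); ++i) {
            src.put(i, (byte) (i % 31)); // absolute puts: the offset-based overloads below ignore position/limit
        }

        ByteBuffer compressed = ByteBuffer.allocateDirect(compressor.maxCompressedLength(src.capacity()));
        int compressedLen = compressor.compress(src, 0, src.capacity(), compressed, 0, compressed.capacity());

        ByteBuffer restored = ByteBuffer.allocateDirect(src.capacity());
        int restoredLen = decompressor.decompress(compressed, 0, compressedLen, restored, 0, restored.capacity());
        System.out.println(restoredLen == src.capacity()); // true
    }
}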
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JavaUnsafeFastDecompressor.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JavaUnsafeFastDecompressor.java
new file mode 100644
index 000000000..ddac21fc2
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JavaUnsafeFastDecompressor.java
@@ -0,0 +1,205 @@
+// Auto-generated: DO NOT EDIT
+
+package com.fr.third.net.jpountz.lz4;
+
+import static com.fr.third.net.jpountz.lz4.LZ4Constants.*;
+
+import java.nio.ByteBuffer;
+
+import com.fr.third.net.jpountz.util.ByteBufferUtils;
+import com.fr.third.net.jpountz.util.UnsafeUtils;
+
+/**
+ * Decompressor.
+ */
+final class LZ4JavaUnsafeFastDecompressor extends LZ4FastDecompressor {
+
+ public static final LZ4FastDecompressor INSTANCE = new LZ4JavaUnsafeFastDecompressor();
+
+ @Override
+ public int decompress(byte[] src, final int srcOff, byte[] dest, final int destOff, int destLen) {
+
+
+ UnsafeUtils.checkRange(src, srcOff);
+ UnsafeUtils.checkRange(dest, destOff, destLen);
+
+ if (destLen == 0) {
+ if (UnsafeUtils.readByte(src, srcOff) != 0) {
+ throw new LZ4Exception("Malformed input at " + srcOff);
+ }
+ return 1;
+ }
+
+
+ final int destEnd = destOff + destLen;
+
+ int sOff = srcOff;
+ int dOff = destOff;
+
+ while (true) {
+ final int token = UnsafeUtils.readByte(src, sOff) & 0xFF;
+ ++sOff;
+
+ // literals
+ int literalLen = token >>> ML_BITS;
+ if (literalLen == RUN_MASK) {
+ byte len = (byte) 0xFF;
+ while ((len = UnsafeUtils.readByte(src, sOff++)) == (byte) 0xFF) {
+ literalLen += 0xFF;
+ }
+ literalLen += len & 0xFF;
+ }
+
+ final int literalCopyEnd = dOff + literalLen;
+
+ if (literalCopyEnd > destEnd - COPY_LENGTH) {
+ if (literalCopyEnd != destEnd) {
+ throw new LZ4Exception("Malformed input at " + sOff);
+
+ } else {
+ LZ4UnsafeUtils.safeArraycopy(src, sOff, dest, dOff, literalLen);
+ sOff += literalLen;
+ dOff = literalCopyEnd;
+ break; // EOF
+ }
+ }
+
+ LZ4UnsafeUtils.wildArraycopy(src, sOff, dest, dOff, literalLen);
+ sOff += literalLen;
+ dOff = literalCopyEnd;
+
+ // matches
+ final int matchDec = UnsafeUtils.readShortLE(src, sOff);
+ sOff += 2;
+ int matchOff = dOff - matchDec;
+
+ if (matchOff < destOff) {
+ throw new LZ4Exception("Malformed input at " + sOff);
+ }
+
+ int matchLen = token & ML_MASK;
+ if (matchLen == ML_MASK) {
+ byte len = (byte) 0xFF;
+ while ((len = UnsafeUtils.readByte(src, sOff++)) == (byte) 0xFF) {
+ matchLen += 0xFF;
+ }
+ matchLen += len & 0xFF;
+ }
+ matchLen += MIN_MATCH;
+
+ final int matchCopyEnd = dOff + matchLen;
+
+ if (matchCopyEnd > destEnd - COPY_LENGTH) {
+ if (matchCopyEnd > destEnd) {
+ throw new LZ4Exception("Malformed input at " + sOff);
+ }
+ LZ4UnsafeUtils.safeIncrementalCopy(dest, matchOff, dOff, matchLen);
+ } else {
+ LZ4UnsafeUtils.wildIncrementalCopy(dest, matchOff, dOff, matchCopyEnd);
+ }
+ dOff = matchCopyEnd;
+ }
+
+
+ return sOff - srcOff;
+
+ }
+
+ @Override
+ public int decompress(ByteBuffer src, final int srcOff, ByteBuffer dest, final int destOff, int destLen) {
+
+ if (src.hasArray() && dest.hasArray()) {
+ return decompress(src.array(), srcOff + src.arrayOffset(), dest.array(), destOff + dest.arrayOffset(), destLen);
+ }
+ src = ByteBufferUtils.inNativeByteOrder(src);
+ dest = ByteBufferUtils.inNativeByteOrder(dest);
+
+
+ ByteBufferUtils.checkRange(src, srcOff);
+ ByteBufferUtils.checkRange(dest, destOff, destLen);
+
+ if (destLen == 0) {
+ if (ByteBufferUtils.readByte(src, srcOff) != 0) {
+ throw new LZ4Exception("Malformed input at " + srcOff);
+ }
+ return 1;
+ }
+
+
+ final int destEnd = destOff + destLen;
+
+ int sOff = srcOff;
+ int dOff = destOff;
+
+ while (true) {
+ final int token = ByteBufferUtils.readByte(src, sOff) & 0xFF;
+ ++sOff;
+
+ // literals
+ int literalLen = token >>> ML_BITS;
+ if (literalLen == RUN_MASK) {
+ byte len = (byte) 0xFF;
+ while ((len = ByteBufferUtils.readByte(src, sOff++)) == (byte) 0xFF) {
+ literalLen += 0xFF;
+ }
+ literalLen += len & 0xFF;
+ }
+
+ final int literalCopyEnd = dOff + literalLen;
+
+ if (literalCopyEnd > destEnd - COPY_LENGTH) {
+ if (literalCopyEnd != destEnd) {
+ throw new LZ4Exception("Malformed input at " + sOff);
+
+ } else {
+ LZ4ByteBufferUtils.safeArraycopy(src, sOff, dest, dOff, literalLen);
+ sOff += literalLen;
+ dOff = literalCopyEnd;
+ break; // EOF
+ }
+ }
+
+ LZ4ByteBufferUtils.wildArraycopy(src, sOff, dest, dOff, literalLen);
+ sOff += literalLen;
+ dOff = literalCopyEnd;
+
+ // matches
+ final int matchDec = ByteBufferUtils.readShortLE(src, sOff);
+ sOff += 2;
+ int matchOff = dOff - matchDec;
+
+ if (matchOff < destOff) {
+ throw new LZ4Exception("Malformed input at " + sOff);
+ }
+
+ int matchLen = token & ML_MASK;
+ if (matchLen == ML_MASK) {
+ byte len = (byte) 0xFF;
+ while ((len = ByteBufferUtils.readByte(src, sOff++)) == (byte) 0xFF) {
+ matchLen += 0xFF;
+ }
+ matchLen += len & 0xFF;
+ }
+ matchLen += MIN_MATCH;
+
+ final int matchCopyEnd = dOff + matchLen;
+
+ if (matchCopyEnd > destEnd - COPY_LENGTH) {
+ if (matchCopyEnd > destEnd) {
+ throw new LZ4Exception("Malformed input at " + sOff);
+ }
+ LZ4ByteBufferUtils.safeIncrementalCopy(dest, matchOff, dOff, matchLen);
+ } else {
+ LZ4ByteBufferUtils.wildIncrementalCopy(dest, matchOff, dOff, matchCopyEnd);
+ }
+ dOff = matchCopyEnd;
+ }
+
+
+ return sOff - srcOff;
+
+ }
+
+
+}
+
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JavaUnsafeSafeDecompressor.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JavaUnsafeSafeDecompressor.java
new file mode 100644
index 000000000..9ee2aae82
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4JavaUnsafeSafeDecompressor.java
@@ -0,0 +1,213 @@
+// Auto-generated: DO NOT EDIT
+
+package com.fr.third.net.jpountz.lz4;
+
+import static com.fr.third.net.jpountz.lz4.LZ4Constants.*;
+
+import java.nio.ByteBuffer;
+
+import com.fr.third.net.jpountz.util.ByteBufferUtils;
+import com.fr.third.net.jpountz.util.UnsafeUtils;
+
+/**
+ * Decompressor.
+ */
+final class LZ4JavaUnsafeSafeDecompressor extends LZ4SafeDecompressor {
+
+ public static final LZ4SafeDecompressor INSTANCE = new LZ4JavaUnsafeSafeDecompressor();
+
+ @Override
+ public int decompress(byte[] src, final int srcOff, final int srcLen, byte[] dest, final int destOff, int destLen) {
+
+
+ UnsafeUtils.checkRange(src, srcOff, srcLen);
+ UnsafeUtils.checkRange(dest, destOff, destLen);
+
+ if (destLen == 0) {
+ if (srcLen != 1 || UnsafeUtils.readByte(src, srcOff) != 0) {
+ throw new LZ4Exception("Output buffer too small");
+ }
+ return 0;
+ }
+
+ final int srcEnd = srcOff + srcLen;
+
+
+ final int destEnd = destOff + destLen;
+
+ int sOff = srcOff;
+ int dOff = destOff;
+
+ while (true) {
+ final int token = UnsafeUtils.readByte(src, sOff) & 0xFF;
+ ++sOff;
+
+ // literals
+ int literalLen = token >>> ML_BITS;
+ if (literalLen == RUN_MASK) {
+ byte len = (byte) 0xFF;
+ while (sOff < srcEnd && (len = UnsafeUtils.readByte(src, sOff++)) == (byte) 0xFF) {
+ literalLen += 0xFF;
+ }
+ literalLen += len & 0xFF;
+ }
+
+ final int literalCopyEnd = dOff + literalLen;
+
+ if (literalCopyEnd > destEnd - COPY_LENGTH || sOff + literalLen > srcEnd - COPY_LENGTH) {
+ if (literalCopyEnd > destEnd) {
+ throw new LZ4Exception();
+ } else if (sOff + literalLen != srcEnd) {
+ throw new LZ4Exception("Malformed input at " + sOff);
+
+ } else {
+ LZ4UnsafeUtils.safeArraycopy(src, sOff, dest, dOff, literalLen);
+ sOff += literalLen;
+ dOff = literalCopyEnd;
+ break; // EOF
+ }
+ }
+
+ LZ4UnsafeUtils.wildArraycopy(src, sOff, dest, dOff, literalLen);
+ sOff += literalLen;
+ dOff = literalCopyEnd;
+
+ // matches
+ final int matchDec = UnsafeUtils.readShortLE(src, sOff);
+ sOff += 2;
+ int matchOff = dOff - matchDec;
+
+ if (matchOff < destOff) {
+ throw new LZ4Exception("Malformed input at " + sOff);
+ }
+
+ int matchLen = token & ML_MASK;
+ if (matchLen == ML_MASK) {
+ byte len = (byte) 0xFF;
+ while (sOff < srcEnd && (len = UnsafeUtils.readByte(src, sOff++)) == (byte) 0xFF) {
+ matchLen += 0xFF;
+ }
+ matchLen += len & 0xFF;
+ }
+ matchLen += MIN_MATCH;
+
+ final int matchCopyEnd = dOff + matchLen;
+
+ if (matchCopyEnd > destEnd - COPY_LENGTH) {
+ if (matchCopyEnd > destEnd) {
+ throw new LZ4Exception("Malformed input at " + sOff);
+ }
+ LZ4UnsafeUtils.safeIncrementalCopy(dest, matchOff, dOff, matchLen);
+ } else {
+ LZ4UnsafeUtils.wildIncrementalCopy(dest, matchOff, dOff, matchCopyEnd);
+ }
+ dOff = matchCopyEnd;
+ }
+
+
+ return dOff - destOff;
+
+ }
+
+ @Override
+ public int decompress(ByteBuffer src, final int srcOff, final int srcLen, ByteBuffer dest, final int destOff, int destLen) {
+
+ if (src.hasArray() && dest.hasArray()) {
+ return decompress(src.array(), srcOff + src.arrayOffset(), srcLen, dest.array(), destOff + dest.arrayOffset(), destLen);
+ }
+ src = ByteBufferUtils.inNativeByteOrder(src);
+ dest = ByteBufferUtils.inNativeByteOrder(dest);
+
+
+ ByteBufferUtils.checkRange(src, srcOff, srcLen);
+ ByteBufferUtils.checkRange(dest, destOff, destLen);
+
+ if (destLen == 0) {
+ if (srcLen != 1 || ByteBufferUtils.readByte(src, srcOff) != 0) {
+ throw new LZ4Exception("Output buffer too small");
+ }
+ return 0;
+ }
+
+ final int srcEnd = srcOff + srcLen;
+
+
+ final int destEnd = destOff + destLen;
+
+ int sOff = srcOff;
+ int dOff = destOff;
+
+ while (true) {
+ final int token = ByteBufferUtils.readByte(src, sOff) & 0xFF;
+ ++sOff;
+
+ // literals
+ int literalLen = token >>> ML_BITS;
+ if (literalLen == RUN_MASK) {
+ byte len = (byte) 0xFF;
+ while (sOff < srcEnd && (len = ByteBufferUtils.readByte(src, sOff++)) == (byte) 0xFF) {
+ literalLen += 0xFF;
+ }
+ literalLen += len & 0xFF;
+ }
+
+ final int literalCopyEnd = dOff + literalLen;
+
+ if (literalCopyEnd > destEnd - COPY_LENGTH || sOff + literalLen > srcEnd - COPY_LENGTH) {
+ if (literalCopyEnd > destEnd) {
+ throw new LZ4Exception();
+ } else if (sOff + literalLen != srcEnd) {
+ throw new LZ4Exception("Malformed input at " + sOff);
+
+ } else {
+ LZ4ByteBufferUtils.safeArraycopy(src, sOff, dest, dOff, literalLen);
+ sOff += literalLen;
+ dOff = literalCopyEnd;
+ break; // EOF
+ }
+ }
+
+ LZ4ByteBufferUtils.wildArraycopy(src, sOff, dest, dOff, literalLen);
+ sOff += literalLen;
+ dOff = literalCopyEnd;
+
+ // matches
+ final int matchDec = ByteBufferUtils.readShortLE(src, sOff);
+ sOff += 2;
+ int matchOff = dOff - matchDec;
+
+ if (matchOff < destOff) {
+ throw new LZ4Exception("Malformed input at " + sOff);
+ }
+
+ int matchLen = token & ML_MASK;
+ if (matchLen == ML_MASK) {
+ byte len = (byte) 0xFF;
+ while (sOff < srcEnd && (len = ByteBufferUtils.readByte(src, sOff++)) == (byte) 0xFF) {
+ matchLen += 0xFF;
+ }
+ matchLen += len & 0xFF;
+ }
+ matchLen += MIN_MATCH;
+
+ final int matchCopyEnd = dOff + matchLen;
+
+ if (matchCopyEnd > destEnd - COPY_LENGTH) {
+ if (matchCopyEnd > destEnd) {
+ throw new LZ4Exception("Malformed input at " + sOff);
+ }
+ LZ4ByteBufferUtils.safeIncrementalCopy(dest, matchOff, dOff, matchLen);
+ } else {
+ LZ4ByteBufferUtils.wildIncrementalCopy(dest, matchOff, dOff, matchCopyEnd);
+ }
+ dOff = matchCopyEnd;
+ }
+
+
+ return dOff - destOff;
+
+ }
+
+
+}
+
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4SafeDecompressor.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4SafeDecompressor.java
new file mode 100644
index 000000000..edd189519
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4SafeDecompressor.java
@@ -0,0 +1,155 @@
+package com.fr.third.net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+/**
+ * LZ4 decompressor that requires the size of the compressed data to be known.
+ *
+ * Implementations of this class are usually a little slower than those of
+ * {@link LZ4FastDecompressor} but do not require the size of the original data to
+ * be known.
+ */
+public abstract class LZ4SafeDecompressor implements LZ4UnknownSizeDecompressor {
+
+  // [elided in this excerpt: LZ4SafeDecompressor's decompress(...) convenience
+  //  overloads -- documented upstream as carrying an important overhead because they
+  //  allocate a destination buffer and then resize it to the actual decompressed
+  //  length -- the closing brace of the class, the lz4 package overview ("LZ4
+  //  compression. The entry point of the API is the {@link net.jpountz.lz4.LZ4Factory}
+  //  class, which gives access to {@link com.fr.third.net.jpountz.lz4.LZ4Compressor
+  //  compressors} and {@link com.fr.third.net.jpountz.lz4.LZ4SafeDecompressor
+  //  decompressors}."), the util package note ("Utility classes."), and the file
+  //  header of StreamingXXHash32.java]
+
+/**
+ * Streaming interface for {@link XXHash32}.
+ * <p>
+ * This API is compatible with the {@link XXHash32 block API}: hashing a buffer in
+ * one shot and streaming the same bytes produce identical values (see the sketch
+ * following this file).
+ * <p>
+ * Instances of this class are not thread-safe.
+ */
+public abstract class StreamingXXHash32 {
+
+ interface Factory {
+
+ StreamingXXHash32 newStreamingHash(int seed);
+
+ }
+
+ final int seed;
+
+ StreamingXXHash32(int seed) {
+ this.seed = seed;
+ }
+
+ /**
+ * Returns the value of the checksum.
+ *
+ * @return the checksum
+ */
+ public abstract int getValue();
+
+ /**
+ * Updates the value of the hash with buf[off:off+len].
+ *
+ * @param buf the input data
+ * @param off the start offset in buf
+ * @param len the number of bytes to hash
+ */
+ public abstract void update(byte[] buf, int off, int len);
+
+ /**
+ * Resets this instance to the state it had right after instantiation. The
+ * seed remains unchanged.
+ */
+ public abstract void reset();
+
+ @Override
+ public String toString() {
+ return getClass().getSimpleName() + "(seed=" + seed + ")";
+ }
+
+ /**
+ * Returns a {@link Checksum} view of this instance. Modifications to the view
+ * will modify this instance too and vice-versa.
+ *
+ * @return the {@link Checksum} object representing this instance
+ */
+ public final Checksum asChecksum() {
+ return new Checksum() {
+
+ @Override
+ public long getValue() {
+ return StreamingXXHash32.this.getValue() & 0xFFFFFFFFL;
+ }
+
+ @Override
+ public void reset() {
+ StreamingXXHash32.this.reset();
+ }
+
+ @Override
+ public void update(int b) {
+ StreamingXXHash32.this.update(new byte[] {(byte) b}, 0, 1);
+ }
+
+ @Override
+ public void update(byte[] b, int off, int len) {
+ StreamingXXHash32.this.update(b, off, len);
+ }
+
+ @Override
+ public String toString() {
+ return StreamingXXHash32.this.toString();
+ }
+
+ };
+ }
+
+}
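The StreamingXXHash32 javadoc above describes the streaming API as compatible with the block API; the sketch below reconstructs that equivalence. XXHashFactory and XXHash32 are the upstream lz4-java entry points and are assumed to be part of this repackaging even though they do not appear in this excerpt.

import java.util.zip.Checksum;

import com.fr.third.net.jpountz.xxhash.StreamingXXHash32;
import com.fr.third.net.jpountz.xxhash.XXHash32;
import com.fr.third.net.jpountz.xxhash.XXHashFactory;

public class XxHashEquivalenceSketch {
    public static void main(String[] args) {
        XXHashFactory factory = XXHashFactory.fastestInstance();
        byte[] data = "hash me either way".getBytes();
        int seed = 0x9747b28c; // any int; both sides must use the same seed

        // Block API: one shot over the whole buffer.
        XXHash32 blockHash = factory.hash32();
        int h1 = blockHash.hash(data, 0, data.length, seed);

        // Streaming API: the same bytes fed incrementally.
        StreamingXXHash32 streaming = factory.newStreamingHash32(seed);
        streaming.update(data, 0, 5);
        streaming.update(data, 5, data.length - 5);
        int h2 = streaming.getValue();

        System.out.println(h1 == h2); // true

        // Checksum view, e.g. for java.util.zip.CheckedOutputStream.
        Checksum checksum = streaming.asChecksum();
        System.out.println(checksum.getValue() == (h2 & 0xFFFFFFFFL)); // true
    }
}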
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/xxhash/StreamingXXHash32JNI.java b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/StreamingXXHash32JNI.java
new file mode 100644
index 000000000..133c86103
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/StreamingXXHash32JNI.java
@@ -0,0 +1,71 @@
+package com.fr.third.net.jpountz.xxhash;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+final class StreamingXXHash32JNI extends StreamingXXHash32 {
+
+ static class Factory implements StreamingXXHash32.Factory {
+
+ public static final StreamingXXHash32.Factory INSTANCE = new Factory();
+
+ @Override
+ public StreamingXXHash32 newStreamingHash(int seed) {
+ return new StreamingXXHash32JNI(seed);
+ }
+
+ }
+
+ private long state;
+
+ StreamingXXHash32JNI(int seed) {
+ super(seed);
+ state = XXHashJNI.XXH32_init(seed);
+ }
+
+ private void checkState() {
+ if (state == 0) {
+ throw new AssertionError("Already finalized");
+ }
+ }
+
+ @Override
+ public void reset() {
+ checkState();
+ XXHashJNI.XXH32_free(state);
+ state = XXHashJNI.XXH32_init(seed);
+ }
+
+ @Override
+ public int getValue() {
+ checkState();
+ return XXHashJNI.XXH32_digest(state);
+ }
+
+ @Override
+ public void update(byte[] bytes, int off, int len) {
+ checkState();
+ XXHashJNI.XXH32_update(state, bytes, off, len);
+ }
+
+ @Override
+ protected void finalize() throws Throwable {
+ super.finalize();
+ // free memory
+ XXHashJNI.XXH32_free(state);
+ state = 0;
+ }
+
+}
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/xxhash/StreamingXXHash32JavaSafe.java b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/StreamingXXHash32JavaSafe.java
new file mode 100644
index 000000000..fde930721
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/StreamingXXHash32JavaSafe.java
@@ -0,0 +1,142 @@
+// Auto-generated: DO NOT EDIT
+
+package com.fr.third.net.jpountz.xxhash;
+
+import static com.fr.third.net.jpountz.xxhash.XXHashConstants.*;
+import static com.fr.third.net.jpountz.util.SafeUtils.*;
+import static com.fr.third.net.jpountz.util.SafeUtils.checkRange;
+import static java.lang.Integer.rotateLeft;
+
+/**
+ * Streaming xxhash.
+ */
+final class StreamingXXHash32JavaSafe extends AbstractStreamingXXHash32Java {
+
+ static class Factory implements StreamingXXHash32.Factory {
+
+ public static final StreamingXXHash32.Factory INSTANCE = new Factory();
+
+ @Override
+ public StreamingXXHash32 newStreamingHash(int seed) {
+ return new StreamingXXHash32JavaSafe(seed);
+ }
+
+ }
+
+ StreamingXXHash32JavaSafe(int seed) {
+ super(seed);
+ }
+
+ @Override
+ public int getValue() {
+ int h32;
+ if (totalLen >= 16) {
+ h32 = rotateLeft(v1, 1) + rotateLeft(v2, 7) + rotateLeft(v3, 12) + rotateLeft(v4, 18);
+ } else {
+ h32 = seed + PRIME5;
+ }
+
+ h32 += totalLen;
+
+ int off = 0;
+ while (off <= memSize - 4) {
+ h32 += readIntLE(memory, off) * PRIME3;
+ h32 = rotateLeft(h32, 17) * PRIME4;
+ off += 4;
+ }
+
+ while (off < memSize) {
+ h32 += (readByte(memory, off) & 0xFF) * PRIME5;
+ h32 = rotateLeft(h32, 11) * PRIME1;
+ ++off;
+ }
+
+ h32 ^= h32 >>> 15;
+ h32 *= PRIME2;
+ h32 ^= h32 >>> 13;
+ h32 *= PRIME3;
+ h32 ^= h32 >>> 16;
+
+ return h32;
+ }
+
+ @Override
+ public void update(byte[] buf, int off, int len) {
+ checkRange(buf, off, len);
+
+ totalLen += len;
+
+ if (memSize + len < 16) { // fill in tmp buffer
+ System.arraycopy(buf, off, memory, memSize, len);
+ memSize += len;
+ return;
+ }
+
+ final int end = off + len;
+
+ if (memSize > 0) { // data left from previous update
+ System.arraycopy(buf, off, memory, memSize, 16 - memSize);
+
+ v1 += readIntLE(memory, 0) * PRIME2;
+ v1 = rotateLeft(v1, 13);
+ v1 *= PRIME1;
+
+ v2 += readIntLE(memory, 4) * PRIME2;
+ v2 = rotateLeft(v2, 13);
+ v2 *= PRIME1;
+
+ v3 += readIntLE(memory, 8) * PRIME2;
+ v3 = rotateLeft(v3, 13);
+ v3 *= PRIME1;
+
+ v4 += readIntLE(memory, 12) * PRIME2;
+ v4 = rotateLeft(v4, 13);
+ v4 *= PRIME1;
+
+ off += 16 - memSize;
+ memSize = 0;
+ }
+
+ {
+ final int limit = end - 16;
+ int v1 = this.v1;
+ int v2 = this.v2;
+ int v3 = this.v3;
+ int v4 = this.v4;
+
+ while (off <= limit) {
+ v1 += readIntLE(buf, off) * PRIME2;
+ v1 = rotateLeft(v1, 13);
+ v1 *= PRIME1;
+ off += 4;
+
+ v2 += readIntLE(buf, off) * PRIME2;
+ v2 = rotateLeft(v2, 13);
+ v2 *= PRIME1;
+ off += 4;
+
+ v3 += readIntLE(buf, off) * PRIME2;
+ v3 = rotateLeft(v3, 13);
+ v3 *= PRIME1;
+ off += 4;
+
+ v4 += readIntLE(buf, off) * PRIME2;
+ v4 = rotateLeft(v4, 13);
+ v4 *= PRIME1;
+ off += 4;
+ }
+
+ this.v1 = v1;
+ this.v2 = v2;
+ this.v3 = v3;
+ this.v4 = v4;
+ }
+
+ if (off < end) {
+ System.arraycopy(buf, off, memory, 0, end - off);
+ memSize = end - off;
+ }
+ }
+
+}
+
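The update() implementation above accumulates input in a 16-byte memory buffer and only mixes complete 16-byte stripes into v1..v4, so the final value does not depend on how the input is chunked. A small sketch, with the same assumption that XXHashFactory is available in the repackaged tree:

import com.fr.third.net.jpountz.xxhash.StreamingXXHash32;
import com.fr.third.net.jpountz.xxhash.XXHashFactory;

public class ChunkedUpdateSketch {
    public static void main(String[] args) {
        byte[] data = new byte[1000];
        for (int i = 0; i < data.length; ++i) {
            data[i] = (byte) i;
        }

        // Force the bounds-checked Java implementation defined above.
        StreamingXXHash32 chunked = XXHashFactory.safeInstance().newStreamingHash32(0);
        for (int off = 0; off < data.length; off += 7) {
            chunked.update(data, off, Math.min(7, data.length - off)); // odd-sized chunks exercise the internal buffer
        }

        StreamingXXHash32 oneShot = XXHashFactory.safeInstance().newStreamingHash32(0);
        oneShot.update(data, 0, data.length);

        System.out.println(chunked.getValue() == oneShot.getValue()); // true

        chunked.reset(); // reusable afterwards with the same seed
    }
}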
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/xxhash/StreamingXXHash32JavaUnsafe.java b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/StreamingXXHash32JavaUnsafe.java
new file mode 100644
index 000000000..bd67ebad3
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/StreamingXXHash32JavaUnsafe.java
@@ -0,0 +1,142 @@
+// Auto-generated: DO NOT EDIT
+
+package com.fr.third.net.jpountz.xxhash;
+
+import static com.fr.third.net.jpountz.xxhash.XXHashConstants.*;
+import static com.fr.third.net.jpountz.util.UnsafeUtils.*;
+import static com.fr.third.net.jpountz.util.SafeUtils.checkRange;
+import static java.lang.Integer.rotateLeft;
+
+/**
+ * Streaming xxhash.
+ */
+final class StreamingXXHash32JavaUnsafe extends AbstractStreamingXXHash32Java {
+
+ static class Factory implements StreamingXXHash32.Factory {
+
+ public static final StreamingXXHash32.Factory INSTANCE = new Factory();
+
+ @Override
+ public StreamingXXHash32 newStreamingHash(int seed) {
+ return new StreamingXXHash32JavaUnsafe(seed);
+ }
+
+ }
+
+ StreamingXXHash32JavaUnsafe(int seed) {
+ super(seed);
+ }
+
+ @Override
+ public int getValue() {
+ int h32;
+ if (totalLen >= 16) {
+ h32 = rotateLeft(v1, 1) + rotateLeft(v2, 7) + rotateLeft(v3, 12) + rotateLeft(v4, 18);
+ } else {
+ h32 = seed + PRIME5;
+ }
+
+ h32 += totalLen;
+
+ int off = 0;
+ while (off <= memSize - 4) {
+ h32 += readIntLE(memory, off) * PRIME3;
+ h32 = rotateLeft(h32, 17) * PRIME4;
+ off += 4;
+ }
+
+ while (off < memSize) {
+ h32 += (readByte(memory, off) & 0xFF) * PRIME5;
+ h32 = rotateLeft(h32, 11) * PRIME1;
+ ++off;
+ }
+
+ h32 ^= h32 >>> 15;
+ h32 *= PRIME2;
+ h32 ^= h32 >>> 13;
+ h32 *= PRIME3;
+ h32 ^= h32 >>> 16;
+
+ return h32;
+ }
+
+ @Override
+ public void update(byte[] buf, int off, int len) {
+ checkRange(buf, off, len);
+
+ totalLen += len;
+
+ if (memSize + len < 16) { // fill in tmp buffer
+ System.arraycopy(buf, off, memory, memSize, len);
+ memSize += len;
+ return;
+ }
+
+ final int end = off + len;
+
+ if (memSize > 0) { // data left from previous update
+ System.arraycopy(buf, off, memory, memSize, 16 - memSize);
+
+ v1 += readIntLE(memory, 0) * PRIME2;
+ v1 = rotateLeft(v1, 13);
+ v1 *= PRIME1;
+
+ v2 += readIntLE(memory, 4) * PRIME2;
+ v2 = rotateLeft(v2, 13);
+ v2 *= PRIME1;
+
+ v3 += readIntLE(memory, 8) * PRIME2;
+ v3 = rotateLeft(v3, 13);
+ v3 *= PRIME1;
+
+ v4 += readIntLE(memory, 12) * PRIME2;
+ v4 = rotateLeft(v4, 13);
+ v4 *= PRIME1;
+
+ off += 16 - memSize;
+ memSize = 0;
+ }
+
+ {
+ final int limit = end - 16;
+ int v1 = this.v1;
+ int v2 = this.v2;
+ int v3 = this.v3;
+ int v4 = this.v4;
+
+ while (off <= limit) {
+ v1 += readIntLE(buf, off) * PRIME2;
+ v1 = rotateLeft(v1, 13);
+ v1 *= PRIME1;
+ off += 4;
+
+ v2 += readIntLE(buf, off) * PRIME2;
+ v2 = rotateLeft(v2, 13);
+ v2 *= PRIME1;
+ off += 4;
+
+ v3 += readIntLE(buf, off) * PRIME2;
+ v3 = rotateLeft(v3, 13);
+ v3 *= PRIME1;
+ off += 4;
+
+ v4 += readIntLE(buf, off) * PRIME2;
+ v4 = rotateLeft(v4, 13);
+ v4 *= PRIME1;
+ off += 4;
+ }
+
+ this.v1 = v1;
+ this.v2 = v2;
+ this.v3 = v3;
+ this.v4 = v4;
+ }
+
+ if (off < end) {
+ System.arraycopy(buf, off, memory, 0, end - off);
+ memSize = end - off;
+ }
+ }
+
+}
+
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/xxhash/StreamingXXHash64.java b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/StreamingXXHash64.java
new file mode 100644
index 000000000..e988f5218
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/StreamingXXHash64.java
@@ -0,0 +1,119 @@
+package com.fr.third.net.jpountz.xxhash;
+
+import java.util.zip.Checksum;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+
+/**
+ * Streaming interface for {@link XXHash64}.
+ *
+ * This API is compatible with the {@link XXHash64 block API}: updating a streaming
+ * instance with a buffer and calling {@link #getValue()} yields the same hash as
+ * hashing that buffer in a single block call.
+ *
+ * Instances of this class are not thread-safe.
+ */
+public abstract class StreamingXXHash64 {
+
+ interface Factory {
+
+ StreamingXXHash64 newStreamingHash(long seed);
+
+ }
+
+ final long seed;
+
+ StreamingXXHash64(long seed) {
+ this.seed = seed;
+ }
+
+ /**
+ * Returns the value of the checksum.
+ *
+ * @return the checksum
+ */
+ public abstract long getValue();
+
+ /**
+ * Updates the value of the hash with buf[off:off+len].
+ *
+ * @param buf the input data
+ * @param off the start offset in buf
+ * @param len the number of bytes to hash
+ */
+ public abstract void update(byte[] buf, int off, int len);
+
+ /**
+ * Resets this instance to the state it had right after instantiation. The
+ * seed remains unchanged.
+ */
+ public abstract void reset();
+
+ @Override
+ public String toString() {
+ return getClass().getSimpleName() + "(seed=" + seed + ")";
+ }
+
+ /**
+ * Returns a {@link Checksum} view of this instance. Modifications to the view
+ * will modify this instance too and vice-versa.
+ *
+ * @return the {@link Checksum} object representing this instance
+ */
+ public final Checksum asChecksum() {
+ return new Checksum() {
+
+ @Override
+ public long getValue() {
+ return StreamingXXHash64.this.getValue();
+ }
+
+ @Override
+ public void reset() {
+ StreamingXXHash64.this.reset();
+ }
+
+ @Override
+ public void update(int b) {
+ StreamingXXHash64.this.update(new byte[] {(byte) b}, 0, 1);
+ }
+
+ @Override
+ public void update(byte[] b, int off, int len) {
+ StreamingXXHash64.this.update(b, off, len);
+ }
+
+ @Override
+ public String toString() {
+ return StreamingXXHash64.this.toString();
+ }
+
+ };
+ }
+
+}
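// Usage sketch for the streaming 64-bit hash above (illustrative, not part of the patch).
// It shows the equivalence stated in the class javadoc: hashing a buffer through the block
// API and through a streaming instance yields the same value, and asChecksum() exposes the
// same state as a java.util.zip.Checksum. XXHashFactory.fastestJavaInstance(), hash64() and
// newStreamingHash64() are assumed from the upstream lz4-java API; they are not in this hunk.
import com.fr.third.net.jpountz.xxhash.StreamingXXHash64;
import com.fr.third.net.jpountz.xxhash.XXHashFactory;

import java.nio.charset.StandardCharsets;
import java.util.zip.Checksum;

class StreamingXXHash64Sample {
  public static void main(String[] args) {
    byte[] data = "hello xxhash".getBytes(StandardCharsets.UTF_8);
    long seed = 0x9747b28cL;
    XXHashFactory factory = XXHashFactory.fastestJavaInstance();

    // block API: hash everything in one call
    long blockHash = factory.hash64().hash(data, 0, data.length, seed);

    // streaming API: feed the same bytes in two chunks
    StreamingXXHash64 streaming = factory.newStreamingHash64(seed);
    streaming.update(data, 0, 5);
    streaming.update(data, 5, data.length - 5);
    long streamingHash = streaming.getValue();

    // Checksum view: delegates to the same streaming instance
    streaming.reset();
    Checksum checksum = streaming.asChecksum();
    checksum.update(data, 0, data.length);

    System.out.println(blockHash == streamingHash);       // expected: true
    System.out.println(blockHash == checksum.getValue()); // expected: true
  }
}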
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/xxhash/StreamingXXHash64JNI.java b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/StreamingXXHash64JNI.java
new file mode 100644
index 000000000..5d8f9b840
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/StreamingXXHash64JNI.java
@@ -0,0 +1,71 @@
+package com.fr.third.net.jpountz.xxhash;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+final class StreamingXXHash64JNI extends StreamingXXHash64 {
+
+ static class Factory implements StreamingXXHash64.Factory {
+
+ public static final StreamingXXHash64.Factory INSTANCE = new Factory();
+
+ @Override
+ public StreamingXXHash64 newStreamingHash(long seed) {
+ return new StreamingXXHash64JNI(seed);
+ }
+
+ }
+
+ private long state;
+
+ StreamingXXHash64JNI(long seed) {
+ super(seed);
+ state = XXHashJNI.XXH64_init(seed);
+ }
+
+ private void checkState() {
+ if (state == 0) {
+ throw new AssertionError("Already finalized");
+ }
+ }
+
+ @Override
+ public void reset() {
+ checkState();
+ XXHashJNI.XXH64_free(state);
+ state = XXHashJNI.XXH64_init(seed);
+ }
+
+ @Override
+ public long getValue() {
+ checkState();
+ return XXHashJNI.XXH64_digest(state);
+ }
+
+ @Override
+ public void update(byte[] bytes, int off, int len) {
+ checkState();
+ XXHashJNI.XXH64_update(state, bytes, off, len);
+ }
+
+ @Override
+ protected void finalize() throws Throwable {
+ super.finalize();
+ // free memory
+ XXHashJNI.XXH64_free(state);
+ state = 0;
+ }
+
+}
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/xxhash/StreamingXXHash64JavaSafe.java b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/StreamingXXHash64JavaSafe.java
new file mode 100644
index 000000000..2445ff415
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/StreamingXXHash64JavaSafe.java
@@ -0,0 +1,166 @@
+// Auto-generated: DO NOT EDIT
+
+package com.fr.third.net.jpountz.xxhash;
+
+import static com.fr.third.net.jpountz.xxhash.XXHashConstants.*;
+import static com.fr.third.net.jpountz.util.SafeUtils.*;
+import static com.fr.third.net.jpountz.util.SafeUtils.checkRange;
+import static java.lang.Long.rotateLeft;
+
+/**
+ * Streaming xxhash.
+ */
+final class StreamingXXHash64JavaSafe extends AbstractStreamingXXHash64Java {
+
+ static class Factory implements StreamingXXHash64.Factory {
+
+ public static final StreamingXXHash64.Factory INSTANCE = new Factory();
+
+ @Override
+ public StreamingXXHash64 newStreamingHash(long seed) {
+ return new StreamingXXHash64JavaSafe(seed);
+ }
+
+ }
+
+ StreamingXXHash64JavaSafe(long seed) {
+ super(seed);
+ }
+
+ @Override
+ public long getValue() {
+ long h64;
+ if (totalLen >= 32) {
+ long v1 = this.v1;
+ long v2 = this.v2;
+ long v3 = this.v3;
+ long v4 = this.v4;
+
+ h64 = rotateLeft(v1, 1) + rotateLeft(v2, 7) + rotateLeft(v3, 12) + rotateLeft(v4, 18);
+
+ v1 *= PRIME64_2; v1 = rotateLeft(v1, 31); v1 *= PRIME64_1; h64 ^= v1;
+ h64 = h64*PRIME64_1 + PRIME64_4;
+
+ v2 *= PRIME64_2; v2 = rotateLeft(v2, 31); v2 *= PRIME64_1; h64 ^= v2;
+ h64 = h64*PRIME64_1 + PRIME64_4;
+
+ v3 *= PRIME64_2; v3 = rotateLeft(v3, 31); v3 *= PRIME64_1; h64 ^= v3;
+ h64 = h64*PRIME64_1 + PRIME64_4;
+
+ v4 *= PRIME64_2; v4 = rotateLeft(v4, 31); v4 *= PRIME64_1; h64 ^= v4;
+ h64 = h64*PRIME64_1 + PRIME64_4;
+ } else {
+ h64 = seed + PRIME64_5;
+ }
+
+ h64 += totalLen;
+
+ int off = 0;
+ while (off <= memSize - 8) {
+ long k1 = readLongLE(memory, off);
+ k1 *= PRIME64_2; k1 = rotateLeft(k1, 31); k1 *= PRIME64_1; h64 ^= k1;
+ h64 = rotateLeft(h64, 27) * PRIME64_1 + PRIME64_4;
+ off += 8;
+ }
+
+ if (off <= memSize - 4) {
+ h64 ^= (readIntLE(memory, off) & 0xFFFFFFFFL) * PRIME64_1;
+ h64 = rotateLeft(h64, 23) * PRIME64_2 + PRIME64_3;
+ off += 4;
+ }
+
+ while (off < memSize) {
+ h64 ^= (memory[off] & 0xFF) * PRIME64_5;
+ h64 = rotateLeft(h64, 11) * PRIME64_1;
+ ++off;
+ }
+
+ h64 ^= h64 >>> 33;
+ h64 *= PRIME64_2;
+ h64 ^= h64 >>> 29;
+ h64 *= PRIME64_3;
+ h64 ^= h64 >>> 32;
+
+ return h64;
+ }
+
+ @Override
+ public void update(byte[] buf, int off, int len) {
+ checkRange(buf, off, len);
+
+ totalLen += len;
+
+ if (memSize + len < 32) { // fill in tmp buffer
+ System.arraycopy(buf, off, memory, memSize, len);
+ memSize += len;
+ return;
+ }
+
+ final int end = off + len;
+
+ if (memSize > 0) { // data left from previous update
+ System.arraycopy(buf, off, memory, memSize, 32 - memSize);
+
+ v1 += readLongLE(memory, 0) * PRIME64_2;
+ v1 = rotateLeft(v1, 31);
+ v1 *= PRIME64_1;
+
+ v2 += readLongLE(memory, 8) * PRIME64_2;
+ v2 = rotateLeft(v2, 31);
+ v2 *= PRIME64_1;
+
+ v3 += readLongLE(memory, 16) * PRIME64_2;
+ v3 = rotateLeft(v3, 31);
+ v3 *= PRIME64_1;
+
+ v4 += readLongLE(memory, 24) * PRIME64_2;
+ v4 = rotateLeft(v4, 31);
+ v4 *= PRIME64_1;
+
+ off += 32 - memSize;
+ memSize = 0;
+ }
+
+ {
+ final int limit = end - 32;
+ long v1 = this.v1;
+ long v2 = this.v2;
+ long v3 = this.v3;
+ long v4 = this.v4;
+
+ while (off <= limit) {
+ v1 += readLongLE(buf, off) * PRIME64_2;
+ v1 = rotateLeft(v1, 31);
+ v1 *= PRIME64_1;
+ off += 8;
+
+ v2 += readLongLE(buf, off) * PRIME64_2;
+ v2 = rotateLeft(v2, 31);
+ v2 *= PRIME64_1;
+ off += 8;
+
+ v3 += readLongLE(buf, off) * PRIME64_2;
+ v3 = rotateLeft(v3, 31);
+ v3 *= PRIME64_1;
+ off += 8;
+
+ v4 += readLongLE(buf, off) * PRIME64_2;
+ v4 = rotateLeft(v4, 31);
+ v4 *= PRIME64_1;
+ off += 8;
+ }
+
+ this.v1 = v1;
+ this.v2 = v2;
+ this.v3 = v3;
+ this.v4 = v4;
+ }
+
+ if (off < end) {
+ System.arraycopy(buf, off, memory, 0, end - off);
+ memSize = end - off;
+ }
+ }
+
+}
+
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/xxhash/StreamingXXHash64JavaUnsafe.java b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/StreamingXXHash64JavaUnsafe.java
new file mode 100644
index 000000000..2042bea9d
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/StreamingXXHash64JavaUnsafe.java
@@ -0,0 +1,166 @@
+// Auto-generated: DO NOT EDIT
+
+package com.fr.third.net.jpountz.xxhash;
+
+import static com.fr.third.net.jpountz.xxhash.XXHashConstants.*;
+import static com.fr.third.net.jpountz.util.UnsafeUtils.*;
+import static com.fr.third.net.jpountz.util.SafeUtils.checkRange;
+import static java.lang.Long.rotateLeft;
+
+/**
+ * Streaming xxhash.
+ */
+final class StreamingXXHash64JavaUnsafe extends AbstractStreamingXXHash64Java {
+
+ static class Factory implements StreamingXXHash64.Factory {
+
+ public static final StreamingXXHash64.Factory INSTANCE = new Factory();
+
+ @Override
+ public StreamingXXHash64 newStreamingHash(long seed) {
+ return new StreamingXXHash64JavaUnsafe(seed);
+ }
+
+ }
+
+ StreamingXXHash64JavaUnsafe(long seed) {
+ super(seed);
+ }
+
+ @Override
+ public long getValue() {
+ long h64;
+ if (totalLen >= 32) {
+ long v1 = this.v1;
+ long v2 = this.v2;
+ long v3 = this.v3;
+ long v4 = this.v4;
+
+ h64 = rotateLeft(v1, 1) + rotateLeft(v2, 7) + rotateLeft(v3, 12) + rotateLeft(v4, 18);
+
+ v1 *= PRIME64_2; v1 = rotateLeft(v1, 31); v1 *= PRIME64_1; h64 ^= v1;
+ h64 = h64*PRIME64_1 + PRIME64_4;
+
+ v2 *= PRIME64_2; v2 = rotateLeft(v2, 31); v2 *= PRIME64_1; h64 ^= v2;
+ h64 = h64*PRIME64_1 + PRIME64_4;
+
+ v3 *= PRIME64_2; v3 = rotateLeft(v3, 31); v3 *= PRIME64_1; h64 ^= v3;
+ h64 = h64*PRIME64_1 + PRIME64_4;
+
+ v4 *= PRIME64_2; v4 = rotateLeft(v4, 31); v4 *= PRIME64_1; h64 ^= v4;
+ h64 = h64*PRIME64_1 + PRIME64_4;
+ } else {
+ h64 = seed + PRIME64_5;
+ }
+
+ h64 += totalLen;
+
+ int off = 0;
+ while (off <= memSize - 8) {
+ long k1 = readLongLE(memory, off);
+ k1 *= PRIME64_2; k1 = rotateLeft(k1, 31); k1 *= PRIME64_1; h64 ^= k1;
+ h64 = rotateLeft(h64, 27) * PRIME64_1 + PRIME64_4;
+ off += 8;
+ }
+
+ if (off <= memSize - 4) {
+ h64 ^= (readIntLE(memory, off) & 0xFFFFFFFFL) * PRIME64_1;
+ h64 = rotateLeft(h64, 23) * PRIME64_2 + PRIME64_3;
+ off += 4;
+ }
+
+ while (off < memSize) {
+ h64 ^= (memory[off] & 0xFF) * PRIME64_5;
+ h64 = rotateLeft(h64, 11) * PRIME64_1;
+ ++off;
+ }
+
+ h64 ^= h64 >>> 33;
+ h64 *= PRIME64_2;
+ h64 ^= h64 >>> 29;
+ h64 *= PRIME64_3;
+ h64 ^= h64 >>> 32;
+
+ return h64;
+ }
+
+ @Override
+ public void update(byte[] buf, int off, int len) {
+ checkRange(buf, off, len);
+
+ totalLen += len;
+
+ if (memSize + len < 32) { // fill in tmp buffer
+ System.arraycopy(buf, off, memory, memSize, len);
+ memSize += len;
+ return;
+ }
+
+ final int end = off + len;
+
+ if (memSize > 0) { // data left from previous update
+ System.arraycopy(buf, off, memory, memSize, 32 - memSize);
+
+ v1 += readLongLE(memory, 0) * PRIME64_2;
+ v1 = rotateLeft(v1, 31);
+ v1 *= PRIME64_1;
+
+ v2 += readLongLE(memory, 8) * PRIME64_2;
+ v2 = rotateLeft(v2, 31);
+ v2 *= PRIME64_1;
+
+ v3 += readLongLE(memory, 16) * PRIME64_2;
+ v3 = rotateLeft(v3, 31);
+ v3 *= PRIME64_1;
+
+ v4 += readLongLE(memory, 24) * PRIME64_2;
+ v4 = rotateLeft(v4, 31);
+ v4 *= PRIME64_1;
+
+ off += 32 - memSize;
+ memSize = 0;
+ }
+
+ {
+ final int limit = end - 32;
+ long v1 = this.v1;
+ long v2 = this.v2;
+ long v3 = this.v3;
+ long v4 = this.v4;
+
+ while (off <= limit) {
+ v1 += readLongLE(buf, off) * PRIME64_2;
+ v1 = rotateLeft(v1, 31);
+ v1 *= PRIME64_1;
+ off += 8;
+
+ v2 += readLongLE(buf, off) * PRIME64_2;
+ v2 = rotateLeft(v2, 31);
+ v2 *= PRIME64_1;
+ off += 8;
+
+ v3 += readLongLE(buf, off) * PRIME64_2;
+ v3 = rotateLeft(v3, 31);
+ v3 *= PRIME64_1;
+ off += 8;
+
+ v4 += readLongLE(buf, off) * PRIME64_2;
+ v4 = rotateLeft(v4, 31);
+ v4 *= PRIME64_1;
+ off += 8;
+ }
+
+ this.v1 = v1;
+ this.v2 = v2;
+ this.v3 = v3;
+ this.v4 = v4;
+ }
+
+ if (off < end) {
+ System.arraycopy(buf, off, memory, 0, end - off);
+ memSize = end - off;
+ }
+ }
+
+}
+
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHash32.java b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHash32.java
new file mode 100644
index 000000000..d40579796
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHash32.java
@@ -0,0 +1,71 @@
+package com.fr.third.net.jpountz.xxhash;
+
+import java.nio.ByteBuffer;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * A 32-bits hash.
+ *
+ * Instances of this class are thread-safe.
+ */
+public abstract class XXHash32 {
+
+  /**
+   * Computes the 32-bits hash of the given byte range.
+   */
+
+/**
+ * A 64-bits hash.
+ *
+ * Instances of this class are thread-safe.
+ */
+public abstract class XXHash64 {
+
+  /**
+   * Computes the 64-bits hash of the given byte range.
+   */
+
+/**
+ * Entry point to get {@link XXHash32} and {@link XXHash64} instances.
+ *
+ * This class has 3 kinds of instances: a native (JNI) binding to the original C
+ * implementation, a safe pure-Java port, and an unsafe Java port that relies on
+ * {@link sun.misc.Unsafe}.
+ * Only the {@link #safeInstance() safe instance} is guaranteed to work on your
+ * JVM, as a consequence it is advised to use the {@link #fastestInstance()} or
+ * {@link #fastestJavaInstance()} to pull a {@link XXHashFactory} instance.
+ *
+ * All methods from this class are very costly, so you should get an instance
+ * once, and then reuse it whenever possible. This is typically done by storing
+ * a {@link XXHashFactory} instance in a static field.
+ */
+public final class XXHashFactory {
+
+ private static XXHashFactory instance(String impl) {
+ try {
+ return new XXHashFactory(impl);
+ } catch (Exception e) {
+ throw new AssertionError(e);
+ }
+ }
+
+ private static XXHashFactory NATIVE_INSTANCE,
+ JAVA_UNSAFE_INSTANCE,
+ JAVA_SAFE_INSTANCE;
+
+  /**
+   * Returns the fastest available {@link XXHashFactory} instance: the
+   * {@link #nativeInstance() native instance} when the native bindings can be
+   * loaded, otherwise the fastest pure-Java instance.
+   *
+   * Please note that the native instance has some traps you should be aware of:
+   * please read {@link #nativeInstance() javadocs of nativeInstance()} before
+   * using this method.
+ *
+ * @return the fastest available {@link XXHashFactory} instance.
+ */
+ public static XXHashFactory fastestInstance() {
+ if (Native.isLoaded()
+ || Native.class.getClassLoader() == ClassLoader.getSystemClassLoader()) {
+ try {
+ return nativeInstance();
+ } catch (Throwable t) {
+ return fastestJavaInstance();
+ }
+ } else {
+ return fastestJavaInstance();
+ }
+ }
+
+xxhash hashing. This package supports both block hashing via
+{@link net.jpountz.xxhash.XXHash32} and streaming hashing via
+{@link net.jpountz.xxhash.StreamingXXHash32}. Have a look at
+{@link net.jpountz.xxhash.XXHashFactory} to know how to get instances of these
+interfaces. Streaming hashing is a little slower but doesn't require loading the
+whole stream into memory.
+
+  /**
+   * Returns a high-compression {@link LZ4Compressor} for the given compression level.
+   *
+ * @param compressionLevel the compression level between [1, 17]; the higher the level, the higher the compression ratio
+ * @return a {@link LZ4Compressor} which requires more memory than
+ * {@link #fastCompressor()} and is slower but compresses more efficiently.
+ */
+ public LZ4Compressor highCompressor(int compressionLevel) {
+ if(compressionLevel > LZ4Constants.MAX_COMPRESSION_LEVEL) {
+ compressionLevel = LZ4Constants.MAX_COMPRESSION_LEVEL;
+ } else if (compressionLevel < 1) {
+ compressionLevel = LZ4Constants.DEFAULT_COMPRESSION_LEVEL;
+ }
+ return highCompressors[compressionLevel];
+ }
+
+ /**
+ * Returns a {@link LZ4FastDecompressor} instance.
+ *
+ * @return a {@link LZ4FastDecompressor} instance
+ */
+ public LZ4FastDecompressor fastDecompressor() {
+ return fastDecompressor;
+ }
+
+ /**
+ * Returns a {@link LZ4SafeDecompressor} instance.
+ *
+ * @return a {@link LZ4SafeDecompressor} instance
+ */
+ public LZ4SafeDecompressor safeDecompressor() {
+ return safeDecompressor;
+ }
+
+ /**
+ * Returns a {@link LZ4UnknownSizeDecompressor} instance.
+ * @deprecated use {@link #safeDecompressor()}
+ *
+ * @return a {@link LZ4UnknownSizeDecompressor} instance
+ */
+ public LZ4UnknownSizeDecompressor unknownSizeDecompressor() {
+ return safeDecompressor();
+ }
+
+ /**
+ * Returns a {@link LZ4Decompressor} instance.
+ * @deprecated use {@link #fastDecompressor()}
+ *
+ * @return a {@link LZ4Decompressor} instance
+ */
+ public LZ4Decompressor decompressor() {
+ return fastDecompressor();
+ }
+
+ /**
+ * Prints the fastest instance.
+ *
+ * @param args no argument required
+ */
+ public static void main(String[] args) {
+ System.out.println("Fastest instance is " + fastestInstance());
+ System.out.println("Fastest Java instance is " + fastestJavaInstance());
+ }
+
+ @Override
+ public String toString() {
+ return getClass().getSimpleName() + ":" + impl;
+ }
+
+}
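// Usage sketch for the factory accessors above (illustrative, not part of the patch).
// It shows a round trip through highCompressor(int), whose argument is clamped by the code
// above, and fastDecompressor(). LZ4Factory.fastestInstance(), maxCompressedLength() and
// compress() are assumed from the upstream lz4-java API; they are not in this hunk.
import com.fr.third.net.jpountz.lz4.LZ4Compressor;
import com.fr.third.net.jpountz.lz4.LZ4Factory;
import com.fr.third.net.jpountz.lz4.LZ4FastDecompressor;

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

class LZ4FactorySample {
  public static void main(String[] args) {
    LZ4Factory factory = LZ4Factory.fastestInstance();
    byte[] data = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbb".getBytes(StandardCharsets.UTF_8);

    // out-of-range level (99) is clamped to MAX_COMPRESSION_LEVEL by highCompressor()
    LZ4Compressor high = factory.highCompressor(99);
    byte[] compressed = new byte[high.maxCompressedLength(data.length)];
    int compressedLen = high.compress(data, 0, data.length, compressed, 0, compressed.length);

    // the fast decompressor needs the original length up front
    LZ4FastDecompressor decompressor = factory.fastDecompressor();
    byte[] restored = new byte[data.length];
    decompressor.decompress(compressed, 0, restored, 0, data.length);

    System.out.println(compressedLen + " compressed bytes, round trip ok: "
        + Arrays.equals(data, restored));
  }
}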
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4FastDecompressor.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4FastDecompressor.java
new file mode 100644
index 000000000..88e9559c6
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4FastDecompressor.java
@@ -0,0 +1,135 @@
+package com.fr.third.net.jpountz.lz4;
+
+import java.nio.ByteBuffer;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * LZ4 decompressor that requires the size of the original input to be known.
+ * Use {@link LZ4SafeDecompressor} if you only know the size of the
+ * compressed stream.
+ */
+public abstract class LZ4FastDecompressor implements LZ4Decompressor {
+
+  /** Decompresses src[srcOff:] into dest[destOff:destOff+destLen]
+   * and returns the number of bytes read from src.
+   * destLen must be exactly the size of the decompressed data.
+ *
+ * @param src the compressed data
+ * @param srcOff the start offset in src
+ * @param dest the destination buffer to store the decompressed data
+ * @param destOff the start offset in dest
+ * @param destLen the exact size of the original input
+ * @return the number of bytes read to restore the original input
+ */
+ public abstract int decompress(byte[] src, int srcOff, byte[] dest, int destOff, int destLen);
+
+  /** Decompresses src[srcOff:] into dest[destOff:destOff+destLen]
+   * and returns the number of bytes read from src.
+   * destLen must be exactly the size of the decompressed data.
+ * The positions and limits of the {@link ByteBuffer}s remain unchanged.
+ *
+ * @param src the compressed data
+ * @param srcOff the start offset in src
+ * @param dest the destination buffer to store the decompressed data
+ * @param destOff the start offset in dest
+ * @param destLen the exact size of the original input
+ * @return the number of bytes read to restore the original input
+ */
+ public abstract int decompress(ByteBuffer src, int srcOff, ByteBuffer dest, int destOff, int destLen);
+
+ /**
+ * Convenience method, equivalent to calling
+ * {@link #decompress(byte[], int, byte[], int, int) decompress(src, 0, dest, 0, destLen)}.
+ *
+ * @param src the compressed data
+ * @param dest the destination buffer to store the decompressed data
+ * @param destLen the exact size of the original input
+ * @return the number of bytes read to restore the original input
+ */
+ public final int decompress(byte[] src, byte[] dest, int destLen) {
+ return decompress(src, 0, dest, 0, destLen);
+ }
+
+ /**
+ * Convenience method, equivalent to calling
+ * {@link #decompress(byte[], byte[], int) decompress(src, dest, dest.length)}.
+ *
+ * @param src the compressed data
+ * @param dest the destination buffer to store the decompressed data
+ * @return the number of bytes read to restore the original input
+ */
+ public final int decompress(byte[] src, byte[] dest) {
+ return decompress(src, dest, dest.length);
+ }
+
+ /**
+ * Convenience method which returns src[srcOff:?]
+ * decompressed.
+ *
+ * final byte[] decompressed = new byte[destLen];
+ * decompress(src, srcOff, decompressed, 0, destLen);
+ * return decompressed;
+ *
+ *
+ * @param src the compressed data
+ * @param srcOff the start offset in src
+ * @param destLen the exact size of the original input
+ * @return the decompressed data
+ */
+ public final byte[] decompress(byte[] src, int srcOff, int destLen) {
+ final byte[] decompressed = new byte[destLen];
+ decompress(src, srcOff, decompressed, 0, destLen);
+ return decompressed;
+ }
+
+ /**
+ * Convenience method, equivalent to calling
+ * {@link #decompress(byte[], int, int) decompress(src, 0, destLen)}.
+ *
+ * @param src the compressed data
+ * @param destLen the exact size of the original input
+ * @return the decompressed data
+ */
+ public final byte[] decompress(byte[] src, int destLen) {
+ return decompress(src, 0, destLen);
+ }
+
+ /**
+   * Decompresses src into dest. dest's
+ * {@link ByteBuffer#remaining()} must be exactly the size of the decompressed
+ * data. This method moves the positions of the buffers.
+ *
+ * @param src the compressed data
+ * @param dest the destination buffer to store the decompressed data
+ */
+ public final void decompress(ByteBuffer src, ByteBuffer dest) {
+ final int read = decompress(src, src.position(), dest, dest.position(), dest.remaining());
+ dest.position(dest.limit());
+ src.position(src.position() + read);
+ }
+
+ @Override
+ public String toString() {
+ return getClass().getSimpleName();
+ }
+
+}
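// Usage sketch for the ByteBuffer overload above (illustrative, not part of the patch).
// Per its javadoc, decompress(ByteBuffer, ByteBuffer) advances dest to its limit and
// advances src by the number of compressed bytes read. LZ4Factory.fastestJavaInstance(),
// fastCompressor() and the compress(byte[]) convenience overload are assumed from the
// upstream lz4-java API; they are not in this hunk.
import com.fr.third.net.jpountz.lz4.LZ4Compressor;
import com.fr.third.net.jpountz.lz4.LZ4Factory;

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

class LZ4FastDecompressorSample {
  public static void main(String[] args) {
    LZ4Factory factory = LZ4Factory.fastestJavaInstance();
    byte[] data = "0123456789012345678901234567890123456789".getBytes(StandardCharsets.UTF_8);

    LZ4Compressor compressor = factory.fastCompressor();
    byte[] compressed = compressor.compress(data);

    ByteBuffer src = ByteBuffer.wrap(compressed);
    ByteBuffer dest = ByteBuffer.allocate(data.length); // remaining() must equal the original size

    factory.fastDecompressor().decompress(src, dest);

    System.out.println("dest exhausted: " + !dest.hasRemaining());
    System.out.println("compressed bytes read: " + src.position() + " of " + compressed.length);
  }
}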
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4FrameInputStream.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4FrameInputStream.java
new file mode 100644
index 000000000..dda725536
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4FrameInputStream.java
@@ -0,0 +1,351 @@
+package com.fr.third.net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import com.fr.third.net.jpountz.xxhash.XXHash32;
+import com.fr.third.net.jpountz.xxhash.XXHashFactory;
+
+import java.io.FilterInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.util.Locale;
+
+/**
+ * Implementation of the v1.5.1 LZ4 Frame format. This class is NOT thread safe.
+ */
+
+  /**
+   * Decompresses src[srcOff:srcOff+srcLen] into
+   * dest[destOff:destOff+maxDestLen] and returns the number of
+   * decompressed bytes written into dest.
+   *
+ * @param src the compressed data
+ * @param srcOff the start offset in src
+ * @param srcLen the exact size of the compressed data
+ * @param dest the destination buffer to store the decompressed data
+ * @param destOff the start offset in dest
+ * @param maxDestLen the maximum number of bytes to write in dest
+ * @return the original input size
+ * @throws LZ4Exception if maxDestLen is too small
+ */
+ public abstract int decompress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen);
+
+ /**
+   * Decompresses src[srcOff:srcOff+srcLen] into
+   * dest[destOff:destOff+maxDestLen] and returns the number of
+   * decompressed bytes written into dest.
+ *
+ * @param src the compressed data
+ * @param srcOff the start offset in src
+ * @param srcLen the exact size of the compressed data
+ * @param dest the destination buffer to store the decompressed data
+ * @param destOff the start offset in dest
+ * @param maxDestLen the maximum number of bytes to write in dest
+ * @return the original input size
+ * @throws LZ4Exception if maxDestLen is too small
+ */
+ public abstract int decompress(ByteBuffer src, int srcOff, int srcLen, ByteBuffer dest, int destOff, int maxDestLen);
+
+ /**
+ * Convenience method, equivalent to calling
+ * {@link #decompress(byte[], int, int, byte[], int, int) decompress(src, srcOff, srcLen, dest, destOff, dest.length - destOff)}.
+ *
+ * @param src the compressed data
+ * @param srcOff the start offset in src
+ * @param srcLen the exact size of the compressed data
+ * @param dest the destination buffer to store the decompressed data
+ * @param destOff the start offset in dest
+ * @return the original input size
+ * @throws LZ4Exception if dest is too small
+ */
+ public final int decompress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff) {
+ return decompress(src, srcOff, srcLen, dest, destOff, dest.length - destOff);
+ }
+
+ /**
+ * Convenience method, equivalent to calling
+ * {@link #decompress(byte[], int, int, byte[], int) decompress(src, 0, src.length, dest, 0)}
+ *
+ * @param src the compressed data
+ * @param dest the destination buffer to store the decompressed data
+ * @return the original input size
+ * @throws LZ4Exception if dest is too small
+ */
+ public final int decompress(byte[] src, byte[] dest) {
+ return decompress(src, 0, src.length, dest, 0);
+ }
+
+ /**
+ * Convenience method which returns src[srcOff:srcOff+srcLen]
+ * decompressed.
+ *
+ * byte[] decompressed = new byte[maxDestLen];
+ * final int decompressedLength = decompress(src, srcOff, srcLen, decompressed, 0, maxDestLen);
+ * if (decompressedLength != decompressed.length) {
+ * decompressed = Arrays.copyOf(decompressed, decompressedLength);
+ * }
+ * return decompressed;
+ *
+ *
+ * @param src the compressed data
+ * @param srcOff the start offset in src
+ * @param srcLen the exact size of the compressed data
+ * @param maxDestLen the maximum number of bytes to write in dest
+ * @return the decompressed data
+ * @throws LZ4Exception if maxDestLen is too small
+ */
+ public final byte[] decompress(byte[] src, int srcOff, int srcLen, int maxDestLen) {
+ byte[] decompressed = new byte[maxDestLen];
+ final int decompressedLength = decompress(src, srcOff, srcLen, decompressed, 0, maxDestLen);
+ if (decompressedLength != decompressed.length) {
+ decompressed = Arrays.copyOf(decompressed, decompressedLength);
+ }
+ return decompressed;
+ }
+
+ /**
+ * Convenience method, equivalent to calling
+ * {@link #decompress(byte[], int, int, int) decompress(src, 0, src.length, maxDestLen)}.
+ *
+ * @param src the compressed data
+ * @param maxDestLen the maximum number of bytes to write in dest
+ * @return the decompressed data
+ * @throws LZ4Exception if maxDestLen is too small
+ */
+ public final byte[] decompress(byte[] src, int maxDestLen) {
+ return decompress(src, 0, src.length, maxDestLen);
+ }
+
+ /**
+   * Decompresses src into dest. src's
+ * {@link ByteBuffer#remaining()} must be exactly the size of the compressed
+ * data. This method moves the positions of the buffers.
+ * @param src the compressed data
+ * @param dest the destination buffer to store the decompressed data
+ * @throws LZ4Exception if dest is too small
+ */
+ public final void decompress(ByteBuffer src, ByteBuffer dest) {
+ final int decompressed = decompress(src, src.position(), src.remaining(), dest, dest.position(), dest.remaining());
+ src.position(src.limit());
+ dest.position(dest.position() + decompressed);
+ }
+
+ @Override
+ public String toString() {
+ return getClass().getSimpleName();
+ }
+
+}
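// Usage sketch for the "safe" decompression methods above (illustrative, not part of the
// patch). Only the compressed length is known, so the destination buffer is over-sized and
// the return value gives the real decompressed size. LZ4Factory.fastestJavaInstance(),
// fastCompressor() and compress(byte[]) are assumed from the upstream lz4-java API; they
// are not in this hunk.
import com.fr.third.net.jpountz.lz4.LZ4Compressor;
import com.fr.third.net.jpountz.lz4.LZ4Factory;
import com.fr.third.net.jpountz.lz4.LZ4SafeDecompressor;

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

class LZ4SafeDecompressorSample {
  public static void main(String[] args) {
    LZ4Factory factory = LZ4Factory.fastestJavaInstance();
    byte[] data = "some payload whose decompressed size was not recorded".getBytes(StandardCharsets.UTF_8);

    LZ4Compressor compressor = factory.fastCompressor();
    byte[] compressed = compressor.compress(data);

    // guess an upper bound for the original size; too small a buffer would raise LZ4Exception
    LZ4SafeDecompressor decompressor = factory.safeDecompressor();
    byte[] oversized = new byte[compressed.length * 10];
    int decompressedLen = decompressor.decompress(compressed, 0, compressed.length, oversized, 0);

    byte[] restored = Arrays.copyOf(oversized, decompressedLen);
    System.out.println(Arrays.equals(data, restored)); // expected: true
  }
}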
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4SafeUtils.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4SafeUtils.java
new file mode 100644
index 000000000..b00f99af2
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4SafeUtils.java
@@ -0,0 +1,179 @@
+package com.fr.third.net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import static com.fr.third.net.jpountz.lz4.LZ4Constants.LAST_LITERALS;
+import static com.fr.third.net.jpountz.lz4.LZ4Constants.ML_BITS;
+import static com.fr.third.net.jpountz.lz4.LZ4Constants.ML_MASK;
+import static com.fr.third.net.jpountz.lz4.LZ4Constants.RUN_MASK;
+import com.fr.third.net.jpountz.util.SafeUtils;
+
+enum LZ4SafeUtils {
+ ;
+
+ static int hash(byte[] buf, int i) {
+ return LZ4Utils.hash(SafeUtils.readInt(buf, i));
+ }
+
+ static int hash64k(byte[] buf, int i) {
+ return LZ4Utils.hash64k(SafeUtils.readInt(buf, i));
+ }
+
+ static boolean readIntEquals(byte[] buf, int i, int j) {
+ return buf[i] == buf[j] && buf[i+1] == buf[j+1] && buf[i+2] == buf[j+2] && buf[i+3] == buf[j+3];
+ }
+
+ static void safeIncrementalCopy(byte[] dest, int matchOff, int dOff, int matchLen) {
+ for (int i = 0; i < matchLen; ++i) {
+ dest[dOff + i] = dest[matchOff + i];
+ }
+ }
+
+ static void wildIncrementalCopy(byte[] dest, int matchOff, int dOff, int matchCopyEnd) {
+ do {
+ copy8Bytes(dest, matchOff, dest, dOff);
+ matchOff += 8;
+ dOff += 8;
+ } while (dOff < matchCopyEnd);
+ }
+
+ static void copy8Bytes(byte[] src, int sOff, byte[] dest, int dOff) {
+ for (int i = 0; i < 8; ++i) {
+ dest[dOff + i] = src[sOff + i];
+ }
+ }
+
+ static int commonBytes(byte[] b, int o1, int o2, int limit) {
+ int count = 0;
+ while (o2 < limit && b[o1++] == b[o2++]) {
+ ++count;
+ }
+ return count;
+ }
+
+ static int commonBytesBackward(byte[] b, int o1, int o2, int l1, int l2) {
+ int count = 0;
+ while (o1 > l1 && o2 > l2 && b[--o1] == b[--o2]) {
+ ++count;
+ }
+ return count;
+ }
+
+ static void safeArraycopy(byte[] src, int sOff, byte[] dest, int dOff, int len) {
+ System.arraycopy(src, sOff, dest, dOff, len);
+ }
+
+ static void wildArraycopy(byte[] src, int sOff, byte[] dest, int dOff, int len) {
+ try {
+ for (int i = 0; i < len; i += 8) {
+ copy8Bytes(src, sOff + i, dest, dOff + i);
+ }
+ } catch (ArrayIndexOutOfBoundsException e) {
+ throw new LZ4Exception("Malformed input at offset " + sOff);
+ }
+ }
+
+ static int encodeSequence(byte[] src, int anchor, int matchOff, int matchRef, int matchLen, byte[] dest, int dOff, int destEnd) {
+ final int runLen = matchOff - anchor;
+ final int tokenOff = dOff++;
+
+ if (dOff + runLen + (2 + 1 + LAST_LITERALS) + (runLen >>> 8) > destEnd) {
+ throw new LZ4Exception("maxDestLen is too small");
+ }
+
+ int token;
+ if (runLen >= RUN_MASK) {
+ token = (byte) (RUN_MASK << ML_BITS);
+ dOff = writeLen(runLen - RUN_MASK, dest, dOff);
+ } else {
+ token = runLen << ML_BITS;
+ }
+
+ // copy literals
+ wildArraycopy(src, anchor, dest, dOff, runLen);
+ dOff += runLen;
+
+ // encode offset
+ final int matchDec = matchOff - matchRef;
+ dest[dOff++] = (byte) matchDec;
+ dest[dOff++] = (byte) (matchDec >>> 8);
+
+ // encode match len
+ matchLen -= 4;
+ if (dOff + (1 + LAST_LITERALS) + (matchLen >>> 8) > destEnd) {
+ throw new LZ4Exception("maxDestLen is too small");
+ }
+ if (matchLen >= ML_MASK) {
+ token |= ML_MASK;
+ dOff = writeLen(matchLen - RUN_MASK, dest, dOff);
+ } else {
+ token |= matchLen;
+ }
+
+ dest[tokenOff] = (byte) token;
+
+ return dOff;
+ }
+
+ static int lastLiterals(byte[] src, int sOff, int srcLen, byte[] dest, int dOff, int destEnd) {
+ final int runLen = srcLen;
+
+ if (dOff + runLen + 1 + (runLen + 255 - RUN_MASK) / 255 > destEnd) {
+ throw new LZ4Exception();
+ }
+
+ if (runLen >= RUN_MASK) {
+ dest[dOff++] = (byte) (RUN_MASK << ML_BITS);
+ dOff = writeLen(runLen - RUN_MASK, dest, dOff);
+ } else {
+ dest[dOff++] = (byte) (runLen << ML_BITS);
+ }
+ // copy literals
+ System.arraycopy(src, sOff, dest, dOff, runLen);
+ dOff += runLen;
+
+ return dOff;
+ }
+
+ static int writeLen(int len, byte[] dest, int dOff) {
+ while (len >= 0xFF) {
+ dest[dOff++] = (byte) 0xFF;
+ len -= 0xFF;
+ }
+ dest[dOff++] = (byte) len;
+ return dOff;
+ }
+
+ static class Match {
+ int start, ref, len;
+
+ void fix(int correction) {
+ start += correction;
+ ref += correction;
+ len -= correction;
+ }
+
+ int end() {
+ return start + len;
+ }
+ }
+
+ static void copyTo(Match m1, Match m2) {
+ m2.len = m1.len;
+ m2.start = m1.start;
+ m2.ref = m1.ref;
+ }
+
+}
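// Sketch of the length encoding used by writeLen()/encodeSequence() above (illustrative,
// not part of the patch). A literal-run or match length that does not fit in the 4-bit
// token field (i.e. >= 15) is emitted as a run of 0xFF bytes followed by one byte holding
// the remainder; the decoder simply sums them back up.
class LZ4LengthEncodingSketch {
  // same loop as LZ4SafeUtils.writeLen
  static int writeLen(int len, byte[] dest, int dOff) {
    while (len >= 0xFF) {
      dest[dOff++] = (byte) 0xFF;
      len -= 0xFF;
    }
    dest[dOff++] = (byte) len;
    return dOff;
  }

  // matching decoder for the extra length bytes
  static int readLen(byte[] src, int sOff) {
    int len = 0;
    int b;
    while ((b = src[sOff++] & 0xFF) == 0xFF) {
      len += 0xFF;
    }
    return len + b;
  }

  public static void main(String[] args) {
    byte[] buf = new byte[8];
    int extra = 600 - 15;              // a run of 600 keeps 15 in the token and encodes 585 here
    int end = writeLen(extra, buf, 0); // writes 0xFF 0xFF 0x4B
    System.out.println(end + " extra bytes, decoded " + readLen(buf, 0)); // 3 extra bytes, decoded 585
  }
}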
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4UnknownSizeDecompressor.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4UnknownSizeDecompressor.java
new file mode 100644
index 000000000..c8bcdde96
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4UnknownSizeDecompressor.java
@@ -0,0 +1,27 @@
+package com.fr.third.net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @deprecated Use {@link LZ4SafeDecompressor} instead.
+ */
+@Deprecated
+public interface LZ4UnknownSizeDecompressor {
+
+ int decompress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen);
+
+ int decompress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff);
+
+}
\ No newline at end of file
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4UnsafeUtils.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4UnsafeUtils.java
new file mode 100644
index 000000000..4e6f521cf
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4UnsafeUtils.java
@@ -0,0 +1,200 @@
+package com.fr.third.net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import com.fr.third.net.jpountz.util.UnsafeUtils;
+import com.fr.third.net.jpountz.util.Utils;
+
+import static com.fr.third.net.jpountz.util.UnsafeUtils.readInt;
+import static com.fr.third.net.jpountz.util.UnsafeUtils.readShort;
+import static com.fr.third.net.jpountz.util.UnsafeUtils.writeByte;
+import static com.fr.third.net.jpountz.util.UnsafeUtils.writeInt;
+import static com.fr.third.net.jpountz.util.UnsafeUtils.writeShort;
+
+import java.nio.ByteOrder;
+
+enum LZ4UnsafeUtils {
+ ;
+
+ static void safeArraycopy(byte[] src, int srcOff, byte[] dest, int destOff, int len) {
+ final int fastLen = len & 0xFFFFFFF8;
+ wildArraycopy(src, srcOff, dest, destOff, fastLen);
+ for (int i = 0, slowLen = len & 0x7; i < slowLen; i += 1) {
+ UnsafeUtils.writeByte(dest, destOff + fastLen + i, UnsafeUtils.readByte(src, srcOff + fastLen + i));
+ }
+ }
+
+ static void wildArraycopy(byte[] src, int srcOff, byte[] dest, int destOff, int len) {
+ for (int i = 0; i < len; i += 8) {
+ UnsafeUtils.writeLong(dest, destOff + i, UnsafeUtils.readLong(src, srcOff + i));
+ }
+ }
+
+ static void wildIncrementalCopy(byte[] dest, int matchOff, int dOff, int matchCopyEnd) {
+ if (dOff - matchOff < 4) {
+ for (int i = 0; i < 4; ++i) {
+ UnsafeUtils.writeByte(dest, dOff+i, UnsafeUtils.readByte(dest, matchOff+i));
+ }
+ dOff += 4;
+ matchOff += 4;
+ int dec = 0;
+ assert dOff >= matchOff && dOff - matchOff < 8;
+ switch (dOff - matchOff) {
+ case 1:
+ matchOff -= 3;
+ break;
+ case 2:
+ matchOff -= 2;
+ break;
+ case 3:
+ matchOff -= 3;
+ dec = -1;
+ break;
+ case 5:
+ dec = 1;
+ break;
+ case 6:
+ dec = 2;
+ break;
+ case 7:
+ dec = 3;
+ break;
+ default:
+ break;
+ }
+ UnsafeUtils.writeInt(dest, dOff, UnsafeUtils.readInt(dest, matchOff));
+ dOff += 4;
+ matchOff -= dec;
+ } else if (dOff - matchOff < LZ4Constants.COPY_LENGTH) {
+ UnsafeUtils.writeLong(dest, dOff, UnsafeUtils.readLong(dest, matchOff));
+ dOff += dOff - matchOff;
+ }
+ while (dOff < matchCopyEnd) {
+ UnsafeUtils.writeLong(dest, dOff, UnsafeUtils.readLong(dest, matchOff));
+ dOff += 8;
+ matchOff += 8;
+ }
+ }
+
+ static void safeIncrementalCopy(byte[] dest, int matchOff, int dOff, int matchLen) {
+ for (int i = 0; i < matchLen; ++i) {
+ dest[dOff + i] = dest[matchOff + i];
+ UnsafeUtils.writeByte(dest, dOff + i, UnsafeUtils.readByte(dest, matchOff + i));
+ }
+ }
+
+ static int readShortLittleEndian(byte[] src, int srcOff) {
+ short s = UnsafeUtils.readShort(src, srcOff);
+ if (Utils.NATIVE_BYTE_ORDER == ByteOrder.BIG_ENDIAN) {
+ s = Short.reverseBytes(s);
+ }
+ return s & 0xFFFF;
+ }
+
+ static void writeShortLittleEndian(byte[] dest, int destOff, int value) {
+ short s = (short) value;
+ if (Utils.NATIVE_BYTE_ORDER == ByteOrder.BIG_ENDIAN) {
+ s = Short.reverseBytes(s);
+ }
+ UnsafeUtils.writeShort(dest, destOff, s);
+ }
+
+ static boolean readIntEquals(byte[] src, int ref, int sOff) {
+ return UnsafeUtils.readInt(src, ref) == UnsafeUtils.readInt(src, sOff);
+ }
+
+ static int commonBytes(byte[] src, int ref, int sOff, int srcLimit) {
+ int matchLen = 0;
+ while (sOff <= srcLimit - 8) {
+ if (UnsafeUtils.readLong(src, sOff) == UnsafeUtils.readLong(src, ref)) {
+ matchLen += 8;
+ ref += 8;
+ sOff += 8;
+ } else {
+ final int zeroBits;
+ if (Utils.NATIVE_BYTE_ORDER == ByteOrder.BIG_ENDIAN) {
+ zeroBits = Long.numberOfLeadingZeros(UnsafeUtils.readLong(src, sOff) ^ UnsafeUtils.readLong(src, ref));
+ } else {
+ zeroBits = Long.numberOfTrailingZeros(UnsafeUtils.readLong(src, sOff) ^ UnsafeUtils.readLong(src, ref));
+ }
+ return matchLen + (zeroBits >>> 3);
+ }
+ }
+ while (sOff < srcLimit && UnsafeUtils.readByte(src, ref++) == UnsafeUtils.readByte(src, sOff++)) {
+ ++matchLen;
+ }
+ return matchLen;
+ }
+
+ static int writeLen(int len, byte[] dest, int dOff) {
+ while (len >= 0xFF) {
+ UnsafeUtils.writeByte(dest, dOff++, 0xFF);
+ len -= 0xFF;
+ }
+ UnsafeUtils.writeByte(dest, dOff++, len);
+ return dOff;
+ }
+
+ static int encodeSequence(byte[] src, int anchor, int matchOff, int matchRef, int matchLen, byte[] dest, int dOff, int destEnd) {
+ final int runLen = matchOff - anchor;
+ final int tokenOff = dOff++;
+ int token;
+
+ if (runLen >= LZ4Constants.RUN_MASK) {
+ token = (byte) (LZ4Constants.RUN_MASK << LZ4Constants.ML_BITS);
+ dOff = writeLen(runLen - LZ4Constants.RUN_MASK, dest, dOff);
+ } else {
+ token = runLen << LZ4Constants.ML_BITS;
+ }
+
+ // copy literals
+ wildArraycopy(src, anchor, dest, dOff, runLen);
+ dOff += runLen;
+
+ // encode offset
+ final int matchDec = matchOff - matchRef;
+ dest[dOff++] = (byte) matchDec;
+ dest[dOff++] = (byte) (matchDec >>> 8);
+
+ // encode match len
+ matchLen -= 4;
+ if (dOff + (1 + LZ4Constants.LAST_LITERALS) + (matchLen >>> 8) > destEnd) {
+ throw new LZ4Exception("maxDestLen is too small");
+ }
+ if (matchLen >= LZ4Constants.ML_MASK) {
+ token |= LZ4Constants.ML_MASK;
+ dOff = writeLen(matchLen - LZ4Constants.RUN_MASK, dest, dOff);
+ } else {
+ token |= matchLen;
+ }
+
+ dest[tokenOff] = (byte) token;
+
+ return dOff;
+ }
+
+ static int commonBytesBackward(byte[] b, int o1, int o2, int l1, int l2) {
+ int count = 0;
+ while (o1 > l1 && o2 > l2 && UnsafeUtils.readByte(b, --o1) == UnsafeUtils.readByte(b, --o2)) {
+ ++count;
+ }
+ return count;
+ }
+
+ static int lastLiterals(byte[] src, int sOff, int srcLen, byte[] dest, int dOff, int destEnd) {
+ return LZ4SafeUtils.lastLiterals(src, sOff, srcLen, dest, dOff, destEnd);
+ }
+
+}
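// Sketch of the trick behind commonBytes() above (illustrative, not part of the patch).
// On a little-endian machine the first differing byte of two words is the lowest non-zero
// byte of their XOR, so Long.numberOfTrailingZeros(xor) >>> 3 counts how many leading bytes
// (in memory order) are equal within that 8-byte word.
class CommonBytesSketch {
  public static void main(String[] args) {
    long a = 0x0000000511223344L; // little-endian memory layout: 44 33 22 11 05 00 00 00
    long b = 0x0000000611223344L; // little-endian memory layout: 44 33 22 11 06 00 00 00
    long xor = a ^ b;             // 0x0000000300000000 -> 32 trailing zero bits
    System.out.println(Long.numberOfTrailingZeros(xor) >>> 3); // 4 equal bytes
  }
}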
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4Utils.java b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4Utils.java
new file mode 100644
index 000000000..ee2301242
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/LZ4Utils.java
@@ -0,0 +1,68 @@
+package com.fr.third.net.jpountz.lz4;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import static com.fr.third.net.jpountz.lz4.LZ4Constants.HASH_LOG;
+import static com.fr.third.net.jpountz.lz4.LZ4Constants.HASH_LOG_64K;
+import static com.fr.third.net.jpountz.lz4.LZ4Constants.HASH_LOG_HC;
+import static com.fr.third.net.jpountz.lz4.LZ4Constants.MIN_MATCH;
+
+enum LZ4Utils {
+ ;
+
+ private static final int MAX_INPUT_SIZE = 0x7E000000;
+
+ static int maxCompressedLength(int length) {
+ if (length < 0) {
+ throw new IllegalArgumentException("length must be >= 0, got " + length);
+ } else if (length >= MAX_INPUT_SIZE) {
+ throw new IllegalArgumentException("length must be < " + MAX_INPUT_SIZE);
+ }
+ return length + length / 255 + 16;
+ }
+
+ static int hash(int i) {
+ return (i * -1640531535) >>> ((MIN_MATCH * 8) - HASH_LOG);
+ }
+
+ static int hash64k(int i) {
+ return (i * -1640531535) >>> ((MIN_MATCH * 8) - HASH_LOG_64K);
+ }
+
+ static int hashHC(int i) {
+ return (i * -1640531535) >>> ((MIN_MATCH * 8) - HASH_LOG_HC);
+ }
+
+ static class Match {
+ int start, ref, len;
+
+ void fix(int correction) {
+ start += correction;
+ ref += correction;
+ len -= correction;
+ }
+
+ int end() {
+ return start + len;
+ }
+ }
+
+ static void copyTo(Match m1, Match m2) {
+ m2.len = m1.len;
+ m2.start = m1.start;
+ m2.ref = m1.ref;
+ }
+
+}
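// Worked example for maxCompressedLength() above (illustrative, not part of the patch).
// The worst-case bound is length + length/255 + 16, i.e. about 0.4% overhead plus a small
// constant, so a 64 KiB block can never expand beyond 65809 bytes.
class MaxCompressedLengthSketch {
  public static void main(String[] args) {
    int length = 64 * 1024;                  // 65536
    int bound = length + length / 255 + 16;  // 65536 + 257 + 16
    System.out.println(bound);               // 65809
  }
}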
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/lz4/package.html b/fine-lz4/src/com/fr/third/net/jpountz/lz4/package.html
new file mode 100644
index 000000000..332673e08
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/lz4/package.html
@@ -0,0 +1,55 @@
+
+
+
+
+
+
+
+
+ LZ4Factory factory = LZ4Factory.fastestInstance();
+
+ byte[] data = "12345345234572".getBytes("UTF-8");
+ final int decompressedLength = data.length;
+
+ // compress data
+ LZ4Compressor compressor = factory.fastCompressor();
+ int maxCompressedLength = compressor.maxCompressedLength(decompressedLength);
+ byte[] compressed = new byte[maxCompressedLength];
+ int compressedLength = compressor.compress(data, 0, decompressedLength, compressed, 0, maxCompressedLength);
+
+ // decompress data
+ // - method 1: when the decompressed length is known
+ LZ4FastDecompressor decompressor = factory.fastDecompressor();
+ byte[] restored = new byte[decompressedLength];
+ int compressedLength2 = decompressor.decompress(compressed, 0, restored, 0, decompressedLength);
+ // compressedLength == compressedLength2
+
+ // - method 2: when the compressed length is known (a little slower)
+ // the destination buffer needs to be over-sized
+ LZ4SafeDecompressor decompressor2 = factory.safeDecompressor();
+ int decompressedLength2 = decompressor2.decompress(compressed, 0, compressedLength, restored, 0);
+ // decompressedLength == decompressedLength2
+
+
+
+
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/util/ByteBufferUtils.java b/fine-lz4/src/com/fr/third/net/jpountz/util/ByteBufferUtils.java
new file mode 100644
index 000000000..291f38b92
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/util/ByteBufferUtils.java
@@ -0,0 +1,92 @@
+package com.fr.third.net.jpountz.util;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.nio.ReadOnlyBufferException;
+
+public enum ByteBufferUtils {
+ ;
+
+ public static void checkRange(ByteBuffer buf, int off, int len) {
+ SafeUtils.checkLength(len);
+ if (len > 0) {
+ checkRange(buf, off);
+ checkRange(buf, off + len - 1);
+ }
+ }
+
+ public static void checkRange(ByteBuffer buf, int off) {
+ if (off < 0 || off >= buf.capacity()) {
+ throw new ArrayIndexOutOfBoundsException(off);
+ }
+ }
+
+ public static ByteBuffer inLittleEndianOrder(ByteBuffer buf) {
+ if (buf.order().equals(ByteOrder.LITTLE_ENDIAN)) {
+ return buf;
+ } else {
+ return buf.duplicate().order(ByteOrder.LITTLE_ENDIAN);
+ }
+ }
+
+ public static ByteBuffer inNativeByteOrder(ByteBuffer buf) {
+ if (buf.order().equals(Utils.NATIVE_BYTE_ORDER)) {
+ return buf;
+ } else {
+ return buf.duplicate().order(Utils.NATIVE_BYTE_ORDER);
+ }
+ }
+
+ public static byte readByte(ByteBuffer buf, int i) {
+ return buf.get(i);
+ }
+
+ public static void writeInt(ByteBuffer buf, int i, int v) {
+ assert buf.order() == Utils.NATIVE_BYTE_ORDER;
+ buf.putInt(i, v);
+ }
+
+ public static int readInt(ByteBuffer buf, int i) {
+ assert buf.order() == Utils.NATIVE_BYTE_ORDER;
+ return buf.getInt(i);
+ }
+
+ public static int readIntLE(ByteBuffer buf, int i) {
+ assert buf.order() == ByteOrder.LITTLE_ENDIAN;
+ return buf.getInt(i);
+ }
+
+ public static void writeLong(ByteBuffer buf, int i, long v) {
+ assert buf.order() == Utils.NATIVE_BYTE_ORDER;
+ buf.putLong(i, v);
+ }
+
+ public static long readLong(ByteBuffer buf, int i) {
+ assert buf.order() == Utils.NATIVE_BYTE_ORDER;
+ return buf.getLong(i);
+ }
+
+ public static long readLongLE(ByteBuffer buf, int i) {
+ assert buf.order() == ByteOrder.LITTLE_ENDIAN;
+ return buf.getLong(i);
+ }
+
+ public static void writeByte(ByteBuffer dest, int off, int i) {
+ dest.put(off, (byte) i);
+ }
+
+ public static void writeShortLE(ByteBuffer dest, int off, int i) {
+ dest.put(off, (byte) i);
+ dest.put(off + 1, (byte) (i >>> 8));
+ }
+
+ public static void checkNotReadOnly(ByteBuffer buffer) {
+ if (buffer.isReadOnly()) {
+ throw new ReadOnlyBufferException();
+ }
+ }
+
+ public static int readShortLE(ByteBuffer buf, int i) {
+ return (buf.get(i) & 0xFF) | ((buf.get(i+1) & 0xFF) << 8);
+ }
+}
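// Usage sketch for inLittleEndianOrder() above (illustrative, not part of the patch).
// The helper returns the buffer itself when it is already little-endian and otherwise a
// duplicate view over the same bytes, so the caller's buffer keeps its original byte order.
import com.fr.third.net.jpountz.util.ByteBufferUtils;

import java.nio.ByteBuffer;

class ByteBufferOrderSketch {
  public static void main(String[] args) {
    ByteBuffer bigEndian = ByteBuffer.allocate(8);                  // BIG_ENDIAN by default
    ByteBuffer le = ByteBufferUtils.inLittleEndianOrder(bigEndian); // duplicate view, shared bytes

    le.putInt(0, 1);                           // stores bytes 01 00 00 00
    System.out.println(bigEndian.order());     // still BIG_ENDIAN
    System.out.println(bigEndian.getInt(0));   // 16777216 (0x01000000 read big-endian)
  }
}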
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/util/Native.java b/fine-lz4/src/com/fr/third/net/jpountz/util/Native.java
new file mode 100644
index 000000000..d2fada471
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/util/Native.java
@@ -0,0 +1,133 @@
+package com.fr.third.net.jpountz.util;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+
+/** FOR INTERNAL USE ONLY */
+public enum Native {
+ ;
+
+ private enum OS {
+ // Even on Windows, the default compiler from cpptasks (gcc) uses .so as a shared lib extension
+ WINDOWS("win32", "so"), LINUX("linux", "so"), MAC("darwin", "dylib"), SOLARIS("solaris", "so");
+ public final String name, libExtension;
+
+ private OS(String name, String libExtension) {
+ this.name = name;
+ this.libExtension = libExtension;
+ }
+ }
+
+ private static String arch() {
+ return System.getProperty("os.arch");
+ }
+
+ private static OS os() {
+ String osName = System.getProperty("os.name");
+ if (osName.contains("Linux")) {
+ return OS.LINUX;
+ } else if (osName.contains("Mac")) {
+ return OS.MAC;
+ } else if (osName.contains("Windows")) {
+ return OS.WINDOWS;
+ } else if (osName.contains("Solaris") || osName.contains("SunOS")) {
+ return OS.SOLARIS;
+ } else {
+ throw new UnsupportedOperationException("Unsupported operating system: "
+ + osName);
+ }
+ }
+
+ private static String resourceName() {
+ OS os = os();
+ String packagePrefix = Native.class.getPackage().getName().replace('.', '/');
+
+ return "/" + packagePrefix + "/" + os.name + "/" + arch() + "/liblz4-java." + os.libExtension;
+ }
+
+ private static boolean loaded = false;
+
+ public static synchronized boolean isLoaded() {
+ return loaded;
+ }
+
+ public static synchronized void load() {
+ if (loaded) {
+ return;
+ }
+
+ // Try to load lz4-java (liblz4-java.so on Linux) from the java.library.path.
+ try {
+ System.loadLibrary("lz4-java");
+ loaded = true;
+ return;
+ } catch (UnsatisfiedLinkError ex) {
+ // Doesn't exist, so proceed to loading bundled library.
+ }
+
+ String resourceName = resourceName();
+ InputStream is = Native.class.getResourceAsStream(resourceName);
+ if (is == null) {
+ throw new UnsupportedOperationException("Unsupported OS/arch, cannot find " + resourceName + ". Please try building from source.");
+ }
+ File tempLib;
+ try {
+ tempLib = File.createTempFile("liblz4-java", "." + os().libExtension);
+ // copy to tempLib
+ FileOutputStream out = new FileOutputStream(tempLib);
+ try {
+ byte[] buf = new byte[4096];
+ while (true) {
+ int read = is.read(buf);
+ if (read == -1) {
+ break;
+ }
+ out.write(buf, 0, read);
+ }
+ try {
+ out.close();
+ out = null;
+ } catch (IOException e) {
+ // ignore
+ }
+ System.load(tempLib.getAbsolutePath());
+ loaded = true;
+ } finally {
+ try {
+ if (out != null) {
+ out.close();
+ }
+ } catch (IOException e) {
+ // ignore
+ }
+ if (tempLib != null && tempLib.exists()) {
+ if (!loaded) {
+ tempLib.delete();
+ } else {
+ // try to delete on exit, does it work on Windows?
+ tempLib.deleteOnExit();
+ }
+ }
+ }
+ } catch (IOException e) {
+ throw new ExceptionInInitializerError("Cannot unpack liblz4-java");
+ }
+ }
+
+}
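// Sketch of the platform-to-resource mapping done by os(), arch() and resourceName() above
// (illustrative, not part of the patch). It is what ties Native.load() to the binaries added
// by this change, e.g. /com/fr/third/net/jpountz/util/linux/amd64/liblz4-java.so on
// Linux/amd64 or .../darwin/x86_64/liblz4-java.dylib on macOS.
class NativeResourceNameSketch {
  public static void main(String[] args) {
    String osName = System.getProperty("os.name");
    String arch = System.getProperty("os.arch");

    String dir;
    String ext;
    if (osName.contains("Linux"))        { dir = "linux";   ext = "so"; }
    else if (osName.contains("Mac"))     { dir = "darwin";  ext = "dylib"; }
    else if (osName.contains("Windows")) { dir = "win32";   ext = "so"; }
    else                                 { dir = "solaris"; ext = "so"; }

    System.out.println("/com/fr/third/net/jpountz/util/" + dir + "/" + arch
        + "/liblz4-java." + ext);
  }
}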
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/util/SafeUtils.java b/fine-lz4/src/com/fr/third/net/jpountz/util/SafeUtils.java
new file mode 100644
index 000000000..3a572d8c5
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/util/SafeUtils.java
@@ -0,0 +1,95 @@
+package com.fr.third.net.jpountz.util;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.nio.ByteOrder;
+
+public enum SafeUtils {
+ ;
+
+ public static void checkRange(byte[] buf, int off) {
+ if (off < 0 || off >= buf.length) {
+ throw new ArrayIndexOutOfBoundsException(off);
+ }
+ }
+
+ public static void checkRange(byte[] buf, int off, int len) {
+ checkLength(len);
+ if (len > 0) {
+ checkRange(buf, off);
+ checkRange(buf, off + len - 1);
+ }
+ }
+
+ public static void checkLength(int len) {
+ if (len < 0) {
+ throw new IllegalArgumentException("lengths must be >= 0");
+ }
+ }
+
+ public static byte readByte(byte[] buf, int i) {
+ return buf[i];
+ }
+
+ public static int readIntBE(byte[] buf, int i) {
+ return ((buf[i] & 0xFF) << 24) | ((buf[i+1] & 0xFF) << 16) | ((buf[i+2] & 0xFF) << 8) | (buf[i+3] & 0xFF);
+ }
+
+ public static int readIntLE(byte[] buf, int i) {
+ return (buf[i] & 0xFF) | ((buf[i+1] & 0xFF) << 8) | ((buf[i+2] & 0xFF) << 16) | ((buf[i+3] & 0xFF) << 24);
+ }
+
+ public static int readInt(byte[] buf, int i) {
+ if (Utils.NATIVE_BYTE_ORDER == ByteOrder.BIG_ENDIAN) {
+ return readIntBE(buf, i);
+ } else {
+ return readIntLE(buf, i);
+ }
+ }
+
+ public static long readLongLE(byte[] buf, int i) {
+ return (buf[i] & 0xFFL) | ((buf[i+1] & 0xFFL) << 8) | ((buf[i+2] & 0xFFL) << 16) | ((buf[i+3] & 0xFFL) << 24)
+ | ((buf[i+4] & 0xFFL) << 32) | ((buf[i+5] & 0xFFL) << 40) | ((buf[i+6] & 0xFFL) << 48) | ((buf[i+7] & 0xFFL) << 56);
+ }
+
+ public static void writeShortLE(byte[] buf, int off, int v) {
+ buf[off++] = (byte) v;
+ buf[off++] = (byte) (v >>> 8);
+ }
+
+ public static void writeInt(int[] buf, int off, int v) {
+ buf[off] = v;
+ }
+
+ public static int readInt(int[] buf, int off) {
+ return buf[off];
+ }
+
+ public static void writeByte(byte[] dest, int off, int i) {
+ dest[off] = (byte) i;
+ }
+
+ public static void writeShort(short[] buf, int off, int v) {
+ buf[off] = (short) v;
+ }
+
+ public static int readShortLE(byte[] buf, int i) {
+ return (buf[i] & 0xFF) | ((buf[i+1] & 0xFF) << 8);
+ }
+
+ public static int readShort(short[] buf, int off) {
+ return buf[off] & 0xFFFF;
+ }
+}
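// Byte-order sketch for the readers above (illustrative, not part of the patch). The
// little-endian helpers assemble values least-significant byte first, so the same four
// bytes decode differently depending on which reader is used.
import com.fr.third.net.jpountz.util.SafeUtils;

class SafeUtilsEndiannessSketch {
  public static void main(String[] args) {
    byte[] buf = {0x01, 0x02, 0x03, 0x04};
    System.out.println(Integer.toHexString(SafeUtils.readIntLE(buf, 0))); // 4030201
    System.out.println(Integer.toHexString(SafeUtils.readIntBE(buf, 0))); // 1020304
    System.out.println(SafeUtils.readShortLE(buf, 0));                    // 513 (0x0201)
  }
}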
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/util/UnsafeUtils.java b/fine-lz4/src/com/fr/third/net/jpountz/util/UnsafeUtils.java
new file mode 100644
index 000000000..a494592ce
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/util/UnsafeUtils.java
@@ -0,0 +1,147 @@
+package com.fr.third.net.jpountz.util;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import static com.fr.third.net.jpountz.util.Utils.NATIVE_BYTE_ORDER;
+
+import java.lang.reflect.Field;
+import java.nio.ByteOrder;
+
+import sun.misc.Unsafe;
+
+public enum UnsafeUtils {
+ ;
+
+ private static final Unsafe UNSAFE;
+ private static final long BYTE_ARRAY_OFFSET;
+ private static final int BYTE_ARRAY_SCALE;
+ private static final long INT_ARRAY_OFFSET;
+ private static final int INT_ARRAY_SCALE;
+ private static final long SHORT_ARRAY_OFFSET;
+ private static final int SHORT_ARRAY_SCALE;
+
+ static {
+ try {
+ Field theUnsafe = Unsafe.class.getDeclaredField("theUnsafe");
+ theUnsafe.setAccessible(true);
+ UNSAFE = (Unsafe) theUnsafe.get(null);
+ BYTE_ARRAY_OFFSET = UNSAFE.arrayBaseOffset(byte[].class);
+ BYTE_ARRAY_SCALE = UNSAFE.arrayIndexScale(byte[].class);
+ INT_ARRAY_OFFSET = UNSAFE.arrayBaseOffset(int[].class);
+ INT_ARRAY_SCALE = UNSAFE.arrayIndexScale(int[].class);
+ SHORT_ARRAY_OFFSET = UNSAFE.arrayBaseOffset(short[].class);
+ SHORT_ARRAY_SCALE = UNSAFE.arrayIndexScale(short[].class);
+ } catch (IllegalAccessException e) {
+ throw new ExceptionInInitializerError("Cannot access Unsafe");
+ } catch (NoSuchFieldException e) {
+ throw new ExceptionInInitializerError("Cannot access Unsafe");
+ } catch (SecurityException e) {
+ throw new ExceptionInInitializerError("Cannot access Unsafe");
+ }
+ }
+
+ public static void checkRange(byte[] buf, int off) {
+ SafeUtils.checkRange(buf, off);
+ }
+
+ public static void checkRange(byte[] buf, int off, int len) {
+ SafeUtils.checkRange(buf, off, len);
+ }
+
+ public static void checkLength(int len) {
+ SafeUtils.checkLength(len);
+ }
+
+ public static byte readByte(byte[] src, int srcOff) {
+ return UNSAFE.getByte(src, BYTE_ARRAY_OFFSET + BYTE_ARRAY_SCALE * srcOff);
+ }
+
+ public static void writeByte(byte[] src, int srcOff, byte value) {
+ UNSAFE.putByte(src, BYTE_ARRAY_OFFSET + BYTE_ARRAY_SCALE * srcOff, (byte) value);
+ }
+
+ public static void writeByte(byte[] src, int srcOff, int value) {
+ writeByte(src, srcOff, (byte) value);
+ }
+
+ public static long readLong(byte[] src, int srcOff) {
+ return UNSAFE.getLong(src, BYTE_ARRAY_OFFSET + srcOff);
+ }
+
+ public static long readLongLE(byte[] src, int srcOff) {
+ long i = readLong(src, srcOff);
+ if (NATIVE_BYTE_ORDER == ByteOrder.BIG_ENDIAN) {
+ i = Long.reverseBytes(i);
+ }
+ return i;
+ }
+
+ public static void writeLong(byte[] dest, int destOff, long value) {
+ UNSAFE.putLong(dest, BYTE_ARRAY_OFFSET + destOff, value);
+ }
+
+ public static int readInt(byte[] src, int srcOff) {
+ return UNSAFE.getInt(src, BYTE_ARRAY_OFFSET + srcOff);
+ }
+
+ public static int readIntLE(byte[] src, int srcOff) {
+ int i = readInt(src, srcOff);
+ if (NATIVE_BYTE_ORDER == ByteOrder.BIG_ENDIAN) {
+ i = Integer.reverseBytes(i);
+ }
+ return i;
+ }
+
+ public static void writeInt(byte[] dest, int destOff, int value) {
+ UNSAFE.putInt(dest, BYTE_ARRAY_OFFSET + destOff, value);
+ }
+
+ public static short readShort(byte[] src, int srcOff) {
+ return UNSAFE.getShort(src, BYTE_ARRAY_OFFSET + srcOff);
+ }
+
+ public static int readShortLE(byte[] src, int srcOff) {
+ short s = readShort(src, srcOff);
+ if (NATIVE_BYTE_ORDER == ByteOrder.BIG_ENDIAN) {
+ s = Short.reverseBytes(s);
+ }
+ return s & 0xFFFF;
+ }
+
+ public static void writeShort(byte[] dest, int destOff, short value) {
+ UNSAFE.putShort(dest, BYTE_ARRAY_OFFSET + destOff, value);
+ }
+
+ public static void writeShortLE(byte[] buf, int off, int v) {
+ writeByte(buf, off, (byte) v);
+ writeByte(buf, off + 1, (byte) (v >>> 8));
+ }
+
+ public static int readInt(int[] src, int srcOff) {
+ return UNSAFE.getInt(src, INT_ARRAY_OFFSET + INT_ARRAY_SCALE * srcOff);
+ }
+
+ public static void writeInt(int[] dest, int destOff, int value) {
+ UNSAFE.putInt(dest, INT_ARRAY_OFFSET + INT_ARRAY_SCALE * destOff, value);
+ }
+
+ public static int readShort(short[] src, int srcOff) {
+ return UNSAFE.getShort(src, SHORT_ARRAY_OFFSET + SHORT_ARRAY_SCALE * srcOff) & 0xFFFF;
+ }
+
+ public static void writeShort(short[] dest, int destOff, int value) {
+ UNSAFE.putShort(dest, SHORT_ARRAY_OFFSET + SHORT_ARRAY_SCALE * destOff, (short) value);
+ }
+}
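
Review note: UnsafeUtils mirrors SafeUtils but reads through sun.misc.Unsafe with the array base offsets, so the two classes must agree bit-for-bit; the auto-generated XXHash32JavaSafe/XXHash32JavaUnsafe classes further down rely on that. A small consistency sketch (assumes the vendored classes are compiled and the JVM still exposes sun.misc.Unsafe):

import java.util.Random;

import com.fr.third.net.jpountz.util.SafeUtils;
import com.fr.third.net.jpountz.util.UnsafeUtils;

final class ReadConsistencyCheck {
    public static void main(String[] args) {
        byte[] buf = new byte[64];
        new Random(42).nextBytes(buf);
        for (int off = 0; off <= buf.length - 4; off++) {
            if (SafeUtils.readIntLE(buf, off) != UnsafeUtils.readIntLE(buf, off)) {
                throw new AssertionError("little-endian int read mismatch at offset " + off);
            }
        }
        System.out.println("SafeUtils and UnsafeUtils agree on all little-endian int reads");
    }
}
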
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/util/Utils.java b/fine-lz4/src/com/fr/third/net/jpountz/util/Utils.java
new file mode 100644
index 000000000..9610862cf
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/util/Utils.java
@@ -0,0 +1,36 @@
+package com.fr.third.net.jpountz.util;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.nio.ByteOrder;
+
+public enum Utils {
+ ;
+
+ public static final ByteOrder NATIVE_BYTE_ORDER = ByteOrder.nativeOrder();
+
+ private static final boolean unalignedAccessAllowed;
+ static {
+ String arch = System.getProperty("os.arch");
+ unalignedAccessAllowed = arch.equals("i386") || arch.equals("x86")
+ || arch.equals("amd64") || arch.equals("x86_64")
+ || arch.equals("aarch64") || arch.equals("ppc64le");
+ }
+
+ public static boolean isUnalignedAccessAllowed() {
+ return unalignedAccessAllowed;
+ }
+
+}
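
Review note: the os.arch whitelist above is what gates the Unsafe-backed implementations; XXHashFactory.fastestJavaInstance() (later in this patch) consults it and additionally falls back to the safe instance if unsafeInstance() throws. A minimal sketch of the same selection, assuming the xxhash classes from this patch are on the classpath:

import com.fr.third.net.jpountz.util.Utils;
import com.fr.third.net.jpountz.xxhash.XXHash32;
import com.fr.third.net.jpountz.xxhash.XXHashFactory;

final class UnalignedAccessGateExample {
    public static void main(String[] args) {
        // On i386/x86/amd64/x86_64/aarch64/ppc64le this picks the Unsafe-backed hash.
        XXHash32 hash32 = Utils.isUnalignedAccessAllowed()
                ? XXHashFactory.unsafeInstance().hash32()
                : XXHashFactory.safeInstance().hash32();
        System.out.println("selected implementation: " + hash32); // toString() prints the class name
    }
}
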
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/util/package.html b/fine-lz4/src/com/fr/third/net/jpountz/util/package.html
new file mode 100644
index 000000000..4b3ceb980
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/util/package.html
@@ -0,0 +1,22 @@
+<html>
+<body>
+(package overview — HTML body lost in extraction)
+</body>
+</html>
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHash32.java b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHash32.java
new file mode 100644
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHash32.java
+package com.fr.third.net.jpountz.xxhash;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A 32-bits hash.
+ *
+ * <p>Sample block and streaming usages (32- and 64-bits):
+ * <pre>
+ * int hash(XXHashFactory xxhashFactory, byte[] buf, int off, int len, int seed) {
+ *   return xxhashFactory.hash32().hash(buf, off, len, seed);
+ * }
+ *
+ * int hash(XXHashFactory xxhashFactory, byte[] buf, int off, int len, int seed) {
+ *   StreamingXXHash32 sh32 = xxhashFactory.newStreamingHash32(seed);
+ *   sh32.update(buf, off, len);
+ *   return sh32.getValue();
+ * }
+ *
+ * long hash(XXHashFactory xxhashFactory, byte[] buf, int off, int len, long seed) {
+ *   return xxhashFactory.hash64().hash(buf, off, len, seed);
+ * }
+ *
+ * long hash(XXHashFactory xxhashFactory, byte[] buf, int off, int len, long seed) {
+ *   StreamingXXHash64 sh64 = xxhashFactory.newStreamingHash64(seed);
+ *   sh64.update(buf, off, len);
+ *   return sh64.getValue();
+ * }
+ * </pre>
+ */
+public abstract class XXHash32 {
+
+ /**
+ * Computes the 32-bits hash of <code>buf[off:off+len]</code> using seed
+ * <code>seed</code>.
+ *
+ * @param buf the input data
+ * @param off the start offset in buf
+ * @param len the number of bytes to hash
+ * @param seed the seed to use
+ * @return the hash value
+ */
+ public abstract int hash(byte[] buf, int off, int len, int seed);
+
+ /**
+ * Computes the hash of the given slice of the {@link ByteBuffer}.
+ * {@link ByteBuffer#position() position} and {@link ByteBuffer#limit() limit}
+ * are not modified.
+ *
+ * @param buf the input data
+ * @param off the start offset in buf
+ * @param len the number of bytes to hash
+ * @param seed the seed to use
+ * @return the hash value
+ */
+ public abstract int hash(ByteBuffer buf, int off, int len, int seed);
+
+ /**
+ * Computes the hash of the given {@link ByteBuffer}. The
+ * {@link ByteBuffer#position() position} is moved in order to reflect bytes
+ * which have been read.
+ *
+ * @param buf the input data
+ * @param seed the seed to use
+ * @return the hash value
+ */
+ public final int hash(ByteBuffer buf, int seed) {
+ final int hash = hash(buf, buf.position(), buf.remaining(), seed);
+ buf.position(buf.limit());
+ return hash;
+ }
+
+ @Override
+ public String toString() {
+ return getClass().getSimpleName();
+ }
+
+}
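
Review note: callers normally reach the abstract API above through XXHashFactory (added further down in this patch) rather than instantiating an implementation directly. A minimal usage sketch; the seed value is arbitrary but must stay constant for hashes to be comparable:

import java.nio.charset.StandardCharsets;

import com.fr.third.net.jpountz.xxhash.XXHash32;
import com.fr.third.net.jpountz.xxhash.XXHashFactory;

final class Hash32Example {
    public static void main(String[] args) {
        byte[] data = "hello xxhash".getBytes(StandardCharsets.UTF_8);
        XXHash32 hash32 = XXHashFactory.fastestJavaInstance().hash32();
        int seed = 0x9747b28c; // any fixed seed works, but it must never change
        System.out.printf("xxh32 = %08x%n", hash32.hash(data, 0, data.length, seed));
    }
}
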
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHash32JNI.java b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHash32JNI.java
new file mode 100644
index 000000000..144dd3026
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHash32JNI.java
@@ -0,0 +1,52 @@
+package com.fr.third.net.jpountz.xxhash;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import com.fr.third.net.jpountz.util.ByteBufferUtils;
+import com.fr.third.net.jpountz.util.SafeUtils;
+
+import static com.fr.third.net.jpountz.util.ByteBufferUtils.checkRange;
+import static com.fr.third.net.jpountz.util.SafeUtils.checkRange;
+
+import java.nio.ByteBuffer;
+
+final class XXHash32JNI extends XXHash32 {
+
+ public static final XXHash32 INSTANCE = new XXHash32JNI();
+ private static XXHash32 SAFE_INSTANCE;
+
+ @Override
+ public int hash(byte[] buf, int off, int len, int seed) {
+ SafeUtils.checkRange(buf, off, len);
+ return XXHashJNI.XXH32(buf, off, len, seed);
+ }
+
+ @Override
+ public int hash(ByteBuffer buf, int off, int len, int seed) {
+ if (buf.isDirect()) {
+ ByteBufferUtils.checkRange(buf, off, len);
+ return XXHashJNI.XXH32BB(buf, off, len, seed);
+ } else if (buf.hasArray()) {
+ return hash(buf.array(), off + buf.arrayOffset(), len, seed);
+ } else {
+ XXHash32 safeInstance = SAFE_INSTANCE;
+ if (safeInstance == null) {
+ safeInstance = SAFE_INSTANCE = XXHashFactory.safeInstance().hash32();
+ }
+ return safeInstance.hash(buf, off, len, seed);
+ }
+ }
+
+}
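
Review note: the ByteBuffer overload above dispatches three ways: direct buffers go straight to the JNI binding, array-backed heap buffers reuse the byte[] path, and anything else (e.g. read-only buffers) falls back to a lazily created pure-Java safe instance. The sketch below exercises all three buffer shapes through the Java factory, so it runs without the native library; every call should print the same value:

import java.nio.ByteBuffer;
import java.util.Random;

import com.fr.third.net.jpountz.xxhash.XXHash32;
import com.fr.third.net.jpountz.xxhash.XXHashFactory;

final class BufferKindsCheck {
    public static void main(String[] args) {
        byte[] data = new byte[256];
        new Random(7).nextBytes(data);
        ByteBuffer heap = ByteBuffer.wrap(data);                 // hasArray() == true
        ByteBuffer direct = ByteBuffer.allocateDirect(data.length);
        direct.put(data).flip();                                 // isDirect() == true
        ByteBuffer readOnly = heap.asReadOnlyBuffer();           // hasArray() == false: the non-array path

        XXHash32 hash32 = XXHashFactory.fastestJavaInstance().hash32();
        int seed = 42;
        System.out.printf("%08x %08x %08x%n",
                hash32.hash(heap, 0, data.length, seed),
                hash32.hash(direct, 0, data.length, seed),
                hash32.hash(readOnly, 0, data.length, seed));
    }
}
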
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHash32JavaSafe.java b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHash32JavaSafe.java
new file mode 100644
index 000000000..e08754103
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHash32JavaSafe.java
@@ -0,0 +1,154 @@
+// Auto-generated: DO NOT EDIT
+
+package com.fr.third.net.jpountz.xxhash;
+
+import static com.fr.third.net.jpountz.xxhash.XXHashConstants.*;
+import static java.lang.Integer.rotateLeft;
+
+import java.nio.ByteBuffer;
+
+import com.fr.third.net.jpountz.util.SafeUtils;
+import com.fr.third.net.jpountz.util.ByteBufferUtils;
+
+/**
+ * {@link XXHash32} implementation.
+ */
+final class XXHash32JavaSafe extends XXHash32 {
+
+ public static final XXHash32 INSTANCE = new XXHash32JavaSafe();
+
+ @Override
+ public int hash(byte[] buf, int off, int len, int seed) {
+
+ SafeUtils.checkRange(buf, off, len);
+
+ final int end = off + len;
+ int h32;
+
+ if (len >= 16) {
+ final int limit = end - 16;
+ int v1 = seed + PRIME1 + PRIME2;
+ int v2 = seed + PRIME2;
+ int v3 = seed + 0;
+ int v4 = seed - PRIME1;
+ do {
+ v1 += SafeUtils.readIntLE(buf, off) * PRIME2;
+ v1 = rotateLeft(v1, 13);
+ v1 *= PRIME1;
+ off += 4;
+
+ v2 += SafeUtils.readIntLE(buf, off) * PRIME2;
+ v2 = rotateLeft(v2, 13);
+ v2 *= PRIME1;
+ off += 4;
+
+ v3 += SafeUtils.readIntLE(buf, off) * PRIME2;
+ v3 = rotateLeft(v3, 13);
+ v3 *= PRIME1;
+ off += 4;
+
+ v4 += SafeUtils.readIntLE(buf, off) * PRIME2;
+ v4 = rotateLeft(v4, 13);
+ v4 *= PRIME1;
+ off += 4;
+ } while (off <= limit);
+
+ h32 = rotateLeft(v1, 1) + rotateLeft(v2, 7) + rotateLeft(v3, 12) + rotateLeft(v4, 18);
+ } else {
+ h32 = seed + PRIME5;
+ }
+
+ h32 += len;
+
+ while (off <= end - 4) {
+ h32 += SafeUtils.readIntLE(buf, off) * PRIME3;
+ h32 = rotateLeft(h32, 17) * PRIME4;
+ off += 4;
+ }
+
+ while (off < end) {
+ h32 += (SafeUtils.readByte(buf, off) & 0xFF) * PRIME5;
+ h32 = rotateLeft(h32, 11) * PRIME1;
+ ++off;
+ }
+
+ h32 ^= h32 >>> 15;
+ h32 *= PRIME2;
+ h32 ^= h32 >>> 13;
+ h32 *= PRIME3;
+ h32 ^= h32 >>> 16;
+
+ return h32;
+ }
+
+ @Override
+ public int hash(ByteBuffer buf, int off, int len, int seed) {
+
+ if (buf.hasArray()) {
+ return hash(buf.array(), off + buf.arrayOffset(), len, seed);
+ }
+ ByteBufferUtils.checkRange(buf, off, len);
+ buf = ByteBufferUtils.inLittleEndianOrder(buf);
+
+ final int end = off + len;
+ int h32;
+
+ if (len >= 16) {
+ final int limit = end - 16;
+ int v1 = seed + PRIME1 + PRIME2;
+ int v2 = seed + PRIME2;
+ int v3 = seed + 0;
+ int v4 = seed - PRIME1;
+ do {
+ v1 += ByteBufferUtils.readIntLE(buf, off) * PRIME2;
+ v1 = rotateLeft(v1, 13);
+ v1 *= PRIME1;
+ off += 4;
+
+ v2 += ByteBufferUtils.readIntLE(buf, off) * PRIME2;
+ v2 = rotateLeft(v2, 13);
+ v2 *= PRIME1;
+ off += 4;
+
+ v3 += ByteBufferUtils.readIntLE(buf, off) * PRIME2;
+ v3 = rotateLeft(v3, 13);
+ v3 *= PRIME1;
+ off += 4;
+
+ v4 += ByteBufferUtils.readIntLE(buf, off) * PRIME2;
+ v4 = rotateLeft(v4, 13);
+ v4 *= PRIME1;
+ off += 4;
+ } while (off <= limit);
+
+ h32 = rotateLeft(v1, 1) + rotateLeft(v2, 7) + rotateLeft(v3, 12) + rotateLeft(v4, 18);
+ } else {
+ h32 = seed + PRIME5;
+ }
+
+ h32 += len;
+
+ while (off <= end - 4) {
+ h32 += ByteBufferUtils.readIntLE(buf, off) * PRIME3;
+ h32 = rotateLeft(h32, 17) * PRIME4;
+ off += 4;
+ }
+
+ while (off < end) {
+ h32 += (ByteBufferUtils.readByte(buf, off) & 0xFF) * PRIME5;
+ h32 = rotateLeft(h32, 11) * PRIME1;
+ ++off;
+ }
+
+ h32 ^= h32 >>> 15;
+ h32 *= PRIME2;
+ h32 ^= h32 >>> 13;
+ h32 *= PRIME3;
+ h32 ^= h32 >>> 16;
+
+ return h32;
+ }
+
+
+}
+
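
Review note: the "safe" class above and the Unsafe-backed variant below are generated from the same template and differ only in how they read memory, so they must return identical hashes; the same holds for the 64-bit pair further down. A quick cross-check sketch, assuming both Java implementations are available:

import java.util.Random;

import com.fr.third.net.jpountz.xxhash.XXHash32;
import com.fr.third.net.jpountz.xxhash.XXHashFactory;

final class XXHash32VariantsCheck {
    public static void main(String[] args) {
        byte[] data = new byte[1027]; // odd length exercises the 4-byte and 1-byte tail loops
        new Random(1).nextBytes(data);
        XXHash32 safe = XXHashFactory.safeInstance().hash32();
        XXHash32 unsafe = XXHashFactory.unsafeInstance().hash32();
        int a = safe.hash(data, 0, data.length, 0);
        int b = unsafe.hash(data, 0, data.length, 0);
        if (a != b) {
            throw new AssertionError("JavaSafe and JavaUnsafe disagree: " + a + " vs " + b);
        }
        System.out.printf("xxh32 = %08x%n", a);
    }
}
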
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHash32JavaUnsafe.java b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHash32JavaUnsafe.java
new file mode 100644
index 000000000..b5473e96d
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHash32JavaUnsafe.java
@@ -0,0 +1,154 @@
+// Auto-generated: DO NOT EDIT
+
+package com.fr.third.net.jpountz.xxhash;
+
+import static com.fr.third.net.jpountz.xxhash.XXHashConstants.*;
+import static java.lang.Integer.rotateLeft;
+
+import java.nio.ByteBuffer;
+
+import com.fr.third.net.jpountz.util.UnsafeUtils;
+import com.fr.third.net.jpountz.util.ByteBufferUtils;
+
+/**
+ * {@link XXHash32} implementation.
+ */
+final class XXHash32JavaUnsafe extends XXHash32 {
+
+ public static final XXHash32 INSTANCE = new XXHash32JavaUnsafe();
+
+ @Override
+ public int hash(byte[] buf, int off, int len, int seed) {
+
+ UnsafeUtils.checkRange(buf, off, len);
+
+ final int end = off + len;
+ int h32;
+
+ if (len >= 16) {
+ final int limit = end - 16;
+ int v1 = seed + PRIME1 + PRIME2;
+ int v2 = seed + PRIME2;
+ int v3 = seed + 0;
+ int v4 = seed - PRIME1;
+ do {
+ v1 += UnsafeUtils.readIntLE(buf, off) * PRIME2;
+ v1 = rotateLeft(v1, 13);
+ v1 *= PRIME1;
+ off += 4;
+
+ v2 += UnsafeUtils.readIntLE(buf, off) * PRIME2;
+ v2 = rotateLeft(v2, 13);
+ v2 *= PRIME1;
+ off += 4;
+
+ v3 += UnsafeUtils.readIntLE(buf, off) * PRIME2;
+ v3 = rotateLeft(v3, 13);
+ v3 *= PRIME1;
+ off += 4;
+
+ v4 += UnsafeUtils.readIntLE(buf, off) * PRIME2;
+ v4 = rotateLeft(v4, 13);
+ v4 *= PRIME1;
+ off += 4;
+ } while (off <= limit);
+
+ h32 = rotateLeft(v1, 1) + rotateLeft(v2, 7) + rotateLeft(v3, 12) + rotateLeft(v4, 18);
+ } else {
+ h32 = seed + PRIME5;
+ }
+
+ h32 += len;
+
+ while (off <= end - 4) {
+ h32 += UnsafeUtils.readIntLE(buf, off) * PRIME3;
+ h32 = rotateLeft(h32, 17) * PRIME4;
+ off += 4;
+ }
+
+ while (off < end) {
+ h32 += (UnsafeUtils.readByte(buf, off) & 0xFF) * PRIME5;
+ h32 = rotateLeft(h32, 11) * PRIME1;
+ ++off;
+ }
+
+ h32 ^= h32 >>> 15;
+ h32 *= PRIME2;
+ h32 ^= h32 >>> 13;
+ h32 *= PRIME3;
+ h32 ^= h32 >>> 16;
+
+ return h32;
+ }
+
+ @Override
+ public int hash(ByteBuffer buf, int off, int len, int seed) {
+
+ if (buf.hasArray()) {
+ return hash(buf.array(), off + buf.arrayOffset(), len, seed);
+ }
+ ByteBufferUtils.checkRange(buf, off, len);
+ buf = ByteBufferUtils.inLittleEndianOrder(buf);
+
+ final int end = off + len;
+ int h32;
+
+ if (len >= 16) {
+ final int limit = end - 16;
+ int v1 = seed + PRIME1 + PRIME2;
+ int v2 = seed + PRIME2;
+ int v3 = seed + 0;
+ int v4 = seed - PRIME1;
+ do {
+ v1 += ByteBufferUtils.readIntLE(buf, off) * PRIME2;
+ v1 = rotateLeft(v1, 13);
+ v1 *= PRIME1;
+ off += 4;
+
+ v2 += ByteBufferUtils.readIntLE(buf, off) * PRIME2;
+ v2 = rotateLeft(v2, 13);
+ v2 *= PRIME1;
+ off += 4;
+
+ v3 += ByteBufferUtils.readIntLE(buf, off) * PRIME2;
+ v3 = rotateLeft(v3, 13);
+ v3 *= PRIME1;
+ off += 4;
+
+ v4 += ByteBufferUtils.readIntLE(buf, off) * PRIME2;
+ v4 = rotateLeft(v4, 13);
+ v4 *= PRIME1;
+ off += 4;
+ } while (off <= limit);
+
+ h32 = rotateLeft(v1, 1) + rotateLeft(v2, 7) + rotateLeft(v3, 12) + rotateLeft(v4, 18);
+ } else {
+ h32 = seed + PRIME5;
+ }
+
+ h32 += len;
+
+ while (off <= end - 4) {
+ h32 += ByteBufferUtils.readIntLE(buf, off) * PRIME3;
+ h32 = rotateLeft(h32, 17) * PRIME4;
+ off += 4;
+ }
+
+ while (off < end) {
+ h32 += (ByteBufferUtils.readByte(buf, off) & 0xFF) * PRIME5;
+ h32 = rotateLeft(h32, 11) * PRIME1;
+ ++off;
+ }
+
+ h32 ^= h32 >>> 15;
+ h32 *= PRIME2;
+ h32 ^= h32 >>> 13;
+ h32 *= PRIME3;
+ h32 ^= h32 >>> 16;
+
+ return h32;
+ }
+
+
+}
+
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHash64.java b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHash64.java
new file mode 100644
index 000000000..2861e3009
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHash64.java
@@ -0,0 +1,71 @@
+package com.fr.third.net.jpountz.xxhash;
+
+import java.nio.ByteBuffer;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * A 64-bits hash.
+ */
+public abstract class XXHash64 {
+
+ /**
+ * Computes the 64-bits hash of <code>buf[off:off+len]</code> using seed
+ * <code>seed</code>.
+ *
+ * @param buf the input data
+ * @param off the start offset in buf
+ * @param len the number of bytes to hash
+ * @param seed the seed to use
+ * @return the hash value
+ */
+ public abstract long hash(byte[] buf, int off, int len, long seed);
+
+ /**
+ * Computes the hash of the given slice of the {@link ByteBuffer}.
+ * {@link ByteBuffer#position() position} and {@link ByteBuffer#limit() limit}
+ * are not modified.
+ *
+ * @param buf the input data
+ * @param off the start offset in buf
+ * @param len the number of bytes to hash
+ * @param seed the seed to use
+ * @return the hash value
+ */
+ public abstract long hash(ByteBuffer buf, int off, int len, long seed);
+
+ /**
+ * Computes the hash of the given {@link ByteBuffer}. The
+ * {@link ByteBuffer#position() position} is moved in order to reflect bytes
+ * which have been read.
+ *
+ * @param buf the input data
+ * @param seed the seed to use
+ * @return the hash value
+ */
+ public final long hash(ByteBuffer buf, long seed) {
+ final long hash = hash(buf, buf.position(), buf.remaining(), seed);
+ buf.position(buf.limit());
+ return hash;
+ }
+
+ @Override
+ public String toString() {
+ return getClass().getSimpleName();
+ }
+
+}
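
Review note: the convenience overload hash(ByteBuffer, seed) above hashes the remaining bytes and then advances the position to the limit, unlike the (buf, off, len, seed) overload, which leaves position and limit untouched. A minimal sketch, assuming the xxhash classes from this patch are on the classpath:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import com.fr.third.net.jpountz.xxhash.XXHash64;
import com.fr.third.net.jpountz.xxhash.XXHashFactory;

final class Hash64Example {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.wrap("hello xxhash".getBytes(StandardCharsets.UTF_8));
        XXHash64 hash64 = XXHashFactory.fastestJavaInstance().hash64();
        long h = hash64.hash(buf, 0L); // consumes the buffer like a stream read
        System.out.printf("xxh64 = %016x, remaining after hashing = %d%n", h, buf.remaining());
    }
}
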
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHash64JNI.java b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHash64JNI.java
new file mode 100644
index 000000000..0a44018ae
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHash64JNI.java
@@ -0,0 +1,52 @@
+package com.fr.third.net.jpountz.xxhash;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import com.fr.third.net.jpountz.util.ByteBufferUtils;
+import com.fr.third.net.jpountz.util.SafeUtils;
+
+import static com.fr.third.net.jpountz.util.ByteBufferUtils.checkRange;
+import static com.fr.third.net.jpountz.util.SafeUtils.checkRange;
+
+import java.nio.ByteBuffer;
+
+final class XXHash64JNI extends XXHash64 {
+
+ public static final XXHash64 INSTANCE = new XXHash64JNI();
+ private static XXHash64 SAFE_INSTANCE;
+
+ @Override
+ public long hash(byte[] buf, int off, int len, long seed) {
+ SafeUtils.checkRange(buf, off, len);
+ return XXHashJNI.XXH64(buf, off, len, seed);
+ }
+
+ @Override
+ public long hash(ByteBuffer buf, int off, int len, long seed) {
+ if (buf.isDirect()) {
+ ByteBufferUtils.checkRange(buf, off, len);
+ return XXHashJNI.XXH64BB(buf, off, len, seed);
+ } else if (buf.hasArray()) {
+ return hash(buf.array(), off + buf.arrayOffset(), len, seed);
+ } else {
+ XXHash64 safeInstance = SAFE_INSTANCE;
+ if (safeInstance == null) {
+ safeInstance = SAFE_INSTANCE = XXHashFactory.safeInstance().hash64();
+ }
+ return safeInstance.hash(buf, off, len, seed);
+ }
+ }
+
+}
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHash64JavaSafe.java b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHash64JavaSafe.java
new file mode 100644
index 000000000..e91aec1d4
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHash64JavaSafe.java
@@ -0,0 +1,192 @@
+// Auto-generated: DO NOT EDIT
+
+package com.fr.third.net.jpountz.xxhash;
+
+import static com.fr.third.net.jpountz.xxhash.XXHashConstants.*;
+import static java.lang.Long.rotateLeft;
+
+import java.nio.ByteBuffer;
+
+import com.fr.third.net.jpountz.util.SafeUtils;
+import com.fr.third.net.jpountz.util.ByteBufferUtils;
+
+/**
+ * {@link XXHash64} implementation.
+ */
+final class XXHash64JavaSafe extends XXHash64 {
+
+ public static final XXHash64 INSTANCE = new XXHash64JavaSafe();
+
+ @Override
+ public long hash(byte[] buf, int off, int len, long seed) {
+
+ SafeUtils.checkRange(buf, off, len);
+
+ final int end = off + len;
+ long h64;
+
+ if (len >= 32) {
+ final int limit = end - 32;
+ long v1 = seed + PRIME64_1 + PRIME64_2;
+ long v2 = seed + PRIME64_2;
+ long v3 = seed + 0;
+ long v4 = seed - PRIME64_1;
+ do {
+ v1 += SafeUtils.readLongLE(buf, off) * PRIME64_2;
+ v1 = rotateLeft(v1, 31);
+ v1 *= PRIME64_1;
+ off += 8;
+
+ v2 += SafeUtils.readLongLE(buf, off) * PRIME64_2;
+ v2 = rotateLeft(v2, 31);
+ v2 *= PRIME64_1;
+ off += 8;
+
+ v3 += SafeUtils.readLongLE(buf, off) * PRIME64_2;
+ v3 = rotateLeft(v3, 31);
+ v3 *= PRIME64_1;
+ off += 8;
+
+ v4 += SafeUtils.readLongLE(buf, off) * PRIME64_2;
+ v4 = rotateLeft(v4, 31);
+ v4 *= PRIME64_1;
+ off += 8;
+ } while (off <= limit);
+
+ h64 = rotateLeft(v1, 1) + rotateLeft(v2, 7) + rotateLeft(v3, 12) + rotateLeft(v4, 18);
+
+ v1 *= PRIME64_2; v1 = rotateLeft(v1, 31); v1 *= PRIME64_1; h64 ^= v1;
+ h64 = h64 * PRIME64_1 + PRIME64_4;
+
+ v2 *= PRIME64_2; v2 = rotateLeft(v2, 31); v2 *= PRIME64_1; h64 ^= v2;
+ h64 = h64 * PRIME64_1 + PRIME64_4;
+
+ v3 *= PRIME64_2; v3 = rotateLeft(v3, 31); v3 *= PRIME64_1; h64 ^= v3;
+ h64 = h64 * PRIME64_1 + PRIME64_4;
+
+ v4 *= PRIME64_2; v4 = rotateLeft(v4, 31); v4 *= PRIME64_1; h64 ^= v4;
+ h64 = h64 * PRIME64_1 + PRIME64_4;
+ } else {
+ h64 = seed + PRIME64_5;
+ }
+
+ h64 += len;
+
+ while (off <= end - 8) {
+ long k1 = SafeUtils.readLongLE(buf, off);
+ k1 *= PRIME64_2; k1 = rotateLeft(k1, 31); k1 *= PRIME64_1; h64 ^= k1;
+ h64 = rotateLeft(h64, 27) * PRIME64_1 + PRIME64_4;
+ off += 8;
+ }
+
+ if (off <= end - 4) {
+ h64 ^= (SafeUtils.readIntLE(buf, off) & 0xFFFFFFFFL) * PRIME64_1;
+ h64 = rotateLeft(h64, 23) * PRIME64_2 + PRIME64_3;
+ off += 4;
+ }
+
+ while (off < end) {
+ h64 ^= (SafeUtils.readByte(buf, off) & 0xFF) * PRIME64_5;
+ h64 = rotateLeft(h64, 11) * PRIME64_1;
+ ++off;
+ }
+
+ h64 ^= h64 >>> 33;
+ h64 *= PRIME64_2;
+ h64 ^= h64 >>> 29;
+ h64 *= PRIME64_3;
+ h64 ^= h64 >>> 32;
+
+ return h64;
+ }
+
+ @Override
+ public long hash(ByteBuffer buf, int off, int len, long seed) {
+
+ if (buf.hasArray()) {
+ return hash(buf.array(), off + buf.arrayOffset(), len, seed);
+ }
+ ByteBufferUtils.checkRange(buf, off, len);
+ buf = ByteBufferUtils.inLittleEndianOrder(buf);
+
+ final int end = off + len;
+ long h64;
+
+ if (len >= 32) {
+ final int limit = end - 32;
+ long v1 = seed + PRIME64_1 + PRIME64_2;
+ long v2 = seed + PRIME64_2;
+ long v3 = seed + 0;
+ long v4 = seed - PRIME64_1;
+ do {
+ v1 += ByteBufferUtils.readLongLE(buf, off) * PRIME64_2;
+ v1 = rotateLeft(v1, 31);
+ v1 *= PRIME64_1;
+ off += 8;
+
+ v2 += ByteBufferUtils.readLongLE(buf, off) * PRIME64_2;
+ v2 = rotateLeft(v2, 31);
+ v2 *= PRIME64_1;
+ off += 8;
+
+ v3 += ByteBufferUtils.readLongLE(buf, off) * PRIME64_2;
+ v3 = rotateLeft(v3, 31);
+ v3 *= PRIME64_1;
+ off += 8;
+
+ v4 += ByteBufferUtils.readLongLE(buf, off) * PRIME64_2;
+ v4 = rotateLeft(v4, 31);
+ v4 *= PRIME64_1;
+ off += 8;
+ } while (off <= limit);
+
+ h64 = rotateLeft(v1, 1) + rotateLeft(v2, 7) + rotateLeft(v3, 12) + rotateLeft(v4, 18);
+
+ v1 *= PRIME64_2; v1 = rotateLeft(v1, 31); v1 *= PRIME64_1; h64 ^= v1;
+ h64 = h64 * PRIME64_1 + PRIME64_4;
+
+ v2 *= PRIME64_2; v2 = rotateLeft(v2, 31); v2 *= PRIME64_1; h64 ^= v2;
+ h64 = h64 * PRIME64_1 + PRIME64_4;
+
+ v3 *= PRIME64_2; v3 = rotateLeft(v3, 31); v3 *= PRIME64_1; h64 ^= v3;
+ h64 = h64 * PRIME64_1 + PRIME64_4;
+
+ v4 *= PRIME64_2; v4 = rotateLeft(v4, 31); v4 *= PRIME64_1; h64 ^= v4;
+ h64 = h64 * PRIME64_1 + PRIME64_4;
+ } else {
+ h64 = seed + PRIME64_5;
+ }
+
+ h64 += len;
+
+ while (off <= end - 8) {
+ long k1 = ByteBufferUtils.readLongLE(buf, off);
+ k1 *= PRIME64_2; k1 = rotateLeft(k1, 31); k1 *= PRIME64_1; h64 ^= k1;
+ h64 = rotateLeft(h64, 27) * PRIME64_1 + PRIME64_4;
+ off += 8;
+ }
+
+ if (off <= end - 4) {
+ h64 ^= (ByteBufferUtils.readIntLE(buf, off) & 0xFFFFFFFFL) * PRIME64_1;
+ h64 = rotateLeft(h64, 23) * PRIME64_2 + PRIME64_3;
+ off += 4;
+ }
+
+ while (off < end) {
+ h64 ^= (ByteBufferUtils.readByte(buf, off) & 0xFF) * PRIME64_5;
+ h64 = rotateLeft(h64, 11) * PRIME64_1;
+ ++off;
+ }
+
+ h64 ^= h64 >>> 33;
+ h64 *= PRIME64_2;
+ h64 ^= h64 >>> 29;
+ h64 *= PRIME64_3;
+ h64 ^= h64 >>> 32;
+
+ return h64;
+ }
+
+
+}
+
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHash64JavaUnsafe.java b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHash64JavaUnsafe.java
new file mode 100644
index 000000000..faac26bc9
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHash64JavaUnsafe.java
@@ -0,0 +1,192 @@
+// Auto-generated: DO NOT EDIT
+
+package com.fr.third.net.jpountz.xxhash;
+
+import static com.fr.third.net.jpountz.xxhash.XXHashConstants.*;
+import static java.lang.Long.rotateLeft;
+
+import java.nio.ByteBuffer;
+
+import com.fr.third.net.jpountz.util.UnsafeUtils;
+import com.fr.third.net.jpountz.util.ByteBufferUtils;
+
+/**
+ * {@link XXHash64} implementation.
+ */
+final class XXHash64JavaUnsafe extends XXHash64 {
+
+ public static final XXHash64 INSTANCE = new XXHash64JavaUnsafe();
+
+ @Override
+ public long hash(byte[] buf, int off, int len, long seed) {
+
+ UnsafeUtils.checkRange(buf, off, len);
+
+ final int end = off + len;
+ long h64;
+
+ if (len >= 32) {
+ final int limit = end - 32;
+ long v1 = seed + PRIME64_1 + PRIME64_2;
+ long v2 = seed + PRIME64_2;
+ long v3 = seed + 0;
+ long v4 = seed - PRIME64_1;
+ do {
+ v1 += UnsafeUtils.readLongLE(buf, off) * PRIME64_2;
+ v1 = rotateLeft(v1, 31);
+ v1 *= PRIME64_1;
+ off += 8;
+
+ v2 += UnsafeUtils.readLongLE(buf, off) * PRIME64_2;
+ v2 = rotateLeft(v2, 31);
+ v2 *= PRIME64_1;
+ off += 8;
+
+ v3 += UnsafeUtils.readLongLE(buf, off) * PRIME64_2;
+ v3 = rotateLeft(v3, 31);
+ v3 *= PRIME64_1;
+ off += 8;
+
+ v4 += UnsafeUtils.readLongLE(buf, off) * PRIME64_2;
+ v4 = rotateLeft(v4, 31);
+ v4 *= PRIME64_1;
+ off += 8;
+ } while (off <= limit);
+
+ h64 = rotateLeft(v1, 1) + rotateLeft(v2, 7) + rotateLeft(v3, 12) + rotateLeft(v4, 18);
+
+ v1 *= PRIME64_2; v1 = rotateLeft(v1, 31); v1 *= PRIME64_1; h64 ^= v1;
+ h64 = h64 * PRIME64_1 + PRIME64_4;
+
+ v2 *= PRIME64_2; v2 = rotateLeft(v2, 31); v2 *= PRIME64_1; h64 ^= v2;
+ h64 = h64 * PRIME64_1 + PRIME64_4;
+
+ v3 *= PRIME64_2; v3 = rotateLeft(v3, 31); v3 *= PRIME64_1; h64 ^= v3;
+ h64 = h64 * PRIME64_1 + PRIME64_4;
+
+ v4 *= PRIME64_2; v4 = rotateLeft(v4, 31); v4 *= PRIME64_1; h64 ^= v4;
+ h64 = h64 * PRIME64_1 + PRIME64_4;
+ } else {
+ h64 = seed + PRIME64_5;
+ }
+
+ h64 += len;
+
+ while (off <= end - 8) {
+ long k1 = UnsafeUtils.readLongLE(buf, off);
+ k1 *= PRIME64_2; k1 = rotateLeft(k1, 31); k1 *= PRIME64_1; h64 ^= k1;
+ h64 = rotateLeft(h64, 27) * PRIME64_1 + PRIME64_4;
+ off += 8;
+ }
+
+ if (off <= end - 4) {
+ h64 ^= (UnsafeUtils.readIntLE(buf, off) & 0xFFFFFFFFL) * PRIME64_1;
+ h64 = rotateLeft(h64, 23) * PRIME64_2 + PRIME64_3;
+ off += 4;
+ }
+
+ while (off < end) {
+ h64 ^= (UnsafeUtils.readByte(buf, off) & 0xFF) * PRIME64_5;
+ h64 = rotateLeft(h64, 11) * PRIME64_1;
+ ++off;
+ }
+
+ h64 ^= h64 >>> 33;
+ h64 *= PRIME64_2;
+ h64 ^= h64 >>> 29;
+ h64 *= PRIME64_3;
+ h64 ^= h64 >>> 32;
+
+ return h64;
+ }
+
+ @Override
+ public long hash(ByteBuffer buf, int off, int len, long seed) {
+
+ if (buf.hasArray()) {
+ return hash(buf.array(), off + buf.arrayOffset(), len, seed);
+ }
+ ByteBufferUtils.checkRange(buf, off, len);
+ buf = ByteBufferUtils.inLittleEndianOrder(buf);
+
+ final int end = off + len;
+ long h64;
+
+ if (len >= 32) {
+ final int limit = end - 32;
+ long v1 = seed + PRIME64_1 + PRIME64_2;
+ long v2 = seed + PRIME64_2;
+ long v3 = seed + 0;
+ long v4 = seed - PRIME64_1;
+ do {
+ v1 += ByteBufferUtils.readLongLE(buf, off) * PRIME64_2;
+ v1 = rotateLeft(v1, 31);
+ v1 *= PRIME64_1;
+ off += 8;
+
+ v2 += ByteBufferUtils.readLongLE(buf, off) * PRIME64_2;
+ v2 = rotateLeft(v2, 31);
+ v2 *= PRIME64_1;
+ off += 8;
+
+ v3 += ByteBufferUtils.readLongLE(buf, off) * PRIME64_2;
+ v3 = rotateLeft(v3, 31);
+ v3 *= PRIME64_1;
+ off += 8;
+
+ v4 += ByteBufferUtils.readLongLE(buf, off) * PRIME64_2;
+ v4 = rotateLeft(v4, 31);
+ v4 *= PRIME64_1;
+ off += 8;
+ } while (off <= limit);
+
+ h64 = rotateLeft(v1, 1) + rotateLeft(v2, 7) + rotateLeft(v3, 12) + rotateLeft(v4, 18);
+
+ v1 *= PRIME64_2; v1 = rotateLeft(v1, 31); v1 *= PRIME64_1; h64 ^= v1;
+ h64 = h64 * PRIME64_1 + PRIME64_4;
+
+ v2 *= PRIME64_2; v2 = rotateLeft(v2, 31); v2 *= PRIME64_1; h64 ^= v2;
+ h64 = h64 * PRIME64_1 + PRIME64_4;
+
+ v3 *= PRIME64_2; v3 = rotateLeft(v3, 31); v3 *= PRIME64_1; h64 ^= v3;
+ h64 = h64 * PRIME64_1 + PRIME64_4;
+
+ v4 *= PRIME64_2; v4 = rotateLeft(v4, 31); v4 *= PRIME64_1; h64 ^= v4;
+ h64 = h64 * PRIME64_1 + PRIME64_4;
+ } else {
+ h64 = seed + PRIME64_5;
+ }
+
+ h64 += len;
+
+ while (off <= end - 8) {
+ long k1 = ByteBufferUtils.readLongLE(buf, off);
+ k1 *= PRIME64_2; k1 = rotateLeft(k1, 31); k1 *= PRIME64_1; h64 ^= k1;
+ h64 = rotateLeft(h64, 27) * PRIME64_1 + PRIME64_4;
+ off += 8;
+ }
+
+ if (off <= end - 4) {
+ h64 ^= (ByteBufferUtils.readIntLE(buf, off) & 0xFFFFFFFFL) * PRIME64_1;
+ h64 = rotateLeft(h64, 23) * PRIME64_2 + PRIME64_3;
+ off += 4;
+ }
+
+ while (off < end) {
+ h64 ^= (ByteBufferUtils.readByte(buf, off) & 0xFF) * PRIME64_5;
+ h64 = rotateLeft(h64, 11) * PRIME64_1;
+ ++off;
+ }
+
+ h64 ^= h64 >>> 33;
+ h64 *= PRIME64_2;
+ h64 ^= h64 >>> 29;
+ h64 *= PRIME64_3;
+ h64 ^= h64 >>> 32;
+
+ return h64;
+ }
+
+
+}
+
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHashConstants.java b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHashConstants.java
new file mode 100644
index 000000000..79aa59b8a
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHashConstants.java
@@ -0,0 +1,31 @@
+package com.fr.third.net.jpountz.xxhash;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+enum XXHashConstants {
+ ;
+
+ static final int PRIME1 = -1640531535;
+ static final int PRIME2 = -2048144777;
+ static final int PRIME3 = -1028477379;
+ static final int PRIME4 = 668265263;
+ static final int PRIME5 = 374761393;
+
+ static final long PRIME64_1 = -7046029288634856825L; //11400714785074694791
+ static final long PRIME64_2 = -4417276706812531889L; //14029467366897019727
+ static final long PRIME64_3 = 1609587929392839161L;
+ static final long PRIME64_4 = -8796714831421723037L; //9650029242287828579
+ static final long PRIME64_5 = 2870177450012600261L;
+}
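
Review note: the 64-bit primes exceed Long.MAX_VALUE, so they are written as their two's-complement (negative) literals; the trailing comments give the unsigned values. A one-off check that the literals match the commented unsigned primes:

final class PrimeLiteralsCheck {
    public static void main(String[] args) {
        // Each line should print the unsigned value from the corresponding comment above.
        System.out.println(Long.toUnsignedString(-7046029288634856825L)); // 11400714785074694791
        System.out.println(Long.toUnsignedString(-4417276706812531889L)); // 14029467366897019727
        System.out.println(Long.toUnsignedString(-8796714831421723037L)); // 9650029242287828579
    }
}
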
diff --git a/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHashFactory.java b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHashFactory.java
new file mode 100644
index 000000000..e7be4b328
--- /dev/null
+++ b/fine-lz4/src/com/fr/third/net/jpountz/xxhash/XXHashFactory.java
@@ -0,0 +1,257 @@
+package com.fr.third.net.jpountz.xxhash;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Field;
+import java.util.Random;
+
+import com.fr.third.net.jpountz.util.Native;
+import com.fr.third.net.jpountz.util.Utils;
+
+/**
+ * Entry point to get {@link XXHash32} and {@link StreamingXXHash32} instances.
+ */
+public final class XXHashFactory {
+
+ private static XXHashFactory NATIVE_INSTANCE;
+ private static XXHashFactory JAVA_UNSAFE_INSTANCE;
+ private static XXHashFactory JAVA_SAFE_INSTANCE;
+
+ /**
+ * Returns a {@link XXHashFactory} that returns {@link XXHash32} instances that
+ * are native bindings to the original C API.
+ *
+ * @return a {@link XXHashFactory} that returns {@link XXHash32} instances that
+ * are native bindings to the original C API.
+ */
+ public static synchronized XXHashFactory nativeInstance() {
+ if (NATIVE_INSTANCE == null) {
+ NATIVE_INSTANCE = instance("JNI");
+ }
+ return NATIVE_INSTANCE;
+ }
+
+ /**
+ * Returns a {@link XXHashFactory} that returns {@link XXHash32} instances that
+ * are written with Java's official API.
+ *
+ * @return a {@link XXHashFactory} that returns {@link XXHash32} instances that
+ * are written with Java's official API.
+ */
+ public static synchronized XXHashFactory safeInstance() {
+ if (JAVA_SAFE_INSTANCE == null) {
+ JAVA_SAFE_INSTANCE = instance("JavaSafe");
+ }
+ return JAVA_SAFE_INSTANCE;
+ }
+
+ /**
+ * Returns a {@link XXHashFactory} that returns {@link XXHash32} instances that
+ * may use {@link sun.misc.Unsafe} to speed up hashing.
+ *
+ * @return a {@link XXHashFactory} that returns {@link XXHash32} instances that
+ * may use {@link sun.misc.Unsafe} to speed up hashing.
+ */
+ public static synchronized XXHashFactory unsafeInstance() {
+ if (JAVA_UNSAFE_INSTANCE == null) {
+ JAVA_UNSAFE_INSTANCE = instance("JavaUnsafe");
+ }
+ return JAVA_UNSAFE_INSTANCE;
+ }
+
+ /**
+ * Returns the fastest available {@link XXHashFactory} instance which does not
+ * rely on JNI bindings. It first tries to load the
+ * {@link #unsafeInstance() unsafe instance}, and then the
+ * {@link #safeInstance() safe Java instance} if the JVM doesn't have a
+ * working {@link sun.misc.Unsafe}.
+ *
+ * @return the fastest available {@link XXHashFactory} instance which does not
+ * rely on JNI bindings.
+ */
+ public static XXHashFactory fastestJavaInstance() {
+ if (Utils.isUnalignedAccessAllowed()) {
+ try {
+ return unsafeInstance();
+ } catch (Throwable t) {
+ return safeInstance();
+ }
+ } else {
+ return safeInstance();
+ }
+ }
+
+ /**
+ * Returns the fastest available {@link XXHashFactory} instance. If the class
+ * loader is the system class loader and if the
+ * {@link #nativeInstance() native instance} loads successfully, then the
+ * {@link #nativeInstance() native instance} is returned, otherwise the
+ * {@link #fastestJavaInstance() fastest Java instance} is returned.
+ *
+ XXHashFactory factory = XXHashFactory.fastestInstance();
+
+ byte[] data = "12345345234572".getBytes("UTF-8");
+
+ XXHash32 hash32 = factory.hash32();
+ int seed = 0x9747b28c; // used to initialize the hash value, use whatever
+ // value you want, but always the same
+ int hash = hash32.hash(data, 0, data.length, seed);
+
+
+
+ XXHashFactory factory = XXHashFactory.fastestInstance();
+
+ byte[] data = "12345345234572".getBytes("UTF-8");
+ ByteArrayInputStream in = new ByteArrayInputStream(data);
+
+ int seed = 0x9747b28c; // used to initialize the hash value, use whatever
+ // value you want, but always the same
+ StreamingXXHash32 hash32 = factory.newStreamingHash32(seed);
+ byte[] buf = new byte[8]; // for real-world usage, use a larger buffer, like 8192 bytes
+ for (;;) {
+ int read = in.read(buf);
+ if (read == -1) {
+ break;
+ }
+ hash32.update(buf, 0, read);
+ }
+ int hash = hash32.getValue();
+
+
+
+