diff --git a/build.gradle b/build.gradle index 0fe483ab..383b2e40 100644 --- a/build.gradle +++ b/build.gradle @@ -41,6 +41,5 @@ subprojects { maven {url "http://maven.sk89q.com/artifactory/repo/"} maven {url "http://nexus.theyeticave.net/content/repositories/pub_releases"} maven {url "http://repo.spongepowered.org/maven"} - } } \ No newline at end of file diff --git a/bukkit/build/resources/main/plugin.yml b/bukkit/build/resources/main/plugin.yml index 2cc92946..b11fce19 100644 --- a/bukkit/build/resources/main/plugin.yml +++ b/bukkit/build/resources/main/plugin.yml @@ -18,6 +18,9 @@ commands: stream: description: (FAWE) Stream a schematic into the world aliases: [/stream] + fawe: + description: (FAWE) Reload the plugin + aliases: [/fawe,/fawereload] wrg: description: (FAWE) Select your current WorldEdit Region. aliases: [/wrg,wer,/wer,worldeditregion,/worldeditregion,/region] @@ -30,3 +33,5 @@ permissions: default: false fawe.fixlighting: default: false + fawe.reload: + default: false diff --git a/bukkit/src/main/java/com/boydti/fawe/bukkit/BukkitCommand.java b/bukkit/src/main/java/com/boydti/fawe/bukkit/BukkitCommand.java index 64dd85da..8f07a10d 100644 --- a/bukkit/src/main/java/com/boydti/fawe/bukkit/BukkitCommand.java +++ b/bukkit/src/main/java/com/boydti/fawe/bukkit/BukkitCommand.java @@ -20,7 +20,7 @@ public class BukkitCommand implements CommandExecutor { @Override public boolean onCommand(final CommandSender sender, final Command cmd, final String label, final String[] args) { final FawePlayer plr = Fawe.imp().wrap(sender); - if (!sender.hasPermission(this.cmd.getPerm()) || sender.isOp()) { + if (!sender.hasPermission(this.cmd.getPerm()) && !sender.isOp()) { BBC.NO_PERM.send(plr, this.cmd.getPerm()); return true; } diff --git a/bukkit/src/main/resources/plugin.yml b/bukkit/src/main/resources/plugin.yml index 2cc92946..b11fce19 100644 --- a/bukkit/src/main/resources/plugin.yml +++ b/bukkit/src/main/resources/plugin.yml @@ -18,6 +18,9 @@ commands: stream: description: (FAWE) Stream a schematic into the world aliases: [/stream] + fawe: + description: (FAWE) Reload the plugin + aliases: [/fawe,/fawereload] wrg: description: (FAWE) Select your current WorldEdit Region. 
aliases: [/wrg,wer,/wer,worldeditregion,/worldeditregion,/region] @@ -30,3 +33,5 @@ permissions: default: false fawe.fixlighting: default: false + fawe.reload: + default: false diff --git a/core/build/resources/main/darwin/x86_64/liblz4-java.dylib b/core/build/resources/main/darwin/x86_64/liblz4-java.dylib new file mode 100644 index 00000000..9d5cc0e5 Binary files /dev/null and b/core/build/resources/main/darwin/x86_64/liblz4-java.dylib differ diff --git a/core/build/resources/main/linux/amd64/liblz4-java.so b/core/build/resources/main/linux/amd64/liblz4-java.so new file mode 100644 index 00000000..fa143b1e Binary files /dev/null and b/core/build/resources/main/linux/amd64/liblz4-java.so differ diff --git a/core/build/resources/main/linux/i386/liblz4-java.so b/core/build/resources/main/linux/i386/liblz4-java.so new file mode 100644 index 00000000..aa50fd15 Binary files /dev/null and b/core/build/resources/main/linux/i386/liblz4-java.so differ diff --git a/core/build/resources/main/win32/amd64/liblz4-java.so b/core/build/resources/main/win32/amd64/liblz4-java.so new file mode 100644 index 00000000..f8501c00 Binary files /dev/null and b/core/build/resources/main/win32/amd64/liblz4-java.so differ diff --git a/core/src/main/java/com/boydti/fawe/Fawe.java b/core/src/main/java/com/boydti/fawe/Fawe.java index e72fa648..ac2a4e4e 100644 --- a/core/src/main/java/com/boydti/fawe/Fawe.java +++ b/core/src/main/java/com/boydti/fawe/Fawe.java @@ -1,6 +1,7 @@ package com.boydti.fawe; import com.boydti.fawe.command.FixLighting; +import com.boydti.fawe.command.Reload; import com.boydti.fawe.command.Stream; import com.boydti.fawe.command.Wea; import com.boydti.fawe.command.WorldEditRegion; @@ -43,6 +44,7 @@ import javax.management.InstanceAlreadyExistsException; import javax.management.Notification; import javax.management.NotificationEmitter; import javax.management.NotificationListener; +import net.jpountz.util.Native; /**[ WorldEdit action] * | @@ -195,9 +197,10 @@ public class Fawe { this.IMP.setupCommand("fixlighting", new FixLighting()); this.IMP.setupCommand("stream", new Stream()); this.IMP.setupCommand("wrg", new WorldEditRegion()); + this.IMP.setupCommand("fawe", new Reload()); } - private void setupConfigs() { + public void setupConfigs() { // Setting up config.yml Settings.setup(new File(this.IMP.getDirectory(), "config.yml")); // Setting up message.yml @@ -233,6 +236,11 @@ public class Fawe { e.printStackTrace(); IMP.debug("Incompatible version of WorldEdit, please update the plugin or contact the Author!"); } + try { + Native.load(); + } catch (Throwable e) { + e.printStackTrace(); + } } private void setupMemoryListener() { diff --git a/core/src/main/java/com/boydti/fawe/command/Reload.java b/core/src/main/java/com/boydti/fawe/command/Reload.java new file mode 100644 index 00000000..97014131 --- /dev/null +++ b/core/src/main/java/com/boydti/fawe/command/Reload.java @@ -0,0 +1,19 @@ +package com.boydti.fawe.command; + +import com.boydti.fawe.Fawe; +import com.boydti.fawe.object.FaweCommand; +import com.boydti.fawe.object.FawePlayer; + +public class Reload extends FaweCommand { + + public Reload() { + super("fawe.reload"); + } + + @Override + public boolean execute(final FawePlayer player, final String... 
args) { + Fawe.get().setupConfigs(); + player.sendMessage("&d[FAWE] Reloaded configuration"); + return true; + } +} diff --git a/core/src/main/java/com/boydti/fawe/config/Settings.java b/core/src/main/java/com/boydti/fawe/config/Settings.java index cecf0f1d..0041ab76 100644 --- a/core/src/main/java/com/boydti/fawe/config/Settings.java +++ b/core/src/main/java/com/boydti/fawe/config/Settings.java @@ -25,7 +25,8 @@ public class Settings { public static long MEM_FREE = 95; public static boolean ENABLE_HARD_LIMIT = true; public static boolean STORE_HISTORY_ON_DISK = false; - public static boolean COMPRESS_HISTORY = false; + public static int COMPRESSION_LEVEL = 0; + public static int BUFFER_SIZE = 59049; public static boolean METRICS = true; public static void setup(final File file) { @@ -51,7 +52,7 @@ public class Settings { options.put("crash-mitigation", ENABLE_HARD_LIMIT); options.put("fix-all-lighting", FIX_ALL_LIGHTING); options.put("history.use-disk", STORE_HISTORY_ON_DISK); - options.put("history.compress", COMPRESS_HISTORY); + options.put("history.compress", false); options.put("metrics", METRICS); for (final Entry node : options.entrySet()) { @@ -70,7 +71,8 @@ public class Settings { WE_BLACKLIST = config.getStringList("command-blacklist"); ENABLE_HARD_LIMIT = config.getBoolean("crash-mitigation"); METRICS = config.getBoolean("metrics"); - COMPRESS_HISTORY = config.getBoolean("history.compress"); + COMPRESSION_LEVEL = config.getInt("history.compression-level", config.getBoolean("history.compress") ? 1 : 0); + BUFFER_SIZE = config.getInt("history.buffer-size", 59049); if (STORE_HISTORY_ON_DISK = config.getBoolean("history.use-disk")) { LocalSession.MAX_HISTORY_SIZE = Integer.MAX_VALUE; } diff --git a/core/src/main/java/com/boydti/fawe/object/changeset/DiskStorageHistory.java b/core/src/main/java/com/boydti/fawe/object/changeset/DiskStorageHistory.java index 7cdbddca..cae52100 100644 --- a/core/src/main/java/com/boydti/fawe/object/changeset/DiskStorageHistory.java +++ b/core/src/main/java/com/boydti/fawe/object/changeset/DiskStorageHistory.java @@ -31,6 +31,10 @@ import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; import java.util.zip.GZIPInputStream; import java.util.zip.GZIPOutputStream; +import net.jpountz.lz4.LZ4Compressor; +import net.jpountz.lz4.LZ4Factory; +import net.jpountz.lz4.LZ4InputStream; +import net.jpountz.lz4.LZ4OutputStream; /** * Store the change on disk @@ -232,7 +236,14 @@ public class DiskStorageHistory implements ChangeSet, FaweChangeSet { bdFile.getParentFile().mkdirs(); bdFile.createNewFile(); FileOutputStream stream = new FileOutputStream(bdFile); - osBD = Settings.COMPRESS_HISTORY ? new GZIPOutputStream(stream, true) : stream; + LZ4Factory factory = LZ4Factory.fastestInstance(); + LZ4Compressor compressor = factory.fastCompressor(); + osBD = new LZ4OutputStream(stream, Settings.BUFFER_SIZE, factory.fastCompressor()); + if (Settings.COMPRESSION_LEVEL > 0) { +// Deflater deflater = new Deflater(Math.min(9, Settings.COMPRESSION_LEVEL), true); +// osBD = new DeflaterOutputStream(osBD, deflater, true); + osBD = new LZ4OutputStream(osBD, Settings.BUFFER_SIZE, factory.highCompressor()); + } ox = x; oz = z; osBD.write((byte) (ox >> 24)); @@ -307,7 +318,14 @@ public class DiskStorageHistory implements ChangeSet, FaweChangeSet { final NBTInputStream nbtt = osNBTT != null ? 
new NBTInputStream(new GZIPInputStream(new FileInputStream(nbttFile))) : null; FileInputStream fis = new FileInputStream(bdFile); - final InputStream gis = Settings.COMPRESS_HISTORY ? new GZIPInputStream(fis) : fis; + LZ4Factory factory = LZ4Factory.fastestInstance(); + LZ4Compressor compressor = factory.fastCompressor(); + final InputStream gis; + if (Settings.COMPRESSION_LEVEL > 0) { + gis = new LZ4InputStream(new LZ4InputStream(fis)); + } else { + gis = new LZ4InputStream(fis); + } gis.skip(8); return new Iterator() { diff --git a/core/src/main/java/com/boydti/fawe/object/changeset/MemoryOptimizedHistory.java b/core/src/main/java/com/boydti/fawe/object/changeset/MemoryOptimizedHistory.java index db6676f0..ccde4d33 100644 --- a/core/src/main/java/com/boydti/fawe/object/changeset/MemoryOptimizedHistory.java +++ b/core/src/main/java/com/boydti/fawe/object/changeset/MemoryOptimizedHistory.java @@ -1,19 +1,6 @@ package com.boydti.fawe.object.changeset; -import com.sk89q.worldedit.Vector; -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.util.ArrayDeque; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.Map; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.zip.GZIPInputStream; -import java.util.zip.GZIPOutputStream; - +import com.boydti.fawe.Fawe; import com.boydti.fawe.FaweCache; import com.boydti.fawe.config.Settings; import com.boydti.fawe.util.MainUtil; @@ -23,10 +10,23 @@ import com.sk89q.jnbt.CompoundTag; import com.sk89q.jnbt.IntTag; import com.sk89q.jnbt.Tag; import com.sk89q.worldedit.BlockVector; +import com.sk89q.worldedit.Vector; import com.sk89q.worldedit.blocks.BaseBlock; import com.sk89q.worldedit.history.change.BlockChange; import com.sk89q.worldedit.history.change.Change; import com.sk89q.worldedit.history.changeset.ChangeSet; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.Map; +import net.jpountz.lz4.LZ4Factory; +import net.jpountz.lz4.LZ4InputStream; +import net.jpountz.lz4.LZ4OutputStream; /** * ChangeSet optimized for low memory usage @@ -40,23 +40,27 @@ public class MemoryOptimizedHistory implements ChangeSet, FaweChangeSet { private ArrayDeque toTags; private byte[] ids; + private Object lock; + private int decompressedLength; + private ByteArrayOutputStream idsStream; private OutputStream idsStreamZip; - + private ArrayDeque entities; - + int ox; int oz; - private final AtomicInteger size; + private int size; public MemoryOptimizedHistory() { - size = new AtomicInteger(); + } @Override public void add(int x, int y, int z, int combinedFrom, int combinedTo) { + size++; try { OutputStream stream = getBAOS(x, y, z); //x @@ -124,7 +128,6 @@ public class MemoryOptimizedHistory implements ChangeSet, FaweChangeSet { @Override public void add(Change arg) { - size.incrementAndGet(); if ((arg instanceof BlockChange)) { BlockChange change = (BlockChange) arg; BlockVector loc = change.getPosition(); @@ -143,8 +146,14 @@ public class MemoryOptimizedHistory implements ChangeSet, FaweChangeSet { if (idsStreamZip != null) { return idsStreamZip; } - idsStream = new ByteArrayOutputStream(9216); - idsStreamZip = Settings.COMPRESS_HISTORY ? 
new GZIPOutputStream(idsStream, true) : idsStream; + LZ4Factory factory = LZ4Factory.fastestInstance(); + idsStream = new ByteArrayOutputStream(Settings.BUFFER_SIZE); + idsStreamZip = new LZ4OutputStream(idsStream, Settings.BUFFER_SIZE, factory.fastCompressor()); + if (Settings.COMPRESSION_LEVEL > 0) { +// Deflater deflater = new Deflater(Math.min(9, Settings.COMPRESSION_LEVEL), true); +// idsStreamZip = new DeflaterOutputStream(idsStreamZip, deflater, true); + idsStreamZip = new LZ4OutputStream(idsStreamZip, Settings.BUFFER_SIZE, factory.highCompressor()); + } ox = x; oz = z; return idsStreamZip; @@ -153,6 +162,13 @@ public class MemoryOptimizedHistory implements ChangeSet, FaweChangeSet { @SuppressWarnings("resource") public Iterator getIterator(final boolean dir) { flush(); + if (lock != null) { + try { + lock.wait(); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } try { Iterator idsIterator; Iterator entsIterator = entities != null ? entities.iterator() : new ArrayList().iterator(); @@ -160,9 +176,13 @@ public class MemoryOptimizedHistory implements ChangeSet, FaweChangeSet { idsIterator = new ArrayList().iterator(); } else { ByteArrayInputStream bais = new ByteArrayInputStream(ids); - final InputStream gis = Settings.COMPRESS_HISTORY ? new GZIPInputStream(bais) : bais; + final InputStream gis; + if (Settings.COMPRESSION_LEVEL > 0) { + gis = new LZ4InputStream(new LZ4InputStream(bais)); + } else { + gis = new LZ4InputStream(bais); + } idsIterator = new Iterator() { - private final Iterator lastFromIter = fromTags != null ? fromTags.iterator() : null; private final Iterator lastToIter = toTags != null ? toTags.iterator() : null; private CompoundTag lastFrom = read(lastFromIter); @@ -253,7 +273,7 @@ public class MemoryOptimizedHistory implements ChangeSet, FaweChangeSet { @Override public int size() { - return size.get(); + return size; } @Override @@ -264,6 +284,14 @@ public class MemoryOptimizedHistory implements ChangeSet, FaweChangeSet { idsStreamZip.flush(); idsStreamZip.close(); ids = idsStream.toByteArray(); + // Estimate + int total = 0x18 * size; + int ratio = total / ids.length; + int saved = total - ids.length; + if (ratio > 3) { + // TODO remove this debug message + Fawe.debug("[FAWE] History compressed. Saved ~ " + saved + "b (" + ratio + "x smaller)"); + } idsStream = null; idsStreamZip = null; } catch (IOException e) { diff --git a/core/src/main/java/com/test.das b/core/src/main/java/com/test.das new file mode 100644 index 00000000..e69de29b diff --git a/core/src/main/java/net/jpountz/lz4/LZ4BlockInputStream.java b/core/src/main/java/net/jpountz/lz4/LZ4BlockInputStream.java new file mode 100644 index 00000000..95ca3f42 --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/LZ4BlockInputStream.java @@ -0,0 +1,248 @@ +package net.jpountz.lz4; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import static net.jpountz.lz4.LZ4BlockOutputStream.COMPRESSION_LEVEL_BASE; +import static net.jpountz.lz4.LZ4BlockOutputStream.COMPRESSION_METHOD_LZ4; +import static net.jpountz.lz4.LZ4BlockOutputStream.COMPRESSION_METHOD_RAW; +import static net.jpountz.lz4.LZ4BlockOutputStream.DEFAULT_SEED; +import static net.jpountz.lz4.LZ4BlockOutputStream.HEADER_LENGTH; +import static net.jpountz.lz4.LZ4BlockOutputStream.MAGIC; +import static net.jpountz.lz4.LZ4BlockOutputStream.MAGIC_LENGTH; + +import java.io.EOFException; +import java.io.FilterInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.zip.Checksum; + +import net.jpountz.util.SafeUtils; +import net.jpountz.util.Utils; +import net.jpountz.xxhash.StreamingXXHash32; +import net.jpountz.xxhash.XXHash32; +import net.jpountz.xxhash.XXHashFactory; + +/** + * {@link InputStream} implementation to decode data written with + * {@link LZ4BlockOutputStream}. This class is not thread-safe and does not + * support {@link #mark(int)}/{@link #reset()}. + * @see LZ4BlockOutputStream + */ +public final class LZ4BlockInputStream extends FilterInputStream { + + private final LZ4FastDecompressor decompressor; + private final Checksum checksum; + private byte[] buffer; + private byte[] compressedBuffer; + private int originalLen; + private int o; + private boolean finished; + + /** + * Create a new {@link InputStream}. + * + * @param in the {@link InputStream} to poll + * @param decompressor the {@link LZ4FastDecompressor decompressor} instance to + * use + * @param checksum the {@link Checksum} instance to use, must be + * equivalent to the instance which has been used to + * write the stream + */ + public LZ4BlockInputStream(InputStream in, LZ4FastDecompressor decompressor, Checksum checksum) { + super(in); + this.decompressor = decompressor; + this.checksum = checksum; + this.buffer = new byte[0]; + this.compressedBuffer = new byte[HEADER_LENGTH]; + o = originalLen = 0; + finished = false; + } + + /** + * Create a new instance using {@link XXHash32} for checksuming. + * @see #LZ4BlockInputStream(InputStream, LZ4FastDecompressor, Checksum) + * @see StreamingXXHash32#asChecksum() + */ + public LZ4BlockInputStream(InputStream in, LZ4FastDecompressor decompressor) { + this(in, decompressor, XXHashFactory.fastestInstance().newStreamingHash32(DEFAULT_SEED).asChecksum()); + } + + /** + * Create a new instance which uses the fastest {@link LZ4FastDecompressor} available. 
+ * @see LZ4Factory#fastestInstance() + * @see #LZ4BlockInputStream(InputStream, LZ4FastDecompressor) + */ + public LZ4BlockInputStream(InputStream in) { + this(in, LZ4Factory.fastestInstance().fastDecompressor()); + } + + @Override + public int available() throws IOException { + return originalLen - o; + } + + @Override + public int read() throws IOException { + if (finished) { + return -1; + } + if (o == originalLen) { + refill(); + } + if (finished) { + return -1; + } + return buffer[o++] & 0xFF; + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + SafeUtils.checkRange(b, off, len); + if (finished) { + return -1; + } + if (o == originalLen) { + refill(); + } + if (finished) { + return -1; + } + len = Math.min(len, originalLen - o); + System.arraycopy(buffer, o, b, off, len); + o += len; + return len; + } + + @Override + public int read(byte[] b) throws IOException { + return read(b, 0, b.length); + } + + @Override + public long skip(long n) throws IOException { + if (finished) { + return -1; + } + if (o == originalLen) { + refill(); + } + if (finished) { + return -1; + } + final int skipped = (int) Math.min(n, originalLen - o); + o += skipped; + return skipped; + } + + private void refill() throws IOException { + readFully(compressedBuffer, HEADER_LENGTH); + for (int i = 0; i < MAGIC_LENGTH; ++i) { + if (compressedBuffer[i] != MAGIC[i]) { + throw new IOException("Stream is corrupted"); + } + } + final int token = compressedBuffer[MAGIC_LENGTH] & 0xFF; + final int compressionMethod = token & 0xF0; + final int compressionLevel = COMPRESSION_LEVEL_BASE + (token & 0x0F); + if (compressionMethod != COMPRESSION_METHOD_RAW && compressionMethod != COMPRESSION_METHOD_LZ4) { + throw new IOException("Stream is corrupted"); + } + final int compressedLen = SafeUtils.readIntLE(compressedBuffer, MAGIC_LENGTH + 1); + originalLen = SafeUtils.readIntLE(compressedBuffer, MAGIC_LENGTH + 5); + final int check = SafeUtils.readIntLE(compressedBuffer, MAGIC_LENGTH + 9); + assert HEADER_LENGTH == MAGIC_LENGTH + 13; + if (originalLen > 1 << compressionLevel + || originalLen < 0 + || compressedLen < 0 + || (originalLen == 0 && compressedLen != 0) + || (originalLen != 0 && compressedLen == 0) + || (compressionMethod == COMPRESSION_METHOD_RAW && originalLen != compressedLen)) { + throw new IOException("Stream is corrupted"); + } + if (originalLen == 0 && compressedLen == 0) { + if (check != 0) { + throw new IOException("Stream is corrupted"); + } + finished = true; + return; + } + if (buffer.length < originalLen) { + buffer = new byte[Math.max(originalLen, buffer.length * 3 / 2)]; + } + switch (compressionMethod) { + case COMPRESSION_METHOD_RAW: + readFully(buffer, originalLen); + break; + case COMPRESSION_METHOD_LZ4: + if (compressedBuffer.length < originalLen) { + compressedBuffer = new byte[Math.max(compressedLen, compressedBuffer.length * 3 / 2)]; + } + readFully(compressedBuffer, compressedLen); + try { + final int compressedLen2 = decompressor.decompress(compressedBuffer, 0, buffer, 0, originalLen); + if (compressedLen != compressedLen2) { + throw new IOException("Stream is corrupted"); + } + } catch (LZ4Exception e) { + throw new IOException("Stream is corrupted", e); + } + break; + default: + throw new AssertionError(); + } + checksum.reset(); + checksum.update(buffer, 0, originalLen); + if ((int) checksum.getValue() != check) { + throw new IOException("Stream is corrupted"); + } + o = 0; + } + + private void readFully(byte[] b, int len) throws IOException { + int read = 0; 
+ while (read < len) { + final int r = in.read(b, read, len - read); + if (r < 0) { + throw new EOFException("Stream ended prematurely"); + } + read += r; + } + assert len == read; + } + + @Override + public boolean markSupported() { + return false; + } + + @SuppressWarnings("sync-override") + @Override + public void mark(int readlimit) { + // unsupported + } + + @SuppressWarnings("sync-override") + @Override + public void reset() throws IOException { + throw new IOException("mark/reset not supported"); + } + + @Override + public String toString() { + return getClass().getSimpleName() + "(in=" + in + + ", decompressor=" + decompressor + ", checksum=" + checksum + ")"; + } + +} diff --git a/core/src/main/java/net/jpountz/lz4/LZ4BlockOutputStream.java b/core/src/main/java/net/jpountz/lz4/LZ4BlockOutputStream.java new file mode 100644 index 00000000..86e172b0 --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/LZ4BlockOutputStream.java @@ -0,0 +1,259 @@ +package net.jpountz.lz4; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.io.FilterOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.util.zip.Checksum; + +import net.jpountz.util.SafeUtils; +import net.jpountz.xxhash.StreamingXXHash32; +import net.jpountz.xxhash.XXHashFactory; + +/** + * Streaming LZ4. + *

+ * This class compresses data into fixed-size blocks of compressed data. + * @see LZ4BlockInputStream + */ +public final class LZ4BlockOutputStream extends FilterOutputStream { + + static final byte[] MAGIC = new byte[] { 'L', 'Z', '4', 'B', 'l', 'o', 'c', 'k' }; + static final int MAGIC_LENGTH = MAGIC.length; + + static final int HEADER_LENGTH = + MAGIC_LENGTH // magic bytes + + 1 // token + + 4 // compressed length + + 4 // decompressed length + + 4; // checksum + + static final int COMPRESSION_LEVEL_BASE = 10; + static final int MIN_BLOCK_SIZE = 64; + static final int MAX_BLOCK_SIZE = 1 << (COMPRESSION_LEVEL_BASE + 0x0F); + + static final int COMPRESSION_METHOD_RAW = 0x10; + static final int COMPRESSION_METHOD_LZ4 = 0x20; + + static final int DEFAULT_SEED = 0x9747b28c; + + private static int compressionLevel(int blockSize) { + if (blockSize < MIN_BLOCK_SIZE) { + throw new IllegalArgumentException("blockSize must be >= " + MIN_BLOCK_SIZE + ", got " + blockSize); + } else if (blockSize > MAX_BLOCK_SIZE) { + throw new IllegalArgumentException("blockSize must be <= " + MAX_BLOCK_SIZE + ", got " + blockSize); + } + int compressionLevel = 32 - Integer.numberOfLeadingZeros(blockSize - 1); // ceil of log2 + assert (1 << compressionLevel) >= blockSize; + assert blockSize * 2 > (1 << compressionLevel); + compressionLevel = Math.max(0, compressionLevel - COMPRESSION_LEVEL_BASE); + assert compressionLevel >= 0 && compressionLevel <= 0x0F; + return compressionLevel; + } + + private final int blockSize; + private final int compressionLevel; + private final LZ4Compressor compressor; + private final Checksum checksum; + private final byte[] buffer; + private final byte[] compressedBuffer; + private final boolean syncFlush; + private boolean finished; + private int o; + + /** + * Create a new {@link OutputStream} with configurable block size. Large + * blocks require more memory at compression and decompression time but + * should improve the compression ratio. + * + * @param out the {@link OutputStream} to feed + * @param blockSize the maximum number of bytes to try to compress at once, + * must be >= 64 and <= 32 M + * @param compressor the {@link LZ4Compressor} instance to use to compress + * data + * @param checksum the {@link Checksum} instance to use to check data for + * integrity. + * @param syncFlush true if pending data should also be flushed on {@link #flush()} + */ + public LZ4BlockOutputStream(OutputStream out, int blockSize, LZ4Compressor compressor, Checksum checksum, boolean syncFlush) { + super(out); + this.blockSize = blockSize; + this.compressor = compressor; + this.checksum = checksum; + this.compressionLevel = compressionLevel(blockSize); + this.buffer = new byte[blockSize]; + final int compressedBlockSize = HEADER_LENGTH + compressor.maxCompressedLength(blockSize); + this.compressedBuffer = new byte[compressedBlockSize]; + this.syncFlush = syncFlush; + o = 0; + finished = false; + System.arraycopy(MAGIC, 0, compressedBuffer, 0, MAGIC_LENGTH); + } + + /** + * Create a new instance which checks stream integrity using + * {@link StreamingXXHash32} and doesn't sync flush. 
+ * @see #LZ4BlockOutputStream(OutputStream, int, LZ4Compressor, Checksum, boolean) + * @see StreamingXXHash32#asChecksum() + */ + public LZ4BlockOutputStream(OutputStream out, int blockSize, LZ4Compressor compressor) { + this(out, blockSize, compressor, XXHashFactory.fastestInstance().newStreamingHash32(DEFAULT_SEED).asChecksum(), false); + } + + /** + * Create a new instance which compresses with the standard LZ4 compression + * algorithm. + * @see #LZ4BlockOutputStream(OutputStream, int, LZ4Compressor) + * @see LZ4Factory#fastCompressor() + */ + public LZ4BlockOutputStream(OutputStream out, int blockSize) { + this(out, blockSize, LZ4Factory.fastestInstance().fastCompressor()); + } + + /** + * Create a new instance which compresses into blocks of 64 KB. + * @see #LZ4BlockOutputStream(OutputStream, int) + */ + public LZ4BlockOutputStream(OutputStream out) { + this(out, 1 << 16); + } + + private void ensureNotFinished() { + if (finished) { + throw new IllegalStateException("This stream is already closed"); + } + } + + @Override + public void write(int b) throws IOException { + ensureNotFinished(); + if (o == blockSize) { + flushBufferedData(); + } + buffer[o++] = (byte) b; + } + + @Override + public void write(byte[] b, int off, int len) throws IOException { + SafeUtils.checkRange(b, off, len); + ensureNotFinished(); + + while (o + len > blockSize) { + final int l = blockSize - o; + System.arraycopy(b, off, buffer, o, blockSize - o); + o = blockSize; + flushBufferedData(); + off += l; + len -= l; + } + System.arraycopy(b, off, buffer, o, len); + o += len; + } + + @Override + public void write(byte[] b) throws IOException { + ensureNotFinished(); + write(b, 0, b.length); + } + + @Override + public void close() throws IOException { + if (!finished) { + finish(); + } + if (out != null) { + out.close(); + out = null; + } + } + + private void flushBufferedData() throws IOException { + if (o == 0) { + return; + } + checksum.reset(); + checksum.update(buffer, 0, o); + final int check = (int) checksum.getValue(); + int compressedLength = compressor.compress(buffer, 0, o, compressedBuffer, HEADER_LENGTH); + final int compressMethod; + if (compressedLength >= o) { + compressMethod = COMPRESSION_METHOD_RAW; + compressedLength = o; + System.arraycopy(buffer, 0, compressedBuffer, HEADER_LENGTH, o); + } else { + compressMethod = COMPRESSION_METHOD_LZ4; + } + + compressedBuffer[MAGIC_LENGTH] = (byte) (compressMethod | compressionLevel); + writeIntLE(compressedLength, compressedBuffer, MAGIC_LENGTH + 1); + writeIntLE(o, compressedBuffer, MAGIC_LENGTH + 5); + writeIntLE(check, compressedBuffer, MAGIC_LENGTH + 9); + assert MAGIC_LENGTH + 13 == HEADER_LENGTH; + out.write(compressedBuffer, 0, HEADER_LENGTH + compressedLength); + o = 0; + } + + /** + * Flush this compressed {@link OutputStream}. + * + * If the stream has been created with syncFlush=true, pending + * data will be compressed and appended to the underlying {@link OutputStream} + * before calling {@link OutputStream#flush()} on the underlying stream. + * Otherwise, this method just flushes the underlying stream, so pending + * data might not be available for reading until {@link #finish()} or + * {@link #close()} is called. + */ + @Override + public void flush() throws IOException { + if (out != null) { + if (syncFlush) { + flushBufferedData(); + } + out.flush(); + } + } + + /** + * Same as {@link #close()} except that it doesn't close the underlying stream. + * This can be useful if you want to keep on using the underlying stream. 
+ */ + public void finish() throws IOException { + ensureNotFinished(); + flushBufferedData(); + compressedBuffer[MAGIC_LENGTH] = (byte) (COMPRESSION_METHOD_RAW | compressionLevel); + writeIntLE(0, compressedBuffer, MAGIC_LENGTH + 1); + writeIntLE(0, compressedBuffer, MAGIC_LENGTH + 5); + writeIntLE(0, compressedBuffer, MAGIC_LENGTH + 9); + assert MAGIC_LENGTH + 13 == HEADER_LENGTH; + out.write(compressedBuffer, 0, HEADER_LENGTH); + finished = true; + out.flush(); + } + + private static void writeIntLE(int i, byte[] buf, int off) { + buf[off++] = (byte) i; + buf[off++] = (byte) (i >>> 8); + buf[off++] = (byte) (i >>> 16); + buf[off++] = (byte) (i >>> 24); + } + + @Override + public String toString() { + return getClass().getSimpleName() + "(out=" + out + ", blockSize=" + blockSize + + ", compressor=" + compressor + ", checksum=" + checksum + ")"; + } + +} diff --git a/core/src/main/java/net/jpountz/lz4/LZ4ByteBufferUtils.java b/core/src/main/java/net/jpountz/lz4/LZ4ByteBufferUtils.java new file mode 100644 index 00000000..3c0b9ac6 --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/LZ4ByteBufferUtils.java @@ -0,0 +1,237 @@ +package net.jpountz.lz4; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import static net.jpountz.lz4.LZ4Constants.COPY_LENGTH; +import static net.jpountz.lz4.LZ4Constants.LAST_LITERALS; +import static net.jpountz.lz4.LZ4Constants.ML_BITS; +import static net.jpountz.lz4.LZ4Constants.ML_MASK; +import static net.jpountz.lz4.LZ4Constants.RUN_MASK; +import static net.jpountz.util.ByteBufferUtils.readByte; +import static net.jpountz.util.ByteBufferUtils.readInt; +import static net.jpountz.util.ByteBufferUtils.readLong; +import static net.jpountz.util.ByteBufferUtils.writeByte; +import static net.jpountz.util.ByteBufferUtils.writeInt; +import static net.jpountz.util.ByteBufferUtils.writeLong; + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; + +enum LZ4ByteBufferUtils { + ; + static int hash(ByteBuffer buf, int i) { + return LZ4Utils.hash(readInt(buf, i)); + } + + static int hash64k(ByteBuffer buf, int i) { + return LZ4Utils.hash64k(readInt(buf, i)); + } + + static boolean readIntEquals(ByteBuffer buf, int i, int j) { + return buf.getInt(i) == buf.getInt(j); + } + + static void safeIncrementalCopy(ByteBuffer dest, int matchOff, int dOff, int matchLen) { + for (int i = 0; i < matchLen; ++i) { + dest.put(dOff + i, dest.get(matchOff + i)); + } + } + + static void wildIncrementalCopy(ByteBuffer dest, int matchOff, int dOff, int matchCopyEnd) { + if (dOff - matchOff < 4) { + for (int i = 0; i < 4; ++i) { + writeByte(dest, dOff+i, readByte(dest, matchOff+i)); + } + dOff += 4; + matchOff += 4; + int dec = 0; + assert dOff >= matchOff && dOff - matchOff < 8; + switch (dOff - matchOff) { + case 1: + matchOff -= 3; + break; + case 2: + matchOff -= 2; + break; + case 3: + matchOff -= 3; + dec = -1; + break; + case 5: + dec = 1; + break; + case 6: + dec = 2; + break; + case 7: + dec = 3; + break; + default: + break; + } + writeInt(dest, dOff, readInt(dest, 
matchOff)); + dOff += 4; + matchOff -= dec; + } else if (dOff - matchOff < COPY_LENGTH) { + writeLong(dest, dOff, readLong(dest, matchOff)); + dOff += dOff - matchOff; + } + while (dOff < matchCopyEnd) { + writeLong(dest, dOff, readLong(dest, matchOff)); + dOff += 8; + matchOff += 8; + } + } + + static int commonBytes(ByteBuffer src, int ref, int sOff, int srcLimit) { + int matchLen = 0; + while (sOff <= srcLimit - 8) { + if (readLong(src, sOff) == readLong(src, ref)) { + matchLen += 8; + ref += 8; + sOff += 8; + } else { + final int zeroBits; + if (src.order() == ByteOrder.BIG_ENDIAN) { + zeroBits = Long.numberOfLeadingZeros(readLong(src, sOff) ^ readLong(src, ref)); + } else { + zeroBits = Long.numberOfTrailingZeros(readLong(src, sOff) ^ readLong(src, ref)); + } + return matchLen + (zeroBits >>> 3); + } + } + while (sOff < srcLimit && readByte(src, ref++) == readByte(src, sOff++)) { + ++matchLen; + } + return matchLen; + } + + static int commonBytesBackward(ByteBuffer b, int o1, int o2, int l1, int l2) { + int count = 0; + while (o1 > l1 && o2 > l2 && b.get(--o1) == b.get(--o2)) { + ++count; + } + return count; + } + + static void safeArraycopy(ByteBuffer src, int sOff, ByteBuffer dest, int dOff, int len) { + for (int i = 0; i < len; ++i) { + dest.put(dOff + i, src.get(sOff + i)); + } + } + + static void wildArraycopy(ByteBuffer src, int sOff, ByteBuffer dest, int dOff, int len) { + assert src.order().equals(dest.order()); + try { + for (int i = 0; i < len; i += 8) { + dest.putLong(dOff + i, src.getLong(sOff + i)); + } + } catch (IndexOutOfBoundsException e) { + throw new LZ4Exception("Malformed input at offset " + sOff); + } + } + + static int encodeSequence(ByteBuffer src, int anchor, int matchOff, int matchRef, int matchLen, ByteBuffer dest, int dOff, int destEnd) { + final int runLen = matchOff - anchor; + final int tokenOff = dOff++; + + if (dOff + runLen + (2 + 1 + LAST_LITERALS) + (runLen >>> 8) > destEnd) { + throw new LZ4Exception("maxDestLen is too small"); + } + + int token; + if (runLen >= RUN_MASK) { + token = (byte) (RUN_MASK << ML_BITS); + dOff = writeLen(runLen - RUN_MASK, dest, dOff); + } else { + token = runLen << ML_BITS; + } + + // copy literals + wildArraycopy(src, anchor, dest, dOff, runLen); + dOff += runLen; + + // encode offset + final int matchDec = matchOff - matchRef; + dest.put(dOff++, (byte) matchDec); + dest.put(dOff++, (byte) (matchDec >>> 8)); + + // encode match len + matchLen -= 4; + if (dOff + (1 + LAST_LITERALS) + (matchLen >>> 8) > destEnd) { + throw new LZ4Exception("maxDestLen is too small"); + } + if (matchLen >= ML_MASK) { + token |= ML_MASK; + dOff = writeLen(matchLen - RUN_MASK, dest, dOff); + } else { + token |= matchLen; + } + + dest.put(tokenOff, (byte) token); + + return dOff; + } + + static int lastLiterals(ByteBuffer src, int sOff, int srcLen, ByteBuffer dest, int dOff, int destEnd) { + final int runLen = srcLen; + + if (dOff + runLen + 1 + (runLen + 255 - RUN_MASK) / 255 > destEnd) { + throw new LZ4Exception(); + } + + if (runLen >= RUN_MASK) { + dest.put(dOff++, (byte) (RUN_MASK << ML_BITS)); + dOff = writeLen(runLen - RUN_MASK, dest, dOff); + } else { + dest.put(dOff++, (byte) (runLen << ML_BITS)); + } + // copy literals + safeArraycopy(src, sOff, dest, dOff, runLen); + dOff += runLen; + + return dOff; + } + + static int writeLen(int len, ByteBuffer dest, int dOff) { + while (len >= 0xFF) { + dest.put(dOff++, (byte) 0xFF); + len -= 0xFF; + } + dest.put(dOff++, (byte) len); + return dOff; + } + + static class Match { + int start, ref, 
len; + + void fix(int correction) { + start += correction; + ref += correction; + len -= correction; + } + + int end() { + return start + len; + } + } + + static void copyTo(Match m1, Match m2) { + m2.len = m1.len; + m2.start = m1.start; + m2.ref = m1.ref; + } + +} diff --git a/core/src/main/java/net/jpountz/lz4/LZ4Compressor.java b/core/src/main/java/net/jpountz/lz4/LZ4Compressor.java new file mode 100644 index 00000000..7feb5fde --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/LZ4Compressor.java @@ -0,0 +1,126 @@ +package net.jpountz.lz4; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.nio.ByteBuffer; +import java.util.Arrays; + +/** + * LZ4 compressor. + *

+ * Instances of this class are thread-safe. + */ +public abstract class LZ4Compressor { + + /** Return the maximum compressed length for an input of size length. */ + @SuppressWarnings("static-method") + public final int maxCompressedLength(int length) { + return LZ4Utils.maxCompressedLength(length); + } + + /** + * Compress src[srcOff:srcOff+srcLen] into + * dest[destOff:destOff+destLen] and return the compressed + * length. + * + * This method will throw a {@link LZ4Exception} if this compressor is unable + * to compress the input into less than maxDestLen bytes. To + * prevent this exception to be thrown, you should make sure that + * maxDestLen >= maxCompressedLength(srcLen). + * + * @throws LZ4Exception if maxDestLen is too small + * @return the compressed size + */ + public abstract int compress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen); + + /** + * Compress src[srcOff:srcOff+srcLen] into + * dest[destOff:destOff+destLen] and return the compressed + * length. + * + * This method will throw a {@link LZ4Exception} if this compressor is unable + * to compress the input into less than maxDestLen bytes. To + * prevent this exception to be thrown, you should make sure that + * maxDestLen >= maxCompressedLength(srcLen). + * + * {@link ByteBuffer} positions remain unchanged. + * + * @throws LZ4Exception if maxDestLen is too small + * @return the compressed size + */ + public abstract int compress(ByteBuffer src, int srcOff, int srcLen, ByteBuffer dest, int destOff, int maxDestLen); + + /** + * Convenience method, equivalent to calling + * {@link #compress(byte[], int, int, byte[], int, int) compress(src, srcOff, srcLen, dest, destOff, dest.length - destOff)}. + */ + public final int compress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff) { + return compress(src, srcOff, srcLen, dest, destOff, dest.length - destOff); + } + + /** + * Convenience method, equivalent to calling + * {@link #compress(byte[], int, int, byte[], int) compress(src, 0, src.length, dest, 0)}. + */ + public final int compress(byte[] src, byte[] dest) { + return compress(src, 0, src.length, dest, 0); + } + + /** + * Convenience method which returns src[srcOff:srcOff+srcLen] + * compressed. + *

+   * <p><b>Warning</b>: this method has an
+   * important overhead due to the fact that it needs to allocate a buffer to
+   * compress into, and then needs to resize this buffer to the actual
+   * compressed length.
+   * <p>
+   * Here is how this method is implemented:
+   * <pre>
+   * final int maxCompressedLength = maxCompressedLength(srcLen);
+   * final byte[] compressed = new byte[maxCompressedLength];
+   * final int compressedLength = compress(src, srcOff, srcLen, compressed, 0);
+   * return Arrays.copyOf(compressed, compressedLength);
+   * </pre>
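+   * <p>
+   * The compressed form produced here does not record the original length, so a
+   * minimal round-trip sketch keeps it separately (compressor, factory and
+   * original are placeholder names):
+   * <pre>
+   * final byte[] compressed = compressor.compress(original);
+   * final byte[] restored = factory.fastDecompressor().decompress(compressed, original.length);
+   * </pre>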
+ */ + public final byte[] compress(byte[] src, int srcOff, int srcLen) { + final int maxCompressedLength = maxCompressedLength(srcLen); + final byte[] compressed = new byte[maxCompressedLength]; + final int compressedLength = compress(src, srcOff, srcLen, compressed, 0); + return Arrays.copyOf(compressed, compressedLength); + } + + /** + * Convenience method, equivalent to calling + * {@link #compress(byte[], int, int) compress(src, 0, src.length)}. + */ + public final byte[] compress(byte[] src) { + return compress(src, 0, src.length); + } + + /** + * Compress src into dest. Calling this method + * will update the positions of both {@link ByteBuffer}s. + */ + public final void compress(ByteBuffer src, ByteBuffer dest) { + final int cpLen = compress(src, src.position(), src.remaining(), dest, dest.position(), dest.remaining()); + src.position(src.limit()); + dest.position(dest.position() + cpLen); + } + + @Override + public String toString() { + return getClass().getSimpleName(); + } + +} diff --git a/core/src/main/java/net/jpountz/lz4/LZ4Constants.java b/core/src/main/java/net/jpountz/lz4/LZ4Constants.java new file mode 100644 index 00000000..710007ec --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/LZ4Constants.java @@ -0,0 +1,53 @@ +package net.jpountz.lz4; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +enum LZ4Constants { + ; + + static final int DEFAULT_COMPRESSION_LEVEL = 8+1; + static final int MAX_COMPRESSION_LEVEL = 16+1; + + static final int MEMORY_USAGE = 14; + static final int NOT_COMPRESSIBLE_DETECTION_LEVEL = 6; + + static final int MIN_MATCH = 4; + + static final int HASH_LOG = MEMORY_USAGE - 2; + static final int HASH_TABLE_SIZE = 1 << HASH_LOG; + + static final int SKIP_STRENGTH = Math.max(NOT_COMPRESSIBLE_DETECTION_LEVEL, 2); + static final int COPY_LENGTH = 8; + static final int LAST_LITERALS = 5; + static final int MF_LIMIT = COPY_LENGTH + MIN_MATCH; + static final int MIN_LENGTH = MF_LIMIT + 1; + + static final int MAX_DISTANCE = 1 << 16; + + static final int ML_BITS = 4; + static final int ML_MASK = (1 << ML_BITS) - 1; + static final int RUN_BITS = 8 - ML_BITS; + static final int RUN_MASK = (1 << RUN_BITS) - 1; + + static final int LZ4_64K_LIMIT = (1 << 16) + (MF_LIMIT - 1); + static final int HASH_LOG_64K = HASH_LOG + 1; + static final int HASH_TABLE_SIZE_64K = 1 << HASH_LOG_64K; + + static final int HASH_LOG_HC = 15; + static final int HASH_TABLE_SIZE_HC = 1 << HASH_LOG_HC; + static final int OPTIMAL_ML = ML_MASK - 1 + MIN_MATCH; + +} diff --git a/core/src/main/java/net/jpountz/lz4/LZ4Decompressor.java b/core/src/main/java/net/jpountz/lz4/LZ4Decompressor.java new file mode 100644 index 00000000..6b2c1833 --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/LZ4Decompressor.java @@ -0,0 +1,25 @@ +package net.jpountz.lz4; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @deprecated Use {@link LZ4FastDecompressor} instead. + */ +@Deprecated +public interface LZ4Decompressor { + + int decompress(byte[] src, int srcOff, byte[] dest, int destOff, int destLen); + +} \ No newline at end of file diff --git a/core/src/main/java/net/jpountz/lz4/LZ4Exception.java b/core/src/main/java/net/jpountz/lz4/LZ4Exception.java new file mode 100644 index 00000000..cb45c0aa --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/LZ4Exception.java @@ -0,0 +1,36 @@ +package net.jpountz.lz4; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * LZ4 compression or decompression error. + */ +public class LZ4Exception extends RuntimeException { + + private static final long serialVersionUID = 1L; + + public LZ4Exception(String msg, Throwable t) { + super(msg, t); + } + + public LZ4Exception(String msg) { + super(msg); + } + + public LZ4Exception() { + super(); + } + +} diff --git a/core/src/main/java/net/jpountz/lz4/LZ4Factory.java b/core/src/main/java/net/jpountz/lz4/LZ4Factory.java new file mode 100644 index 00000000..7d1cf271 --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/LZ4Factory.java @@ -0,0 +1,258 @@ +package net.jpountz.lz4; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.lang.reflect.Constructor; +import java.lang.reflect.Field; +import java.lang.reflect.InvocationTargetException; +import java.util.Arrays; + +import net.jpountz.util.Native; +import net.jpountz.util.Utils; +import static net.jpountz.lz4.LZ4Constants.DEFAULT_COMPRESSION_LEVEL; +import static net.jpountz.lz4.LZ4Constants.MAX_COMPRESSION_LEVEL; + +/** + * Entry point for the LZ4 API. + *

+ * This class has 3 instances<ul>
+ * <li>a {@link #nativeInstance() native} instance which is a JNI binding to
+ * the original LZ4 C implementation.</li>
+ * <li>a {@link #safeInstance() safe Java} instance which is a pure Java port
+ * of the original C library,</li>
+ * <li>an {@link #unsafeInstance() unsafe Java} instance which is a Java port
+ * using the unofficial {@link sun.misc.Unsafe} API.</li>
+ * </ul>
+ * <p>
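+ * A minimal usage sketch (the data array is a placeholder):
+ * <pre>
+ * LZ4Factory factory = LZ4Factory.fastestInstance();
+ * byte[] compressed = factory.fastCompressor().compress(data);
+ * byte[] restored = factory.fastDecompressor().decompress(compressed, data.length);
+ * </pre>
+ * <p>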
+ * Only the {@link #safeInstance() safe instance} is guaranteed to work on your + * JVM, as a consequence it is advised to use the {@link #fastestInstance()} or + * {@link #fastestJavaInstance()} to pull a {@link LZ4Factory} instance. + *

+ * All methods from this class are very costly, so you should get an instance + * once, and then reuse it whenever possible. This is typically done by storing + * a {@link LZ4Factory} instance in a static field. + */ +public final class LZ4Factory { + + private static LZ4Factory instance(String impl) { + try { + return new LZ4Factory(impl); + } catch (Exception e) { + throw new AssertionError(e); + } + } + + private static LZ4Factory NATIVE_INSTANCE, + JAVA_UNSAFE_INSTANCE, + JAVA_SAFE_INSTANCE; + + /** + * Return a {@link LZ4Factory} instance that returns compressors and + * decompressors that are native bindings to the original C library. + *

+ * Please note that this instance has some traps you should be aware of:

    + *
+ * <ol>
+ * <li>Upon loading this instance, files will be written to the temporary
+ * directory of the system. Although these files are supposed to be deleted
+ * when the JVM exits, they might remain on systems that don't support
+ * removal of files being used such as Windows.</li>
+ * <li>The instance can only be loaded once per JVM. This can be a problem
+ * if your application uses multiple class loaders (such as most servlet
+ * containers): this instance will only be available to the children of the
+ * class loader which has loaded it. As a consequence, it is advised to
+ * either not use this instance in webapps or to put this library in the lib
+ * directory of your servlet container so that it is loaded by the system
+ * class loader.</li>
+ * </ol>
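+ * <p>
+ * A minimal sketch of a defensive fallback to the pure Java implementation,
+ * mirroring what {@link #fastestInstance()} does internally:
+ * <pre>
+ * LZ4Factory factory;
+ * try {
+ *   factory = LZ4Factory.nativeInstance();
+ * } catch (Throwable t) {
+ *   factory = LZ4Factory.fastestJavaInstance();
+ * }
+ * </pre>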
+ */ + public static synchronized LZ4Factory nativeInstance() { + if (NATIVE_INSTANCE == null) { + NATIVE_INSTANCE = instance("JNI"); + } + return NATIVE_INSTANCE; + } + + /** Return a {@link LZ4Factory} instance that returns compressors and + * decompressors that are written with Java's official API. */ + public static synchronized LZ4Factory safeInstance() { + if (JAVA_SAFE_INSTANCE == null) { + JAVA_SAFE_INSTANCE = instance("JavaSafe"); + } + return JAVA_SAFE_INSTANCE; + } + + /** Return a {@link LZ4Factory} instance that returns compressors and + * decompressors that may use {@link sun.misc.Unsafe} to speed up compression + * and decompression. */ + public static synchronized LZ4Factory unsafeInstance() { + if (JAVA_UNSAFE_INSTANCE == null) { + JAVA_UNSAFE_INSTANCE = instance("JavaUnsafe"); + } + return JAVA_UNSAFE_INSTANCE; + } + + /** + * Return the fastest available {@link LZ4Factory} instance which does not + * rely on JNI bindings. It first tries to load the + * {@link #unsafeInstance() unsafe instance}, and then the + * {@link #safeInstance() safe Java instance} if the JVM doesn't have a + * working {@link sun.misc.Unsafe}. + */ + public static LZ4Factory fastestJavaInstance() { + if (Utils.isUnalignedAccessAllowed()) { + try { + return unsafeInstance(); + } catch (Throwable t) { + return safeInstance(); + } + } else { + return safeInstance(); + } + } + + /** + * Return the fastest available {@link LZ4Factory} instance. If the class + * loader is the system class loader and if the + * {@link #nativeInstance() native instance} loads successfully, then the + * {@link #nativeInstance() native instance} is returned, otherwise the + * {@link #fastestJavaInstance() fastest Java instance} is returned. + *

+ * Please read {@link #nativeInstance() javadocs of nativeInstance()} before + * using this method. + */ + public static LZ4Factory fastestInstance() { + if (Native.isLoaded() + || Native.class.getClassLoader() == ClassLoader.getSystemClassLoader()) { + try { + return nativeInstance(); + } catch (Throwable t) { + return fastestJavaInstance(); + } + } else { + return fastestJavaInstance(); + } + } + + @SuppressWarnings("unchecked") + private static T classInstance(String cls) throws NoSuchFieldException, SecurityException, ClassNotFoundException, IllegalArgumentException, IllegalAccessException { + ClassLoader loader = LZ4Factory.class.getClassLoader(); + loader = loader == null ? ClassLoader.getSystemClassLoader() : loader; + final Class c = loader.loadClass(cls); + Field f = c.getField("INSTANCE"); + return (T) f.get(null); + } + + private final String impl; + private final LZ4Compressor fastCompressor; + private final LZ4Compressor highCompressor; + private final LZ4FastDecompressor fastDecompressor; + private final LZ4SafeDecompressor safeDecompressor; + private final LZ4Compressor[] highCompressors = new LZ4Compressor[MAX_COMPRESSION_LEVEL+1]; + + private LZ4Factory(String impl) throws ClassNotFoundException, NoSuchFieldException, SecurityException, IllegalArgumentException, IllegalAccessException, NoSuchMethodException, InstantiationException, InvocationTargetException { + this.impl = impl; + fastCompressor = classInstance("net.jpountz.lz4.LZ4" + impl + "Compressor"); + highCompressor = classInstance("net.jpountz.lz4.LZ4HC" + impl + "Compressor"); + fastDecompressor = classInstance("net.jpountz.lz4.LZ4" + impl + "FastDecompressor"); + safeDecompressor = classInstance("net.jpountz.lz4.LZ4" + impl + "SafeDecompressor"); + Constructor highConstructor = highCompressor.getClass().getDeclaredConstructor(int.class); + highCompressors[DEFAULT_COMPRESSION_LEVEL] = highCompressor; + for(int level = 1; level <= MAX_COMPRESSION_LEVEL; level++) { + if(level == DEFAULT_COMPRESSION_LEVEL) continue; + highCompressors[level] = highConstructor.newInstance(level); + } + + // quickly test that everything works as expected + final byte[] original = new byte[] {'a','b','c','d',' ',' ',' ',' ',' ',' ','a','b','c','d','e','f','g','h','i','j'}; + for (LZ4Compressor compressor : Arrays.asList(fastCompressor, highCompressor)) { + final int maxCompressedLength = compressor.maxCompressedLength(original.length); + final byte[] compressed = new byte[maxCompressedLength]; + final int compressedLength = compressor.compress(original, 0, original.length, compressed, 0, maxCompressedLength); + final byte[] restored = new byte[original.length]; + fastDecompressor.decompress(compressed, 0, restored, 0, original.length); + if (!Arrays.equals(original, restored)) { + throw new AssertionError(); + } + Arrays.fill(restored, (byte) 0); + final int decompressedLength = safeDecompressor.decompress(compressed, 0, compressedLength, restored, 0); + if (decompressedLength != original.length || !Arrays.equals(original, restored)) { + throw new AssertionError(); + } + } + + } + + /** Return a blazing fast {@link LZ4Compressor}. */ + public LZ4Compressor fastCompressor() { + return fastCompressor; + } + + /** Return a {@link LZ4Compressor} which requires more memory than + * {@link #fastCompressor()} and is slower but compresses more efficiently. 
*/ + public LZ4Compressor highCompressor() { + return highCompressor; + } + + /** Return a {@link LZ4Compressor} which requires more memory than + * {@link #fastCompressor()} and is slower but compresses more efficiently. + * The compression level can be customized. + *

+ * <p>For current implementations, the following is true about compression level:<ol>
+ *   <li>It should be in range [1, 17]</li>
+ *   <li>A compression level higher than 17 would be treated as 17.</li>
+ *   <li>A compression level lower than 1 would be treated as 9.</li>
+ * </ol>

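+ * <p>
+ * A minimal sketch of the clamping described above (the input array is a placeholder):
+ * <pre>
+ * LZ4Compressor hc = LZ4Factory.fastestInstance().highCompressor(20); // treated as 17
+ * byte[] out = hc.compress(input);
+ * </pre>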
+ */ + public LZ4Compressor highCompressor(int compressionLevel) { + if(compressionLevel > MAX_COMPRESSION_LEVEL) { + compressionLevel = MAX_COMPRESSION_LEVEL; + } else if(compressionLevel < 1) { + compressionLevel = DEFAULT_COMPRESSION_LEVEL; + } + return highCompressors[compressionLevel]; + } + + /** Return a {@link LZ4FastDecompressor} instance. */ + public LZ4FastDecompressor fastDecompressor() { + return fastDecompressor; + } + + /** Return a {@link LZ4SafeDecompressor} instance. */ + public LZ4SafeDecompressor safeDecompressor() { + return safeDecompressor; + } + + /** Return a {@link LZ4UnknownSizeDecompressor} instance. + * @deprecated use {@link #safeDecompressor()} */ + public LZ4UnknownSizeDecompressor unknownSizeDecompressor() { + return safeDecompressor(); + } + + /** Return a {@link LZ4Decompressor} instance. + * @deprecated use {@link #fastDecompressor()} */ + public LZ4Decompressor decompressor() { + return fastDecompressor(); + } + + /** Prints the fastest instance. */ + public static void main(String[] args) { + System.out.println("Fastest instance is " + fastestInstance()); + System.out.println("Fastest Java instance is " + fastestJavaInstance()); + } + + @Override + public String toString() { + return getClass().getSimpleName() + ":" + impl; + } + +} diff --git a/core/src/main/java/net/jpountz/lz4/LZ4FastDecompressor.java b/core/src/main/java/net/jpountz/lz4/LZ4FastDecompressor.java new file mode 100644 index 00000000..8427cc6e --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/LZ4FastDecompressor.java @@ -0,0 +1,106 @@ +package net.jpountz.lz4; + +import java.nio.ByteBuffer; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * LZ4 decompressor that requires the size of the original input to be known. + * Use {@link LZ4SafeDecompressor} if you only know the size of the + * compressed stream. + *

+ * Instances of this class are thread-safe. + */ +public abstract class LZ4FastDecompressor implements LZ4Decompressor { + + /** Decompress src[srcOff:] into dest[destOff:destOff+destLen] + * and return the number of bytes read from src. + * destLen must be exactly the size of the decompressed data. + * + * @param destLen the exact size of the original input + * @return the number of bytes read to restore the original input + */ + public abstract int decompress(byte[] src, int srcOff, byte[] dest, int destOff, int destLen); + + /** Decompress src[srcOff:] into dest[destOff:destOff+destLen] + * and return the number of bytes read from src. + * destLen must be exactly the size of the decompressed data. + * The positions and limits of the {@link ByteBuffer}s remain unchanged. + * + * @param destLen the exact size of the original input + * @return the number of bytes read to restore the original input + */ + public abstract int decompress(ByteBuffer src, int srcOff, ByteBuffer dest, int destOff, int destLen); + + /** + * Convenience method, equivalent to calling + * {@link #decompress(byte[], int, byte[], int, int) decompress(src, 0, dest, 0, destLen)}. + */ + public final int decompress(byte[] src, byte[] dest, int destLen) { + return decompress(src, 0, dest, 0, destLen); + } + + /** + * Convenience method, equivalent to calling + * {@link #decompress(byte[], byte[], int) decompress(src, dest, dest.length)}. + */ + public final int decompress(byte[] src, byte[] dest) { + return decompress(src, dest, dest.length); + } + + /** + * Convenience method which returns src[srcOff:?] + * decompressed. + *

Warning: this method has an + * important overhead due to the fact that it needs to allocate a buffer to + * decompress into.
+ * <p>Here is how this method is implemented:
+ * <pre>
+   * final byte[] decompressed = new byte[destLen];
+   * decompress(src, srcOff, decompressed, 0, destLen);
+   * return decompressed;
+   * </pre>
+ */ + public final byte[] decompress(byte[] src, int srcOff, int destLen) { + final byte[] decompressed = new byte[destLen]; + decompress(src, srcOff, decompressed, 0, destLen); + return decompressed; + } + + /** + * Convenience method, equivalent to calling + * {@link #decompress(byte[], int, int) decompress(src, 0, destLen)}. + */ + public final byte[] decompress(byte[] src, int destLen) { + return decompress(src, 0, destLen); + } + + /** + * Decompress src into dest. dest's + * {@link ByteBuffer#remaining()} must be exactly the size of the decompressed + * data. This method moves the positions of the buffers. + */ + public final void decompress(ByteBuffer src, ByteBuffer dest) { + final int read = decompress(src, src.position(), dest, dest.position(), dest.remaining()); + dest.position(dest.limit()); + src.position(src.position() + read); + } + + @Override + public String toString() { + return getClass().getSimpleName(); + } + +} diff --git a/core/src/main/java/net/jpountz/lz4/LZ4HCJNICompressor.java b/core/src/main/java/net/jpountz/lz4/LZ4HCJNICompressor.java new file mode 100644 index 00000000..da721e31 --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/LZ4HCJNICompressor.java @@ -0,0 +1,88 @@ +package net.jpountz.lz4; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import static net.jpountz.lz4.LZ4Constants.DEFAULT_COMPRESSION_LEVEL; + +import java.nio.ByteBuffer; + +import net.jpountz.util.ByteBufferUtils; +import net.jpountz.util.SafeUtils; + +/** + * High compression {@link LZ4Compressor}s implemented with JNI bindings to the + * original C implementation of LZ4. 
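+ * <p>This class is typically reached through the factory rather than used directly;
+ * a hypothetical sketch (variable names are illustrative):
+ * <pre>
+ * LZ4Compressor hc = LZ4Factory.nativeInstance().highCompressor();
+ * byte[] compressed = new byte[hc.maxCompressedLength(input.length)];
+ * int compressedLength = hc.compress(input, 0, input.length, compressed, 0, compressed.length);
+ * </pre>
+ * <p>For {@link ByteBuffer}s that are neither array-backed nor direct, the
+ * {@code compress(ByteBuffer, ...)} override below delegates to
+ * {@code LZ4Factory.safeInstance().highCompressor(compressionLevel)}.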
+ */ +final class LZ4HCJNICompressor extends LZ4Compressor { + + public static final LZ4HCJNICompressor INSTANCE = new LZ4HCJNICompressor(); + private static LZ4Compressor SAFE_INSTANCE; + + private final int compressionLevel; + + LZ4HCJNICompressor() { this(DEFAULT_COMPRESSION_LEVEL); } + LZ4HCJNICompressor(int compressionLevel) { + this.compressionLevel = compressionLevel; + } + + @Override + public int compress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen) { + SafeUtils.checkRange(src, srcOff, srcLen); + SafeUtils.checkRange(dest, destOff, maxDestLen); + final int result = LZ4JNI.LZ4_compressHC(src, null, srcOff, srcLen, dest, null, destOff, maxDestLen, compressionLevel); + if (result <= 0) { + throw new LZ4Exception(); + } + return result; + } + + @Override + public int compress(ByteBuffer src, int srcOff, int srcLen, ByteBuffer dest, int destOff, int maxDestLen) { + ByteBufferUtils.checkNotReadOnly(dest); + ByteBufferUtils.checkRange(src, srcOff, srcLen); + ByteBufferUtils.checkRange(dest, destOff, maxDestLen); + + if ((src.hasArray() || src.isDirect()) && (dest.hasArray() || dest.isDirect())) { + byte[] srcArr = null, destArr = null; + ByteBuffer srcBuf = null, destBuf = null; + if (src.hasArray()) { + srcArr = src.array(); + srcOff += src.arrayOffset(); + } else { + assert src.isDirect(); + srcBuf = src; + } + if (dest.hasArray()) { + destArr = dest.array(); + destOff += dest.arrayOffset(); + } else { + assert dest.isDirect(); + destBuf = dest; + } + + final int result = LZ4JNI.LZ4_compressHC(srcArr, srcBuf, srcOff, srcLen, destArr, destBuf, destOff, maxDestLen, compressionLevel); + if (result <= 0) { + throw new LZ4Exception(); + } + return result; + } else { + LZ4Compressor safeInstance = SAFE_INSTANCE; + if (safeInstance == null) { + safeInstance = SAFE_INSTANCE = LZ4Factory.safeInstance().highCompressor(compressionLevel); + } + return safeInstance.compress(src, srcOff, srcLen, dest, destOff, maxDestLen); + } + } +} diff --git a/core/src/main/java/net/jpountz/lz4/LZ4HCJavaSafeCompressor.java b/core/src/main/java/net/jpountz/lz4/LZ4HCJavaSafeCompressor.java new file mode 100644 index 00000000..f4ec3bd8 --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/LZ4HCJavaSafeCompressor.java @@ -0,0 +1,550 @@ +// Auto-generated: DO NOT EDIT + +package net.jpountz.lz4; + +import static net.jpountz.lz4.LZ4Constants.*; +import static net.jpountz.lz4.LZ4Utils.*; + +import java.nio.ByteBuffer; +import java.util.Arrays; + +import net.jpountz.lz4.LZ4Utils.Match; +import net.jpountz.util.ByteBufferUtils; +import net.jpountz.util.SafeUtils; + +/** + * High compression compressor. 
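+ * <p>Pure-Java variant of the high compression compressor; it is normally obtained
+ * through the factory rather than constructed directly (a hypothetical sketch):
+ * <pre>
+ * LZ4Compressor hc = LZ4Factory.safeInstance().highCompressor(9);
+ * </pre>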
+ */ +final class LZ4HCJavaSafeCompressor extends LZ4Compressor { + + public static final LZ4Compressor INSTANCE = new LZ4HCJavaSafeCompressor(); + + private final int maxAttempts; + final int compressionLevel; + + LZ4HCJavaSafeCompressor() { this(DEFAULT_COMPRESSION_LEVEL); } + LZ4HCJavaSafeCompressor(int compressionLevel) { + this.maxAttempts = 1<<(compressionLevel-1); + this.compressionLevel = compressionLevel; + } + + private class HashTable { + static final int MASK = MAX_DISTANCE - 1; + int nextToUpdate; + private final int base; + private final int[] hashTable; + private final short[] chainTable; + + HashTable(int base) { + this.base = base; + nextToUpdate = base; + hashTable = new int[HASH_TABLE_SIZE_HC]; + Arrays.fill(hashTable, -1); + chainTable = new short[MAX_DISTANCE]; + } + + private int hashPointer(byte[] bytes, int off) { + final int v = SafeUtils.readInt(bytes, off); + return hashPointer(v); + } + + private int hashPointer(ByteBuffer bytes, int off) { + final int v = ByteBufferUtils.readInt(bytes, off); + return hashPointer(v); + } + + private int hashPointer(int v) { + final int h = hashHC(v); + return hashTable[h]; + } + + private int next(int off) { + return off - (chainTable[off & MASK] & 0xFFFF); + } + + private void addHash(byte[] bytes, int off) { + final int v = SafeUtils.readInt(bytes, off); + addHash(v, off); + } + + private void addHash(ByteBuffer bytes, int off) { + final int v = ByteBufferUtils.readInt(bytes, off); + addHash(v, off); + } + + private void addHash(int v, int off) { + final int h = hashHC(v); + int delta = off - hashTable[h]; + assert delta > 0 : delta; + if (delta >= MAX_DISTANCE) { + delta = MAX_DISTANCE - 1; + } + chainTable[off & MASK] = (short) delta; + hashTable[h] = off; + } + + void insert(int off, byte[] bytes) { + for (; nextToUpdate < off; ++nextToUpdate) { + addHash(bytes, nextToUpdate); + } + } + + void insert(int off, ByteBuffer bytes) { + for (; nextToUpdate < off; ++nextToUpdate) { + addHash(bytes, nextToUpdate); + } + } + + + + boolean insertAndFindBestMatch(byte[] buf, int off, int matchLimit, Match match) { + match.start = off; + match.len = 0; + int delta = 0; + int repl = 0; + + insert(off, buf); + + int ref = hashPointer(buf, off); + + if (ref >= off - 4 && ref <= off && ref >= base) { // potential repetition + if (LZ4SafeUtils.readIntEquals(buf, ref, off)) { // confirmed + delta = off - ref; + repl = match.len = MIN_MATCH + LZ4SafeUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit); + match.ref = ref; + } + ref = next(ref); + } + + for (int i = 0; i < maxAttempts; ++i) { + if (ref < Math.max(base, off - MAX_DISTANCE + 1) || ref > off) { + break; + } + if (LZ4SafeUtils.readIntEquals(buf, ref, off)) { + final int matchLen = MIN_MATCH + LZ4SafeUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit); + if (matchLen > match.len) { + match.ref = ref; + match.len = matchLen; + } + } + ref = next(ref); + } + + if (repl != 0) { + int ptr = off; + final int end = off + repl - (MIN_MATCH - 1); + while (ptr < end - delta) { + chainTable[ptr & MASK] = (short) delta; // pre load + ++ptr; + } + do { + chainTable[ptr & MASK] = (short) delta; + hashTable[hashHC(SafeUtils.readInt(buf, ptr))] = ptr; + ++ptr; + } while (ptr < end); + nextToUpdate = end; + } + + return match.len != 0; + } + + boolean insertAndFindWiderMatch(byte[] buf, int off, int startLimit, int matchLimit, int minLen, Match match) { + match.len = minLen; + + insert(off, buf); + + final int delta = off - startLimit; + int ref = hashPointer(buf, 
off); + for (int i = 0; i < maxAttempts; ++i) { + if (ref < Math.max(base, off - MAX_DISTANCE + 1) || ref > off) { + break; + } + if (LZ4SafeUtils.readIntEquals(buf, ref, off)) { + final int matchLenForward = MIN_MATCH +LZ4SafeUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit); + final int matchLenBackward = LZ4SafeUtils.commonBytesBackward(buf, ref, off, base, startLimit); + final int matchLen = matchLenBackward + matchLenForward; + if (matchLen > match.len) { + match.len = matchLen; + match.ref = ref - matchLenBackward; + match.start = off - matchLenBackward; + } + } + ref = next(ref); + } + + return match.len > minLen; + } + + + boolean insertAndFindBestMatch(ByteBuffer buf, int off, int matchLimit, Match match) { + match.start = off; + match.len = 0; + int delta = 0; + int repl = 0; + + insert(off, buf); + + int ref = hashPointer(buf, off); + + if (ref >= off - 4 && ref <= off && ref >= base) { // potential repetition + if (LZ4ByteBufferUtils.readIntEquals(buf, ref, off)) { // confirmed + delta = off - ref; + repl = match.len = MIN_MATCH + LZ4ByteBufferUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit); + match.ref = ref; + } + ref = next(ref); + } + + for (int i = 0; i < maxAttempts; ++i) { + if (ref < Math.max(base, off - MAX_DISTANCE + 1) || ref > off) { + break; + } + if (LZ4ByteBufferUtils.readIntEquals(buf, ref, off)) { + final int matchLen = MIN_MATCH + LZ4ByteBufferUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit); + if (matchLen > match.len) { + match.ref = ref; + match.len = matchLen; + } + } + ref = next(ref); + } + + if (repl != 0) { + int ptr = off; + final int end = off + repl - (MIN_MATCH - 1); + while (ptr < end - delta) { + chainTable[ptr & MASK] = (short) delta; // pre load + ++ptr; + } + do { + chainTable[ptr & MASK] = (short) delta; + hashTable[hashHC(ByteBufferUtils.readInt(buf, ptr))] = ptr; + ++ptr; + } while (ptr < end); + nextToUpdate = end; + } + + return match.len != 0; + } + + boolean insertAndFindWiderMatch(ByteBuffer buf, int off, int startLimit, int matchLimit, int minLen, Match match) { + match.len = minLen; + + insert(off, buf); + + final int delta = off - startLimit; + int ref = hashPointer(buf, off); + for (int i = 0; i < maxAttempts; ++i) { + if (ref < Math.max(base, off - MAX_DISTANCE + 1) || ref > off) { + break; + } + if (LZ4ByteBufferUtils.readIntEquals(buf, ref, off)) { + final int matchLenForward = MIN_MATCH +LZ4ByteBufferUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit); + final int matchLenBackward = LZ4ByteBufferUtils.commonBytesBackward(buf, ref, off, base, startLimit); + final int matchLen = matchLenBackward + matchLenForward; + if (matchLen > match.len) { + match.len = matchLen; + match.ref = ref - matchLenBackward; + match.start = off - matchLenBackward; + } + } + ref = next(ref); + } + + return match.len > minLen; + } + + + } + + @Override + public int compress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen) { + + SafeUtils.checkRange(src, srcOff, srcLen); + SafeUtils.checkRange(dest, destOff, maxDestLen); + + final int srcEnd = srcOff + srcLen; + final int destEnd = destOff + maxDestLen; + final int mfLimit = srcEnd - MF_LIMIT; + final int matchLimit = srcEnd - LAST_LITERALS; + + int sOff = srcOff; + int dOff = destOff; + int anchor = sOff++; + + final HashTable ht = new HashTable(srcOff); + final Match match0 = new Match(); + final Match match1 = new Match(); + final Match match2 = new Match(); + final Match match3 = new Match(); + 
+ main: + while (sOff < mfLimit) { + if (!ht.insertAndFindBestMatch(src, sOff, matchLimit, match1)) { + ++sOff; + continue; + } + + // saved, in case we would skip too much + copyTo(match1, match0); + + search2: + while (true) { + assert match1.start >= anchor; + if (match1.end() >= mfLimit + || !ht.insertAndFindWiderMatch(src, match1.end() - 2, match1.start + 1, matchLimit, match1.len, match2)) { + // no better match + dOff = LZ4SafeUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd); + anchor = sOff = match1.end(); + continue main; + } + + if (match0.start < match1.start) { + if (match2.start < match1.start + match0.len) { // empirical + copyTo(match0, match1); + } + } + assert match2.start > match1.start; + + if (match2.start - match1.start < 3) { // First Match too small : removed + copyTo(match2, match1); + continue search2; + } + + search3: + while (true) { + if (match2.start - match1.start < OPTIMAL_ML) { + int newMatchLen = match1.len; + if (newMatchLen > OPTIMAL_ML) { + newMatchLen = OPTIMAL_ML; + } + if (match1.start + newMatchLen > match2.end() - MIN_MATCH) { + newMatchLen = match2.start - match1.start + match2.len - MIN_MATCH; + } + final int correction = newMatchLen - (match2.start - match1.start); + if (correction > 0) { + match2.fix(correction); + } + } + + if (match2.start + match2.len >= mfLimit + || !ht.insertAndFindWiderMatch(src, match2.end() - 3, match2.start, matchLimit, match2.len, match3)) { + // no better match -> 2 sequences to encode + if (match2.start < match1.end()) { + match1.len = match2.start - match1.start; + } + // encode seq 1 + dOff = LZ4SafeUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd); + anchor = sOff = match1.end(); + // encode seq 2 + dOff = LZ4SafeUtils.encodeSequence(src, anchor, match2.start, match2.ref, match2.len, dest, dOff, destEnd); + anchor = sOff = match2.end(); + continue main; + } + + if (match3.start < match1.end() + 3) { // Not enough space for match 2 : remove it + if (match3.start >= match1.end()) { // // can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 + if (match2.start < match1.end()) { + final int correction = match1.end() - match2.start; + match2.fix(correction); + if (match2.len < MIN_MATCH) { + copyTo(match3, match2); + } + } + + dOff = LZ4SafeUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd); + anchor = sOff = match1.end(); + + copyTo(match3, match1); + copyTo(match2, match0); + + continue search2; + } + + copyTo(match3, match2); + continue search3; + } + + // OK, now we have 3 ascending matches; let's write at least the first one + if (match2.start < match1.end()) { + if (match2.start - match1.start < ML_MASK) { + if (match1.len > OPTIMAL_ML) { + match1.len = OPTIMAL_ML; + } + if (match1.end() > match2.end() - MIN_MATCH) { + match1.len = match2.end() - match1.start - MIN_MATCH; + } + final int correction = match1.end() - match2.start; + match2.fix(correction); + } else { + match1.len = match2.start - match1.start; + } + } + + dOff = LZ4SafeUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd); + anchor = sOff = match1.end(); + + copyTo(match2, match1); + copyTo(match3, match2); + + continue search3; + } + + } + + } + + dOff = LZ4SafeUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd); + return dOff - destOff; + } + + + @Override + public int compress(ByteBuffer src, int srcOff, int srcLen, ByteBuffer dest, int destOff, 
int maxDestLen) { + + if (src.hasArray() && dest.hasArray()) { + return compress(src.array(), srcOff + src.arrayOffset(), srcLen, dest.array(), destOff + dest.arrayOffset(), maxDestLen); + } + src = ByteBufferUtils.inNativeByteOrder(src); + dest = ByteBufferUtils.inNativeByteOrder(dest); + + ByteBufferUtils.checkRange(src, srcOff, srcLen); + ByteBufferUtils.checkRange(dest, destOff, maxDestLen); + + final int srcEnd = srcOff + srcLen; + final int destEnd = destOff + maxDestLen; + final int mfLimit = srcEnd - MF_LIMIT; + final int matchLimit = srcEnd - LAST_LITERALS; + + int sOff = srcOff; + int dOff = destOff; + int anchor = sOff++; + + final HashTable ht = new HashTable(srcOff); + final Match match0 = new Match(); + final Match match1 = new Match(); + final Match match2 = new Match(); + final Match match3 = new Match(); + + main: + while (sOff < mfLimit) { + if (!ht.insertAndFindBestMatch(src, sOff, matchLimit, match1)) { + ++sOff; + continue; + } + + // saved, in case we would skip too much + copyTo(match1, match0); + + search2: + while (true) { + assert match1.start >= anchor; + if (match1.end() >= mfLimit + || !ht.insertAndFindWiderMatch(src, match1.end() - 2, match1.start + 1, matchLimit, match1.len, match2)) { + // no better match + dOff = LZ4ByteBufferUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd); + anchor = sOff = match1.end(); + continue main; + } + + if (match0.start < match1.start) { + if (match2.start < match1.start + match0.len) { // empirical + copyTo(match0, match1); + } + } + assert match2.start > match1.start; + + if (match2.start - match1.start < 3) { // First Match too small : removed + copyTo(match2, match1); + continue search2; + } + + search3: + while (true) { + if (match2.start - match1.start < OPTIMAL_ML) { + int newMatchLen = match1.len; + if (newMatchLen > OPTIMAL_ML) { + newMatchLen = OPTIMAL_ML; + } + if (match1.start + newMatchLen > match2.end() - MIN_MATCH) { + newMatchLen = match2.start - match1.start + match2.len - MIN_MATCH; + } + final int correction = newMatchLen - (match2.start - match1.start); + if (correction > 0) { + match2.fix(correction); + } + } + + if (match2.start + match2.len >= mfLimit + || !ht.insertAndFindWiderMatch(src, match2.end() - 3, match2.start, matchLimit, match2.len, match3)) { + // no better match -> 2 sequences to encode + if (match2.start < match1.end()) { + match1.len = match2.start - match1.start; + } + // encode seq 1 + dOff = LZ4ByteBufferUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd); + anchor = sOff = match1.end(); + // encode seq 2 + dOff = LZ4ByteBufferUtils.encodeSequence(src, anchor, match2.start, match2.ref, match2.len, dest, dOff, destEnd); + anchor = sOff = match2.end(); + continue main; + } + + if (match3.start < match1.end() + 3) { // Not enough space for match 2 : remove it + if (match3.start >= match1.end()) { // // can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 + if (match2.start < match1.end()) { + final int correction = match1.end() - match2.start; + match2.fix(correction); + if (match2.len < MIN_MATCH) { + copyTo(match3, match2); + } + } + + dOff = LZ4ByteBufferUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd); + anchor = sOff = match1.end(); + + copyTo(match3, match1); + copyTo(match2, match0); + + continue search2; + } + + copyTo(match3, match2); + continue search3; + } + + // OK, now we have 3 ascending matches; let's write at least the first one + if 
(match2.start < match1.end()) { + if (match2.start - match1.start < ML_MASK) { + if (match1.len > OPTIMAL_ML) { + match1.len = OPTIMAL_ML; + } + if (match1.end() > match2.end() - MIN_MATCH) { + match1.len = match2.end() - match1.start - MIN_MATCH; + } + final int correction = match1.end() - match2.start; + match2.fix(correction); + } else { + match1.len = match2.start - match1.start; + } + } + + dOff = LZ4ByteBufferUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd); + anchor = sOff = match1.end(); + + copyTo(match2, match1); + copyTo(match3, match2); + + continue search3; + } + + } + + } + + dOff = LZ4ByteBufferUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd); + return dOff - destOff; + } + + +} diff --git a/core/src/main/java/net/jpountz/lz4/LZ4HCJavaUnsafeCompressor.java b/core/src/main/java/net/jpountz/lz4/LZ4HCJavaUnsafeCompressor.java new file mode 100644 index 00000000..0f503221 --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/LZ4HCJavaUnsafeCompressor.java @@ -0,0 +1,550 @@ +// Auto-generated: DO NOT EDIT + +package net.jpountz.lz4; + +import static net.jpountz.lz4.LZ4Constants.*; +import static net.jpountz.lz4.LZ4Utils.*; + +import java.nio.ByteBuffer; +import java.util.Arrays; + +import net.jpountz.lz4.LZ4Utils.Match; +import net.jpountz.util.ByteBufferUtils; +import net.jpountz.util.UnsafeUtils; + +/** + * High compression compressor. + */ +final class LZ4HCJavaUnsafeCompressor extends LZ4Compressor { + + public static final LZ4Compressor INSTANCE = new LZ4HCJavaUnsafeCompressor(); + + private final int maxAttempts; + final int compressionLevel; + + LZ4HCJavaUnsafeCompressor() { this(DEFAULT_COMPRESSION_LEVEL); } + LZ4HCJavaUnsafeCompressor(int compressionLevel) { + this.maxAttempts = 1<<(compressionLevel-1); + this.compressionLevel = compressionLevel; + } + + private class HashTable { + static final int MASK = MAX_DISTANCE - 1; + int nextToUpdate; + private final int base; + private final int[] hashTable; + private final short[] chainTable; + + HashTable(int base) { + this.base = base; + nextToUpdate = base; + hashTable = new int[HASH_TABLE_SIZE_HC]; + Arrays.fill(hashTable, -1); + chainTable = new short[MAX_DISTANCE]; + } + + private int hashPointer(byte[] bytes, int off) { + final int v = UnsafeUtils.readInt(bytes, off); + return hashPointer(v); + } + + private int hashPointer(ByteBuffer bytes, int off) { + final int v = ByteBufferUtils.readInt(bytes, off); + return hashPointer(v); + } + + private int hashPointer(int v) { + final int h = hashHC(v); + return hashTable[h]; + } + + private int next(int off) { + return off - (chainTable[off & MASK] & 0xFFFF); + } + + private void addHash(byte[] bytes, int off) { + final int v = UnsafeUtils.readInt(bytes, off); + addHash(v, off); + } + + private void addHash(ByteBuffer bytes, int off) { + final int v = ByteBufferUtils.readInt(bytes, off); + addHash(v, off); + } + + private void addHash(int v, int off) { + final int h = hashHC(v); + int delta = off - hashTable[h]; + assert delta > 0 : delta; + if (delta >= MAX_DISTANCE) { + delta = MAX_DISTANCE - 1; + } + chainTable[off & MASK] = (short) delta; + hashTable[h] = off; + } + + void insert(int off, byte[] bytes) { + for (; nextToUpdate < off; ++nextToUpdate) { + addHash(bytes, nextToUpdate); + } + } + + void insert(int off, ByteBuffer bytes) { + for (; nextToUpdate < off; ++nextToUpdate) { + addHash(bytes, nextToUpdate); + } + } + + + + boolean insertAndFindBestMatch(byte[] buf, int off, int matchLimit, Match match) { 
+ match.start = off; + match.len = 0; + int delta = 0; + int repl = 0; + + insert(off, buf); + + int ref = hashPointer(buf, off); + + if (ref >= off - 4 && ref <= off && ref >= base) { // potential repetition + if (LZ4UnsafeUtils.readIntEquals(buf, ref, off)) { // confirmed + delta = off - ref; + repl = match.len = MIN_MATCH + LZ4UnsafeUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit); + match.ref = ref; + } + ref = next(ref); + } + + for (int i = 0; i < maxAttempts; ++i) { + if (ref < Math.max(base, off - MAX_DISTANCE + 1) || ref > off) { + break; + } + if (LZ4UnsafeUtils.readIntEquals(buf, ref, off)) { + final int matchLen = MIN_MATCH + LZ4UnsafeUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit); + if (matchLen > match.len) { + match.ref = ref; + match.len = matchLen; + } + } + ref = next(ref); + } + + if (repl != 0) { + int ptr = off; + final int end = off + repl - (MIN_MATCH - 1); + while (ptr < end - delta) { + chainTable[ptr & MASK] = (short) delta; // pre load + ++ptr; + } + do { + chainTable[ptr & MASK] = (short) delta; + hashTable[hashHC(UnsafeUtils.readInt(buf, ptr))] = ptr; + ++ptr; + } while (ptr < end); + nextToUpdate = end; + } + + return match.len != 0; + } + + boolean insertAndFindWiderMatch(byte[] buf, int off, int startLimit, int matchLimit, int minLen, Match match) { + match.len = minLen; + + insert(off, buf); + + final int delta = off - startLimit; + int ref = hashPointer(buf, off); + for (int i = 0; i < maxAttempts; ++i) { + if (ref < Math.max(base, off - MAX_DISTANCE + 1) || ref > off) { + break; + } + if (LZ4UnsafeUtils.readIntEquals(buf, ref, off)) { + final int matchLenForward = MIN_MATCH +LZ4UnsafeUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit); + final int matchLenBackward = LZ4UnsafeUtils.commonBytesBackward(buf, ref, off, base, startLimit); + final int matchLen = matchLenBackward + matchLenForward; + if (matchLen > match.len) { + match.len = matchLen; + match.ref = ref - matchLenBackward; + match.start = off - matchLenBackward; + } + } + ref = next(ref); + } + + return match.len > minLen; + } + + + boolean insertAndFindBestMatch(ByteBuffer buf, int off, int matchLimit, Match match) { + match.start = off; + match.len = 0; + int delta = 0; + int repl = 0; + + insert(off, buf); + + int ref = hashPointer(buf, off); + + if (ref >= off - 4 && ref <= off && ref >= base) { // potential repetition + if (LZ4ByteBufferUtils.readIntEquals(buf, ref, off)) { // confirmed + delta = off - ref; + repl = match.len = MIN_MATCH + LZ4ByteBufferUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit); + match.ref = ref; + } + ref = next(ref); + } + + for (int i = 0; i < maxAttempts; ++i) { + if (ref < Math.max(base, off - MAX_DISTANCE + 1) || ref > off) { + break; + } + if (LZ4ByteBufferUtils.readIntEquals(buf, ref, off)) { + final int matchLen = MIN_MATCH + LZ4ByteBufferUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit); + if (matchLen > match.len) { + match.ref = ref; + match.len = matchLen; + } + } + ref = next(ref); + } + + if (repl != 0) { + int ptr = off; + final int end = off + repl - (MIN_MATCH - 1); + while (ptr < end - delta) { + chainTable[ptr & MASK] = (short) delta; // pre load + ++ptr; + } + do { + chainTable[ptr & MASK] = (short) delta; + hashTable[hashHC(ByteBufferUtils.readInt(buf, ptr))] = ptr; + ++ptr; + } while (ptr < end); + nextToUpdate = end; + } + + return match.len != 0; + } + + boolean insertAndFindWiderMatch(ByteBuffer buf, int off, int startLimit, int matchLimit, int 
minLen, Match match) { + match.len = minLen; + + insert(off, buf); + + final int delta = off - startLimit; + int ref = hashPointer(buf, off); + for (int i = 0; i < maxAttempts; ++i) { + if (ref < Math.max(base, off - MAX_DISTANCE + 1) || ref > off) { + break; + } + if (LZ4ByteBufferUtils.readIntEquals(buf, ref, off)) { + final int matchLenForward = MIN_MATCH +LZ4ByteBufferUtils.commonBytes(buf, ref + MIN_MATCH, off + MIN_MATCH, matchLimit); + final int matchLenBackward = LZ4ByteBufferUtils.commonBytesBackward(buf, ref, off, base, startLimit); + final int matchLen = matchLenBackward + matchLenForward; + if (matchLen > match.len) { + match.len = matchLen; + match.ref = ref - matchLenBackward; + match.start = off - matchLenBackward; + } + } + ref = next(ref); + } + + return match.len > minLen; + } + + + } + + @Override + public int compress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen) { + + UnsafeUtils.checkRange(src, srcOff, srcLen); + UnsafeUtils.checkRange(dest, destOff, maxDestLen); + + final int srcEnd = srcOff + srcLen; + final int destEnd = destOff + maxDestLen; + final int mfLimit = srcEnd - MF_LIMIT; + final int matchLimit = srcEnd - LAST_LITERALS; + + int sOff = srcOff; + int dOff = destOff; + int anchor = sOff++; + + final HashTable ht = new HashTable(srcOff); + final Match match0 = new Match(); + final Match match1 = new Match(); + final Match match2 = new Match(); + final Match match3 = new Match(); + + main: + while (sOff < mfLimit) { + if (!ht.insertAndFindBestMatch(src, sOff, matchLimit, match1)) { + ++sOff; + continue; + } + + // saved, in case we would skip too much + copyTo(match1, match0); + + search2: + while (true) { + assert match1.start >= anchor; + if (match1.end() >= mfLimit + || !ht.insertAndFindWiderMatch(src, match1.end() - 2, match1.start + 1, matchLimit, match1.len, match2)) { + // no better match + dOff = LZ4UnsafeUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd); + anchor = sOff = match1.end(); + continue main; + } + + if (match0.start < match1.start) { + if (match2.start < match1.start + match0.len) { // empirical + copyTo(match0, match1); + } + } + assert match2.start > match1.start; + + if (match2.start - match1.start < 3) { // First Match too small : removed + copyTo(match2, match1); + continue search2; + } + + search3: + while (true) { + if (match2.start - match1.start < OPTIMAL_ML) { + int newMatchLen = match1.len; + if (newMatchLen > OPTIMAL_ML) { + newMatchLen = OPTIMAL_ML; + } + if (match1.start + newMatchLen > match2.end() - MIN_MATCH) { + newMatchLen = match2.start - match1.start + match2.len - MIN_MATCH; + } + final int correction = newMatchLen - (match2.start - match1.start); + if (correction > 0) { + match2.fix(correction); + } + } + + if (match2.start + match2.len >= mfLimit + || !ht.insertAndFindWiderMatch(src, match2.end() - 3, match2.start, matchLimit, match2.len, match3)) { + // no better match -> 2 sequences to encode + if (match2.start < match1.end()) { + match1.len = match2.start - match1.start; + } + // encode seq 1 + dOff = LZ4UnsafeUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd); + anchor = sOff = match1.end(); + // encode seq 2 + dOff = LZ4UnsafeUtils.encodeSequence(src, anchor, match2.start, match2.ref, match2.len, dest, dOff, destEnd); + anchor = sOff = match2.end(); + continue main; + } + + if (match3.start < match1.end() + 3) { // Not enough space for match 2 : remove it + if (match3.start >= match1.end()) { // 
// can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 + if (match2.start < match1.end()) { + final int correction = match1.end() - match2.start; + match2.fix(correction); + if (match2.len < MIN_MATCH) { + copyTo(match3, match2); + } + } + + dOff = LZ4UnsafeUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd); + anchor = sOff = match1.end(); + + copyTo(match3, match1); + copyTo(match2, match0); + + continue search2; + } + + copyTo(match3, match2); + continue search3; + } + + // OK, now we have 3 ascending matches; let's write at least the first one + if (match2.start < match1.end()) { + if (match2.start - match1.start < ML_MASK) { + if (match1.len > OPTIMAL_ML) { + match1.len = OPTIMAL_ML; + } + if (match1.end() > match2.end() - MIN_MATCH) { + match1.len = match2.end() - match1.start - MIN_MATCH; + } + final int correction = match1.end() - match2.start; + match2.fix(correction); + } else { + match1.len = match2.start - match1.start; + } + } + + dOff = LZ4UnsafeUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd); + anchor = sOff = match1.end(); + + copyTo(match2, match1); + copyTo(match3, match2); + + continue search3; + } + + } + + } + + dOff = LZ4UnsafeUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd); + return dOff - destOff; + } + + + @Override + public int compress(ByteBuffer src, int srcOff, int srcLen, ByteBuffer dest, int destOff, int maxDestLen) { + + if (src.hasArray() && dest.hasArray()) { + return compress(src.array(), srcOff + src.arrayOffset(), srcLen, dest.array(), destOff + dest.arrayOffset(), maxDestLen); + } + src = ByteBufferUtils.inNativeByteOrder(src); + dest = ByteBufferUtils.inNativeByteOrder(dest); + + ByteBufferUtils.checkRange(src, srcOff, srcLen); + ByteBufferUtils.checkRange(dest, destOff, maxDestLen); + + final int srcEnd = srcOff + srcLen; + final int destEnd = destOff + maxDestLen; + final int mfLimit = srcEnd - MF_LIMIT; + final int matchLimit = srcEnd - LAST_LITERALS; + + int sOff = srcOff; + int dOff = destOff; + int anchor = sOff++; + + final HashTable ht = new HashTable(srcOff); + final Match match0 = new Match(); + final Match match1 = new Match(); + final Match match2 = new Match(); + final Match match3 = new Match(); + + main: + while (sOff < mfLimit) { + if (!ht.insertAndFindBestMatch(src, sOff, matchLimit, match1)) { + ++sOff; + continue; + } + + // saved, in case we would skip too much + copyTo(match1, match0); + + search2: + while (true) { + assert match1.start >= anchor; + if (match1.end() >= mfLimit + || !ht.insertAndFindWiderMatch(src, match1.end() - 2, match1.start + 1, matchLimit, match1.len, match2)) { + // no better match + dOff = LZ4ByteBufferUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd); + anchor = sOff = match1.end(); + continue main; + } + + if (match0.start < match1.start) { + if (match2.start < match1.start + match0.len) { // empirical + copyTo(match0, match1); + } + } + assert match2.start > match1.start; + + if (match2.start - match1.start < 3) { // First Match too small : removed + copyTo(match2, match1); + continue search2; + } + + search3: + while (true) { + if (match2.start - match1.start < OPTIMAL_ML) { + int newMatchLen = match1.len; + if (newMatchLen > OPTIMAL_ML) { + newMatchLen = OPTIMAL_ML; + } + if (match1.start + newMatchLen > match2.end() - MIN_MATCH) { + newMatchLen = match2.start - match1.start + match2.len - MIN_MATCH; + } + final int correction = 
newMatchLen - (match2.start - match1.start); + if (correction > 0) { + match2.fix(correction); + } + } + + if (match2.start + match2.len >= mfLimit + || !ht.insertAndFindWiderMatch(src, match2.end() - 3, match2.start, matchLimit, match2.len, match3)) { + // no better match -> 2 sequences to encode + if (match2.start < match1.end()) { + match1.len = match2.start - match1.start; + } + // encode seq 1 + dOff = LZ4ByteBufferUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd); + anchor = sOff = match1.end(); + // encode seq 2 + dOff = LZ4ByteBufferUtils.encodeSequence(src, anchor, match2.start, match2.ref, match2.len, dest, dOff, destEnd); + anchor = sOff = match2.end(); + continue main; + } + + if (match3.start < match1.end() + 3) { // Not enough space for match 2 : remove it + if (match3.start >= match1.end()) { // // can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 + if (match2.start < match1.end()) { + final int correction = match1.end() - match2.start; + match2.fix(correction); + if (match2.len < MIN_MATCH) { + copyTo(match3, match2); + } + } + + dOff = LZ4ByteBufferUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd); + anchor = sOff = match1.end(); + + copyTo(match3, match1); + copyTo(match2, match0); + + continue search2; + } + + copyTo(match3, match2); + continue search3; + } + + // OK, now we have 3 ascending matches; let's write at least the first one + if (match2.start < match1.end()) { + if (match2.start - match1.start < ML_MASK) { + if (match1.len > OPTIMAL_ML) { + match1.len = OPTIMAL_ML; + } + if (match1.end() > match2.end() - MIN_MATCH) { + match1.len = match2.end() - match1.start - MIN_MATCH; + } + final int correction = match1.end() - match2.start; + match2.fix(correction); + } else { + match1.len = match2.start - match1.start; + } + } + + dOff = LZ4ByteBufferUtils.encodeSequence(src, anchor, match1.start, match1.ref, match1.len, dest, dOff, destEnd); + anchor = sOff = match1.end(); + + copyTo(match2, match1); + copyTo(match3, match2); + + continue search3; + } + + } + + } + + dOff = LZ4ByteBufferUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd); + return dOff - destOff; + } + + +} diff --git a/core/src/main/java/net/jpountz/lz4/LZ4InputStream.java b/core/src/main/java/net/jpountz/lz4/LZ4InputStream.java new file mode 100644 index 00000000..012c8071 --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/LZ4InputStream.java @@ -0,0 +1,133 @@ +package net.jpountz.lz4; + +import java.io.IOException; +import java.io.InputStream; + +public class LZ4InputStream extends InputStream { + + private static LZ4Factory factory = LZ4Factory.fastestInstance(); + + private final InputStream inputStream; + private final LZ4Decompressor decompressor; + + private byte compressedBuffer[] = new byte[1048576]; + private byte decompressedBuffer[] = new byte[1048576]; + private int decompressedBufferPosition = 0; + private int decompressedBufferLength = 0; + + public LZ4InputStream(InputStream stream) { + this.decompressor = factory.decompressor(); + this.inputStream = stream; + } + + @Override + public void close() throws IOException { + inputStream.close(); + } + + @Override + public int read() throws IOException { + if (ensureBytesAvailableInDecompressedBuffer()) + return decompressedBuffer[decompressedBufferPosition++] & 0xFF; + + return -1; + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + if (!ensureBytesAvailableInDecompressedBuffer()) + 
return -1; + + int numBytesRemainingToRead = len - off; + + while (numBytesRemainingToRead > 0 && ensureBytesAvailableInDecompressedBuffer()) { + int numBytesToRead = numBytesRemainingToRead; + int numBytesRemainingInBlock = decompressedBufferLength - decompressedBufferPosition; + if (numBytesToRead > numBytesRemainingInBlock) { + numBytesToRead = numBytesRemainingInBlock; + } + + System.arraycopy(decompressedBuffer, decompressedBufferPosition, b, off, numBytesToRead); + + decompressedBufferPosition += numBytesToRead; + off += numBytesToRead; + numBytesRemainingToRead -= numBytesToRead; + } + + return len - numBytesRemainingToRead; + } + + @Override + public long skip(long n) throws IOException { + long numBytesRemainingToSkip = n; + + while (numBytesRemainingToSkip > 0 && ensureBytesAvailableInDecompressedBuffer()) { + long numBytesToSkip = numBytesRemainingToSkip; + int numBytesRemainingInBlock = decompressedBufferLength - decompressedBufferPosition; + if (numBytesToSkip > numBytesRemainingInBlock) { + numBytesToSkip = numBytesRemainingInBlock; + } + + numBytesRemainingToSkip -= numBytesToSkip; + decompressedBufferPosition += numBytesToSkip; + } + + return n - numBytesRemainingToSkip; + } + + private boolean ensureBytesAvailableInDecompressedBuffer() throws IOException { + while (decompressedBufferPosition >= decompressedBufferLength) { + if (!fillBuffer()) { + return false; + } + } + + return true; + } + + private boolean fillBuffer() throws IOException { + decompressedBufferLength = LZ4StreamHelper.readLength(inputStream); + int compressedBufferLength = LZ4StreamHelper.readLength(inputStream); + + if (blockHeadersIndicateNoMoreData(compressedBufferLength, decompressedBufferLength)) { + return false; + } + + ensureBufferCapacity(compressedBufferLength, decompressedBufferLength); + + if (fillCompressedBuffer(compressedBufferLength)) { + decompressor.decompress(compressedBuffer, 0, decompressedBuffer, 0, decompressedBufferLength); + decompressedBufferPosition = 0; + return true; + } + + return false; + } + + private boolean blockHeadersIndicateNoMoreData(int compressedBufferLength, int decompressedBufferLength) { + return compressedBufferLength < 0 || decompressedBufferLength < 0; + } + + private boolean fillCompressedBuffer(int compressedBufferLength) throws IOException { + int bytesRead = 0; + while (bytesRead < compressedBufferLength) { + int bytesReadInAttempt = inputStream.read(compressedBuffer, bytesRead, compressedBufferLength - bytesRead); + if (bytesReadInAttempt < 0) + return false; + bytesRead += bytesReadInAttempt; + } + + return true; + } + + private void ensureBufferCapacity(int compressedBufferLength, int decompressedBufferLength) { + if (compressedBufferLength > compressedBuffer.length) { + compressedBuffer = new byte[compressedBufferLength]; + } + + if (decompressedBufferLength > decompressedBuffer.length) { + decompressedBuffer = new byte[decompressedBufferLength]; + } + } + +} \ No newline at end of file diff --git a/core/src/main/java/net/jpountz/lz4/LZ4JNI.java b/core/src/main/java/net/jpountz/lz4/LZ4JNI.java new file mode 100644 index 00000000..f083319a --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/LZ4JNI.java @@ -0,0 +1,41 @@ +package net.jpountz.lz4; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.nio.ByteBuffer; + +import net.jpountz.util.Native; + + +/** + * JNI bindings to the original C implementation of LZ4. + */ +enum LZ4JNI { + ; + + static { + Native.load(); + init(); + } + + static native void init(); + static native int LZ4_compress_limitedOutput(byte[] srcArray, ByteBuffer srcBuffer, int srcOff, int srcLen, byte[] destArray, ByteBuffer destBuffer, int destOff, int maxDestLen); + static native int LZ4_compressHC(byte[] srcArray, ByteBuffer srcBuffer, int srcOff, int srcLen, byte[] destArray, ByteBuffer destBuffer, int destOff, int maxDestLen, int compressionLevel); + static native int LZ4_decompress_fast(byte[] srcArray, ByteBuffer srcBuffer, int srcOff, byte[] destArray, ByteBuffer destBuffer, int destOff, int destLen); + static native int LZ4_decompress_safe(byte[] srcArray, ByteBuffer srcBuffer, int srcOff, int srcLen, byte[] destArray, ByteBuffer destBuffer, int destOff, int maxDestLen); + static native int LZ4_compressBound(int len); + +} + diff --git a/core/src/main/java/net/jpountz/lz4/LZ4JNICompressor.java b/core/src/main/java/net/jpountz/lz4/LZ4JNICompressor.java new file mode 100644 index 00000000..18971a33 --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/LZ4JNICompressor.java @@ -0,0 +1,80 @@ +package net.jpountz.lz4; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import static net.jpountz.util.ByteBufferUtils.checkNotReadOnly; +import static net.jpountz.util.ByteBufferUtils.checkRange; +import static net.jpountz.util.SafeUtils.checkRange; + +import java.nio.ByteBuffer; + +/** + * Fast {@link LZ4FastCompressor}s implemented with JNI bindings to the original C + * implementation of LZ4. 
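+ * <p>Array-backed and direct {@link ByteBuffer}s are passed straight through JNI; any
+ * other buffer type falls back to {@code LZ4Factory.safeInstance().fastCompressor()}
+ * (see the override below). A hypothetical usage sketch:
+ * <pre>
+ * LZ4Compressor compressor = LZ4Factory.nativeInstance().fastCompressor();
+ * byte[] dest = new byte[compressor.maxCompressedLength(src.length)];
+ * int compressedLength = compressor.compress(src, 0, src.length, dest, 0, dest.length);
+ * </pre>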
+ */ +final class LZ4JNICompressor extends LZ4Compressor { + + public static final LZ4Compressor INSTANCE = new LZ4JNICompressor(); + private static LZ4Compressor SAFE_INSTANCE; + + @Override + public int compress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen) { + checkRange(src, srcOff, srcLen); + checkRange(dest, destOff, maxDestLen); + final int result = LZ4JNI.LZ4_compress_limitedOutput(src, null, srcOff, srcLen, dest, null, destOff, maxDestLen); + if (result <= 0) { + throw new LZ4Exception("maxDestLen is too small"); + } + return result; + } + + @Override + public int compress(ByteBuffer src, int srcOff, int srcLen, ByteBuffer dest, int destOff, int maxDestLen) { + checkNotReadOnly(dest); + checkRange(src, srcOff, srcLen); + checkRange(dest, destOff, maxDestLen); + + if ((src.hasArray() || src.isDirect()) && (dest.hasArray() || dest.isDirect())) { + byte[] srcArr = null, destArr = null; + ByteBuffer srcBuf = null, destBuf = null; + if (src.hasArray()) { + srcArr = src.array(); + srcOff += src.arrayOffset(); + } else { + assert src.isDirect(); + srcBuf = src; + } + if (dest.hasArray()) { + destArr = dest.array(); + destOff += dest.arrayOffset(); + } else { + assert dest.isDirect(); + destBuf = dest; + } + + final int result = LZ4JNI.LZ4_compress_limitedOutput(srcArr, srcBuf, srcOff, srcLen, destArr, destBuf, destOff, maxDestLen); + if (result <= 0) { + throw new LZ4Exception("maxDestLen is too small"); + } + return result; + } else { + LZ4Compressor safeInstance = SAFE_INSTANCE; + if (safeInstance == null) { + safeInstance = SAFE_INSTANCE = LZ4Factory.safeInstance().fastCompressor(); + } + return safeInstance.compress(src, srcOff, srcLen, dest, destOff, maxDestLen); + } + } +} diff --git a/core/src/main/java/net/jpountz/lz4/LZ4JNIFastDecompressor.java b/core/src/main/java/net/jpountz/lz4/LZ4JNIFastDecompressor.java new file mode 100644 index 00000000..5c355d85 --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/LZ4JNIFastDecompressor.java @@ -0,0 +1,82 @@ +package net.jpountz.lz4; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +import java.nio.ByteBuffer; + +import net.jpountz.util.ByteBufferUtils; +import net.jpountz.util.SafeUtils; + + +/** + * {@link LZ4FastDecompressor} implemented with JNI bindings to the original C + * implementation of LZ4. 
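+ * <p>As in the other JNI bindings, a negative result from the native call is turned into
+ * an {@link LZ4Exception}; a hypothetical caller should be prepared to handle it:
+ * <pre>
+ * try {
+ *   LZ4Factory.nativeInstance().fastDecompressor().decompress(compressed, 0, restored, 0, restored.length);
+ * } catch (LZ4Exception e) {
+ *   // malformed or truncated input
+ * }
+ * </pre>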
+ */ +final class LZ4JNIFastDecompressor extends LZ4FastDecompressor { + + public static final LZ4JNIFastDecompressor INSTANCE = new LZ4JNIFastDecompressor(); + private static LZ4FastDecompressor SAFE_INSTANCE; + + @Override + public final int decompress(byte[] src, int srcOff, byte[] dest, int destOff, int destLen) { + SafeUtils.checkRange(src, srcOff); + SafeUtils.checkRange(dest, destOff, destLen); + final int result = LZ4JNI.LZ4_decompress_fast(src, null, srcOff, dest, null, destOff, destLen); + if (result < 0) { + throw new LZ4Exception("Error decoding offset " + (srcOff - result) + " of input buffer"); + } + return result; + } + + @Override + public int decompress(ByteBuffer src, int srcOff, ByteBuffer dest, int destOff, int destLen) { + ByteBufferUtils.checkNotReadOnly(dest); + ByteBufferUtils.checkRange(src, srcOff); + ByteBufferUtils.checkRange(dest, destOff, destLen); + + if ((src.hasArray() || src.isDirect()) && (dest.hasArray() || dest.isDirect())) { + byte[] srcArr = null, destArr = null; + ByteBuffer srcBuf = null, destBuf = null; + if (src.hasArray()) { + srcArr = src.array(); + srcOff += src.arrayOffset(); + } else { + assert src.isDirect(); + srcBuf = src; + } + if (dest.hasArray()) { + destArr = dest.array(); + destOff += dest.arrayOffset(); + } else { + assert dest.isDirect(); + destBuf = dest; + } + + final int result = LZ4JNI.LZ4_decompress_fast(srcArr, srcBuf, srcOff, destArr, destBuf, destOff, destLen); + if (result < 0) { + throw new LZ4Exception("Error decoding offset " + (srcOff - result) + " of input buffer"); + } + return result; + } else { + LZ4FastDecompressor safeInstance = SAFE_INSTANCE; + if (safeInstance == null) { + safeInstance = SAFE_INSTANCE = LZ4Factory.safeInstance().fastDecompressor(); + } + return safeInstance.decompress(src, srcOff, dest, destOff, destLen); + } + } + +} diff --git a/core/src/main/java/net/jpountz/lz4/LZ4JNISafeDecompressor.java b/core/src/main/java/net/jpountz/lz4/LZ4JNISafeDecompressor.java new file mode 100644 index 00000000..d948c988 --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/LZ4JNISafeDecompressor.java @@ -0,0 +1,81 @@ +package net.jpountz.lz4; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +import java.nio.ByteBuffer; + +import net.jpountz.util.ByteBufferUtils; +import net.jpountz.util.SafeUtils; + +/** + * {@link LZ4SafeDecompressor} implemented with JNI bindings to the original C + * implementation of LZ4. 
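+ * <p>A hypothetical usage sketch for the case where only the compressed length is
+ * known; the return value is the number of bytes written into the destination:
+ * <pre>
+ * LZ4SafeDecompressor decompressor = LZ4Factory.nativeInstance().safeDecompressor();
+ * byte[] restored = new byte[maxExpectedLength];  // upper bound chosen by the caller
+ * int restoredLength = decompressor.decompress(compressed, 0, compressedLength, restored, 0, restored.length);
+ * </pre>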
+ */ +final class LZ4JNISafeDecompressor extends LZ4SafeDecompressor { + + public static final LZ4JNISafeDecompressor INSTANCE = new LZ4JNISafeDecompressor(); + private static LZ4SafeDecompressor SAFE_INSTANCE; + + @Override + public final int decompress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen) { + SafeUtils.checkRange(src, srcOff, srcLen); + SafeUtils.checkRange(dest, destOff, maxDestLen); + final int result = LZ4JNI.LZ4_decompress_safe(src, null, srcOff, srcLen, dest, null, destOff, maxDestLen); + if (result < 0) { + throw new LZ4Exception("Error decoding offset " + (srcOff - result) + " of input buffer"); + } + return result; + } + + @Override + public int decompress(ByteBuffer src, int srcOff, int srcLen, ByteBuffer dest, int destOff, int maxDestLen) { + ByteBufferUtils.checkNotReadOnly(dest); + ByteBufferUtils.checkRange(src, srcOff, srcLen); + ByteBufferUtils.checkRange(dest, destOff, maxDestLen); + + if ((src.hasArray() || src.isDirect()) && (dest.hasArray() || dest.isDirect())) { + byte[] srcArr = null, destArr = null; + ByteBuffer srcBuf = null, destBuf = null; + if (src.hasArray()) { + srcArr = src.array(); + srcOff += src.arrayOffset(); + } else { + assert src.isDirect(); + srcBuf = src; + } + if (dest.hasArray()) { + destArr = dest.array(); + destOff += dest.arrayOffset(); + } else { + assert dest.isDirect(); + destBuf = dest; + } + + final int result = LZ4JNI.LZ4_decompress_safe(srcArr, srcBuf, srcOff, srcLen, destArr, destBuf, destOff, maxDestLen); + if (result < 0) { + throw new LZ4Exception("Error decoding offset " + (srcOff - result) + " of input buffer"); + } + return result; + } else { + LZ4SafeDecompressor safeInstance = SAFE_INSTANCE; + if (safeInstance == null) { + safeInstance = SAFE_INSTANCE = LZ4Factory.safeInstance().safeDecompressor(); + } + return safeInstance.decompress(src, srcOff, srcLen, dest, destOff, maxDestLen); + } + } + +} diff --git a/core/src/main/java/net/jpountz/lz4/LZ4JavaSafeCompressor.java b/core/src/main/java/net/jpountz/lz4/LZ4JavaSafeCompressor.java new file mode 100644 index 00000000..06d635d6 --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/LZ4JavaSafeCompressor.java @@ -0,0 +1,511 @@ +// Auto-generated: DO NOT EDIT + +package net.jpountz.lz4; + +import static net.jpountz.lz4.LZ4Constants.*; +import static net.jpountz.lz4.LZ4Utils.*; + +import java.nio.ByteBuffer; +import java.util.Arrays; + +import net.jpountz.util.ByteBufferUtils; +import net.jpountz.util.SafeUtils; + +/** + * Compressor. 
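+ * <p>Pure-Java fast (non-HC) compressor, normally obtained through
+ * {@code LZ4Factory.safeInstance().fastCompressor()}; inputs below {@code LZ4_64K_LIMIT}
+ * take the dedicated {@code compress64k} path below. A hypothetical sketch:
+ * <pre>
+ * LZ4Compressor compressor = LZ4Factory.safeInstance().fastCompressor();
+ * int compressedLength = compressor.compress(src, 0, src.length, dest, 0, dest.length);
+ * </pre>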
+ */ +final class LZ4JavaSafeCompressor extends LZ4Compressor { + + public static final LZ4Compressor INSTANCE = new LZ4JavaSafeCompressor(); + + static int compress64k(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int destEnd) { + final int srcEnd = srcOff + srcLen; + final int srcLimit = srcEnd - LAST_LITERALS; + final int mflimit = srcEnd - MF_LIMIT; + + int sOff = srcOff, dOff = destOff; + + int anchor = sOff; + + if (srcLen >= MIN_LENGTH) { + + final short[] hashTable = new short[HASH_TABLE_SIZE_64K]; + + ++sOff; + + main: + while (true) { + + // find a match + int forwardOff = sOff; + + int ref; + int step = 1; + int searchMatchNb = 1 << SKIP_STRENGTH; + do { + sOff = forwardOff; + forwardOff += step; + step = searchMatchNb++ >>> SKIP_STRENGTH; + + if (forwardOff > mflimit) { + break main; + } + + final int h = hash64k(SafeUtils.readInt(src, sOff)); + ref = srcOff + SafeUtils.readShort(hashTable, h); + SafeUtils.writeShort(hashTable, h, sOff - srcOff); + } while (!LZ4SafeUtils.readIntEquals(src, ref, sOff)); + + // catch up + final int excess = LZ4SafeUtils.commonBytesBackward(src, ref, sOff, srcOff, anchor); + sOff -= excess; + ref -= excess; + + // sequence == refsequence + final int runLen = sOff - anchor; + + // encode literal length + int tokenOff = dOff++; + + if (dOff + runLen + (2 + 1 + LAST_LITERALS) + (runLen >>> 8) > destEnd) { + throw new LZ4Exception("maxDestLen is too small"); + } + + if (runLen >= RUN_MASK) { + SafeUtils.writeByte(dest, tokenOff, RUN_MASK << ML_BITS); + dOff = LZ4SafeUtils.writeLen(runLen - RUN_MASK, dest, dOff); + } else { + SafeUtils.writeByte(dest, tokenOff, runLen << ML_BITS); + } + + // copy literals + LZ4SafeUtils.wildArraycopy(src, anchor, dest, dOff, runLen); + dOff += runLen; + + while (true) { + // encode offset + SafeUtils.writeShortLE(dest, dOff, (short) (sOff - ref)); + dOff += 2; + + // count nb matches + sOff += MIN_MATCH; + ref += MIN_MATCH; + final int matchLen = LZ4SafeUtils.commonBytes(src, ref, sOff, srcLimit); + if (dOff + (1 + LAST_LITERALS) + (matchLen >>> 8) > destEnd) { + throw new LZ4Exception("maxDestLen is too small"); + } + sOff += matchLen; + + // encode match len + if (matchLen >= ML_MASK) { + SafeUtils.writeByte(dest, tokenOff, SafeUtils.readByte(dest, tokenOff) | ML_MASK); + dOff = LZ4SafeUtils.writeLen(matchLen - ML_MASK, dest, dOff); + } else { + SafeUtils.writeByte(dest, tokenOff, SafeUtils.readByte(dest, tokenOff) | matchLen); + } + + // test end of chunk + if (sOff > mflimit) { + anchor = sOff; + break main; + } + + // fill table + SafeUtils.writeShort(hashTable, hash64k(SafeUtils.readInt(src, sOff - 2)), sOff - 2 - srcOff); + + // test next position + final int h = hash64k(SafeUtils.readInt(src, sOff)); + ref = srcOff + SafeUtils.readShort(hashTable, h); + SafeUtils.writeShort(hashTable, h, sOff - srcOff); + + if (!LZ4SafeUtils.readIntEquals(src, sOff, ref)) { + break; + } + + tokenOff = dOff++; + SafeUtils.writeByte(dest, tokenOff, 0); + } + + // prepare next loop + anchor = sOff++; + } + } + + dOff = LZ4SafeUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd); + return dOff - destOff; + } + + @Override + public int compress(byte[] src, final int srcOff, int srcLen, byte[] dest, final int destOff, int maxDestLen) { + + SafeUtils.checkRange(src, srcOff, srcLen); + SafeUtils.checkRange(dest, destOff, maxDestLen); + final int destEnd = destOff + maxDestLen; + + if (srcLen < LZ4_64K_LIMIT) { + return compress64k(src, srcOff, srcLen, dest, destOff, destEnd); + } + + final int srcEnd = 
srcOff + srcLen; + final int srcLimit = srcEnd - LAST_LITERALS; + final int mflimit = srcEnd - MF_LIMIT; + + int sOff = srcOff, dOff = destOff; + int anchor = sOff++; + + final int[] hashTable = new int[HASH_TABLE_SIZE]; + Arrays.fill(hashTable, anchor); + + main: + while (true) { + + // find a match + int forwardOff = sOff; + + int ref; + int step = 1; + int searchMatchNb = 1 << SKIP_STRENGTH; + int back; + do { + sOff = forwardOff; + forwardOff += step; + step = searchMatchNb++ >>> SKIP_STRENGTH; + + if (forwardOff > mflimit) { + break main; + } + + final int h = hash(SafeUtils.readInt(src, sOff)); + ref = SafeUtils.readInt(hashTable, h); + back = sOff - ref; + SafeUtils.writeInt(hashTable, h, sOff); + } while (back >= MAX_DISTANCE || !LZ4SafeUtils.readIntEquals(src, ref, sOff)); + + + final int excess = LZ4SafeUtils.commonBytesBackward(src, ref, sOff, srcOff, anchor); + sOff -= excess; + ref -= excess; + + // sequence == refsequence + final int runLen = sOff - anchor; + + // encode literal length + int tokenOff = dOff++; + + if (dOff + runLen + (2 + 1 + LAST_LITERALS) + (runLen >>> 8) > destEnd) { + throw new LZ4Exception("maxDestLen is too small"); + } + + if (runLen >= RUN_MASK) { + SafeUtils.writeByte(dest, tokenOff, RUN_MASK << ML_BITS); + dOff = LZ4SafeUtils.writeLen(runLen - RUN_MASK, dest, dOff); + } else { + SafeUtils.writeByte(dest, tokenOff, runLen << ML_BITS); + } + + // copy literals + LZ4SafeUtils.wildArraycopy(src, anchor, dest, dOff, runLen); + dOff += runLen; + + while (true) { + // encode offset + SafeUtils.writeShortLE(dest, dOff, back); + dOff += 2; + + // count nb matches + sOff += MIN_MATCH; + final int matchLen = LZ4SafeUtils.commonBytes(src, ref + MIN_MATCH, sOff, srcLimit); + if (dOff + (1 + LAST_LITERALS) + (matchLen >>> 8) > destEnd) { + throw new LZ4Exception("maxDestLen is too small"); + } + sOff += matchLen; + + // encode match len + if (matchLen >= ML_MASK) { + SafeUtils.writeByte(dest, tokenOff, SafeUtils.readByte(dest, tokenOff) | ML_MASK); + dOff = LZ4SafeUtils.writeLen(matchLen - ML_MASK, dest, dOff); + } else { + SafeUtils.writeByte(dest, tokenOff, SafeUtils.readByte(dest, tokenOff) | matchLen); + } + + // test end of chunk + if (sOff > mflimit) { + anchor = sOff; + break main; + } + + // fill table + SafeUtils.writeInt(hashTable, hash(SafeUtils.readInt(src, sOff - 2)), sOff - 2); + + // test next position + final int h = hash(SafeUtils.readInt(src, sOff)); + ref = SafeUtils.readInt(hashTable, h); + SafeUtils.writeInt(hashTable, h, sOff); + back = sOff - ref; + + if (back >= MAX_DISTANCE || !LZ4SafeUtils.readIntEquals(src, ref, sOff)) { + break; + } + + tokenOff = dOff++; + SafeUtils.writeByte(dest, tokenOff, 0); + } + + // prepare next loop + anchor = sOff++; + } + + dOff = LZ4SafeUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd); + return dOff - destOff; + } + + + static int compress64k(ByteBuffer src, int srcOff, int srcLen, ByteBuffer dest, int destOff, int destEnd) { + final int srcEnd = srcOff + srcLen; + final int srcLimit = srcEnd - LAST_LITERALS; + final int mflimit = srcEnd - MF_LIMIT; + + int sOff = srcOff, dOff = destOff; + + int anchor = sOff; + + if (srcLen >= MIN_LENGTH) { + + final short[] hashTable = new short[HASH_TABLE_SIZE_64K]; + + ++sOff; + + main: + while (true) { + + // find a match + int forwardOff = sOff; + + int ref; + int step = 1; + int searchMatchNb = 1 << SKIP_STRENGTH; + do { + sOff = forwardOff; + forwardOff += step; + step = searchMatchNb++ >>> SKIP_STRENGTH; + + if (forwardOff > mflimit) { + break 
main; + } + + final int h = hash64k(ByteBufferUtils.readInt(src, sOff)); + ref = srcOff + SafeUtils.readShort(hashTable, h); + SafeUtils.writeShort(hashTable, h, sOff - srcOff); + } while (!LZ4ByteBufferUtils.readIntEquals(src, ref, sOff)); + + // catch up + final int excess = LZ4ByteBufferUtils.commonBytesBackward(src, ref, sOff, srcOff, anchor); + sOff -= excess; + ref -= excess; + + // sequence == refsequence + final int runLen = sOff - anchor; + + // encode literal length + int tokenOff = dOff++; + + if (dOff + runLen + (2 + 1 + LAST_LITERALS) + (runLen >>> 8) > destEnd) { + throw new LZ4Exception("maxDestLen is too small"); + } + + if (runLen >= RUN_MASK) { + ByteBufferUtils.writeByte(dest, tokenOff, RUN_MASK << ML_BITS); + dOff = LZ4ByteBufferUtils.writeLen(runLen - RUN_MASK, dest, dOff); + } else { + ByteBufferUtils.writeByte(dest, tokenOff, runLen << ML_BITS); + } + + // copy literals + LZ4ByteBufferUtils.wildArraycopy(src, anchor, dest, dOff, runLen); + dOff += runLen; + + while (true) { + // encode offset + ByteBufferUtils.writeShortLE(dest, dOff, (short) (sOff - ref)); + dOff += 2; + + // count nb matches + sOff += MIN_MATCH; + ref += MIN_MATCH; + final int matchLen = LZ4ByteBufferUtils.commonBytes(src, ref, sOff, srcLimit); + if (dOff + (1 + LAST_LITERALS) + (matchLen >>> 8) > destEnd) { + throw new LZ4Exception("maxDestLen is too small"); + } + sOff += matchLen; + + // encode match len + if (matchLen >= ML_MASK) { + ByteBufferUtils.writeByte(dest, tokenOff, ByteBufferUtils.readByte(dest, tokenOff) | ML_MASK); + dOff = LZ4ByteBufferUtils.writeLen(matchLen - ML_MASK, dest, dOff); + } else { + ByteBufferUtils.writeByte(dest, tokenOff, ByteBufferUtils.readByte(dest, tokenOff) | matchLen); + } + + // test end of chunk + if (sOff > mflimit) { + anchor = sOff; + break main; + } + + // fill table + SafeUtils.writeShort(hashTable, hash64k(ByteBufferUtils.readInt(src, sOff - 2)), sOff - 2 - srcOff); + + // test next position + final int h = hash64k(ByteBufferUtils.readInt(src, sOff)); + ref = srcOff + SafeUtils.readShort(hashTable, h); + SafeUtils.writeShort(hashTable, h, sOff - srcOff); + + if (!LZ4ByteBufferUtils.readIntEquals(src, sOff, ref)) { + break; + } + + tokenOff = dOff++; + ByteBufferUtils.writeByte(dest, tokenOff, 0); + } + + // prepare next loop + anchor = sOff++; + } + } + + dOff = LZ4ByteBufferUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd); + return dOff - destOff; + } + + @Override + public int compress(ByteBuffer src, final int srcOff, int srcLen, ByteBuffer dest, final int destOff, int maxDestLen) { + + if (src.hasArray() && dest.hasArray()) { + return compress(src.array(), srcOff + src.arrayOffset(), srcLen, dest.array(), destOff + dest.arrayOffset(), maxDestLen); + } + src = ByteBufferUtils.inNativeByteOrder(src); + dest = ByteBufferUtils.inNativeByteOrder(dest); + + ByteBufferUtils.checkRange(src, srcOff, srcLen); + ByteBufferUtils.checkRange(dest, destOff, maxDestLen); + final int destEnd = destOff + maxDestLen; + + if (srcLen < LZ4_64K_LIMIT) { + return compress64k(src, srcOff, srcLen, dest, destOff, destEnd); + } + + final int srcEnd = srcOff + srcLen; + final int srcLimit = srcEnd - LAST_LITERALS; + final int mflimit = srcEnd - MF_LIMIT; + + int sOff = srcOff, dOff = destOff; + int anchor = sOff++; + + final int[] hashTable = new int[HASH_TABLE_SIZE]; + Arrays.fill(hashTable, anchor); + + main: + while (true) { + + // find a match + int forwardOff = sOff; + + int ref; + int step = 1; + int searchMatchNb = 1 << SKIP_STRENGTH; + int back; + 
do { + sOff = forwardOff; + forwardOff += step; + step = searchMatchNb++ >>> SKIP_STRENGTH; + + if (forwardOff > mflimit) { + break main; + } + + final int h = hash(ByteBufferUtils.readInt(src, sOff)); + ref = SafeUtils.readInt(hashTable, h); + back = sOff - ref; + SafeUtils.writeInt(hashTable, h, sOff); + } while (back >= MAX_DISTANCE || !LZ4ByteBufferUtils.readIntEquals(src, ref, sOff)); + + + final int excess = LZ4ByteBufferUtils.commonBytesBackward(src, ref, sOff, srcOff, anchor); + sOff -= excess; + ref -= excess; + + // sequence == refsequence + final int runLen = sOff - anchor; + + // encode literal length + int tokenOff = dOff++; + + if (dOff + runLen + (2 + 1 + LAST_LITERALS) + (runLen >>> 8) > destEnd) { + throw new LZ4Exception("maxDestLen is too small"); + } + + if (runLen >= RUN_MASK) { + ByteBufferUtils.writeByte(dest, tokenOff, RUN_MASK << ML_BITS); + dOff = LZ4ByteBufferUtils.writeLen(runLen - RUN_MASK, dest, dOff); + } else { + ByteBufferUtils.writeByte(dest, tokenOff, runLen << ML_BITS); + } + + // copy literals + LZ4ByteBufferUtils.wildArraycopy(src, anchor, dest, dOff, runLen); + dOff += runLen; + + while (true) { + // encode offset + ByteBufferUtils.writeShortLE(dest, dOff, back); + dOff += 2; + + // count nb matches + sOff += MIN_MATCH; + final int matchLen = LZ4ByteBufferUtils.commonBytes(src, ref + MIN_MATCH, sOff, srcLimit); + if (dOff + (1 + LAST_LITERALS) + (matchLen >>> 8) > destEnd) { + throw new LZ4Exception("maxDestLen is too small"); + } + sOff += matchLen; + + // encode match len + if (matchLen >= ML_MASK) { + ByteBufferUtils.writeByte(dest, tokenOff, ByteBufferUtils.readByte(dest, tokenOff) | ML_MASK); + dOff = LZ4ByteBufferUtils.writeLen(matchLen - ML_MASK, dest, dOff); + } else { + ByteBufferUtils.writeByte(dest, tokenOff, ByteBufferUtils.readByte(dest, tokenOff) | matchLen); + } + + // test end of chunk + if (sOff > mflimit) { + anchor = sOff; + break main; + } + + // fill table + SafeUtils.writeInt(hashTable, hash(ByteBufferUtils.readInt(src, sOff - 2)), sOff - 2); + + // test next position + final int h = hash(ByteBufferUtils.readInt(src, sOff)); + ref = SafeUtils.readInt(hashTable, h); + SafeUtils.writeInt(hashTable, h, sOff); + back = sOff - ref; + + if (back >= MAX_DISTANCE || !LZ4ByteBufferUtils.readIntEquals(src, ref, sOff)) { + break; + } + + tokenOff = dOff++; + ByteBufferUtils.writeByte(dest, tokenOff, 0); + } + + // prepare next loop + anchor = sOff++; + } + + dOff = LZ4ByteBufferUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd); + return dOff - destOff; + } + + +} diff --git a/core/src/main/java/net/jpountz/lz4/LZ4JavaSafeFastDecompressor.java b/core/src/main/java/net/jpountz/lz4/LZ4JavaSafeFastDecompressor.java new file mode 100644 index 00000000..f9354d67 --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/LZ4JavaSafeFastDecompressor.java @@ -0,0 +1,205 @@ +// Auto-generated: DO NOT EDIT + +package net.jpountz.lz4; + +import static net.jpountz.lz4.LZ4Constants.*; + +import java.nio.ByteBuffer; + +import net.jpountz.util.ByteBufferUtils; +import net.jpountz.util.SafeUtils; + +/** + * Decompressor. 
+ */ +final class LZ4JavaSafeFastDecompressor extends LZ4FastDecompressor { + + public static final LZ4FastDecompressor INSTANCE = new LZ4JavaSafeFastDecompressor(); + + @Override + public int decompress(byte[] src, final int srcOff, byte[] dest, final int destOff, int destLen) { + + + SafeUtils.checkRange(src, srcOff); + SafeUtils.checkRange(dest, destOff, destLen); + + if (destLen == 0) { + if (SafeUtils.readByte(src, srcOff) != 0) { + throw new LZ4Exception("Malformed input at " + srcOff); + } + return 1; + } + + + final int destEnd = destOff + destLen; + + int sOff = srcOff; + int dOff = destOff; + + while (true) { + final int token = SafeUtils.readByte(src, sOff) & 0xFF; + ++sOff; + + // literals + int literalLen = token >>> ML_BITS; + if (literalLen == RUN_MASK) { + byte len = (byte) 0xFF; + while ((len = SafeUtils.readByte(src, sOff++)) == (byte) 0xFF) { + literalLen += 0xFF; + } + literalLen += len & 0xFF; + } + + final int literalCopyEnd = dOff + literalLen; + + if (literalCopyEnd > destEnd - COPY_LENGTH) { + if (literalCopyEnd != destEnd) { + throw new LZ4Exception("Malformed input at " + sOff); + + } else { + LZ4SafeUtils.safeArraycopy(src, sOff, dest, dOff, literalLen); + sOff += literalLen; + dOff = literalCopyEnd; + break; // EOF + } + } + + LZ4SafeUtils.wildArraycopy(src, sOff, dest, dOff, literalLen); + sOff += literalLen; + dOff = literalCopyEnd; + + // matchs + final int matchDec = SafeUtils.readShortLE(src, sOff); + sOff += 2; + int matchOff = dOff - matchDec; + + if (matchOff < destOff) { + throw new LZ4Exception("Malformed input at " + sOff); + } + + int matchLen = token & ML_MASK; + if (matchLen == ML_MASK) { + byte len = (byte) 0xFF; + while ((len = SafeUtils.readByte(src, sOff++)) == (byte) 0xFF) { + matchLen += 0xFF; + } + matchLen += len & 0xFF; + } + matchLen += MIN_MATCH; + + final int matchCopyEnd = dOff + matchLen; + + if (matchCopyEnd > destEnd - COPY_LENGTH) { + if (matchCopyEnd > destEnd) { + throw new LZ4Exception("Malformed input at " + sOff); + } + LZ4SafeUtils.safeIncrementalCopy(dest, matchOff, dOff, matchLen); + } else { + LZ4SafeUtils.wildIncrementalCopy(dest, matchOff, dOff, matchCopyEnd); + } + dOff = matchCopyEnd; + } + + + return sOff - srcOff; + + } + + @Override + public int decompress(ByteBuffer src, final int srcOff, ByteBuffer dest, final int destOff, int destLen) { + + if (src.hasArray() && dest.hasArray()) { + return decompress(src.array(), srcOff + src.arrayOffset(), dest.array(), destOff + dest.arrayOffset(), destLen); + } + src = ByteBufferUtils.inNativeByteOrder(src); + dest = ByteBufferUtils.inNativeByteOrder(dest); + + + ByteBufferUtils.checkRange(src, srcOff); + ByteBufferUtils.checkRange(dest, destOff, destLen); + + if (destLen == 0) { + if (ByteBufferUtils.readByte(src, srcOff) != 0) { + throw new LZ4Exception("Malformed input at " + srcOff); + } + return 1; + } + + + final int destEnd = destOff + destLen; + + int sOff = srcOff; + int dOff = destOff; + + while (true) { + final int token = ByteBufferUtils.readByte(src, sOff) & 0xFF; + ++sOff; + + // literals + int literalLen = token >>> ML_BITS; + if (literalLen == RUN_MASK) { + byte len = (byte) 0xFF; + while ((len = ByteBufferUtils.readByte(src, sOff++)) == (byte) 0xFF) { + literalLen += 0xFF; + } + literalLen += len & 0xFF; + } + + final int literalCopyEnd = dOff + literalLen; + + if (literalCopyEnd > destEnd - COPY_LENGTH) { + if (literalCopyEnd != destEnd) { + throw new LZ4Exception("Malformed input at " + sOff); + + } else { + LZ4ByteBufferUtils.safeArraycopy(src, sOff, dest, 
dOff, literalLen); + sOff += literalLen; + dOff = literalCopyEnd; + break; // EOF + } + } + + LZ4ByteBufferUtils.wildArraycopy(src, sOff, dest, dOff, literalLen); + sOff += literalLen; + dOff = literalCopyEnd; + + // matchs + final int matchDec = ByteBufferUtils.readShortLE(src, sOff); + sOff += 2; + int matchOff = dOff - matchDec; + + if (matchOff < destOff) { + throw new LZ4Exception("Malformed input at " + sOff); + } + + int matchLen = token & ML_MASK; + if (matchLen == ML_MASK) { + byte len = (byte) 0xFF; + while ((len = ByteBufferUtils.readByte(src, sOff++)) == (byte) 0xFF) { + matchLen += 0xFF; + } + matchLen += len & 0xFF; + } + matchLen += MIN_MATCH; + + final int matchCopyEnd = dOff + matchLen; + + if (matchCopyEnd > destEnd - COPY_LENGTH) { + if (matchCopyEnd > destEnd) { + throw new LZ4Exception("Malformed input at " + sOff); + } + LZ4ByteBufferUtils.safeIncrementalCopy(dest, matchOff, dOff, matchLen); + } else { + LZ4ByteBufferUtils.wildIncrementalCopy(dest, matchOff, dOff, matchCopyEnd); + } + dOff = matchCopyEnd; + } + + + return sOff - srcOff; + + } + + +} + diff --git a/core/src/main/java/net/jpountz/lz4/LZ4JavaSafeSafeDecompressor.java b/core/src/main/java/net/jpountz/lz4/LZ4JavaSafeSafeDecompressor.java new file mode 100644 index 00000000..2e77203c --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/LZ4JavaSafeSafeDecompressor.java @@ -0,0 +1,213 @@ +// Auto-generated: DO NOT EDIT + +package net.jpountz.lz4; + +import static net.jpountz.lz4.LZ4Constants.*; + +import java.nio.ByteBuffer; + +import net.jpountz.util.ByteBufferUtils; +import net.jpountz.util.SafeUtils; + +/** + * Decompressor. + */ +final class LZ4JavaSafeSafeDecompressor extends LZ4SafeDecompressor { + + public static final LZ4SafeDecompressor INSTANCE = new LZ4JavaSafeSafeDecompressor(); + + @Override + public int decompress(byte[] src, final int srcOff, final int srcLen , byte[] dest, final int destOff, int destLen) { + + + SafeUtils.checkRange(src, srcOff, srcLen); + SafeUtils.checkRange(dest, destOff, destLen); + + if (destLen == 0) { + if (srcLen != 1 || SafeUtils.readByte(src, srcOff) != 0) { + throw new LZ4Exception("Output buffer too small"); + } + return 0; + } + + final int srcEnd = srcOff + srcLen; + + + final int destEnd = destOff + destLen; + + int sOff = srcOff; + int dOff = destOff; + + while (true) { + final int token = SafeUtils.readByte(src, sOff) & 0xFF; + ++sOff; + + // literals + int literalLen = token >>> ML_BITS; + if (literalLen == RUN_MASK) { + byte len = (byte) 0xFF; + while (sOff < srcEnd &&(len = SafeUtils.readByte(src, sOff++)) == (byte) 0xFF) { + literalLen += 0xFF; + } + literalLen += len & 0xFF; + } + + final int literalCopyEnd = dOff + literalLen; + + if (literalCopyEnd > destEnd - COPY_LENGTH || sOff + literalLen > srcEnd - COPY_LENGTH) { + if (literalCopyEnd > destEnd) { + throw new LZ4Exception(); + } else if (sOff + literalLen != srcEnd) { + throw new LZ4Exception("Malformed input at " + sOff); + + } else { + LZ4SafeUtils.safeArraycopy(src, sOff, dest, dOff, literalLen); + sOff += literalLen; + dOff = literalCopyEnd; + break; // EOF + } + } + + LZ4SafeUtils.wildArraycopy(src, sOff, dest, dOff, literalLen); + sOff += literalLen; + dOff = literalCopyEnd; + + // matchs + final int matchDec = SafeUtils.readShortLE(src, sOff); + sOff += 2; + int matchOff = dOff - matchDec; + + if (matchOff < destOff) { + throw new LZ4Exception("Malformed input at " + sOff); + } + + int matchLen = token & ML_MASK; + if (matchLen == ML_MASK) { + byte len = (byte) 0xFF; + while (sOff < srcEnd 
&&(len = SafeUtils.readByte(src, sOff++)) == (byte) 0xFF) { + matchLen += 0xFF; + } + matchLen += len & 0xFF; + } + matchLen += MIN_MATCH; + + final int matchCopyEnd = dOff + matchLen; + + if (matchCopyEnd > destEnd - COPY_LENGTH) { + if (matchCopyEnd > destEnd) { + throw new LZ4Exception("Malformed input at " + sOff); + } + LZ4SafeUtils.safeIncrementalCopy(dest, matchOff, dOff, matchLen); + } else { + LZ4SafeUtils.wildIncrementalCopy(dest, matchOff, dOff, matchCopyEnd); + } + dOff = matchCopyEnd; + } + + + return dOff - destOff; + + } + + @Override + public int decompress(ByteBuffer src, final int srcOff, final int srcLen , ByteBuffer dest, final int destOff, int destLen) { + + if (src.hasArray() && dest.hasArray()) { + return decompress(src.array(), srcOff + src.arrayOffset(), srcLen, dest.array(), destOff + dest.arrayOffset(), destLen); + } + src = ByteBufferUtils.inNativeByteOrder(src); + dest = ByteBufferUtils.inNativeByteOrder(dest); + + + ByteBufferUtils.checkRange(src, srcOff, srcLen); + ByteBufferUtils.checkRange(dest, destOff, destLen); + + if (destLen == 0) { + if (srcLen != 1 || ByteBufferUtils.readByte(src, srcOff) != 0) { + throw new LZ4Exception("Output buffer too small"); + } + return 0; + } + + final int srcEnd = srcOff + srcLen; + + + final int destEnd = destOff + destLen; + + int sOff = srcOff; + int dOff = destOff; + + while (true) { + final int token = ByteBufferUtils.readByte(src, sOff) & 0xFF; + ++sOff; + + // literals + int literalLen = token >>> ML_BITS; + if (literalLen == RUN_MASK) { + byte len = (byte) 0xFF; + while (sOff < srcEnd &&(len = ByteBufferUtils.readByte(src, sOff++)) == (byte) 0xFF) { + literalLen += 0xFF; + } + literalLen += len & 0xFF; + } + + final int literalCopyEnd = dOff + literalLen; + + if (literalCopyEnd > destEnd - COPY_LENGTH || sOff + literalLen > srcEnd - COPY_LENGTH) { + if (literalCopyEnd > destEnd) { + throw new LZ4Exception(); + } else if (sOff + literalLen != srcEnd) { + throw new LZ4Exception("Malformed input at " + sOff); + + } else { + LZ4ByteBufferUtils.safeArraycopy(src, sOff, dest, dOff, literalLen); + sOff += literalLen; + dOff = literalCopyEnd; + break; // EOF + } + } + + LZ4ByteBufferUtils.wildArraycopy(src, sOff, dest, dOff, literalLen); + sOff += literalLen; + dOff = literalCopyEnd; + + // matchs + final int matchDec = ByteBufferUtils.readShortLE(src, sOff); + sOff += 2; + int matchOff = dOff - matchDec; + + if (matchOff < destOff) { + throw new LZ4Exception("Malformed input at " + sOff); + } + + int matchLen = token & ML_MASK; + if (matchLen == ML_MASK) { + byte len = (byte) 0xFF; + while (sOff < srcEnd &&(len = ByteBufferUtils.readByte(src, sOff++)) == (byte) 0xFF) { + matchLen += 0xFF; + } + matchLen += len & 0xFF; + } + matchLen += MIN_MATCH; + + final int matchCopyEnd = dOff + matchLen; + + if (matchCopyEnd > destEnd - COPY_LENGTH) { + if (matchCopyEnd > destEnd) { + throw new LZ4Exception("Malformed input at " + sOff); + } + LZ4ByteBufferUtils.safeIncrementalCopy(dest, matchOff, dOff, matchLen); + } else { + LZ4ByteBufferUtils.wildIncrementalCopy(dest, matchOff, dOff, matchCopyEnd); + } + dOff = matchCopyEnd; + } + + + return dOff - destOff; + + } + + +} + diff --git a/core/src/main/java/net/jpountz/lz4/LZ4JavaUnsafeCompressor.java b/core/src/main/java/net/jpountz/lz4/LZ4JavaUnsafeCompressor.java new file mode 100644 index 00000000..b52fb521 --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/LZ4JavaUnsafeCompressor.java @@ -0,0 +1,511 @@ +// Auto-generated: DO NOT EDIT + +package net.jpountz.lz4; + +import static 
net.jpountz.lz4.LZ4Constants.*; +import static net.jpountz.lz4.LZ4Utils.*; + +import java.nio.ByteBuffer; +import java.util.Arrays; + +import net.jpountz.util.ByteBufferUtils; +import net.jpountz.util.UnsafeUtils; + +/** + * Compressor. + */ +final class LZ4JavaUnsafeCompressor extends LZ4Compressor { + + public static final LZ4Compressor INSTANCE = new LZ4JavaUnsafeCompressor(); + + static int compress64k(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int destEnd) { + final int srcEnd = srcOff + srcLen; + final int srcLimit = srcEnd - LAST_LITERALS; + final int mflimit = srcEnd - MF_LIMIT; + + int sOff = srcOff, dOff = destOff; + + int anchor = sOff; + + if (srcLen >= MIN_LENGTH) { + + final short[] hashTable = new short[HASH_TABLE_SIZE_64K]; + + ++sOff; + + main: + while (true) { + + // find a match + int forwardOff = sOff; + + int ref; + int step = 1; + int searchMatchNb = 1 << SKIP_STRENGTH; + do { + sOff = forwardOff; + forwardOff += step; + step = searchMatchNb++ >>> SKIP_STRENGTH; + + if (forwardOff > mflimit) { + break main; + } + + final int h = hash64k(UnsafeUtils.readInt(src, sOff)); + ref = srcOff + UnsafeUtils.readShort(hashTable, h); + UnsafeUtils.writeShort(hashTable, h, sOff - srcOff); + } while (!LZ4UnsafeUtils.readIntEquals(src, ref, sOff)); + + // catch up + final int excess = LZ4UnsafeUtils.commonBytesBackward(src, ref, sOff, srcOff, anchor); + sOff -= excess; + ref -= excess; + + // sequence == refsequence + final int runLen = sOff - anchor; + + // encode literal length + int tokenOff = dOff++; + + if (dOff + runLen + (2 + 1 + LAST_LITERALS) + (runLen >>> 8) > destEnd) { + throw new LZ4Exception("maxDestLen is too small"); + } + + if (runLen >= RUN_MASK) { + UnsafeUtils.writeByte(dest, tokenOff, RUN_MASK << ML_BITS); + dOff = LZ4UnsafeUtils.writeLen(runLen - RUN_MASK, dest, dOff); + } else { + UnsafeUtils.writeByte(dest, tokenOff, runLen << ML_BITS); + } + + // copy literals + LZ4UnsafeUtils.wildArraycopy(src, anchor, dest, dOff, runLen); + dOff += runLen; + + while (true) { + // encode offset + UnsafeUtils.writeShortLE(dest, dOff, (short) (sOff - ref)); + dOff += 2; + + // count nb matches + sOff += MIN_MATCH; + ref += MIN_MATCH; + final int matchLen = LZ4UnsafeUtils.commonBytes(src, ref, sOff, srcLimit); + if (dOff + (1 + LAST_LITERALS) + (matchLen >>> 8) > destEnd) { + throw new LZ4Exception("maxDestLen is too small"); + } + sOff += matchLen; + + // encode match len + if (matchLen >= ML_MASK) { + UnsafeUtils.writeByte(dest, tokenOff, UnsafeUtils.readByte(dest, tokenOff) | ML_MASK); + dOff = LZ4UnsafeUtils.writeLen(matchLen - ML_MASK, dest, dOff); + } else { + UnsafeUtils.writeByte(dest, tokenOff, UnsafeUtils.readByte(dest, tokenOff) | matchLen); + } + + // test end of chunk + if (sOff > mflimit) { + anchor = sOff; + break main; + } + + // fill table + UnsafeUtils.writeShort(hashTable, hash64k(UnsafeUtils.readInt(src, sOff - 2)), sOff - 2 - srcOff); + + // test next position + final int h = hash64k(UnsafeUtils.readInt(src, sOff)); + ref = srcOff + UnsafeUtils.readShort(hashTable, h); + UnsafeUtils.writeShort(hashTable, h, sOff - srcOff); + + if (!LZ4UnsafeUtils.readIntEquals(src, sOff, ref)) { + break; + } + + tokenOff = dOff++; + UnsafeUtils.writeByte(dest, tokenOff, 0); + } + + // prepare next loop + anchor = sOff++; + } + } + + dOff = LZ4UnsafeUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd); + return dOff - destOff; + } + + @Override + public int compress(byte[] src, final int srcOff, int srcLen, byte[] dest, final int destOff, 
int maxDestLen) { + + UnsafeUtils.checkRange(src, srcOff, srcLen); + UnsafeUtils.checkRange(dest, destOff, maxDestLen); + final int destEnd = destOff + maxDestLen; + + if (srcLen < LZ4_64K_LIMIT) { + return compress64k(src, srcOff, srcLen, dest, destOff, destEnd); + } + + final int srcEnd = srcOff + srcLen; + final int srcLimit = srcEnd - LAST_LITERALS; + final int mflimit = srcEnd - MF_LIMIT; + + int sOff = srcOff, dOff = destOff; + int anchor = sOff++; + + final int[] hashTable = new int[HASH_TABLE_SIZE]; + Arrays.fill(hashTable, anchor); + + main: + while (true) { + + // find a match + int forwardOff = sOff; + + int ref; + int step = 1; + int searchMatchNb = 1 << SKIP_STRENGTH; + int back; + do { + sOff = forwardOff; + forwardOff += step; + step = searchMatchNb++ >>> SKIP_STRENGTH; + + if (forwardOff > mflimit) { + break main; + } + + final int h = hash(UnsafeUtils.readInt(src, sOff)); + ref = UnsafeUtils.readInt(hashTable, h); + back = sOff - ref; + UnsafeUtils.writeInt(hashTable, h, sOff); + } while (back >= MAX_DISTANCE || !LZ4UnsafeUtils.readIntEquals(src, ref, sOff)); + + + final int excess = LZ4UnsafeUtils.commonBytesBackward(src, ref, sOff, srcOff, anchor); + sOff -= excess; + ref -= excess; + + // sequence == refsequence + final int runLen = sOff - anchor; + + // encode literal length + int tokenOff = dOff++; + + if (dOff + runLen + (2 + 1 + LAST_LITERALS) + (runLen >>> 8) > destEnd) { + throw new LZ4Exception("maxDestLen is too small"); + } + + if (runLen >= RUN_MASK) { + UnsafeUtils.writeByte(dest, tokenOff, RUN_MASK << ML_BITS); + dOff = LZ4UnsafeUtils.writeLen(runLen - RUN_MASK, dest, dOff); + } else { + UnsafeUtils.writeByte(dest, tokenOff, runLen << ML_BITS); + } + + // copy literals + LZ4UnsafeUtils.wildArraycopy(src, anchor, dest, dOff, runLen); + dOff += runLen; + + while (true) { + // encode offset + UnsafeUtils.writeShortLE(dest, dOff, back); + dOff += 2; + + // count nb matches + sOff += MIN_MATCH; + final int matchLen = LZ4UnsafeUtils.commonBytes(src, ref + MIN_MATCH, sOff, srcLimit); + if (dOff + (1 + LAST_LITERALS) + (matchLen >>> 8) > destEnd) { + throw new LZ4Exception("maxDestLen is too small"); + } + sOff += matchLen; + + // encode match len + if (matchLen >= ML_MASK) { + UnsafeUtils.writeByte(dest, tokenOff, UnsafeUtils.readByte(dest, tokenOff) | ML_MASK); + dOff = LZ4UnsafeUtils.writeLen(matchLen - ML_MASK, dest, dOff); + } else { + UnsafeUtils.writeByte(dest, tokenOff, UnsafeUtils.readByte(dest, tokenOff) | matchLen); + } + + // test end of chunk + if (sOff > mflimit) { + anchor = sOff; + break main; + } + + // fill table + UnsafeUtils.writeInt(hashTable, hash(UnsafeUtils.readInt(src, sOff - 2)), sOff - 2); + + // test next position + final int h = hash(UnsafeUtils.readInt(src, sOff)); + ref = UnsafeUtils.readInt(hashTable, h); + UnsafeUtils.writeInt(hashTable, h, sOff); + back = sOff - ref; + + if (back >= MAX_DISTANCE || !LZ4UnsafeUtils.readIntEquals(src, ref, sOff)) { + break; + } + + tokenOff = dOff++; + UnsafeUtils.writeByte(dest, tokenOff, 0); + } + + // prepare next loop + anchor = sOff++; + } + + dOff = LZ4UnsafeUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd); + return dOff - destOff; + } + + + static int compress64k(ByteBuffer src, int srcOff, int srcLen, ByteBuffer dest, int destOff, int destEnd) { + final int srcEnd = srcOff + srcLen; + final int srcLimit = srcEnd - LAST_LITERALS; + final int mflimit = srcEnd - MF_LIMIT; + + int sOff = srcOff, dOff = destOff; + + int anchor = sOff; + + if (srcLen >= MIN_LENGTH) { + + final 
short[] hashTable = new short[HASH_TABLE_SIZE_64K]; + + ++sOff; + + main: + while (true) { + + // find a match + int forwardOff = sOff; + + int ref; + int step = 1; + int searchMatchNb = 1 << SKIP_STRENGTH; + do { + sOff = forwardOff; + forwardOff += step; + step = searchMatchNb++ >>> SKIP_STRENGTH; + + if (forwardOff > mflimit) { + break main; + } + + final int h = hash64k(ByteBufferUtils.readInt(src, sOff)); + ref = srcOff + UnsafeUtils.readShort(hashTable, h); + UnsafeUtils.writeShort(hashTable, h, sOff - srcOff); + } while (!LZ4ByteBufferUtils.readIntEquals(src, ref, sOff)); + + // catch up + final int excess = LZ4ByteBufferUtils.commonBytesBackward(src, ref, sOff, srcOff, anchor); + sOff -= excess; + ref -= excess; + + // sequence == refsequence + final int runLen = sOff - anchor; + + // encode literal length + int tokenOff = dOff++; + + if (dOff + runLen + (2 + 1 + LAST_LITERALS) + (runLen >>> 8) > destEnd) { + throw new LZ4Exception("maxDestLen is too small"); + } + + if (runLen >= RUN_MASK) { + ByteBufferUtils.writeByte(dest, tokenOff, RUN_MASK << ML_BITS); + dOff = LZ4ByteBufferUtils.writeLen(runLen - RUN_MASK, dest, dOff); + } else { + ByteBufferUtils.writeByte(dest, tokenOff, runLen << ML_BITS); + } + + // copy literals + LZ4ByteBufferUtils.wildArraycopy(src, anchor, dest, dOff, runLen); + dOff += runLen; + + while (true) { + // encode offset + ByteBufferUtils.writeShortLE(dest, dOff, (short) (sOff - ref)); + dOff += 2; + + // count nb matches + sOff += MIN_MATCH; + ref += MIN_MATCH; + final int matchLen = LZ4ByteBufferUtils.commonBytes(src, ref, sOff, srcLimit); + if (dOff + (1 + LAST_LITERALS) + (matchLen >>> 8) > destEnd) { + throw new LZ4Exception("maxDestLen is too small"); + } + sOff += matchLen; + + // encode match len + if (matchLen >= ML_MASK) { + ByteBufferUtils.writeByte(dest, tokenOff, ByteBufferUtils.readByte(dest, tokenOff) | ML_MASK); + dOff = LZ4ByteBufferUtils.writeLen(matchLen - ML_MASK, dest, dOff); + } else { + ByteBufferUtils.writeByte(dest, tokenOff, ByteBufferUtils.readByte(dest, tokenOff) | matchLen); + } + + // test end of chunk + if (sOff > mflimit) { + anchor = sOff; + break main; + } + + // fill table + UnsafeUtils.writeShort(hashTable, hash64k(ByteBufferUtils.readInt(src, sOff - 2)), sOff - 2 - srcOff); + + // test next position + final int h = hash64k(ByteBufferUtils.readInt(src, sOff)); + ref = srcOff + UnsafeUtils.readShort(hashTable, h); + UnsafeUtils.writeShort(hashTable, h, sOff - srcOff); + + if (!LZ4ByteBufferUtils.readIntEquals(src, sOff, ref)) { + break; + } + + tokenOff = dOff++; + ByteBufferUtils.writeByte(dest, tokenOff, 0); + } + + // prepare next loop + anchor = sOff++; + } + } + + dOff = LZ4ByteBufferUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd); + return dOff - destOff; + } + + @Override + public int compress(ByteBuffer src, final int srcOff, int srcLen, ByteBuffer dest, final int destOff, int maxDestLen) { + + if (src.hasArray() && dest.hasArray()) { + return compress(src.array(), srcOff + src.arrayOffset(), srcLen, dest.array(), destOff + dest.arrayOffset(), maxDestLen); + } + src = ByteBufferUtils.inNativeByteOrder(src); + dest = ByteBufferUtils.inNativeByteOrder(dest); + + ByteBufferUtils.checkRange(src, srcOff, srcLen); + ByteBufferUtils.checkRange(dest, destOff, maxDestLen); + final int destEnd = destOff + maxDestLen; + + if (srcLen < LZ4_64K_LIMIT) { + return compress64k(src, srcOff, srcLen, dest, destOff, destEnd); + } + + final int srcEnd = srcOff + srcLen; + final int srcLimit = srcEnd - 
LAST_LITERALS; + final int mflimit = srcEnd - MF_LIMIT; + + int sOff = srcOff, dOff = destOff; + int anchor = sOff++; + + final int[] hashTable = new int[HASH_TABLE_SIZE]; + Arrays.fill(hashTable, anchor); + + main: + while (true) { + + // find a match + int forwardOff = sOff; + + int ref; + int step = 1; + int searchMatchNb = 1 << SKIP_STRENGTH; + int back; + do { + sOff = forwardOff; + forwardOff += step; + step = searchMatchNb++ >>> SKIP_STRENGTH; + + if (forwardOff > mflimit) { + break main; + } + + final int h = hash(ByteBufferUtils.readInt(src, sOff)); + ref = UnsafeUtils.readInt(hashTable, h); + back = sOff - ref; + UnsafeUtils.writeInt(hashTable, h, sOff); + } while (back >= MAX_DISTANCE || !LZ4ByteBufferUtils.readIntEquals(src, ref, sOff)); + + + final int excess = LZ4ByteBufferUtils.commonBytesBackward(src, ref, sOff, srcOff, anchor); + sOff -= excess; + ref -= excess; + + // sequence == refsequence + final int runLen = sOff - anchor; + + // encode literal length + int tokenOff = dOff++; + + if (dOff + runLen + (2 + 1 + LAST_LITERALS) + (runLen >>> 8) > destEnd) { + throw new LZ4Exception("maxDestLen is too small"); + } + + if (runLen >= RUN_MASK) { + ByteBufferUtils.writeByte(dest, tokenOff, RUN_MASK << ML_BITS); + dOff = LZ4ByteBufferUtils.writeLen(runLen - RUN_MASK, dest, dOff); + } else { + ByteBufferUtils.writeByte(dest, tokenOff, runLen << ML_BITS); + } + + // copy literals + LZ4ByteBufferUtils.wildArraycopy(src, anchor, dest, dOff, runLen); + dOff += runLen; + + while (true) { + // encode offset + ByteBufferUtils.writeShortLE(dest, dOff, back); + dOff += 2; + + // count nb matches + sOff += MIN_MATCH; + final int matchLen = LZ4ByteBufferUtils.commonBytes(src, ref + MIN_MATCH, sOff, srcLimit); + if (dOff + (1 + LAST_LITERALS) + (matchLen >>> 8) > destEnd) { + throw new LZ4Exception("maxDestLen is too small"); + } + sOff += matchLen; + + // encode match len + if (matchLen >= ML_MASK) { + ByteBufferUtils.writeByte(dest, tokenOff, ByteBufferUtils.readByte(dest, tokenOff) | ML_MASK); + dOff = LZ4ByteBufferUtils.writeLen(matchLen - ML_MASK, dest, dOff); + } else { + ByteBufferUtils.writeByte(dest, tokenOff, ByteBufferUtils.readByte(dest, tokenOff) | matchLen); + } + + // test end of chunk + if (sOff > mflimit) { + anchor = sOff; + break main; + } + + // fill table + UnsafeUtils.writeInt(hashTable, hash(ByteBufferUtils.readInt(src, sOff - 2)), sOff - 2); + + // test next position + final int h = hash(ByteBufferUtils.readInt(src, sOff)); + ref = UnsafeUtils.readInt(hashTable, h); + UnsafeUtils.writeInt(hashTable, h, sOff); + back = sOff - ref; + + if (back >= MAX_DISTANCE || !LZ4ByteBufferUtils.readIntEquals(src, ref, sOff)) { + break; + } + + tokenOff = dOff++; + ByteBufferUtils.writeByte(dest, tokenOff, 0); + } + + // prepare next loop + anchor = sOff++; + } + + dOff = LZ4ByteBufferUtils.lastLiterals(src, anchor, srcEnd - anchor, dest, dOff, destEnd); + return dOff - destOff; + } + + +} diff --git a/core/src/main/java/net/jpountz/lz4/LZ4JavaUnsafeFastDecompressor.java b/core/src/main/java/net/jpountz/lz4/LZ4JavaUnsafeFastDecompressor.java new file mode 100644 index 00000000..ca26a4a8 --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/LZ4JavaUnsafeFastDecompressor.java @@ -0,0 +1,205 @@ +// Auto-generated: DO NOT EDIT + +package net.jpountz.lz4; + +import static net.jpountz.lz4.LZ4Constants.*; + +import java.nio.ByteBuffer; + +import net.jpountz.util.ByteBufferUtils; +import net.jpountz.util.UnsafeUtils; + +/** + * Decompressor. 
+ */ +final class LZ4JavaUnsafeFastDecompressor extends LZ4FastDecompressor { + + public static final LZ4FastDecompressor INSTANCE = new LZ4JavaUnsafeFastDecompressor(); + + @Override + public int decompress(byte[] src, final int srcOff, byte[] dest, final int destOff, int destLen) { + + + UnsafeUtils.checkRange(src, srcOff); + UnsafeUtils.checkRange(dest, destOff, destLen); + + if (destLen == 0) { + if (UnsafeUtils.readByte(src, srcOff) != 0) { + throw new LZ4Exception("Malformed input at " + srcOff); + } + return 1; + } + + + final int destEnd = destOff + destLen; + + int sOff = srcOff; + int dOff = destOff; + + while (true) { + final int token = UnsafeUtils.readByte(src, sOff) & 0xFF; + ++sOff; + + // literals + int literalLen = token >>> ML_BITS; + if (literalLen == RUN_MASK) { + byte len = (byte) 0xFF; + while ((len = UnsafeUtils.readByte(src, sOff++)) == (byte) 0xFF) { + literalLen += 0xFF; + } + literalLen += len & 0xFF; + } + + final int literalCopyEnd = dOff + literalLen; + + if (literalCopyEnd > destEnd - COPY_LENGTH) { + if (literalCopyEnd != destEnd) { + throw new LZ4Exception("Malformed input at " + sOff); + + } else { + LZ4UnsafeUtils.safeArraycopy(src, sOff, dest, dOff, literalLen); + sOff += literalLen; + dOff = literalCopyEnd; + break; // EOF + } + } + + LZ4UnsafeUtils.wildArraycopy(src, sOff, dest, dOff, literalLen); + sOff += literalLen; + dOff = literalCopyEnd; + + // matchs + final int matchDec = UnsafeUtils.readShortLE(src, sOff); + sOff += 2; + int matchOff = dOff - matchDec; + + if (matchOff < destOff) { + throw new LZ4Exception("Malformed input at " + sOff); + } + + int matchLen = token & ML_MASK; + if (matchLen == ML_MASK) { + byte len = (byte) 0xFF; + while ((len = UnsafeUtils.readByte(src, sOff++)) == (byte) 0xFF) { + matchLen += 0xFF; + } + matchLen += len & 0xFF; + } + matchLen += MIN_MATCH; + + final int matchCopyEnd = dOff + matchLen; + + if (matchCopyEnd > destEnd - COPY_LENGTH) { + if (matchCopyEnd > destEnd) { + throw new LZ4Exception("Malformed input at " + sOff); + } + LZ4UnsafeUtils.safeIncrementalCopy(dest, matchOff, dOff, matchLen); + } else { + LZ4UnsafeUtils.wildIncrementalCopy(dest, matchOff, dOff, matchCopyEnd); + } + dOff = matchCopyEnd; + } + + + return sOff - srcOff; + + } + + @Override + public int decompress(ByteBuffer src, final int srcOff, ByteBuffer dest, final int destOff, int destLen) { + + if (src.hasArray() && dest.hasArray()) { + return decompress(src.array(), srcOff + src.arrayOffset(), dest.array(), destOff + dest.arrayOffset(), destLen); + } + src = ByteBufferUtils.inNativeByteOrder(src); + dest = ByteBufferUtils.inNativeByteOrder(dest); + + + ByteBufferUtils.checkRange(src, srcOff); + ByteBufferUtils.checkRange(dest, destOff, destLen); + + if (destLen == 0) { + if (ByteBufferUtils.readByte(src, srcOff) != 0) { + throw new LZ4Exception("Malformed input at " + srcOff); + } + return 1; + } + + + final int destEnd = destOff + destLen; + + int sOff = srcOff; + int dOff = destOff; + + while (true) { + final int token = ByteBufferUtils.readByte(src, sOff) & 0xFF; + ++sOff; + + // literals + int literalLen = token >>> ML_BITS; + if (literalLen == RUN_MASK) { + byte len = (byte) 0xFF; + while ((len = ByteBufferUtils.readByte(src, sOff++)) == (byte) 0xFF) { + literalLen += 0xFF; + } + literalLen += len & 0xFF; + } + + final int literalCopyEnd = dOff + literalLen; + + if (literalCopyEnd > destEnd - COPY_LENGTH) { + if (literalCopyEnd != destEnd) { + throw new LZ4Exception("Malformed input at " + sOff); + + } else { + 
LZ4ByteBufferUtils.safeArraycopy(src, sOff, dest, dOff, literalLen); + sOff += literalLen; + dOff = literalCopyEnd; + break; // EOF + } + } + + LZ4ByteBufferUtils.wildArraycopy(src, sOff, dest, dOff, literalLen); + sOff += literalLen; + dOff = literalCopyEnd; + + // matchs + final int matchDec = ByteBufferUtils.readShortLE(src, sOff); + sOff += 2; + int matchOff = dOff - matchDec; + + if (matchOff < destOff) { + throw new LZ4Exception("Malformed input at " + sOff); + } + + int matchLen = token & ML_MASK; + if (matchLen == ML_MASK) { + byte len = (byte) 0xFF; + while ((len = ByteBufferUtils.readByte(src, sOff++)) == (byte) 0xFF) { + matchLen += 0xFF; + } + matchLen += len & 0xFF; + } + matchLen += MIN_MATCH; + + final int matchCopyEnd = dOff + matchLen; + + if (matchCopyEnd > destEnd - COPY_LENGTH) { + if (matchCopyEnd > destEnd) { + throw new LZ4Exception("Malformed input at " + sOff); + } + LZ4ByteBufferUtils.safeIncrementalCopy(dest, matchOff, dOff, matchLen); + } else { + LZ4ByteBufferUtils.wildIncrementalCopy(dest, matchOff, dOff, matchCopyEnd); + } + dOff = matchCopyEnd; + } + + + return sOff - srcOff; + + } + + +} + diff --git a/core/src/main/java/net/jpountz/lz4/LZ4JavaUnsafeSafeDecompressor.java b/core/src/main/java/net/jpountz/lz4/LZ4JavaUnsafeSafeDecompressor.java new file mode 100644 index 00000000..2bf83e51 --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/LZ4JavaUnsafeSafeDecompressor.java @@ -0,0 +1,213 @@ +// Auto-generated: DO NOT EDIT + +package net.jpountz.lz4; + +import static net.jpountz.lz4.LZ4Constants.*; + +import java.nio.ByteBuffer; + +import net.jpountz.util.ByteBufferUtils; +import net.jpountz.util.UnsafeUtils; + +/** + * Decompressor. + */ +final class LZ4JavaUnsafeSafeDecompressor extends LZ4SafeDecompressor { + + public static final LZ4SafeDecompressor INSTANCE = new LZ4JavaUnsafeSafeDecompressor(); + + @Override + public int decompress(byte[] src, final int srcOff, final int srcLen , byte[] dest, final int destOff, int destLen) { + + + UnsafeUtils.checkRange(src, srcOff, srcLen); + UnsafeUtils.checkRange(dest, destOff, destLen); + + if (destLen == 0) { + if (srcLen != 1 || UnsafeUtils.readByte(src, srcOff) != 0) { + throw new LZ4Exception("Output buffer too small"); + } + return 0; + } + + final int srcEnd = srcOff + srcLen; + + + final int destEnd = destOff + destLen; + + int sOff = srcOff; + int dOff = destOff; + + while (true) { + final int token = UnsafeUtils.readByte(src, sOff) & 0xFF; + ++sOff; + + // literals + int literalLen = token >>> ML_BITS; + if (literalLen == RUN_MASK) { + byte len = (byte) 0xFF; + while (sOff < srcEnd &&(len = UnsafeUtils.readByte(src, sOff++)) == (byte) 0xFF) { + literalLen += 0xFF; + } + literalLen += len & 0xFF; + } + + final int literalCopyEnd = dOff + literalLen; + + if (literalCopyEnd > destEnd - COPY_LENGTH || sOff + literalLen > srcEnd - COPY_LENGTH) { + if (literalCopyEnd > destEnd) { + throw new LZ4Exception(); + } else if (sOff + literalLen != srcEnd) { + throw new LZ4Exception("Malformed input at " + sOff); + + } else { + LZ4UnsafeUtils.safeArraycopy(src, sOff, dest, dOff, literalLen); + sOff += literalLen; + dOff = literalCopyEnd; + break; // EOF + } + } + + LZ4UnsafeUtils.wildArraycopy(src, sOff, dest, dOff, literalLen); + sOff += literalLen; + dOff = literalCopyEnd; + + // matchs + final int matchDec = UnsafeUtils.readShortLE(src, sOff); + sOff += 2; + int matchOff = dOff - matchDec; + + if (matchOff < destOff) { + throw new LZ4Exception("Malformed input at " + sOff); + } + + int matchLen = token & ML_MASK; 
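+ // low 4 bits of the token encode the match length (biased by MIN_MATCH); ML_MASK (15) means additional length bytes follow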
+ if (matchLen == ML_MASK) { + byte len = (byte) 0xFF; + while (sOff < srcEnd &&(len = UnsafeUtils.readByte(src, sOff++)) == (byte) 0xFF) { + matchLen += 0xFF; + } + matchLen += len & 0xFF; + } + matchLen += MIN_MATCH; + + final int matchCopyEnd = dOff + matchLen; + + if (matchCopyEnd > destEnd - COPY_LENGTH) { + if (matchCopyEnd > destEnd) { + throw new LZ4Exception("Malformed input at " + sOff); + } + LZ4UnsafeUtils.safeIncrementalCopy(dest, matchOff, dOff, matchLen); + } else { + LZ4UnsafeUtils.wildIncrementalCopy(dest, matchOff, dOff, matchCopyEnd); + } + dOff = matchCopyEnd; + } + + + return dOff - destOff; + + } + + @Override + public int decompress(ByteBuffer src, final int srcOff, final int srcLen , ByteBuffer dest, final int destOff, int destLen) { + + if (src.hasArray() && dest.hasArray()) { + return decompress(src.array(), srcOff + src.arrayOffset(), srcLen, dest.array(), destOff + dest.arrayOffset(), destLen); + } + src = ByteBufferUtils.inNativeByteOrder(src); + dest = ByteBufferUtils.inNativeByteOrder(dest); + + + ByteBufferUtils.checkRange(src, srcOff, srcLen); + ByteBufferUtils.checkRange(dest, destOff, destLen); + + if (destLen == 0) { + if (srcLen != 1 || ByteBufferUtils.readByte(src, srcOff) != 0) { + throw new LZ4Exception("Output buffer too small"); + } + return 0; + } + + final int srcEnd = srcOff + srcLen; + + + final int destEnd = destOff + destLen; + + int sOff = srcOff; + int dOff = destOff; + + while (true) { + final int token = ByteBufferUtils.readByte(src, sOff) & 0xFF; + ++sOff; + + // literals + int literalLen = token >>> ML_BITS; + if (literalLen == RUN_MASK) { + byte len = (byte) 0xFF; + while (sOff < srcEnd &&(len = ByteBufferUtils.readByte(src, sOff++)) == (byte) 0xFF) { + literalLen += 0xFF; + } + literalLen += len & 0xFF; + } + + final int literalCopyEnd = dOff + literalLen; + + if (literalCopyEnd > destEnd - COPY_LENGTH || sOff + literalLen > srcEnd - COPY_LENGTH) { + if (literalCopyEnd > destEnd) { + throw new LZ4Exception(); + } else if (sOff + literalLen != srcEnd) { + throw new LZ4Exception("Malformed input at " + sOff); + + } else { + LZ4ByteBufferUtils.safeArraycopy(src, sOff, dest, dOff, literalLen); + sOff += literalLen; + dOff = literalCopyEnd; + break; // EOF + } + } + + LZ4ByteBufferUtils.wildArraycopy(src, sOff, dest, dOff, literalLen); + sOff += literalLen; + dOff = literalCopyEnd; + + // matchs + final int matchDec = ByteBufferUtils.readShortLE(src, sOff); + sOff += 2; + int matchOff = dOff - matchDec; + + if (matchOff < destOff) { + throw new LZ4Exception("Malformed input at " + sOff); + } + + int matchLen = token & ML_MASK; + if (matchLen == ML_MASK) { + byte len = (byte) 0xFF; + while (sOff < srcEnd &&(len = ByteBufferUtils.readByte(src, sOff++)) == (byte) 0xFF) { + matchLen += 0xFF; + } + matchLen += len & 0xFF; + } + matchLen += MIN_MATCH; + + final int matchCopyEnd = dOff + matchLen; + + if (matchCopyEnd > destEnd - COPY_LENGTH) { + if (matchCopyEnd > destEnd) { + throw new LZ4Exception("Malformed input at " + sOff); + } + LZ4ByteBufferUtils.safeIncrementalCopy(dest, matchOff, dOff, matchLen); + } else { + LZ4ByteBufferUtils.wildIncrementalCopy(dest, matchOff, dOff, matchCopyEnd); + } + dOff = matchCopyEnd; + } + + + return dOff - destOff; + + } + + +} + diff --git a/core/src/main/java/net/jpountz/lz4/LZ4OutputStream.java b/core/src/main/java/net/jpountz/lz4/LZ4OutputStream.java new file mode 100644 index 00000000..af16abab --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/LZ4OutputStream.java @@ -0,0 +1,73 @@ +package 
net.jpountz.lz4; + +import java.io.OutputStream; +import java.io.IOException; + +public class LZ4OutputStream extends OutputStream { + private static final LZ4Factory lz4Factory = LZ4Factory.fastestInstance(); + private final LZ4Compressor compressor; + private static final int ONE_MEGABYTE = 1048576; + private final byte[] compressionInputBuffer; + private final byte[] compressionOutputBuffer; + private final OutputStream underlyingOutputStream; + private int bytesRemainingInCompressionInputBuffer = 0; + private int currentCompressionInputBufferPosition = 0; + + public LZ4OutputStream(OutputStream os) throws IOException { + this(os, ONE_MEGABYTE, lz4Factory.fastCompressor()); + } + + public LZ4OutputStream(OutputStream underlyingOutputStream, int blocksize, LZ4Compressor compressor) throws IOException { + compressionInputBuffer = new byte[blocksize]; + this.compressor = compressor; + this.underlyingOutputStream = underlyingOutputStream; + this.bytesRemainingInCompressionInputBuffer = blocksize; + this.currentCompressionInputBufferPosition = 0; + this.compressionOutputBuffer = new byte[compressor.maxCompressedLength(blocksize)]; + } + + public void write(byte[] b, int off, int len) throws IOException { + if (len <= bytesRemainingInCompressionInputBuffer) { + System.arraycopy(b, off, compressionInputBuffer, currentCompressionInputBufferPosition, len); + currentCompressionInputBufferPosition += len; + bytesRemainingInCompressionInputBuffer -= len; + } else { + // len > bytesRemainingInCompressionInputBuffer + while (len > 0) { + int bytesToCopy = Math.min(bytesRemainingInCompressionInputBuffer, len); + System.arraycopy(b, off, compressionInputBuffer, currentCompressionInputBufferPosition, bytesToCopy); + currentCompressionInputBufferPosition += bytesToCopy; + bytesRemainingInCompressionInputBuffer -= bytesToCopy; + flush(); + len -= bytesToCopy; + off += bytesToCopy; + } + } + } + + public void write(int i) throws IOException { + byte b = (byte)i; + if (0 == bytesRemainingInCompressionInputBuffer) { + flush(); + } + compressionInputBuffer[currentCompressionInputBufferPosition] = b; + bytesRemainingInCompressionInputBuffer--; + currentCompressionInputBufferPosition++; + } + + public void flush() throws IOException { + if(currentCompressionInputBufferPosition > 0) { + LZ4StreamHelper.writeLength(currentCompressionInputBufferPosition, this.underlyingOutputStream); + int bytesCompressed = compressor.compress(compressionInputBuffer, 0, currentCompressionInputBufferPosition, compressionOutputBuffer, 0, compressionOutputBuffer.length); + LZ4StreamHelper.writeLength(bytesCompressed, this.underlyingOutputStream); + underlyingOutputStream.write(compressionOutputBuffer, 0, bytesCompressed); + bytesRemainingInCompressionInputBuffer = compressionInputBuffer.length; + currentCompressionInputBufferPosition = 0; + } + } + + public void close() throws IOException { + flush(); + underlyingOutputStream.close(); + } +} diff --git a/core/src/main/java/net/jpountz/lz4/LZ4SafeDecompressor.java b/core/src/main/java/net/jpountz/lz4/LZ4SafeDecompressor.java new file mode 100644 index 00000000..2416ccba --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/LZ4SafeDecompressor.java @@ -0,0 +1,117 @@ +package net.jpountz.lz4; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.nio.ByteBuffer; +import java.util.Arrays; + +/** + * LZ4 decompressor that requires the size of the compressed data to be known. + *

+ * Implementations of this class are usually a little slower than those of + * {@link LZ4FastDecompressor} but do not require the size of the original data to + * be known. + */ +public abstract class LZ4SafeDecompressor implements LZ4UnknownSizeDecompressor { + + /** + * Decompress src[srcOff:srcLen] into + * dest[destOff:destOff+maxDestLen] and returns the number of + * decompressed bytes written into dest. + * + * @param srcLen the exact size of the compressed stream + * @return the original input size + * @throws LZ4Exception if maxDestLen is too small + */ + public abstract int decompress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen); + + /** + * Uncompress src[srcOff:srcLen] into + * dest[destOff:destOff+maxDestLen] and returns the number of + * decompressed bytes written into dest. + * + * @param srcLen the exact size of the compressed stream + * @return the original input size + * @throws LZ4Exception if maxDestLen is too small + */ + public abstract int decompress(ByteBuffer src, int srcOff, int srcLen, ByteBuffer dest, int destOff, int maxDestLen); + + /** + * Convenience method, equivalent to calling + * {@link #decompress(byte[], int, int, byte[], int, int) decompress(src, srcOff, srcLen, dest, destOff, dest.length - destOff)}. + */ + public final int decompress(byte[] src, int srcOff, int srcLen, byte[] dest, int destOff) { + return decompress(src, srcOff, srcLen, dest, destOff, dest.length - destOff); + } + + /** + * Convenience method, equivalent to calling + * {@link #decompress(byte[], int, int, byte[], int) decompress(src, 0, src.length, dest, 0)} + */ + public final int decompress(byte[] src, byte[] dest) { + return decompress(src, 0, src.length, dest, 0); + } + + /** + * Convenience method which returns src[srcOff:srcOff+srcLen] + * decompressed. + *

Warning: this method has a + * significant overhead: it needs to allocate a buffer to + * decompress into and then resize this buffer to the actual + * decompressed length.

+ *

Here is how this method is implemented:

+ *
+   * byte[] decompressed = new byte[maxDestLen];
+   * final int decompressedLength = decompress(src, srcOff, srcLen, decompressed, 0, maxDestLen);
+   * if (decompressedLength != decompressed.length) {
+   *   decompressed = Arrays.copyOf(decompressed, decompressedLength);
+   * }
+   * return decompressed;
+   * 
+ */ + public final byte[] decompress(byte[] src, int srcOff, int srcLen, int maxDestLen) { + byte[] decompressed = new byte[maxDestLen]; + final int decompressedLength = decompress(src, srcOff, srcLen, decompressed, 0, maxDestLen); + if (decompressedLength != decompressed.length) { + decompressed = Arrays.copyOf(decompressed, decompressedLength); + } + return decompressed; + } + + /** + * Convenience method, equivalent to calling + * {@link #decompress(byte[], int, int, int) decompress(src, 0, src.length, maxDestLen)}. + */ + public final byte[] decompress(byte[] src, int maxDestLen) { + return decompress(src, 0, src.length, maxDestLen); + } + + /** + * Decompress src into dest. src's + * {@link ByteBuffer#remaining()} must be exactly the size of the compressed + * data. This method moves the positions of the buffers. + */ + public final void decompress(ByteBuffer src, ByteBuffer dest) { + final int decompressed = decompress(src, src.position(), src.remaining(), dest, dest.position(), dest.remaining()); + src.position(src.limit()); + dest.position(dest.position() + decompressed); + } + + @Override + public String toString() { + return getClass().getSimpleName(); + } + +} diff --git a/core/src/main/java/net/jpountz/lz4/LZ4SafeUtils.java b/core/src/main/java/net/jpountz/lz4/LZ4SafeUtils.java new file mode 100644 index 00000000..7842bd77 --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/LZ4SafeUtils.java @@ -0,0 +1,179 @@ +package net.jpountz.lz4; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import static net.jpountz.lz4.LZ4Constants.LAST_LITERALS; +import static net.jpountz.lz4.LZ4Constants.ML_BITS; +import static net.jpountz.lz4.LZ4Constants.ML_MASK; +import static net.jpountz.lz4.LZ4Constants.RUN_MASK; +import net.jpountz.util.SafeUtils; + +enum LZ4SafeUtils { + ; + + static int hash(byte[] buf, int i) { + return LZ4Utils.hash(SafeUtils.readInt(buf, i)); + } + + static int hash64k(byte[] buf, int i) { + return LZ4Utils.hash64k(SafeUtils.readInt(buf, i)); + } + + static boolean readIntEquals(byte[] buf, int i, int j) { + return buf[i] == buf[j] && buf[i+1] == buf[j+1] && buf[i+2] == buf[j+2] && buf[i+3] == buf[j+3]; + } + + static void safeIncrementalCopy(byte[] dest, int matchOff, int dOff, int matchLen) { + for (int i = 0; i < matchLen; ++i) { + dest[dOff + i] = dest[matchOff + i]; + } + } + + static void wildIncrementalCopy(byte[] dest, int matchOff, int dOff, int matchCopyEnd) { + do { + copy8Bytes(dest, matchOff, dest, dOff); + matchOff += 8; + dOff += 8; + } while (dOff < matchCopyEnd); + } + + static void copy8Bytes(byte[] src, int sOff, byte[] dest, int dOff) { + for (int i = 0; i < 8; ++i) { + dest[dOff + i] = src[sOff + i]; + } + } + + static int commonBytes(byte[] b, int o1, int o2, int limit) { + int count = 0; + while (o2 < limit && b[o1++] == b[o2++]) { + ++count; + } + return count; + } + + static int commonBytesBackward(byte[] b, int o1, int o2, int l1, int l2) { + int count = 0; + while (o1 > l1 && o2 > l2 && b[--o1] == b[--o2]) { + ++count; + } + return count; + } + + static void safeArraycopy(byte[] src, int sOff, byte[] dest, int dOff, int len) { + System.arraycopy(src, sOff, dest, dOff, len); + } + + static void wildArraycopy(byte[] src, int sOff, byte[] dest, int dOff, int len) { + try { + for (int i = 0; i < len; i += 8) { + copy8Bytes(src, sOff + i, dest, dOff + i); + } + } catch (ArrayIndexOutOfBoundsException e) { + throw new LZ4Exception("Malformed input at offset " + sOff); + } + } + + static int encodeSequence(byte[] src, int anchor, int matchOff, int matchRef, int matchLen, byte[] dest, int dOff, int destEnd) { + final int runLen = matchOff - anchor; + final int tokenOff = dOff++; + + if (dOff + runLen + (2 + 1 + LAST_LITERALS) + (runLen >>> 8) > destEnd) { + throw new LZ4Exception("maxDestLen is too small"); + } + + int token; + if (runLen >= RUN_MASK) { + token = (byte) (RUN_MASK << ML_BITS); + dOff = writeLen(runLen - RUN_MASK, dest, dOff); + } else { + token = runLen << ML_BITS; + } + + // copy literals + wildArraycopy(src, anchor, dest, dOff, runLen); + dOff += runLen; + + // encode offset + final int matchDec = matchOff - matchRef; + dest[dOff++] = (byte) matchDec; + dest[dOff++] = (byte) (matchDec >>> 8); + + // encode match len + matchLen -= 4; + if (dOff + (1 + LAST_LITERALS) + (matchLen >>> 8) > destEnd) { + throw new LZ4Exception("maxDestLen is too small"); + } + if (matchLen >= ML_MASK) { + token |= ML_MASK; + dOff = writeLen(matchLen - RUN_MASK, dest, dOff); + } else { + token |= matchLen; + } + + dest[tokenOff] = (byte) token; + + return dOff; + } + + static int lastLiterals(byte[] src, int sOff, int srcLen, byte[] dest, int dOff, int destEnd) { + final int runLen = srcLen; + + if (dOff + runLen + 1 + (runLen + 255 - RUN_MASK) / 255 > destEnd) { + throw new LZ4Exception(); + } + + if (runLen >= RUN_MASK) { + dest[dOff++] = (byte) (RUN_MASK << ML_BITS); + dOff = writeLen(runLen - RUN_MASK, dest, dOff); + } else { + dest[dOff++] = (byte) (runLen << ML_BITS); + } + // copy literals + System.arraycopy(src, sOff, dest, dOff, 
runLen); + dOff += runLen; + + return dOff; + } + + static int writeLen(int len, byte[] dest, int dOff) { + while (len >= 0xFF) { + dest[dOff++] = (byte) 0xFF; + len -= 0xFF; + } + dest[dOff++] = (byte) len; + return dOff; + } + + static class Match { + int start, ref, len; + + void fix(int correction) { + start += correction; + ref += correction; + len -= correction; + } + + int end() { + return start + len; + } + } + + static void copyTo(Match m1, Match m2) { + m2.len = m1.len; + m2.start = m1.start; + m2.ref = m1.ref; + } + +} diff --git a/core/src/main/java/net/jpountz/lz4/LZ4StreamHelper.java b/core/src/main/java/net/jpountz/lz4/LZ4StreamHelper.java new file mode 100644 index 00000000..1f42692f --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/LZ4StreamHelper.java @@ -0,0 +1,36 @@ +package net.jpountz.lz4; + +import java.io.InputStream; +import java.io.OutputStream; +import java.io.IOException; + +public class LZ4StreamHelper { + static void writeLength(int length, OutputStream os) throws IOException { + int b1 = ((length & 0xff000000) >> 24); + int b2 = ((length & 0x00ff0000) >> 16); + int b3 = ((length & 0x0000ff00) >> 8); + int b4 = (length & 0xff0000ff); + os.write(b1); + os.write(b2); + os.write(b3); + os.write(b4); + } + + // network order, big endian, most significant byte first + // package scope + static int readLength(InputStream is) throws IOException { + int b1 = is.read(); + int b2 = is.read(); + int b3 = is.read(); + int b4 = is.read(); + + int length; + if((-1 == b1) || (-1 == b2) || (-1 == b3) || (-1 == b4)) { + length = -1; + } + else { + length = ((b1 << 24) | (b2 << 16) | (b3 << 8) | b4); + } + return length; + } +} \ No newline at end of file diff --git a/core/src/main/java/net/jpountz/lz4/LZ4StreamTest.java b/core/src/main/java/net/jpountz/lz4/LZ4StreamTest.java new file mode 100644 index 00000000..ca2d7cf7 --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/LZ4StreamTest.java @@ -0,0 +1,150 @@ +package net.jpountz.lz4; + +import static junit.framework.Assert.assertEquals; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.Random; + +import junit.framework.Assert; + +import org.junit.Before; +import org.junit.Test; + +public class LZ4StreamTest { + + private long seed; + private Random rand; + + private byte randomContent[]; + private byte compressedOutput[]; + + @Before + public void setUp() throws IOException { + seed = System.currentTimeMillis(); + rand = new Random(seed); + + int randomContentLength = rand.nextInt(10000000) + 10000000; + + randomContent = new byte[randomContentLength]; + rand.nextBytes(randomContent); + + compressContent(); + } + + private void compressContent() throws IOException { + ByteArrayOutputStream compressedOutputStream = new ByteArrayOutputStream(); + + LZ4OutputStream os = new LZ4OutputStream(compressedOutputStream); + int currentContentPosition = 0; + + while(currentContentPosition < randomContent.length) { + int testBlockSize = rand.nextInt(500000); + + if(testBlockSize > randomContent.length - currentContentPosition) + testBlockSize = randomContent.length - currentContentPosition; + + boolean writeByteByByte = true; //rand.nextBoolean(); + + if(writeByteByByte) { + for(int i=0;i= 0, got " + length); + } else if (length >= MAX_INPUT_SIZE) { + throw new IllegalArgumentException("length must be < " + MAX_INPUT_SIZE); + } + return length + length / 255 + 16; + } + + static int hash(int i) { + return (i * -1640531535) >>> 
((MIN_MATCH * 8) - HASH_LOG); + } + + static int hash64k(int i) { + return (i * -1640531535) >>> ((MIN_MATCH * 8) - HASH_LOG_64K); + } + + static int hashHC(int i) { + return (i * -1640531535) >>> ((MIN_MATCH * 8) - HASH_LOG_HC); + } + + static class Match { + int start, ref, len; + + void fix(int correction) { + start += correction; + ref += correction; + len -= correction; + } + + int end() { + return start + len; + } + } + + static void copyTo(Match m1, Match m2) { + m2.len = m1.len; + m2.start = m1.start; + m2.ref = m1.ref; + } + +} diff --git a/core/src/main/java/net/jpountz/lz4/package.html b/core/src/main/java/net/jpountz/lz4/package.html new file mode 100644 index 00000000..e5341067 --- /dev/null +++ b/core/src/main/java/net/jpountz/lz4/package.html @@ -0,0 +1,55 @@ + + + + + + + +

LZ4 compression. The entry point of the API is the +{@link net.jpountz.lz4.LZ4Factory} class, which gives access to +{@link net.jpountz.lz4.LZ4Compressor compressors} and +{@link net.jpountz.lz4.LZ4SafeDecompressor decompressors}.

+ + +

Sample usage:

+ +
+    LZ4Factory factory = LZ4Factory.fastestInstance();
+
+    byte[] data = "12345345234572".getBytes("UTF-8");
+    final int decompressedLength = data.length;
+
+    // compress data
+    LZ4Compressor compressor = factory.fastCompressor();
+    int maxCompressedLength = compressor.maxCompressedLength(decompressedLength);
+    byte[] compressed = new byte[maxCompressedLength];
+    int compressedLength = compressor.compress(data, 0, decompressedLength, compressed, 0, maxCompressedLength);
+
+    // decompress data
+    // - method 1: when the decompressed length is known
+    LZ4FastDecompressor decompressor = factory.fastDecompressor();
+    byte[] restored = new byte[decompressedLength];
+    int compressedLength2 = decompressor.decompress(compressed, 0, restored, 0, decompressedLength);
+    // compressedLength == compressedLength2
+
+    // - method 2: when the compressed length is known (a little slower)
+    // the destination buffer needs to be over-sized
+    LZ4SafeDecompressor decompressor2 = factory.safeDecompressor();
+    int decompressedLength2 = decompressor2.decompress(compressed, 0, compressedLength, restored, 0);
+    // decompressedLength == decompressedLength2
+
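One possible extension of the sample above (an illustrative sketch, not part of the original javadoc; it reuses the variables declared in the sample plus java.nio.ByteBuffer): since method 1 needs the decompressed length up front, a common approach is to store that length next to the compressed block, for instance as a fixed-size prefix in the spirit of the length framing used by LZ4StreamHelper elsewhere in this commit.

+    // illustrative framing: [decompressedLength][compressedLength][compressed bytes]
+    ByteBuffer frame = ByteBuffer.allocate(8 + compressedLength);
+    frame.putInt(decompressedLength);
+    frame.putInt(compressedLength);
+    frame.put(compressed, 0, compressedLength);
+
+    // later, read both lengths back before decompressing
+    ByteBuffer in = ByteBuffer.wrap(frame.array());
+    int originalLen = in.getInt();
+    int storedCompressedLen = in.getInt(); // handy for skipping past the frame in a larger stream
+    byte[] restored2 = new byte[originalLen];
+    decompressor.decompress(frame.array(), 8, restored2, 0, originalLen);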
+ + + diff --git a/core/src/main/java/net/jpountz/util/ByteBufferUtils.java b/core/src/main/java/net/jpountz/util/ByteBufferUtils.java new file mode 100644 index 00000000..9e5f3388 --- /dev/null +++ b/core/src/main/java/net/jpountz/util/ByteBufferUtils.java @@ -0,0 +1,92 @@ +package net.jpountz.util; + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.ReadOnlyBufferException; + +public enum ByteBufferUtils { + ; + + public static void checkRange(ByteBuffer buf, int off, int len) { + SafeUtils.checkLength(len); + if (len > 0) { + checkRange(buf, off); + checkRange(buf, off + len - 1); + } + } + + public static void checkRange(ByteBuffer buf, int off) { + if (off < 0 || off >= buf.capacity()) { + throw new ArrayIndexOutOfBoundsException(off); + } + } + + public static ByteBuffer inLittleEndianOrder(ByteBuffer buf) { + if (buf.order().equals(ByteOrder.LITTLE_ENDIAN)) { + return buf; + } else { + return buf.duplicate().order(ByteOrder.LITTLE_ENDIAN); + } + } + + public static ByteBuffer inNativeByteOrder(ByteBuffer buf) { + if (buf.order().equals(Utils.NATIVE_BYTE_ORDER)) { + return buf; + } else { + return buf.duplicate().order(Utils.NATIVE_BYTE_ORDER); + } + } + + public static byte readByte(ByteBuffer buf, int i) { + return buf.get(i); + } + + public static void writeInt(ByteBuffer buf, int i, int v) { + assert buf.order() == Utils.NATIVE_BYTE_ORDER; + buf.putInt(i, v); + } + + public static int readInt(ByteBuffer buf, int i) { + assert buf.order() == Utils.NATIVE_BYTE_ORDER; + return buf.getInt(i); + } + + public static int readIntLE(ByteBuffer buf, int i) { + assert buf.order() == ByteOrder.LITTLE_ENDIAN; + return buf.getInt(i); + } + + public static void writeLong(ByteBuffer buf, int i, long v) { + assert buf.order() == Utils.NATIVE_BYTE_ORDER; + buf.putLong(i, v); + } + + public static long readLong(ByteBuffer buf, int i) { + assert buf.order() == Utils.NATIVE_BYTE_ORDER; + return buf.getLong(i); + } + + public static long readLongLE(ByteBuffer buf, int i) { + assert buf.order() == ByteOrder.LITTLE_ENDIAN; + return buf.getLong(i); + } + + public static void writeByte(ByteBuffer dest, int off, int i) { + dest.put(off, (byte) i); + } + + public static void writeShortLE(ByteBuffer dest, int off, int i) { + dest.put(off, (byte) i); + dest.put(off + 1, (byte) (i >>> 8)); + } + + public static void checkNotReadOnly(ByteBuffer buffer) { + if (buffer.isReadOnly()) { + throw new ReadOnlyBufferException(); + } + } + + public static int readShortLE(ByteBuffer buf, int i) { + return (buf.get(i) & 0xFF) | ((buf.get(i+1) & 0xFF) << 8); + } +} diff --git a/core/src/main/java/net/jpountz/util/LZ4UnsafeUtils.java b/core/src/main/java/net/jpountz/util/LZ4UnsafeUtils.java new file mode 100644 index 00000000..a5ad7834 --- /dev/null +++ b/core/src/main/java/net/jpountz/util/LZ4UnsafeUtils.java @@ -0,0 +1,206 @@ +package net.jpountz.lz4; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import static net.jpountz.lz4.LZ4Constants.COPY_LENGTH; +import static net.jpountz.lz4.LZ4Constants.LAST_LITERALS; +import static net.jpountz.lz4.LZ4Constants.ML_BITS; +import static net.jpountz.lz4.LZ4Constants.ML_MASK; +import static net.jpountz.lz4.LZ4Constants.RUN_MASK; +import static net.jpountz.util.UnsafeUtils.readByte; +import static net.jpountz.util.UnsafeUtils.readInt; +import static net.jpountz.util.UnsafeUtils.readLong; +import static net.jpountz.util.UnsafeUtils.readShort; +import static net.jpountz.util.UnsafeUtils.writeByte; +import static net.jpountz.util.UnsafeUtils.writeInt; +import static net.jpountz.util.UnsafeUtils.writeLong; +import static net.jpountz.util.UnsafeUtils.writeShort; +import static net.jpountz.util.Utils.NATIVE_BYTE_ORDER; + +import java.nio.ByteOrder; + +enum LZ4UnsafeUtils { + ; + + static void safeArraycopy(byte[] src, int srcOff, byte[] dest, int destOff, int len) { + final int fastLen = len & 0xFFFFFFF8; + wildArraycopy(src, srcOff, dest, destOff, fastLen); + for (int i = 0, slowLen = len & 0x7; i < slowLen; i += 1) { + writeByte(dest, destOff + fastLen + i, readByte(src, srcOff + fastLen + i)); + } + } + + static void wildArraycopy(byte[] src, int srcOff, byte[] dest, int destOff, int len) { + for (int i = 0; i < len; i += 8) { + writeLong(dest, destOff + i, readLong(src, srcOff + i)); + } + } + + static void wildIncrementalCopy(byte[] dest, int matchOff, int dOff, int matchCopyEnd) { + if (dOff - matchOff < 4) { + for (int i = 0; i < 4; ++i) { + writeByte(dest, dOff+i, readByte(dest, matchOff+i)); + } + dOff += 4; + matchOff += 4; + int dec = 0; + assert dOff >= matchOff && dOff - matchOff < 8; + switch (dOff - matchOff) { + case 1: + matchOff -= 3; + break; + case 2: + matchOff -= 2; + break; + case 3: + matchOff -= 3; + dec = -1; + break; + case 5: + dec = 1; + break; + case 6: + dec = 2; + break; + case 7: + dec = 3; + break; + default: + break; + } + writeInt(dest, dOff, readInt(dest, matchOff)); + dOff += 4; + matchOff -= dec; + } else if (dOff - matchOff < COPY_LENGTH) { + writeLong(dest, dOff, readLong(dest, matchOff)); + dOff += dOff - matchOff; + } + while (dOff < matchCopyEnd) { + writeLong(dest, dOff, readLong(dest, matchOff)); + dOff += 8; + matchOff += 8; + } + } + + static void safeIncrementalCopy(byte[] dest, int matchOff, int dOff, int matchLen) { + for (int i = 0; i < matchLen; ++i) { + dest[dOff + i] = dest[matchOff + i]; + writeByte(dest, dOff + i, readByte(dest, matchOff + i)); + } + } + + static int readShortLittleEndian(byte[] src, int srcOff) { + short s = readShort(src, srcOff); + if (NATIVE_BYTE_ORDER == ByteOrder.BIG_ENDIAN) { + s = Short.reverseBytes(s); + } + return s & 0xFFFF; + } + + static void writeShortLittleEndian(byte[] dest, int destOff, int value) { + short s = (short) value; + if (NATIVE_BYTE_ORDER == ByteOrder.BIG_ENDIAN) { + s = Short.reverseBytes(s); + } + writeShort(dest, destOff, s); + } + + static boolean readIntEquals(byte[] src, int ref, int sOff) { + return readInt(src, ref) == readInt(src, sOff); + } + + static int commonBytes(byte[] src, int ref, int sOff, int srcLimit) { + int matchLen = 0; + while (sOff <= srcLimit - 8) { + if (readLong(src, sOff) == readLong(src, ref)) { + matchLen += 8; + ref += 8; + sOff += 8; + } else { + final int zeroBits; + if (NATIVE_BYTE_ORDER == ByteOrder.BIG_ENDIAN) { + zeroBits = Long.numberOfLeadingZeros(readLong(src, sOff) ^ readLong(src, ref)); + } else { + zeroBits = Long.numberOfTrailingZeros(readLong(src, sOff) ^ readLong(src, ref)); + } + return matchLen 
+ (zeroBits >>> 3); + } + } + while (sOff < srcLimit && readByte(src, ref++) == readByte(src, sOff++)) { + ++matchLen; + } + return matchLen; + } + + static int writeLen(int len, byte[] dest, int dOff) { + while (len >= 0xFF) { + writeByte(dest, dOff++, 0xFF); + len -= 0xFF; + } + writeByte(dest, dOff++, len); + return dOff; + } + + static int encodeSequence(byte[] src, int anchor, int matchOff, int matchRef, int matchLen, byte[] dest, int dOff, int destEnd) { + final int runLen = matchOff - anchor; + final int tokenOff = dOff++; + int token; + + if (runLen >= RUN_MASK) { + token = (byte) (RUN_MASK << ML_BITS); + dOff = writeLen(runLen - RUN_MASK, dest, dOff); + } else { + token = runLen << ML_BITS; + } + + // copy literals + wildArraycopy(src, anchor, dest, dOff, runLen); + dOff += runLen; + + // encode offset + final int matchDec = matchOff - matchRef; + dest[dOff++] = (byte) matchDec; + dest[dOff++] = (byte) (matchDec >>> 8); + + // encode match len + matchLen -= 4; + if (dOff + (1 + LAST_LITERALS) + (matchLen >>> 8) > destEnd) { + throw new LZ4Exception("maxDestLen is too small"); + } + if (matchLen >= ML_MASK) { + token |= ML_MASK; + dOff = writeLen(matchLen - RUN_MASK, dest, dOff); + } else { + token |= matchLen; + } + + dest[tokenOff] = (byte) token; + + return dOff; + } + + static int commonBytesBackward(byte[] b, int o1, int o2, int l1, int l2) { + int count = 0; + while (o1 > l1 && o2 > l2 && readByte(b, --o1) == readByte(b, --o2)) { + ++count; + } + return count; + } + + static int lastLiterals(byte[] src, int sOff, int srcLen, byte[] dest, int dOff, int destEnd) { + return LZ4SafeUtils.lastLiterals(src, sOff, srcLen, dest, dOff, destEnd); + } + +} diff --git a/core/src/main/java/net/jpountz/util/Native.java b/core/src/main/java/net/jpountz/util/Native.java new file mode 100644 index 00000000..d4a8707b --- /dev/null +++ b/core/src/main/java/net/jpountz/util/Native.java @@ -0,0 +1,125 @@ +package net.jpountz.util; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import com.boydti.fawe.Fawe; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; + +/** FOR INTERNAL USE ONLY */ +public enum Native { + ; + + private enum OS { + // Even on Windows, the default compiler from cpptasks (gcc) uses .so as a shared lib extension + WINDOWS("win32", "so"), LINUX("linux", "so"), MAC("darwin", "dylib"), SOLARIS("solaris", "so"); + public final String name, libExtension; + + OS(String name, String libExtension) { + this.name = name; + this.libExtension = libExtension; + } + } + + private static String arch() { + return System.getProperty("os.arch"); + } + + private static OS os() { + String osName = System.getProperty("os.name"); + if (osName.contains("Linux")) { + return OS.LINUX; + } else if (osName.contains("Mac")) { + return OS.MAC; + } else if (osName.contains("Windows")) { + return OS.WINDOWS; + } else if (osName.contains("Solaris") || osName.contains("SunOS")) { + return OS.SOLARIS; + } else { + throw new UnsupportedOperationException("Unsupported operating system: " + + osName); + } + } + + private static String resourceName() { + OS os = os(); + return "/" + os.name + "/" + arch() + "/liblz4-java." + os.libExtension; + } + + private static boolean loaded = false; + + public static synchronized boolean isLoaded() { + return loaded; + } + + public static synchronized void load() { + if (loaded) { + return; + } + String resourceName = resourceName(); + System.out.println("NAME: " + resourceName); + System.out.println("WORK: " + Fawe.class.getResourceAsStream("/LICENSE")); + System.out.println("WORK: " + Fawe.class.getResourceAsStream("/win32/amd64/liblz4-java.so")); + InputStream is = Fawe.class.getResourceAsStream(resourceName); + if (is == null) { + throw new UnsupportedOperationException("Unsupported OS/arch, cannot find " + resourceName + ". Please try building from source."); + } + File tempLib; + try { + tempLib = File.createTempFile("liblz4-java", "." + os().libExtension); + // copy to tempLib + FileOutputStream out = new FileOutputStream(tempLib); + try { + byte[] buf = new byte[4096]; + while (true) { + int read = is.read(buf); + if (read == -1) { + break; + } + out.write(buf, 0, read); + } + try { + out.close(); + out = null; + } catch (IOException e) { + // ignore + } + System.load(tempLib.getAbsolutePath()); + loaded = true; + } finally { + try { + if (out != null) { + out.close(); + } + } catch (IOException e) { + // ignore + } + if (tempLib != null && tempLib.exists()) { + if (!loaded) { + tempLib.delete(); + } else { + // try to delete on exit, does it work on Windows? + tempLib.deleteOnExit(); + } + } + } + } catch (IOException e) { + throw new ExceptionInInitializerError("Cannot unpack liblz4-java"); + } + } + +} diff --git a/core/src/main/java/net/jpountz/util/SafeUtils.java b/core/src/main/java/net/jpountz/util/SafeUtils.java new file mode 100644 index 00000000..ceaf9177 --- /dev/null +++ b/core/src/main/java/net/jpountz/util/SafeUtils.java @@ -0,0 +1,95 @@ +package net.jpountz.util; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.nio.ByteOrder; + +public enum SafeUtils { + ; + + public static void checkRange(byte[] buf, int off) { + if (off < 0 || off >= buf.length) { + throw new ArrayIndexOutOfBoundsException(off); + } + } + + public static void checkRange(byte[] buf, int off, int len) { + checkLength(len); + if (len > 0) { + checkRange(buf, off); + checkRange(buf, off + len - 1); + } + } + + public static void checkLength(int len) { + if (len < 0) { + throw new IllegalArgumentException("lengths must be >= 0"); + } + } + + public static byte readByte(byte[] buf, int i) { + return buf[i]; + } + + public static int readIntBE(byte[] buf, int i) { + return ((buf[i] & 0xFF) << 24) | ((buf[i+1] & 0xFF) << 16) | ((buf[i+2] & 0xFF) << 8) | (buf[i+3] & 0xFF); + } + + public static int readIntLE(byte[] buf, int i) { + return (buf[i] & 0xFF) | ((buf[i+1] & 0xFF) << 8) | ((buf[i+2] & 0xFF) << 16) | ((buf[i+3] & 0xFF) << 24); + } + + public static int readInt(byte[] buf, int i) { + if (Utils.NATIVE_BYTE_ORDER == ByteOrder.BIG_ENDIAN) { + return readIntBE(buf, i); + } else { + return readIntLE(buf, i); + } + } + + public static long readLongLE(byte[] buf, int i) { + return (buf[i] & 0xFFL) | ((buf[i+1] & 0xFFL) << 8) | ((buf[i+2] & 0xFFL) << 16) | ((buf[i+3] & 0xFFL) << 24) + | ((buf[i+4] & 0xFFL) << 32) | ((buf[i+5] & 0xFFL) << 40) | ((buf[i+6] & 0xFFL) << 48) | ((buf[i+7] & 0xFFL) << 56); + } + + public static void writeShortLE(byte[] buf, int off, int v) { + buf[off++] = (byte) v; + buf[off++] = (byte) (v >>> 8); + } + + public static void writeInt(int[] buf, int off, int v) { + buf[off] = v; + } + + public static int readInt(int[] buf, int off) { + return buf[off]; + } + + public static void writeByte(byte[] dest, int off, int i) { + dest[off] = (byte) i; + } + + public static void writeShort(short[] buf, int off, int v) { + buf[off] = (short) v; + } + + public static int readShortLE(byte[] buf, int i) { + return (buf[i] & 0xFF) | ((buf[i+1] & 0xFF) << 8); + } + + public static int readShort(short[] buf, int off) { + return buf[off] & 0xFFFF; + } +} diff --git a/core/src/main/java/net/jpountz/util/UnsafeUtils.java b/core/src/main/java/net/jpountz/util/UnsafeUtils.java new file mode 100644 index 00000000..30231ef1 --- /dev/null +++ b/core/src/main/java/net/jpountz/util/UnsafeUtils.java @@ -0,0 +1,147 @@ +package net.jpountz.util; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import static net.jpountz.util.Utils.NATIVE_BYTE_ORDER; + +import java.lang.reflect.Field; +import java.nio.ByteOrder; + +import sun.misc.Unsafe; + +public enum UnsafeUtils { + ; + + private static final Unsafe UNSAFE; + private static final long BYTE_ARRAY_OFFSET; + private static final int BYTE_ARRAY_SCALE; + private static final long INT_ARRAY_OFFSET; + private static final int INT_ARRAY_SCALE; + private static final long SHORT_ARRAY_OFFSET; + private static final int SHORT_ARRAY_SCALE; + + static { + try { + Field theUnsafe = Unsafe.class.getDeclaredField("theUnsafe"); + theUnsafe.setAccessible(true); + UNSAFE = (Unsafe) theUnsafe.get(null); + BYTE_ARRAY_OFFSET = UNSAFE.arrayBaseOffset(byte[].class); + BYTE_ARRAY_SCALE = UNSAFE.arrayIndexScale(byte[].class); + INT_ARRAY_OFFSET = UNSAFE.arrayBaseOffset(int[].class); + INT_ARRAY_SCALE = UNSAFE.arrayIndexScale(int[].class); + SHORT_ARRAY_OFFSET = UNSAFE.arrayBaseOffset(short[].class); + SHORT_ARRAY_SCALE = UNSAFE.arrayIndexScale(short[].class); + } catch (IllegalAccessException e) { + throw new ExceptionInInitializerError("Cannot access Unsafe"); + } catch (NoSuchFieldException e) { + throw new ExceptionInInitializerError("Cannot access Unsafe"); + } catch (SecurityException e) { + throw new ExceptionInInitializerError("Cannot access Unsafe"); + } + } + + public static void checkRange(byte[] buf, int off) { + SafeUtils.checkRange(buf, off); + } + + public static void checkRange(byte[] buf, int off, int len) { + SafeUtils.checkRange(buf, off, len); + } + + public static void checkLength(int len) { + SafeUtils.checkLength(len); + } + + public static byte readByte(byte[] src, int srcOff) { + return UNSAFE.getByte(src, BYTE_ARRAY_OFFSET + BYTE_ARRAY_SCALE * srcOff); + } + + public static void writeByte(byte[] src, int srcOff, byte value) { + UNSAFE.putByte(src, BYTE_ARRAY_OFFSET + BYTE_ARRAY_SCALE * srcOff, (byte) value); + } + + public static void writeByte(byte[] src, int srcOff, int value) { + writeByte(src, srcOff, (byte) value); + } + + public static long readLong(byte[] src, int srcOff) { + return UNSAFE.getLong(src, BYTE_ARRAY_OFFSET + srcOff); + } + + public static long readLongLE(byte[] src, int srcOff) { + long i = readLong(src, srcOff); + if (NATIVE_BYTE_ORDER == ByteOrder.BIG_ENDIAN) { + i = Long.reverseBytes(i); + } + return i; + } + + public static void writeLong(byte[] dest, int destOff, long value) { + UNSAFE.putLong(dest, BYTE_ARRAY_OFFSET + destOff, value); + } + + public static int readInt(byte[] src, int srcOff) { + return UNSAFE.getInt(src, BYTE_ARRAY_OFFSET + srcOff); + } + + public static int readIntLE(byte[] src, int srcOff) { + int i = readInt(src, srcOff); + if (NATIVE_BYTE_ORDER == ByteOrder.BIG_ENDIAN) { + i = Integer.reverseBytes(i); + } + return i; + } + + public static void writeInt(byte[] dest, int destOff, int value) { + UNSAFE.putInt(dest, BYTE_ARRAY_OFFSET + destOff, value); + } + + public static short readShort(byte[] src, int srcOff) { + return UNSAFE.getShort(src, BYTE_ARRAY_OFFSET + srcOff); + } + + public static int readShortLE(byte[] src, int srcOff) { + short s = readShort(src, srcOff); + if (NATIVE_BYTE_ORDER == ByteOrder.BIG_ENDIAN) { + s = Short.reverseBytes(s); + } + return s & 0xFFFF; + } + + public static void writeShort(byte[] dest, int destOff, short value) { + UNSAFE.putShort(dest, BYTE_ARRAY_OFFSET + destOff, value); + } + + public static void writeShortLE(byte[] buf, int off, int v) { + writeByte(buf, off, (byte) v); + writeByte(buf, off + 1, (byte) (v >>> 8)); + } + + public static 
int readInt(int[] src, int srcOff) { + return UNSAFE.getInt(src, INT_ARRAY_OFFSET + INT_ARRAY_SCALE * srcOff); + } + + public static void writeInt(int[] dest, int destOff, int value) { + UNSAFE.putInt(dest, INT_ARRAY_OFFSET + INT_ARRAY_SCALE * destOff, value); + } + + public static int readShort(short[] src, int srcOff) { + return UNSAFE.getShort(src, SHORT_ARRAY_OFFSET + SHORT_ARRAY_SCALE * srcOff) & 0xFFFF; + } + + public static void writeShort(short[] dest, int destOff, int value) { + UNSAFE.putShort(dest, SHORT_ARRAY_OFFSET + SHORT_ARRAY_SCALE * destOff, (short) value); + } +} diff --git a/core/src/main/java/net/jpountz/util/Utils.java b/core/src/main/java/net/jpountz/util/Utils.java new file mode 100644 index 00000000..12177bcf --- /dev/null +++ b/core/src/main/java/net/jpountz/util/Utils.java @@ -0,0 +1,35 @@ +package net.jpountz.util; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.nio.ByteOrder; + +public enum Utils { + ; + + public static final ByteOrder NATIVE_BYTE_ORDER = ByteOrder.nativeOrder(); + + private static final boolean unalignedAccessAllowed; + static { + String arch = System.getProperty("os.arch"); + unalignedAccessAllowed = arch.equals("i386") || arch.equals("x86") + || arch.equals("amd64") || arch.equals("x86_64"); + } + + public static boolean isUnalignedAccessAllowed() { + return unalignedAccessAllowed; + } + +} diff --git a/core/src/main/java/net/jpountz/util/package.html b/core/src/main/java/net/jpountz/util/package.html new file mode 100644 index 00000000..4b3ceb98 --- /dev/null +++ b/core/src/main/java/net/jpountz/util/package.html @@ -0,0 +1,22 @@ + + + + + + + +

Utility classes.
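A brief illustrative sketch (not part of the original package description): SafeUtils offers bounds-checked, endian-explicit primitive access for the pure-Java codecs, Utils.NATIVE_BYTE_ORDER records the platform byte order, and Native.load() extracts the bundled liblz4-java binary for the current OS/arch to a temporary file and loads it with System.load().

+    byte[] buf = new byte[8];
+    SafeUtils.writeShortLE(buf, 0, 0x1234);    // always stored as 0x34, 0x12
+    int v = SafeUtils.readShortLE(buf, 0);     // 0x1234 on any platform
+    // readInt delegates to readIntBE or readIntLE depending on Utils.NATIVE_BYTE_ORDER
+    int nativeOrdered = SafeUtils.readInt(buf, 0);
+
+    // loading the bundled native library is optional; unsupported platforms throw,
+    // in which case the pure-Java implementations remain usable
+    try {
+        Native.load();
+    } catch (Throwable t) {
+        t.printStackTrace();
+    }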

+ + \ No newline at end of file diff --git a/core/src/main/java/net/jpountz/xxhash/AbstractStreamingXXHash32Java.java b/core/src/main/java/net/jpountz/xxhash/AbstractStreamingXXHash32Java.java new file mode 100644 index 00000000..3dd3ed4f --- /dev/null +++ b/core/src/main/java/net/jpountz/xxhash/AbstractStreamingXXHash32Java.java @@ -0,0 +1,42 @@ +package net.jpountz.xxhash; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import static net.jpountz.xxhash.XXHashConstants.PRIME1; +import static net.jpountz.xxhash.XXHashConstants.PRIME2; + +abstract class AbstractStreamingXXHash32Java extends StreamingXXHash32 { + + int v1, v2, v3, v4, memSize; + long totalLen; + final byte[] memory; + + AbstractStreamingXXHash32Java(int seed) { + super(seed); + memory = new byte[16]; + reset(); + } + + @Override + public void reset() { + v1 = seed + PRIME1 + PRIME2; + v2 = seed + PRIME2; + v3 = seed + 0; + v4 = seed - PRIME1; + totalLen = 0; + memSize = 0; + } + +} diff --git a/core/src/main/java/net/jpountz/xxhash/AbstractStreamingXXHash64Java.java b/core/src/main/java/net/jpountz/xxhash/AbstractStreamingXXHash64Java.java new file mode 100644 index 00000000..97e294dd --- /dev/null +++ b/core/src/main/java/net/jpountz/xxhash/AbstractStreamingXXHash64Java.java @@ -0,0 +1,43 @@ +package net.jpountz.xxhash; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import static net.jpountz.xxhash.XXHashConstants.PRIME64_1; +import static net.jpountz.xxhash.XXHashConstants.PRIME64_2; + +abstract class AbstractStreamingXXHash64Java extends StreamingXXHash64 { + + int memSize; + long v1, v2, v3, v4; + long totalLen; + final byte[] memory; + + AbstractStreamingXXHash64Java(long seed) { + super(seed); + memory = new byte[32]; + reset(); + } + + @Override + public void reset() { + v1 = seed + PRIME64_1 + PRIME64_2; + v2 = seed + PRIME64_2; + v3 = seed + 0; + v4 = seed - PRIME64_1; + totalLen = 0; + memSize = 0; + } + +} diff --git a/core/src/main/java/net/jpountz/xxhash/StreamingXXHash32.java b/core/src/main/java/net/jpountz/xxhash/StreamingXXHash32.java new file mode 100644 index 00000000..c59f0346 --- /dev/null +++ b/core/src/main/java/net/jpountz/xxhash/StreamingXXHash32.java @@ -0,0 +1,111 @@ +package net.jpountz.xxhash; + +import java.util.zip.Checksum; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + + +/** + * Streaming interface for {@link XXHash32}. + *

+ * This API is compatible with the {@link XXHash32 block API} and the following + * code samples are equivalent: + *

+ *   int hash(XXHashFactory xxhashFactory, byte[] buf, int off, int len, int seed) {
+ *     return xxhashFactory.hash32().hash(buf, off, len, seed);
+ *   }
+ * 
+ *
+ *   int hash(XXHashFactory xxhashFactory, byte[] buf, int off, int len, int seed) {
+ *     StreamingXXHash32 sh32 = xxhashFactory.newStreamingHash32(seed);
+ *     sh32.update(buf, off, len);
+ *     return sh32.getValue();
+ *   }
+ * 
+ *

+ * Instances of this class are not thread-safe. + */ +public abstract class StreamingXXHash32 { + + interface Factory { + + StreamingXXHash32 newStreamingHash(int seed); + + } + + final int seed; + + StreamingXXHash32(int seed) { + this.seed = seed; + } + + /** + * Get the value of the checksum. + */ + public abstract int getValue(); + + /** + * Update the value of the hash with buf[off:off+len]. + */ + public abstract void update(byte[] buf, int off, int len); + + /** + * Reset this instance to the state it had right after instantiation. The + * seed remains unchanged. + */ + public abstract void reset(); + + @Override + public String toString() { + return getClass().getSimpleName() + "(seed=" + seed + ")"; + } + + /** + * Return a {@link Checksum} view of this instance. Modifications to the view + * will modify this instance too and vice-versa. + */ + public final Checksum asChecksum() { + return new Checksum() { + + @Override + public long getValue() { + return StreamingXXHash32.this.getValue() & 0xFFFFFFFL; + } + + @Override + public void reset() { + StreamingXXHash32.this.reset(); + } + + @Override + public void update(int b) { + StreamingXXHash32.this.update(new byte[] {(byte) b}, 0, 1); + } + + @Override + public void update(byte[] b, int off, int len) { + StreamingXXHash32.this.update(b, off, len); + } + + @Override + public String toString() { + return StreamingXXHash32.this.toString(); + } + + }; + } + +} \ No newline at end of file diff --git a/core/src/main/java/net/jpountz/xxhash/StreamingXXHash32JNI.java b/core/src/main/java/net/jpountz/xxhash/StreamingXXHash32JNI.java new file mode 100644 index 00000000..e9b58fb4 --- /dev/null +++ b/core/src/main/java/net/jpountz/xxhash/StreamingXXHash32JNI.java @@ -0,0 +1,71 @@ +package net.jpountz.xxhash; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +final class StreamingXXHash32JNI extends StreamingXXHash32 { + + static class Factory implements StreamingXXHash32.Factory { + + public static final StreamingXXHash32.Factory INSTANCE = new Factory(); + + @Override + public StreamingXXHash32 newStreamingHash(int seed) { + return new StreamingXXHash32JNI(seed); + } + + } + + private long state; + + StreamingXXHash32JNI(int seed) { + super(seed); + state = XXHashJNI.XXH32_init(seed); + } + + private void checkState() { + if (state == 0) { + throw new AssertionError("Already finalized"); + } + } + + @Override + public void reset() { + checkState(); + XXHashJNI.XXH32_free(state); + state = XXHashJNI.XXH32_init(seed); + } + + @Override + public int getValue() { + checkState(); + return XXHashJNI.XXH32_digest(state); + } + + @Override + public void update(byte[] bytes, int off, int len) { + checkState(); + XXHashJNI.XXH32_update(state, bytes, off, len); + } + + @Override + protected void finalize() throws Throwable { + super.finalize(); + // free memory + XXHashJNI.XXH32_free(state); + state = 0; + } + +} diff --git a/core/src/main/java/net/jpountz/xxhash/StreamingXXHash64.java b/core/src/main/java/net/jpountz/xxhash/StreamingXXHash64.java new file mode 100644 index 00000000..c68fb4d1 --- /dev/null +++ b/core/src/main/java/net/jpountz/xxhash/StreamingXXHash64.java @@ -0,0 +1,111 @@ +package net.jpountz.xxhash; + +import java.util.zip.Checksum; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + + +/** + * Streaming interface for {@link XXHash64}. + *

+ * This API is compatible with the {@link XXHash64 block API} and the following + * code samples are equivalent: + *

+ *   long hash(XXHashFactory xxhashFactory, byte[] buf, int off, int len, long seed) {
+ *     return xxhashFactory.hash64().hash(buf, off, len, seed);
+ *   }
+ * 
+ *
+ *   long hash(XXHashFactory xxhashFactory, byte[] buf, int off, int len, long seed) {
+ *     StreamingXXHash64 sh64 = xxhashFactory.newStreamingHash64(seed);
+ *     sh64.update(buf, off, len);
+ *     return sh64.getValue();
+ *   }
+ * 
+ *

+ * Instances of this class are not thread-safe. + */ +public abstract class StreamingXXHash64 { + + interface Factory { + + StreamingXXHash64 newStreamingHash(long seed); + + } + + final long seed; + + StreamingXXHash64(long seed) { + this.seed = seed; + } + + /** + * Get the value of the checksum. + */ + public abstract long getValue(); + + /** + * Update the value of the hash with buf[off:off+len]. + */ + public abstract void update(byte[] buf, int off, int len); + + /** + * Reset this instance to the state it had right after instantiation. The + * seed remains unchanged. + */ + public abstract void reset(); + + @Override + public String toString() { + return getClass().getSimpleName() + "(seed=" + seed + ")"; + } + + /** + * Return a {@link Checksum} view of this instance. Modifications to the view + * will modify this instance too and vice-versa. + */ + public final Checksum asChecksum() { + return new Checksum() { + + @Override + public long getValue() { + return StreamingXXHash64.this.getValue(); + } + + @Override + public void reset() { + StreamingXXHash64.this.reset(); + } + + @Override + public void update(int b) { + StreamingXXHash64.this.update(new byte[] {(byte) b}, 0, 1); + } + + @Override + public void update(byte[] b, int off, int len) { + StreamingXXHash64.this.update(b, off, len); + } + + @Override + public String toString() { + return StreamingXXHash64.this.toString(); + } + + }; + } + +} \ No newline at end of file diff --git a/core/src/main/java/net/jpountz/xxhash/StreamingXXHash64JNI.java b/core/src/main/java/net/jpountz/xxhash/StreamingXXHash64JNI.java new file mode 100644 index 00000000..b7ab1acd --- /dev/null +++ b/core/src/main/java/net/jpountz/xxhash/StreamingXXHash64JNI.java @@ -0,0 +1,71 @@ +package net.jpountz.xxhash; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +final class StreamingXXHash64JNI extends StreamingXXHash64 { + + static class Factory implements StreamingXXHash64.Factory { + + public static final StreamingXXHash64.Factory INSTANCE = new Factory(); + + @Override + public StreamingXXHash64 newStreamingHash(long seed) { + return new StreamingXXHash64JNI(seed); + } + + } + + private long state; + + StreamingXXHash64JNI(long seed) { + super(seed); + state = XXHashJNI.XXH64_init(seed); + } + + private void checkState() { + if (state == 0) { + throw new AssertionError("Already finalized"); + } + } + + @Override + public void reset() { + checkState(); + XXHashJNI.XXH64_free(state); + state = XXHashJNI.XXH64_init(seed); + } + + @Override + public long getValue() { + checkState(); + return XXHashJNI.XXH64_digest(state); + } + + @Override + public void update(byte[] bytes, int off, int len) { + checkState(); + XXHashJNI.XXH64_update(state, bytes, off, len); + } + + @Override + protected void finalize() throws Throwable { + super.finalize(); + // free memory + XXHashJNI.XXH64_free(state); + state = 0; + } + +} diff --git a/core/src/main/java/net/jpountz/xxhash/XXHash32.java b/core/src/main/java/net/jpountz/xxhash/XXHash32.java new file mode 100644 index 00000000..5d211436 --- /dev/null +++ b/core/src/main/java/net/jpountz/xxhash/XXHash32.java @@ -0,0 +1,55 @@ +package net.jpountz.xxhash; + +import java.nio.ByteBuffer; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * A 32-bits hash. + *

+ * Instances of this class are thread-safe. + */ +public abstract class XXHash32 { + + /** + * Compute the 32-bits hash of buf[off:off+len] using seed + * seed. + */ + public abstract int hash(byte[] buf, int off, int len, int seed); + + /** + * Compute the hash of the given slice of the {@link ByteBuffer}. + * {@link ByteBuffer#position() position} and {@link ByteBuffer#limit() limit} + * are not modified. + */ + public abstract int hash(ByteBuffer buf, int off, int len, int seed); + + /** + * Compute the hash of the given {@link ByteBuffer}. The + * {@link ByteBuffer#position() position} is moved in order to reflect bytes + * which have been read. + */ + public final int hash(ByteBuffer buf, int seed) { + final int hash = hash(buf, buf.position(), buf.remaining(), seed); + buf.position(buf.limit()); + return hash; + } + + @Override + public String toString() { + return getClass().getSimpleName(); + } + +} diff --git a/core/src/main/java/net/jpountz/xxhash/XXHash32JNI.java b/core/src/main/java/net/jpountz/xxhash/XXHash32JNI.java new file mode 100644 index 00000000..4b713d11 --- /dev/null +++ b/core/src/main/java/net/jpountz/xxhash/XXHash32JNI.java @@ -0,0 +1,49 @@ +package net.jpountz.xxhash; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import static net.jpountz.util.ByteBufferUtils.checkRange; +import static net.jpountz.util.SafeUtils.checkRange; + +import java.nio.ByteBuffer; + +final class XXHash32JNI extends XXHash32 { + + public static final XXHash32 INSTANCE = new XXHash32JNI(); + private static XXHash32 SAFE_INSTANCE; + + @Override + public int hash(byte[] buf, int off, int len, int seed) { + checkRange(buf, off, len); + return XXHashJNI.XXH32(buf, off, len, seed); + } + + @Override + public int hash(ByteBuffer buf, int off, int len, int seed) { + if (buf.isDirect()) { + checkRange(buf, off, len); + return XXHashJNI.XXH32BB(buf, off, len, seed); + } else if (buf.hasArray()) { + return hash(buf.array(), off + buf.arrayOffset(), len, seed); + } else { + XXHash32 safeInstance = SAFE_INSTANCE; + if (safeInstance == null) { + safeInstance = SAFE_INSTANCE = XXHashFactory.safeInstance().hash32(); + } + return safeInstance.hash(buf, off, len, seed); + } + } + +} diff --git a/core/src/main/java/net/jpountz/xxhash/XXHash64.java b/core/src/main/java/net/jpountz/xxhash/XXHash64.java new file mode 100644 index 00000000..3ab73ddc --- /dev/null +++ b/core/src/main/java/net/jpountz/xxhash/XXHash64.java @@ -0,0 +1,55 @@ +package net.jpountz.xxhash; + +import java.nio.ByteBuffer; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * A 64-bits hash. + *

+ * Instances of this class are thread-safe. + */ +public abstract class XXHash64 { + + /** + * Compute the 64-bits hash of buf[off:off+len] using seed + * seed. + */ + public abstract long hash(byte[] buf, int off, int len, long seed); + + /** + * Compute the hash of the given slice of the {@link ByteBuffer}. + * {@link ByteBuffer#position() position} and {@link ByteBuffer#limit() limit} + * are not modified. + */ + public abstract long hash(ByteBuffer buf, int off, int len, long seed); + + /** + * Compute the hash of the given {@link ByteBuffer}. The + * {@link ByteBuffer#position() position} is moved in order to reflect bytes + * which have been read. + */ + public final long hash(ByteBuffer buf, long seed) { + final long hash = hash(buf, buf.position(), buf.remaining(), seed); + buf.position(buf.limit()); + return hash; + } + + @Override + public String toString() { + return getClass().getSimpleName(); + } + +} diff --git a/core/src/main/java/net/jpountz/xxhash/XXHash64JNI.java b/core/src/main/java/net/jpountz/xxhash/XXHash64JNI.java new file mode 100644 index 00000000..d952e1e5 --- /dev/null +++ b/core/src/main/java/net/jpountz/xxhash/XXHash64JNI.java @@ -0,0 +1,49 @@ +package net.jpountz.xxhash; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import static net.jpountz.util.ByteBufferUtils.checkRange; +import static net.jpountz.util.SafeUtils.checkRange; + +import java.nio.ByteBuffer; + +final class XXHash64JNI extends XXHash64 { + + public static final XXHash64 INSTANCE = new XXHash64JNI(); + private static XXHash64 SAFE_INSTANCE; + + @Override + public long hash(byte[] buf, int off, int len, long seed) { + checkRange(buf, off, len); + return XXHashJNI.XXH64(buf, off, len, seed); + } + + @Override + public long hash(ByteBuffer buf, int off, int len, long seed) { + if (buf.isDirect()) { + checkRange(buf, off, len); + return XXHashJNI.XXH64BB(buf, off, len, seed); + } else if (buf.hasArray()) { + return hash(buf.array(), off + buf.arrayOffset(), len, seed); + } else { + XXHash64 safeInstance = SAFE_INSTANCE; + if (safeInstance == null) { + safeInstance = SAFE_INSTANCE = XXHashFactory.safeInstance().hash64(); + } + return safeInstance.hash(buf, off, len, seed); + } + } + +} diff --git a/core/src/main/java/net/jpountz/xxhash/XXHashConstants.java b/core/src/main/java/net/jpountz/xxhash/XXHashConstants.java new file mode 100644 index 00000000..2e887dfd --- /dev/null +++ b/core/src/main/java/net/jpountz/xxhash/XXHashConstants.java @@ -0,0 +1,31 @@ +package net.jpountz.xxhash; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +enum XXHashConstants { + ; + + static final int PRIME1 = -1640531535; + static final int PRIME2 = -2048144777; + static final int PRIME3 = -1028477379; + static final int PRIME4 = 668265263; + static final int PRIME5 = 374761393; + + static final long PRIME64_1 = -7046029288634856825L; //11400714785074694791 + static final long PRIME64_2 = -4417276706812531889L; //14029467366897019727 + static final long PRIME64_3 = 1609587929392839161L; + static final long PRIME64_4 = -8796714831421723037L; //9650029242287828579 + static final long PRIME64_5 = 2870177450012600261L; +} diff --git a/core/src/main/java/net/jpountz/xxhash/XXHashFactory.java b/core/src/main/java/net/jpountz/xxhash/XXHashFactory.java new file mode 100644 index 00000000..ab03dff1 --- /dev/null +++ b/core/src/main/java/net/jpountz/xxhash/XXHashFactory.java @@ -0,0 +1,220 @@ +package net.jpountz.xxhash; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.lang.reflect.Field; +import java.util.Random; + +import net.jpountz.util.Native; +import net.jpountz.util.Utils; + +/** + * Entry point to get {@link XXHash32} and {@link StreamingXXHash32} instances. + *

+ * This class has 3 instances

    + *
  • a {@link #nativeInstance() native} instance which is a JNI binding to + * the original xxHash C implementation. + *
  • a {@link #safeInstance() safe Java} instance which is a pure Java port + * of the original C library,
  • an {@link #unsafeInstance() unsafe Java} instance which is a Java port + * using the unofficial {@link sun.misc.Unsafe} API. + *
+ *

+ * Only the {@link #safeInstance() safe instance} is guaranteed to work on your + * JVM; as a consequence, it is advised to use the {@link #fastestInstance()} or + * {@link #fastestJavaInstance()} to pull a {@link XXHashFactory} instance. + *

+ * All methods from this class are very costly, so you should get an instance + * once, and then reuse it whenever possible. This is typically done by storing + * a {@link XXHashFactory} instance in a static field. + */ +public final class XXHashFactory { + + private static XXHashFactory instance(String impl) { + try { + return new XXHashFactory(impl); + } catch (Exception e) { + throw new AssertionError(e); + } + } + + private static XXHashFactory NATIVE_INSTANCE, + JAVA_UNSAFE_INSTANCE, + JAVA_SAFE_INSTANCE; + + /** Return a {@link XXHashFactory} that returns {@link XXHash32} instances that + * are native bindings to the original C API. + *

+ * Please note that this instance has some traps you should be aware of:

    + *
  1. Upon loading this instance, files will be written to the temporary + * directory of the system. Although these files are supposed to be deleted + * when the JVM exits, they might remain on systems that don't support + * removal of files being used such as Windows. + *
  2. The instance can only be loaded once per JVM. This can be a problem + * if your application uses multiple class loaders (such as most servlet + * containers): this instance will only be available to the children of the + * class loader which has loaded it. As a consequence, it is advised to + * either not use this instance in webapps or to put this library in the lib + * directory of your servlet container so that it is loaded by the system + * class loader. + *
+ */ + public static synchronized XXHashFactory nativeInstance() { + if (NATIVE_INSTANCE == null) { + NATIVE_INSTANCE = instance("JNI"); + } + return NATIVE_INSTANCE; + } + + /** Return a {@link XXHashFactory} that returns {@link XXHash32} instances that + * are written with Java's official API. */ + public static synchronized XXHashFactory safeInstance() { + if (JAVA_SAFE_INSTANCE == null) { + JAVA_SAFE_INSTANCE = instance("JavaSafe"); + } + return JAVA_SAFE_INSTANCE; + } + + /** Return a {@link XXHashFactory} that returns {@link XXHash32} instances that + * may use {@link sun.misc.Unsafe} to speed up hashing. */ + public static synchronized XXHashFactory unsafeInstance() { + if (JAVA_UNSAFE_INSTANCE == null) { + JAVA_UNSAFE_INSTANCE = instance("JavaUnsafe"); + } + return JAVA_UNSAFE_INSTANCE; + } + + /** + * Return the fastest available {@link XXHashFactory} instance which does not + * rely on JNI bindings. It first tries to load the + * {@link #unsafeInstance() unsafe instance}, and then the + * {@link #safeInstance() safe Java instance} if the JVM doesn't have a + * working {@link sun.misc.Unsafe}. + */ + public static XXHashFactory fastestJavaInstance() { + if (Utils.isUnalignedAccessAllowed()) { + try { + return unsafeInstance(); + } catch (Throwable t) { + return safeInstance(); + } + } else { + return safeInstance(); + } + } + + /** + * Return the fastest available {@link XXHashFactory} instance. If the class + * loader is the system class loader and if the + * {@link #nativeInstance() native instance} loads successfully, then the + * {@link #nativeInstance() native instance} is returned, otherwise the + * {@link #fastestJavaInstance() fastest Java instance} is returned. + *

+ * Please read {@link #nativeInstance() javadocs of nativeInstance()} before + * using this method. + */ + public static XXHashFactory fastestInstance() { + if (Native.isLoaded() + || Native.class.getClassLoader() == ClassLoader.getSystemClassLoader()) { + try { + return nativeInstance(); + } catch (Throwable t) { + return fastestJavaInstance(); + } + } else { + return fastestJavaInstance(); + } + } + + @SuppressWarnings("unchecked") + private static T classInstance(String cls) throws NoSuchFieldException, SecurityException, ClassNotFoundException, IllegalArgumentException, IllegalAccessException { + ClassLoader loader = XXHashFactory.class.getClassLoader(); + loader = loader == null ? ClassLoader.getSystemClassLoader() : loader; + final Class c = loader.loadClass(cls); + Field f = c.getField("INSTANCE"); + return (T) f.get(null); + } + + private final String impl; + private final XXHash32 hash32; + private final XXHash64 hash64; + private final StreamingXXHash32.Factory streamingHash32Factory; + private final StreamingXXHash64.Factory streamingHash64Factory; + + private XXHashFactory(String impl) throws ClassNotFoundException, NoSuchFieldException, SecurityException, IllegalArgumentException, IllegalAccessException { + this.impl = impl; + hash32 = classInstance("net.jpountz.xxhash.XXHash32" + impl); + streamingHash32Factory = classInstance("net.jpountz.xxhash.StreamingXXHash32" + impl + "$Factory"); + hash64 = classInstance("net.jpountz.xxhash.XXHash64" + impl); + streamingHash64Factory = classInstance("net.jpountz.xxhash.StreamingXXHash64" + impl + "$Factory"); + + // make sure it can run + final byte[] bytes = new byte[100]; + final Random random = new Random(); + random.nextBytes(bytes); + final int seed = random.nextInt(); + + final int h1 = hash32.hash(bytes, 0, bytes.length, seed); + final StreamingXXHash32 streamingHash32 = newStreamingHash32(seed); + streamingHash32.update(bytes, 0, bytes.length); + final int h2 = streamingHash32.getValue(); + final long h3 = hash64.hash(bytes, 0, bytes.length, seed); + final StreamingXXHash64 streamingHash64 = newStreamingHash64(seed); + streamingHash64.update(bytes, 0, bytes.length); + final long h4 = streamingHash64.getValue(); + if (h1 != h2) { + throw new AssertionError(); + } + if (h3 != h4) { + throw new AssertionError(); + } + } + + /** Return a {@link XXHash32} instance. */ + public XXHash32 hash32() { + return hash32; + } + + /** Return a {@link XXHash64} instance. */ + public XXHash64 hash64() { + return hash64; + } + + /** + * Return a new {@link StreamingXXHash32} instance. + */ + public StreamingXXHash32 newStreamingHash32(int seed) { + return streamingHash32Factory.newStreamingHash(seed); + } + + /** + * Return a new {@link StreamingXXHash64} instance. + */ + public StreamingXXHash64 newStreamingHash64(long seed) { + return streamingHash64Factory.newStreamingHash(seed); + } + + /** Prints the fastest instance. 
*/ + public static void main(String[] args) { + System.out.println("Fastest instance is " + fastestInstance()); + System.out.println("Fastest Java instance is " + fastestJavaInstance()); + } + + @Override + public String toString() { + return getClass().getSimpleName() + ":" + impl; + } + +} diff --git a/core/src/main/java/net/jpountz/xxhash/XXHashJNI.java b/core/src/main/java/net/jpountz/xxhash/XXHashJNI.java new file mode 100644 index 00000000..125e640b --- /dev/null +++ b/core/src/main/java/net/jpountz/xxhash/XXHashJNI.java @@ -0,0 +1,43 @@ +package net.jpountz.xxhash; + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.nio.ByteBuffer; + +import net.jpountz.util.Native; + +enum XXHashJNI { + ; + + static { + Native.load(); + init(); + } + + private static native void init(); + static native int XXH32(byte[] input, int offset, int len, int seed); + static native int XXH32BB(ByteBuffer input, int offset, int len, int seed); + static native long XXH32_init(int seed); + static native void XXH32_update(long state, byte[] input, int offset, int len); + static native int XXH32_digest(long state); + static native void XXH32_free(long state); + + static native long XXH64(byte[] input, int offset, int len, long seed); + static native long XXH64BB(ByteBuffer input, int offset, int len, long seed); + static native long XXH64_init(long seed); + static native void XXH64_update(long state, byte[] input, int offset, int len); + static native long XXH64_digest(long state); + static native void XXH64_free(long state); +} diff --git a/core/src/main/java/net/jpountz/xxhash/package.html b/core/src/main/java/net/jpountz/xxhash/package.html new file mode 100644 index 00000000..f595d25a --- /dev/null +++ b/core/src/main/java/net/jpountz/xxhash/package.html @@ -0,0 +1,65 @@ + + + + + + + +

xxhash hashing. This package supports both block hashing via +{@link net.jpountz.xxhash.XXHash32} and streaming hashing via +{@link net.jpountz.xxhash.StreamingXXHash32}. Have a look at +{@link net.jpountz.xxhash.XXHashFactory} to know how to get instances of these +interfaces.

+ +

Streaming hashing is a little slower but doesn't require loading the whole +stream into memory.

+ +

Sample block usage:

+ +
+    XXHashFactory factory = XXHashFactory.fastestInstance();
+
+    byte[] data = "12345345234572".getBytes("UTF-8");
+
+    XXHash32 hash32 = factory.hash32();
+    int seed = 0x9747b28c; // used to initialize the hash value, use whatever
+                           // value you want, but always the same
+    int hash = hash32.hash(data, 0, data.length, seed);
+
+ +
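The 64-bit hash added in the same package follows the same pattern; a minimal sketch using the XXHash64 API from this commit, reusing factory and data from the sample above:

+    XXHash64 hash64 = factory.hash64();
+    long seed64 = 0x9747b28cL; // again, any fixed value works as long as it stays the same
+    long hash64Value = hash64.hash(data, 0, data.length, seed64);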

Sample streaming usage:

+ +
+    XXHashFactory factory = XXHashFactory.fastestInstance();
+
+    byte[] data = "12345345234572".getBytes("UTF-8");
+    ByteArrayInputStream in = new ByteArrayInputStream(data);
+
+    int seed = 0x9747b28c; // used to initialize the hash value, use whatever
+                           // value you want, but always the same
+    StreamingXXHash32 hash32 = factory.newStreamingHash32(seed);
+    byte[] buf = new byte[8]; // for real-world usage, use a larger buffer, like 8192 bytes
+    for (;;) {
+      int read = in.read(buf);
+      if (read == -1) {
+        break;
+      }
+      hash32.update(buf, 0, read);
+    }
+    int hash = hash32.getValue();
+
+ + + diff --git a/core/src/main/resources/darwin/x86_64/liblz4-java.dylib b/core/src/main/resources/darwin/x86_64/liblz4-java.dylib new file mode 100644 index 00000000..9d5cc0e5 Binary files /dev/null and b/core/src/main/resources/darwin/x86_64/liblz4-java.dylib differ diff --git a/core/src/main/resources/linux/amd64/liblz4-java.so b/core/src/main/resources/linux/amd64/liblz4-java.so new file mode 100644 index 00000000..fa143b1e Binary files /dev/null and b/core/src/main/resources/linux/amd64/liblz4-java.so differ diff --git a/core/src/main/resources/linux/i386/liblz4-java.so b/core/src/main/resources/linux/i386/liblz4-java.so new file mode 100644 index 00000000..aa50fd15 Binary files /dev/null and b/core/src/main/resources/linux/i386/liblz4-java.so differ diff --git a/core/src/main/resources/win32/amd64/liblz4-java.so b/core/src/main/resources/win32/amd64/liblz4-java.so new file mode 100644 index 00000000..f8501c00 Binary files /dev/null and b/core/src/main/resources/win32/amd64/liblz4-java.so differ diff --git a/forge/.gradle/gradle.log b/forge/.gradle/gradle.log index 8fbb2e8b..e69de29b 100644 --- a/forge/.gradle/gradle.log +++ b/forge/.gradle/gradle.log @@ -1,88 +0,0 @@ -################################################# - ForgeGradle 2.1-SNAPSHOT-unknown - https://github.com/MinecraftForge/ForgeGradle -################################################# - Powered by MCP unknown - http://modcoderpack.com - by: Searge, ProfMobius, Fesh0r, - R4wk, ZeuX, IngisKahn, bspkrs -################################################# -Version string 'unspecified' does not match SemVer specification -You should try SemVer : http://semver.org/ -:core:compileJava -:forge:deobfCompileDummyTask -:forge:deobfProvidedDummyTask -:forge:extractDependencyATs SKIPPED -:forge:extractMcpData -:core:compileJava UP-TO-DATE -:core:processResources UP-TO-DATE -:core:classes UP-TO-DATE -:core:jar UP-TO-DATE -:forge:extractMcpData SKIPPED -:forge:extractMcpMappings SKIPPED -:forge:genSrgs SKIPPED -:forge:getVersionJson -:forge:downloadServer SKIPPED -:forge:splitServerJar SKIPPED -:forge:deobfMcMCP SKIPPED -:forge:sourceApiJava UP-TO-DATE -:forge:compileApiJava UP-TO-DATE -:forge:processApiResources UP-TO-DATE -:forge:apiClasses UP-TO-DATE -:forge:sourceMainJava UP-TO-DATE -:forge:compileJavaC:\Users\Jesse\.gradle\caches\modules-2\files-2.1\com.sk89q.worldedit\worldedit-forge-mc1.8.9\6.1.1\dffd7e1882eba256eb2132fe315682c1d26522b1\worldedit-forge-mc1.8.9-6.1.1.jar(com/sk89q/worldedit/forge/ForgeWorldEdit.class): warning: Cannot find annotation method 'modid()' in type 'Mod': class file for net.minecraftforge.fml.common.Mod not found -C:\Users\Jesse\.gradle\caches\modules-2\files-2.1\com.sk89q.worldedit\worldedit-forge-mc1.8.9\6.1.1\dffd7e1882eba256eb2132fe315682c1d26522b1\worldedit-forge-mc1.8.9-6.1.1.jar(com/sk89q/worldedit/forge/ForgeWorldEdit.class): warning: Cannot find annotation method 'name()' in type 'Mod' -C:\Users\Jesse\.gradle\caches\modules-2\files-2.1\com.sk89q.worldedit\worldedit-forge-mc1.8.9\6.1.1\dffd7e1882eba256eb2132fe315682c1d26522b1\worldedit-forge-mc1.8.9-6.1.1.jar(com/sk89q/worldedit/forge/ForgeWorldEdit.class): warning: Cannot find annotation method 'version()' in type 'Mod' -C:\Users\Jesse\.gradle\caches\modules-2\files-2.1\com.sk89q.worldedit\worldedit-forge-mc1.8.9\6.1.1\dffd7e1882eba256eb2132fe315682c1d26522b1\worldedit-forge-mc1.8.9-6.1.1.jar(com/sk89q/worldedit/forge/ForgeWorldEdit.class): warning: Cannot find annotation method 'acceptableRemoteVersions()' in type 'Mod' 
-C:\Users\Jesse\.gradle\caches\modules-2\files-2.1\com.sk89q.worldedit\worldedit-forge-mc1.8.9\6.1.1\dffd7e1882eba256eb2132fe315682c1d26522b1\worldedit-forge-mc1.8.9-6.1.1.jar(com/sk89q/worldedit/forge/ForgeWorldEdit.class): warning: Cannot find annotation method 'dependencies()' in type 'Mod' -C:\Users\Jesse\.gradle\caches\modules-2\files-2.1\com.sk89q.worldedit\worldedit-forge-mc1.8.9\6.1.1\dffd7e1882eba256eb2132fe315682c1d26522b1\worldedit-forge-mc1.8.9-6.1.1.jar(com/sk89q/worldedit/forge/ForgeWorldEdit.class): warning: Cannot find annotation method 'acceptedMinecraftVersions()' in type 'Mod' -C:\Users\Jesse\.gradle\caches\modules-2\files-2.1\com.sk89q.worldedit\worldedit-forge-mc1.8.9\6.1.1\dffd7e1882eba256eb2132fe315682c1d26522b1\worldedit-forge-mc1.8.9-6.1.1.jar(com/sk89q/worldedit/forge/ForgeWorldEdit.class): warning: Cannot find annotation method 'value()' in type 'Instance': class file for net.minecraftforge.fml.common.Mod$Instance not found -C:\Users\Jesse\.gradle\caches\modules-2\files-2.1\com.sk89q.worldedit\worldedit-forge-mc1.8.9\6.1.1\dffd7e1882eba256eb2132fe315682c1d26522b1\worldedit-forge-mc1.8.9-6.1.1.jar(com/sk89q/worldedit/forge/ForgeWorldEdit.class): warning: Cannot find annotation method 'serverSide()' in type 'SidedProxy': class file for net.minecraftforge.fml.common.SidedProxy not found -C:\Users\Jesse\.gradle\caches\modules-2\files-2.1\com.sk89q.worldedit\worldedit-forge-mc1.8.9\6.1.1\dffd7e1882eba256eb2132fe315682c1d26522b1\worldedit-forge-mc1.8.9-6.1.1.jar(com/sk89q/worldedit/forge/ForgeWorldEdit.class): warning: Cannot find annotation method 'clientSide()' in type 'SidedProxy' -C:\Users\Jesse\.gradle\caches\modules-2\files-2.1\com.sk89q.worldedit\worldedit-forge-mc1.8.9\6.1.1\dffd7e1882eba256eb2132fe315682c1d26522b1\worldedit-forge-mc1.8.9-6.1.1.jar(com/sk89q/worldedit/forge/ForgeWorldEdit.class): warning: Cannot find annotation method 'modid()' in type 'Mod': class file for net.minecraftforge.fml.common.Mod not found -C:\Users\Jesse\.gradle\caches\modules-2\files-2.1\com.sk89q.worldedit\worldedit-forge-mc1.8.9\6.1.1\dffd7e1882eba256eb2132fe315682c1d26522b1\worldedit-forge-mc1.8.9-6.1.1.jar(com/sk89q/worldedit/forge/ForgeWorldEdit.class): warning: Cannot find annotation method 'name()' in type 'Mod' -C:\Users\Jesse\.gradle\caches\modules-2\files-2.1\com.sk89q.worldedit\worldedit-forge-mc1.8.9\6.1.1\dffd7e1882eba256eb2132fe315682c1d26522b1\worldedit-forge-mc1.8.9-6.1.1.jar(com/sk89q/worldedit/forge/ForgeWorldEdit.class): warning: Cannot find annotation method 'version()' in type 'Mod' -C:\Users\Jesse\.gradle\caches\modules-2\files-2.1\com.sk89q.worldedit\worldedit-forge-mc1.8.9\6.1.1\dffd7e1882eba256eb2132fe315682c1d26522b1\worldedit-forge-mc1.8.9-6.1.1.jar(com/sk89q/worldedit/forge/ForgeWorldEdit.class): warning: Cannot find annotation method 'acceptableRemoteVersions()' in type 'Mod' -C:\Users\Jesse\.gradle\caches\modules-2\files-2.1\com.sk89q.worldedit\worldedit-forge-mc1.8.9\6.1.1\dffd7e1882eba256eb2132fe315682c1d26522b1\worldedit-forge-mc1.8.9-6.1.1.jar(com/sk89q/worldedit/forge/ForgeWorldEdit.class): warning: Cannot find annotation method 'dependencies()' in type 'Mod' -C:\Users\Jesse\.gradle\caches\modules-2\files-2.1\com.sk89q.worldedit\worldedit-forge-mc1.8.9\6.1.1\dffd7e1882eba256eb2132fe315682c1d26522b1\worldedit-forge-mc1.8.9-6.1.1.jar(com/sk89q/worldedit/forge/ForgeWorldEdit.class): warning: Cannot find annotation method 'acceptedMinecraftVersions()' in type 'Mod' 
-C:\Users\Jesse\.gradle\caches\modules-2\files-2.1\com.sk89q.worldedit\worldedit-forge-mc1.8.9\6.1.1\dffd7e1882eba256eb2132fe315682c1d26522b1\worldedit-forge-mc1.8.9-6.1.1.jar(com/sk89q/worldedit/forge/ForgeWorldEdit.class): warning: Cannot find annotation method 'value()' in type 'Instance': class file for net.minecraftforge.fml.common.Mod$Instance not found -C:\Users\Jesse\.gradle\caches\modules-2\files-2.1\com.sk89q.worldedit\worldedit-forge-mc1.8.9\6.1.1\dffd7e1882eba256eb2132fe315682c1d26522b1\worldedit-forge-mc1.8.9-6.1.1.jar(com/sk89q/worldedit/forge/ForgeWorldEdit.class): warning: Cannot find annotation method 'serverSide()' in type 'SidedProxy': class file for net.minecraftforge.fml.common.SidedProxy not found -C:\Users\Jesse\.gradle\caches\modules-2\files-2.1\com.sk89q.worldedit\worldedit-forge-mc1.8.9\6.1.1\dffd7e1882eba256eb2132fe315682c1d26522b1\worldedit-forge-mc1.8.9-6.1.1.jar(com/sk89q/worldedit/forge/ForgeWorldEdit.class): warning: Cannot find annotation method 'clientSide()' in type 'SidedProxy' -Note: Writing plugin metadata to file:/C:/Users/Jesse/Desktop/OTHER/GitHub/FastAsyncWorldEdit/forge/build/classes/main/mcmod.info -C:\Users\Jesse\.gradle\caches\modules-2\files-2.1\com.sk89q.worldedit\worldedit-forge-mc1.8.9\6.1.1\dffd7e1882eba256eb2132fe315682c1d26522b1\worldedit-forge-mc1.8.9-6.1.1.jar(com/sk89q/worldedit/forge/ForgeWorldEdit.class): warning: Cannot find annotation method 'modid()' in type 'Mod': class file for net.minecraftforge.fml.common.Mod not found -C:\Users\Jesse\.gradle\caches\modules-2\files-2.1\com.sk89q.worldedit\worldedit-forge-mc1.8.9\6.1.1\dffd7e1882eba256eb2132fe315682c1d26522b1\worldedit-forge-mc1.8.9-6.1.1.jar(com/sk89q/worldedit/forge/ForgeWorldEdit.class): warning: Cannot find annotation method 'name()' in type 'Mod' -C:\Users\Jesse\.gradle\caches\modules-2\files-2.1\com.sk89q.worldedit\worldedit-forge-mc1.8.9\6.1.1\dffd7e1882eba256eb2132fe315682c1d26522b1\worldedit-forge-mc1.8.9-6.1.1.jar(com/sk89q/worldedit/forge/ForgeWorldEdit.class): warning: Cannot find annotation method 'version()' in type 'Mod' -C:\Users\Jesse\.gradle\caches\modules-2\files-2.1\com.sk89q.worldedit\worldedit-forge-mc1.8.9\6.1.1\dffd7e1882eba256eb2132fe315682c1d26522b1\worldedit-forge-mc1.8.9-6.1.1.jar(com/sk89q/worldedit/forge/ForgeWorldEdit.class): warning: Cannot find annotation method 'acceptableRemoteVersions()' in type 'Mod' -C:\Users\Jesse\.gradle\caches\modules-2\files-2.1\com.sk89q.worldedit\worldedit-forge-mc1.8.9\6.1.1\dffd7e1882eba256eb2132fe315682c1d26522b1\worldedit-forge-mc1.8.9-6.1.1.jar(com/sk89q/worldedit/forge/ForgeWorldEdit.class): warning: Cannot find annotation method 'dependencies()' in type 'Mod' -C:\Users\Jesse\.gradle\caches\modules-2\files-2.1\com.sk89q.worldedit\worldedit-forge-mc1.8.9\6.1.1\dffd7e1882eba256eb2132fe315682c1d26522b1\worldedit-forge-mc1.8.9-6.1.1.jar(com/sk89q/worldedit/forge/ForgeWorldEdit.class): warning: Cannot find annotation method 'acceptedMinecraftVersions()' in type 'Mod' -C:\Users\Jesse\.gradle\caches\modules-2\files-2.1\com.sk89q.worldedit\worldedit-forge-mc1.8.9\6.1.1\dffd7e1882eba256eb2132fe315682c1d26522b1\worldedit-forge-mc1.8.9-6.1.1.jar(com/sk89q/worldedit/forge/ForgeWorldEdit.class): warning: Cannot find annotation method 'value()' in type 'Instance': class file for net.minecraftforge.fml.common.Mod$Instance not found 
-C:\Users\Jesse\.gradle\caches\modules-2\files-2.1\com.sk89q.worldedit\worldedit-forge-mc1.8.9\6.1.1\dffd7e1882eba256eb2132fe315682c1d26522b1\worldedit-forge-mc1.8.9-6.1.1.jar(com/sk89q/worldedit/forge/ForgeWorldEdit.class): warning: Cannot find annotation method 'serverSide()' in type 'SidedProxy': class file for net.minecraftforge.fml.common.SidedProxy not found -C:\Users\Jesse\.gradle\caches\modules-2\files-2.1\com.sk89q.worldedit\worldedit-forge-mc1.8.9\6.1.1\dffd7e1882eba256eb2132fe315682c1d26522b1\worldedit-forge-mc1.8.9-6.1.1.jar(com/sk89q/worldedit/forge/ForgeWorldEdit.class): warning: Cannot find annotation method 'clientSide()' in type 'SidedProxy' -Note: Some input files use or override a deprecated API. -Note: Recompile with -Xlint:deprecation for details. -Note: Some input files use unchecked or unsafe operations. -Note: Recompile with -Xlint:unchecked for details. -27 warnings - -:forge:processResources UP-TO-DATE -:forge:classes -:forge:jar -:forge:sourceTestJava UP-TO-DATE -:forge:compileTestJava UP-TO-DATE -:forge:processTestResources UP-TO-DATE -:forge:testClasses UP-TO-DATE -:forge:test UP-TO-DATE -:forge:reobfJar -:forge:shadowJar -:forge:reobfShadowJar -:forge:extractRangemapReplacedMain -C:\Users\Jesse\Desktop\OTHER\GitHub\FastAsyncWorldEdit\forge\build\sources\main\java -:forge:retromapReplacedMain UP-TO-DATE -:forge:sourceJar UP-TO-DATE -:forge:assemble -:forge:check UP-TO-DATE -:forge:build - -BUILD SUCCESSFUL - -Total time: 36.184 secs diff --git a/gradle.properties b/gradle.properties index 0ff1aa36..5a95e1a0 100644 --- a/gradle.properties +++ b/gradle.properties @@ -1,4 +1,4 @@ -org.gradle.daemon=true +org.gradle.daemon=false org.gradle.jvmargs=-Xmx2048m -XX:MaxPermSize=512m -XX:+HeapDumpOnOutOfMemoryError -Dfile.encoding=UTF-8 org.gradle.configureondemand=true org.gradle.parallel=true \ No newline at end of file