diff --git a/.travis.yml b/.travis.yml
index 023acd32..cc722345 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,11 +5,17 @@ python:
   - 3.5
   - 3.6
 install:
+  - pip install -U pip
   - pip install tox
   - pip install -r docs/requirements.txt
 script:
+  - pip install .[flake8]
+  - flake8 --ignore=E501 lz4
+  - flake8 --ignore=E501 tests
+  - flake8 --ignore=E501 setup.py
   - tox
-  - python setup.py install
+  - pip install .
+  - pip install .[tests]
   - make -C docs doctest html
 deploy:
   - provider: pypi
diff --git a/README.rst b/README.rst
index 4a7b51c6..fb5d2913 100644
--- a/README.rst
+++ b/README.rst
@@ -9,11 +9,11 @@ Status
    :target: https://travis-ci.org/python-lz4/python-lz4
    :alt: Build Status
 
-.. image:: https://ci.appveyor.com/api/projects/status/github/python-lz4/python-lz4?branch=master
+.. image:: https://ci.appveyor.com/api/projects/status/r2qvw9mlfo63lklo/branch/master?svg=true
    :target: https://ci.appveyor.com/project/jonathanunderwood/python-lz4
    :alt: Build Status Windows
 
-.. image:: https://readthedocs.org/projects/python-lz4/badge/?version=latest
+.. image:: https://readthedocs.org/projects/python-lz4/badge/?version=stable
    :target: https://readthedocs.org/projects/python-lz4/
    :alt: Documentation
 
@@ -40,7 +40,7 @@ thread safe. An extensive test suite is included.
 Documenation
 ============
 
-.. image:: https://readthedocs.org/projects/python-lz4/badge/?version=latest
+.. image:: https://readthedocs.org/projects/python-lz4/badge/?version=stable
    :target: https://readthedocs.org/projects/python-lz4/
    :alt: Documentation
 
diff --git a/docs/conf.py b/docs/conf.py
index f752fa81..51b6576f 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -49,17 +49,15 @@
 
 # General information about the project.
 project = u'python-lz4'
-copyright = u'2016, python-lz4 developers'
+copyright = u'2016, 2017, 2018 python-lz4 developers'
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 #
-# The short X.Y version.
-import lz4
-version = lz4.__version__
-# The full version, including alpha/beta/rc tags.
-release = lz4.__version__
+from pkg_resources import get_distribution
+release = get_distribution('lz4').version
+version = release
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
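The docs/conf.py hunk above switches Sphinx from importing lz4 (which requires the compiled extension to be built and importable) to reading the version from the installed package metadata. A minimal standalone sketch of that pattern, combining it with the DistributionNotFound fallback that the lz4/__init__.py hunk below introduces; note the 'unknown' placeholder is invented for illustration, whereas the patch itself simply passes:

    from pkg_resources import DistributionNotFound, get_distribution

    try:
        # setuptools records the version at install time, so this works
        # without importing (or even building) the package itself.
        __version__ = get_distribution('lz4').version
    except DistributionNotFound:
        # No installed metadata, e.g. running from a bare source checkout.
        __version__ = 'unknown'  # placeholder; the real patch just passes

One consequence of this approach is that pkg_resources reports the version of the installed distribution, so a stale install sitting alongside a newer source checkout will be reported verbatim.
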
diff --git a/docs/lz4.rst b/docs/lz4.rst
index be058f5a..fbf0ec62 100644
--- a/docs/lz4.rst
+++ b/docs/lz4.rst
@@ -12,5 +12,6 @@ Contents
    :members: library_version_number,
              library_version_string,
-             version
+             __version__
+
diff --git a/lz4/__init__.py b/lz4/__init__.py
index 69da37eb..15701bb8 100644
--- a/lz4/__init__.py
+++ b/lz4/__init__.py
@@ -1,8 +1,14 @@
-# Package version info, generated on install
-from .version import version as __version__
+from pkg_resources import get_distribution, DistributionNotFound
+try:
+    __version__ = get_distribution(__name__).version
+except DistributionNotFound:
+    # package is not installed
+    pass
+
 VERSION = __version__
 
-from ._version import (
+
+from ._version import (  # noqa: F401
     library_version_number,
     library_version_string,
 )
diff --git a/lz4/block/__init__.py b/lz4/block/__init__.py
index b1b13f9d..2f4ff026 100644
--- a/lz4/block/__init__.py
+++ b/lz4/block/__init__.py
@@ -1 +1 @@
-from ._block import compress, decompress
+from ._block import compress, decompress  # noqa: F401
diff --git a/lz4/block/_block.c b/lz4/block/_block.c
index b4b5da2f..37d02dee 100644
--- a/lz4/block/_block.c
+++ b/lz4/block/_block.c
@@ -80,10 +80,6 @@ load_le32 (const char *c)
   return d[0] | (d[1] << 8) | (d[2] << 16) | (d[3] << 24);
 }
 
-#ifdef inline
-#undef inline
-#endif
-
 static const size_t hdr_size = sizeof (uint32_t);
 
 typedef enum
@@ -93,6 +89,40 @@ typedef enum
   HIGH_COMPRESSION
 } compression_type;
 
+static inline int
+lz4_compress_generic (int comp, char* source, char* dest, int source_size, int dest_size,
+                      char* dict, int dict_size, int acceleration, int compression)
+{
+  if (comp != HIGH_COMPRESSION)
+    {
+      LZ4_stream_t lz4_state;
+      LZ4_resetStream (&lz4_state);
+      if (dict)
+        {
+          LZ4_loadDict (&lz4_state, dict, dict_size);
+        }
+      if (comp != FAST)
+        {
+          acceleration = 1;
+        }
+      return LZ4_compress_fast_continue (&lz4_state, source, dest, source_size, dest_size, acceleration);
+    }
+  else
+    {
+      LZ4_streamHC_t lz4_state;
+      LZ4_resetStreamHC (&lz4_state, compression);
+      if (dict)
+        {
+          LZ4_loadDictHC (&lz4_state, dict, dict_size);
+        }
+      return LZ4_compress_HC_continue (&lz4_state, source, dest, source_size, dest_size);
+    }
+}
+
+#ifdef inline
+#undef inline
+#endif
+
 static PyObject *
 compress (PyObject * Py_UNUSED (self), PyObject * args, PyObject * kwargs)
 {
@@ -104,10 +134,11 @@ compress (PyObject * Py_UNUSED (self), PyObject * args, PyObject * kwargs)
   PyObject *py_dest;
   char *dest, *dest_start;
   compression_type comp;
-  size_t output_size;
+  int output_size;
   Py_buffer source;
-  size_t source_size;
+  int source_size;
   int return_bytearray = 0;
+  Py_buffer dict = { NULL, NULL };
   static char *argnames[] = {
     "source",
     "mode",
@@ -115,40 +146,49 @@ compress (PyObject * Py_UNUSED (self), PyObject * args, PyObject * kwargs)
     "acceleration",
     "compression",
     "return_bytearray",
+    "dict",
     NULL
   };
 
 #if IS_PY3
-  if (!PyArg_ParseTupleAndKeywords (args, kwargs, "y*|spiip", argnames,
+  if (!PyArg_ParseTupleAndKeywords (args, kwargs, "y*|spiipz*", argnames,
                                     &source, &mode, &store_size, &acceleration, &compression,
-                                    &return_bytearray))
+                                    &return_bytearray, &dict))
     {
       return NULL;
     }
 #else
-  if (!PyArg_ParseTupleAndKeywords (args, kwargs, "s*|siiii", argnames,
+  if (!PyArg_ParseTupleAndKeywords (args, kwargs, "s*|siiiiz*", argnames,
                                     &source, &mode, &store_size, &acceleration, &compression,
-                                    &return_bytearray))
+                                    &return_bytearray, &dict))
     {
       return NULL;
     }
 #endif
 
-  source_size = (size_t) source.len;
+  if (source.len > INT_MAX)
+    {
+      PyBuffer_Release(&source);
+      PyBuffer_Release(&dict);
+
PyErr_Format(PyExc_OverflowError, + "Input too large for LZ4 API"); + return NULL; + } - /* We're using 4 bytes for the size of the content in the header. This means - we can store a size as large as the maximum value of an unsinged int. */ - if (store_size && source_size > UINT_MAX) + if (dict.len > INT_MAX) { PyBuffer_Release(&source); + PyBuffer_Release(&dict); PyErr_Format(PyExc_OverflowError, - "Input too large for storing size in 4 byte header"); + "Dictionary too large for LZ4 API"); return NULL; } + source_size = source.len; + if (!strncmp (mode, "default", sizeof ("default"))) { comp = DEFAULT; @@ -164,9 +204,10 @@ compress (PyObject * Py_UNUSED (self), PyObject * args, PyObject * kwargs) else { PyBuffer_Release(&source); + PyBuffer_Release(&dict); PyErr_Format (PyExc_ValueError, - "Invalid mode argument: %s. Must be one of: standard, fast, high_compression", - mode); + "Invalid mode argument: %s. Must be one of: standard, fast, high_compression", + mode); return NULL; } @@ -199,25 +240,14 @@ compress (PyObject * Py_UNUSED (self), PyObject * args, PyObject * kwargs) dest_start = dest; } - switch (comp) - { - case DEFAULT: - output_size = LZ4_compress_default (source.buf, dest_start, source_size, - dest_size); - break; - case FAST: - output_size = LZ4_compress_fast (source.buf, dest_start, source_size, - dest_size, acceleration); - break; - case HIGH_COMPRESSION: - output_size = LZ4_compress_HC (source.buf, dest_start, source_size, - dest_size, compression); - break; - } + output_size = lz4_compress_generic (comp, source.buf, dest_start, source_size, + dest_size, dict.buf, dict.len, acceleration, + compression); Py_END_ALLOW_THREADS PyBuffer_Release(&source); + PyBuffer_Release(&dict); if (output_size <= 0) { @@ -258,34 +288,55 @@ decompress (PyObject * Py_UNUSED (self), PyObject * args, PyObject * kwargs) size_t source_size; PyObject *py_dest; char *dest; - size_t output_size; + int output_size; size_t dest_size; int uncompressed_size = -1; int return_bytearray = 0; + Py_buffer dict = { NULL, NULL }; static char *argnames[] = { "source", "uncompressed_size", "return_bytearray", + "dict", NULL }; #if IS_PY3 - if (!PyArg_ParseTupleAndKeywords (args, kwargs, "y*|ip", argnames, + if (!PyArg_ParseTupleAndKeywords (args, kwargs, "y*|ipz*", argnames, &source, &uncompressed_size, - &return_bytearray)) + &return_bytearray, &dict)) { return NULL; } #else - if (!PyArg_ParseTupleAndKeywords (args, kwargs, "s*|ii", argnames, + if (!PyArg_ParseTupleAndKeywords (args, kwargs, "s*|iiz*", argnames, &source, &uncompressed_size, - &return_bytearray)) + &return_bytearray, &dict)) { return NULL; } #endif + + if (source.len > INT_MAX) + { + PyBuffer_Release(&source); + PyBuffer_Release(&dict); + PyErr_Format(PyExc_OverflowError, + "Input too large for LZ4 API"); + return NULL; + } + + if (dict.len > INT_MAX) + { + PyBuffer_Release(&source); + PyBuffer_Release(&dict); + PyErr_Format(PyExc_OverflowError, + "Dictionary too large for LZ4 API"); + return NULL; + } + source_start = (const char *) source.buf; - source_size = (size_t) source.len; + source_size = source.len; if (uncompressed_size >= 0) { @@ -296,6 +347,7 @@ decompress (PyObject * Py_UNUSED (self), PyObject * args, PyObject * kwargs) if (source_size < hdr_size) { PyBuffer_Release(&source); + PyBuffer_Release(&dict); PyErr_SetString (PyExc_ValueError, "Input source data size too small"); return NULL; } @@ -307,6 +359,7 @@ decompress (PyObject * Py_UNUSED (self), PyObject * args, PyObject * kwargs) if (dest_size < 0 || dest_size > PY_SSIZE_T_MAX) 
{ PyBuffer_Release(&source); + PyBuffer_Release(&dict); PyErr_Format (PyExc_ValueError, "Invalid size in header: 0x%zu", dest_size); return NULL; @@ -321,15 +374,17 @@ decompress (PyObject * Py_UNUSED (self), PyObject * args, PyObject * kwargs) Py_BEGIN_ALLOW_THREADS output_size = - LZ4_decompress_safe (source_start, dest, source_size, dest_size); + LZ4_decompress_safe_usingDict (source_start, dest, source_size, dest_size, + dict.buf, dict.len); Py_END_ALLOW_THREADS PyBuffer_Release(&source); + PyBuffer_Release(&dict); if (output_size < 0) { - PyErr_Format (PyExc_ValueError, "Corrupt input at byte %zu", -output_size); + PyErr_Format (PyExc_ValueError, "Corrupt input at byte %u", -output_size); PyMem_Free (dest); return NULL; } @@ -337,7 +392,7 @@ decompress (PyObject * Py_UNUSED (self), PyObject * args, PyObject * kwargs) { /* Better to fail explicitly than to allow fishy data to pass through. */ PyErr_Format (PyExc_ValueError, - "Decompressor wrote %zu bytes, but %zu bytes expected from header", + "Decompressor wrote %u bytes, but %zu bytes expected from header", output_size, dest_size); PyMem_Free (dest); return NULL; @@ -389,14 +444,16 @@ PyDoc_STRVAR(compress__doc, " block.\n" \ " return_bytearray (bool): If ``False`` (the default) then the function\n" \ " will return a bytes object. If ``True``, then the function will\n" \ - " return a bytearray object.\n\n" \ + " return a bytearray object.\n\n" \ + " dict (str, bytes or buffer-compatible object): If specified, perform\n" \ + " compression using this initial dictionary.\n" \ "Returns:\n" \ " bytes or bytearray: Compressed data.\n"); PyDoc_STRVAR(decompress__doc, - "decompress(source, uncompressed_size=-1, return_bytearray=False)\n\n" \ + "decompress(source, uncompressed_size=-1, return_bytearray=False)\n\n" \ "Decompress source, returning the uncompressed data as a string.\n" \ - "Raises an exception if any error occurs.\n" \ + "Raises an exception if any error occurs.\n" \ "\n" \ "Args:\n" \ " source (str, bytes or buffer-compatible object): Data to decompress.\n" \ @@ -408,6 +465,8 @@ PyDoc_STRVAR(decompress__doc, " return_bytearray (bool): If ``False`` (the default) then the function\n" \ " will return a bytes object. 
If ``True``, then the function will\n" \ " return a bytearray object.\n\n" \ + " dict (str, bytes or buffer-compatible object): If specified, perform\n" \ + " decompression using this initial dictionary.\n" \ "Returns:\n" \ " bytes or bytearray: Decompressed data.\n"); diff --git a/lz4/frame/__init__.py b/lz4/frame/__init__.py index 28e8e45a..5fa03ce6 100644 --- a/lz4/frame/__init__.py +++ b/lz4/frame/__init__.py @@ -396,7 +396,7 @@ def decompress(self, data, max_length=-1): # noqa: F811 data, max_length=max_length, return_bytearray=self._return_bytearray, - ) + ) if bytes_read < len(data): if eoframe: diff --git a/lz4/frame/_compression.py b/lz4/frame/_compression.py index c6977449..7773ee44 100644 --- a/lz4/frame/_compression.py +++ b/lz4/frame/_compression.py @@ -4,6 +4,7 @@ """Internal classes used by the gzip, lzma and bz2 modules""" +import sys import io # Ensure super has Python 3 semantics even on Python 2 from builtins import super @@ -156,3 +157,13 @@ def seek(self, offset, whence=io.SEEK_SET): def tell(self): """Return the current file position.""" return self._pos + + +if sys.version_info < (3, 3): + # memoryview.cast is added in 3.3 + def readinto(self, b): + data = self.read(len(b)) + b[:len(data)] = data + return len(data) + + DecompressReader.readinto = readinto diff --git a/lz4libs/lz4.c b/lz4libs/lz4.c index 213b0851..e51a3e0a 100644 --- a/lz4libs/lz4.c +++ b/lz4libs/lz4.c @@ -69,9 +69,11 @@ * Prefer these methods in priority order (0 > 1 > 2) */ #ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */ -# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) +# if defined(__GNUC__) && \ + ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \ + || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) # define LZ4_FORCE_MEMORY_ACCESS 2 -# elif defined(__INTEL_COMPILER) || defined(__GNUC__) +# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__) # define LZ4_FORCE_MEMORY_ACCESS 1 # endif #endif @@ -80,7 +82,7 @@ * LZ4_FORCE_SW_BITCOUNT * Define this parameter if your target system or compiler does not support hardware bit count */ -#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for Windows CE does not support Hardware bit count */ +#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for WinCE doesn't support Hardware bit count */ # define LZ4_FORCE_SW_BITCOUNT #endif @@ -89,6 +91,8 @@ /*-************************************ * Dependency **************************************/ +#define LZ4_STATIC_LINKING_ONLY +#define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */ #include "lz4.h" /* see also "memory routines" below */ @@ -146,18 +150,23 @@ # define expect(expr,value) (expr) #endif +#ifndef likely #define likely(expr) expect((expr) != 0, 1) +#endif +#ifndef unlikely #define unlikely(expr) expect((expr) != 0, 0) +#endif /*-************************************ * Memory routines **************************************/ #include /* malloc, calloc, free */ -#define ALLOCATOR(n,s) calloc(n,s) -#define FREEMEM free +#define ALLOC(s) malloc(s) +#define ALLOC_AND_ZERO(s) calloc(1,s) +#define FREEMEM(p) free(p) #include /* memset, memcpy */ -#define MEM_INIT memset +#define MEM_INIT(p,v,s) memset((p),(v),(s)) /*-************************************ @@ -270,11 +279,6 @@ static void LZ4_writeLE16(void* 
memPtr, U16 value) } } -static void LZ4_copy8(void* dst, const void* src) -{ - memcpy(dst,src,8); -} - /* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */ LZ4_FORCE_O2_INLINE_GCC_PPC64LE void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd) @@ -283,7 +287,7 @@ void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd) const BYTE* s = (const BYTE*)srcPtr; BYTE* const e = (BYTE*)dstEnd; - do { LZ4_copy8(d,s); d+=8; s+=8; } while (d=2) # include @@ -450,10 +454,33 @@ static const U32 LZ4_skipTrigger = 6; /* Increase this value ==> compression ru /*-************************************ * Local Structures and types **************************************/ -typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive; -typedef enum { byPtr, byU32, byU16 } tableType_t; - -typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive; +typedef enum { notLimited = 0, limitedOutput = 1, fillOutput = 2 } limitedOutput_directive; +typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t; + +/** + * This enum distinguishes several different modes of accessing previous + * content in the stream. + * + * - noDict : There is no preceding content. + * - withPrefix64k : Table entries up to ctx->dictSize before the current blob + * blob being compressed are valid and refer to the preceding + * content (of length ctx->dictSize), which is available + * contiguously preceding in memory the content currently + * being compressed. + * - usingExtDict : Like withPrefix64k, but the preceding content is somewhere + * else in memory, starting at ctx->dictionary with length + * ctx->dictSize. + * - usingDictCtx : Like usingExtDict, but everything concerning the preceding + * content is in a separate context, pointed to by + * ctx->dictCtx. ctx->dictionary, ctx->dictSize, and table + * entries in the current context that refer to positions + * preceding the beginning of the current compression are + * ignored. Instead, ctx->dictCtx->dictionary and ctx->dictCtx + * ->dictSize describe the location and size of the preceding + * content, and matches are found by looking in the ctx + * ->dictCtx->hashTable. + */ +typedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive; typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive; typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive; @@ -497,10 +524,25 @@ LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tab return LZ4_hash4(LZ4_read32(p), tableType); } -static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t const tableType, const BYTE* srcBase) +static void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType) +{ + switch (tableType) + { + default: /* fallthrough */ + case clearedTable: /* fallthrough */ + case byPtr: { /* illegal! */ assert(0); return; } + case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; } + case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; } + } +} + +static void LZ4_putPositionOnHash(const BYTE* p, U32 h, + void* tableBase, tableType_t const tableType, + const BYTE* srcBase) { switch (tableType) { + case clearedTable: { /* illegal! 
*/ assert(0); return; } case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; } case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; } case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; } @@ -513,19 +555,81 @@ LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_ LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase); } -static const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase) +/* LZ4_getIndexOnHash() : + * Index of match position registered in hash table. + * hash position must be calculated by using base+index, or dictBase+index. + * Assumption 1 : only valid if tableType == byU32 or byU16. + * Assumption 2 : h is presumed valid (within limits of hash table) + */ +static U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType) +{ + LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2); + if (tableType == byU32) { + const U32* const hashTable = (const U32*) tableBase; + assert(h < (1U << (LZ4_MEMORY_USAGE-2))); + return hashTable[h]; + } + if (tableType == byU16) { + const U16* const hashTable = (const U16*) tableBase; + assert(h < (1U << (LZ4_MEMORY_USAGE-1))); + return hashTable[h]; + } + assert(0); return 0; /* forbidden case */ +} + +static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType, const BYTE* srcBase) { - if (tableType == byPtr) { const BYTE** hashTable = (const BYTE**) tableBase; return hashTable[h]; } - if (tableType == byU32) { const U32* const hashTable = (U32*) tableBase; return hashTable[h] + srcBase; } - { const U16* const hashTable = (U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */ + if (tableType == byPtr) { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; } + if (tableType == byU32) { const U32* const hashTable = (const U32*) tableBase; return hashTable[h] + srcBase; } + { const U16* const hashTable = (const U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */ } -LZ4_FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase) +LZ4_FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, + const void* tableBase, tableType_t tableType, + const BYTE* srcBase) { U32 const h = LZ4_hashPosition(p, tableType); return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase); } +LZ4_FORCE_INLINE void LZ4_prepareTable( + LZ4_stream_t_internal* const cctx, + const int inputSize, + const tableType_t tableType) { + /* If the table hasn't been used, it's guaranteed to be zeroed out, and is + * therefore safe to use no matter what mode we're in. Otherwise, we figure + * out if it's safe to leave as is or whether it needs to be reset. + */ + if (cctx->tableType != clearedTable) { + if (cctx->tableType != tableType + || (tableType == byU16 && cctx->currentOffset + inputSize >= 0xFFFFU) + || (tableType == byU32 && cctx->currentOffset > 1 GB) + || tableType == byPtr + || inputSize >= 4 KB) + { + DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx); + MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE); + cctx->currentOffset = 0; + cctx->tableType = clearedTable; + } else { + DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)"); + } + } + + /* Adding a gap, so all previous entries are > MAX_DISTANCE back, is faster + * than compressing without a gap. 
However, compressing with + * currentOffset == 0 is faster still, so we preserve that case. + */ + if (cctx->currentOffset != 0 && tableType == byU32) { + DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset"); + cctx->currentOffset += 64 KB; + } + + /* Finally, clear history */ + cctx->dictCtx = NULL; + cctx->dictionary = NULL; + cctx->dictSize = 0; +} /** LZ4_compress_generic() : inlined, to ensure branches are decided at compilation time */ @@ -534,50 +638,70 @@ LZ4_FORCE_INLINE int LZ4_compress_generic( const char* const source, char* const dest, const int inputSize, + int *inputConsumed, /* only written when outputLimited == fillOutput */ const int maxOutputSize, const limitedOutput_directive outputLimited, const tableType_t tableType, - const dict_directive dict, + const dict_directive dictDirective, const dictIssue_directive dictIssue, const U32 acceleration) { const BYTE* ip = (const BYTE*) source; - const BYTE* base; + + U32 const startIndex = cctx->currentOffset; + const BYTE* base = (const BYTE*) source - startIndex; const BYTE* lowLimit; - const BYTE* const lowRefLimit = ip - cctx->dictSize; - const BYTE* const dictionary = cctx->dictionary; - const BYTE* const dictEnd = dictionary + cctx->dictSize; - const ptrdiff_t dictDelta = dictEnd - (const BYTE*)source; + + const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx; + const BYTE* const dictionary = + dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary; + const U32 dictSize = + dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize; + const U32 dictDelta = (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0; /* make indexes in dictCtx comparable with index in current context */ + + int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx); + U32 const prefixIdxLimit = startIndex - dictSize; /* used when dictDirective == dictSmall */ + const BYTE* const dictEnd = dictionary + dictSize; const BYTE* anchor = (const BYTE*) source; const BYTE* const iend = ip + inputSize; - const BYTE* const mflimit = iend - MFLIMIT; + const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1; const BYTE* const matchlimit = iend - LASTLITERALS; + /* the dictCtx currentOffset is indexed on the start of the dictionary, + * while a dictionary in the current context precedes the currentOffset */ + const BYTE* dictBase = dictDirective == usingDictCtx ? 
+ dictionary + dictSize - dictCtx->currentOffset : + dictionary + dictSize - startIndex; + BYTE* op = (BYTE*) dest; BYTE* const olimit = op + maxOutputSize; + U32 offset = 0; U32 forwardH; + DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, tableType=%u", inputSize, tableType); /* Init conditions */ + if (outputLimited == fillOutput && maxOutputSize < 1) return 0; /* Impossible to store anything */ if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported inputSize, too large (or negative) */ - switch(dict) - { - case noDict: - default: - base = (const BYTE*)source; - lowLimit = (const BYTE*)source; - break; - case withPrefix64k: - base = (const BYTE*)source - cctx->currentOffset; - lowLimit = (const BYTE*)source - cctx->dictSize; - break; - case usingExtDict: - base = (const BYTE*)source - cctx->currentOffset; - lowLimit = (const BYTE*)source; - break; + if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) return 0; /* Size too large (not within 64K limit) */ + if (tableType==byPtr) assert(dictDirective==noDict); /* only supported use case with byPtr */ + assert(acceleration >= 1); + + lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0); + + /* Update context state */ + if (dictDirective == usingDictCtx) { + /* Subsequent linked blocks can't use the dictionary. */ + /* Instead, they use the block we just compressed. */ + cctx->dictCtx = NULL; + cctx->dictSize = (U32)inputSize; + } else { + cctx->dictSize += (U32)inputSize; } - if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) return 0; /* Size too large (not within 64K limit) */ - if (inputSizecurrentOffset += (U32)inputSize; + cctx->tableType = tableType; + + if (inputSizehashTable, tableType, base); @@ -585,12 +709,12 @@ LZ4_FORCE_INLINE int LZ4_compress_generic( /* Main Loop */ for ( ; ; ) { - ptrdiff_t refDelta = 0; const BYTE* match; BYTE* token; /* Find a match */ - { const BYTE* forwardIp = ip; + if (tableType == byPtr) { + const BYTE* forwardIp = ip; unsigned step = 1; unsigned searchMatchNb = acceleration << LZ4_skipTrigger; do { @@ -599,34 +723,89 @@ LZ4_FORCE_INLINE int LZ4_compress_generic( forwardIp += step; step = (searchMatchNb++ >> LZ4_skipTrigger); - if (unlikely(forwardIp > mflimit)) goto _last_literals; + if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals; + assert(ip < mflimitPlusOne); match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base); - if (dict==usingExtDict) { - if (match < (const BYTE*)source) { - refDelta = dictDelta; + forwardH = LZ4_hashPosition(forwardIp, tableType); + LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base); + + } while ( (match+MAX_DISTANCE < ip) + || (LZ4_read32(match) != LZ4_read32(ip)) ); + + } else { /* byU32, byU16 */ + + const BYTE* forwardIp = ip; + unsigned step = 1; + unsigned searchMatchNb = acceleration << LZ4_skipTrigger; + do { + U32 const h = forwardH; + U32 const current = (U32)(forwardIp - base); + U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType); + assert(matchIndex <= current); + assert(forwardIp - base < (ptrdiff_t)(2 GB - 1)); + ip = forwardIp; + forwardIp += step; + step = (searchMatchNb++ >> LZ4_skipTrigger); + + if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals; + assert(ip < mflimitPlusOne); + + if (dictDirective == usingDictCtx) { + if (matchIndex < startIndex) { + /* there was no match, try the dictionary */ + assert(tableType == byU32); + matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32); + match = dictBase + matchIndex; + matchIndex += 
dictDelta; /* make dictCtx index comparable with current context */ + lowLimit = dictionary; + } else { + match = base + matchIndex; + lowLimit = (const BYTE*)source; + } + } else if (dictDirective==usingExtDict) { + if (matchIndex < startIndex) { + DEBUGLOG(7, "extDict candidate: matchIndex=%5u < startIndex=%5u", matchIndex, startIndex); + assert(startIndex - matchIndex >= MINMATCH); + match = dictBase + matchIndex; lowLimit = dictionary; } else { - refDelta = 0; + match = base + matchIndex; lowLimit = (const BYTE*)source; - } } + } + } else { /* single continuous memory segment */ + match = base + matchIndex; + } forwardH = LZ4_hashPosition(forwardIp, tableType); - LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base); + LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType); + + if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) continue; /* match outside of valid area */ + assert(matchIndex < current); + if ((tableType != byU16) && (matchIndex+MAX_DISTANCE < current)) continue; /* too far */ + if (tableType == byU16) assert((current - matchIndex) <= MAX_DISTANCE); /* too_far presumed impossible with byU16 */ - } while ( ((dictIssue==dictSmall) ? (match < lowRefLimit) : 0) - || ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip)) - || (LZ4_read32(match+refDelta) != LZ4_read32(ip)) ); + if (LZ4_read32(match) == LZ4_read32(ip)) { + if (maybe_extMem) offset = current - matchIndex; + break; /* match found */ + } + + } while(1); } /* Catch up */ - while (((ip>anchor) & (match+refDelta > lowLimit)) && (unlikely(ip[-1]==match[refDelta-1]))) { ip--; match--; } + while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; } /* Encode Literals */ { unsigned const litLength = (unsigned)(ip - anchor); token = op++; - if ((outputLimited) && /* Check output buffer overflow */ + if ((outputLimited == limitedOutput) && /* Check output buffer overflow */ (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit))) return 0; + if ((outputLimited == fillOutput) && + (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) { + op--; + goto _last_literals; + } if (litLength >= RUN_MASK) { int len = (int)litLength-RUN_MASK; *token = (RUN_MASK< olimit)) { + /* the match was too close to the end, rewind and go to last literals */ + op = token; + goto _last_literals; + } + /* Encode Offset */ - LZ4_writeLE16(op, (U16)(ip-match)); op+=2; + if (maybe_extMem) { /* static test */ + DEBUGLOG(6, " with offset=%u (ext if > %i)", offset, (int)(ip - (const BYTE*)source)); + assert(offset <= MAX_DISTANCE && offset > 0); + LZ4_writeLE16(op, (U16)offset); op+=2; + } else { + DEBUGLOG(6, " with offset=%u (same segment)", (U32)(ip - match)); + assert(ip-match <= MAX_DISTANCE); + LZ4_writeLE16(op, (U16)(ip - match)); op+=2; + } /* Encode MatchLength */ { unsigned matchCode; - if ((dict==usingExtDict) && (lowLimit==dictionary)) { - const BYTE* limit; - match += refDelta; - limit = ip + (dictEnd-match); + if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx) + && (lowLimit==dictionary) /* match within extDict */ ) { + const BYTE* limit = ip + (dictEnd-match); + assert(dictEnd > match); if (limit > matchlimit) limit = matchlimit; matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit); ip += MINMATCH + matchCode; if (ip==limit) { - unsigned const more = LZ4_count(ip, (const BYTE*)source, 
matchlimit); + unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit); matchCode += more; ip += more; } + DEBUGLOG(6, " with matchLength=%u starting in extDict", matchCode+MINMATCH); } else { matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit); ip += MINMATCH + matchCode; + DEBUGLOG(6, " with matchLength=%u", matchCode+MINMATCH); } - if ( outputLimited && /* Check output buffer overflow */ - (unlikely(op + (1 + LASTLITERALS) + (matchCode>>8) > olimit)) ) - return 0; + if ((outputLimited) && /* Check output buffer overflow */ + (unlikely(op + (1 + LASTLITERALS) + (matchCode>>8) > olimit)) ) { + if (outputLimited == limitedOutput) + return 0; + if (outputLimited == fillOutput) { + /* Match description too long : reduce it */ + U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 2 - 1 - LASTLITERALS) * 255; + ip -= matchCode - newMatchCode; + matchCode = newMatchCode; + } + } if (matchCode >= ML_MASK) { *token += ML_MASK; matchCode -= ML_MASK; @@ -685,37 +899,80 @@ LZ4_FORCE_INLINE int LZ4_compress_generic( anchor = ip; /* Test end of chunk */ - if (ip > mflimit) break; + if (ip >= mflimitPlusOne) break; /* Fill table */ LZ4_putPosition(ip-2, cctx->hashTable, tableType, base); /* Test next position */ - match = LZ4_getPosition(ip, cctx->hashTable, tableType, base); - if (dict==usingExtDict) { - if (match < (const BYTE*)source) { - refDelta = dictDelta; - lowLimit = dictionary; - } else { - refDelta = 0; - lowLimit = (const BYTE*)source; - } } - LZ4_putPosition(ip, cctx->hashTable, tableType, base); - if ( ((dictIssue==dictSmall) ? (match>=lowRefLimit) : 1) - && (match+MAX_DISTANCE>=ip) - && (LZ4_read32(match+refDelta)==LZ4_read32(ip)) ) - { token=op++; *token=0; goto _next_match; } + if (tableType == byPtr) { + + match = LZ4_getPosition(ip, cctx->hashTable, tableType, base); + LZ4_putPosition(ip, cctx->hashTable, tableType, base); + if ( (match+MAX_DISTANCE >= ip) + && (LZ4_read32(match) == LZ4_read32(ip)) ) + { token=op++; *token=0; goto _next_match; } + + } else { /* byU32, byU16 */ + + U32 const h = LZ4_hashPosition(ip, tableType); + U32 const current = (U32)(ip-base); + U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType); + assert(matchIndex < current); + if (dictDirective == usingDictCtx) { + if (matchIndex < startIndex) { + /* there was no match, try the dictionary */ + matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32); + match = dictBase + matchIndex; + lowLimit = dictionary; /* required for match length counter */ + matchIndex += dictDelta; + } else { + match = base + matchIndex; + lowLimit = (const BYTE*)source; /* required for match length counter */ + } + } else if (dictDirective==usingExtDict) { + if (matchIndex < startIndex) { + match = dictBase + matchIndex; + lowLimit = dictionary; /* required for match length counter */ + } else { + match = base + matchIndex; + lowLimit = (const BYTE*)source; /* required for match length counter */ + } + } else { /* single memory segment */ + match = base + matchIndex; + } + LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType); + assert(matchIndex < current); + if ( ((dictIssue==dictSmall) ? (matchIndex >= prefixIdxLimit) : 1) + && ((tableType==byU16) ? 
1 : (matchIndex+MAX_DISTANCE >= current)) + && (LZ4_read32(match) == LZ4_read32(ip)) ) { + token=op++; + *token=0; + if (maybe_extMem) offset = current - matchIndex; + DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i", + (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source)); + goto _next_match; + } + } /* Prepare next loop */ forwardH = LZ4_hashPosition(++ip, tableType); + } _last_literals: /* Encode Last Literals */ - { size_t const lastRun = (size_t)(iend - anchor); + { size_t lastRun = (size_t)(iend - anchor); if ( (outputLimited) && /* Check output buffer overflow */ - ((op - (BYTE*)dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize) ) - return 0; + (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) { + if (outputLimited == fillOutput) { + /* adapt lastRun to fill 'dst' */ + lastRun = (olimit-op) - 1; + lastRun -= (lastRun+240)/255; + } + if (outputLimited == limitedOutput) + return 0; + } if (lastRun >= RUN_MASK) { size_t accumulator = lastRun - RUN_MASK; *op++ = RUN_MASK << ML_BITS; @@ -725,44 +982,97 @@ LZ4_FORCE_INLINE int LZ4_compress_generic( *op++ = (BYTE)(lastRun<internal_donotuse; + if (acceleration < 1) acceleration = ACCELERATION_DEFAULT; LZ4_resetStream((LZ4_stream_t*)state); + if (maxOutputSize >= LZ4_compressBound(inputSize)) { + if (inputSize < LZ4_64Klimit) { + return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, byU16, noDict, noDictIssue, acceleration); + } else { + const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > MAX_DISTANCE)) ? byPtr : byU32; + return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration); + } + } else { + if (inputSize < LZ4_64Klimit) {; + return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration); + } else { + const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > MAX_DISTANCE)) ? byPtr : byU32; + return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration); + } + } +} + +/** + * LZ4_compress_fast_extState_fastReset() : + * A variant of LZ4_compress_fast_extState(). + * + * Using this variant avoids an expensive initialization step. It is only safe + * to call if the state buffer is known to be correctly initialized already + * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of + * "correctly initialized"). + */ +int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration) +{ + LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse; if (acceleration < 1) acceleration = ACCELERATION_DEFAULT; - if (maxOutputSize >= LZ4_compressBound(inputSize)) { - if (inputSize < LZ4_64Klimit) - return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, byU16, noDict, noDictIssue, acceleration); - else - return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, (sizeof(void*)==8) ? 
byU32 : byPtr, noDict, noDictIssue, acceleration); + if (dstCapacity >= LZ4_compressBound(srcSize)) { + if (srcSize < LZ4_64Klimit) { + const tableType_t tableType = byU16; + LZ4_prepareTable(ctx, srcSize, tableType); + if (ctx->currentOffset) { + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, dictSmall, acceleration); + } else { + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration); + } + } else { + const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > MAX_DISTANCE)) ? byPtr : byU32; + LZ4_prepareTable(ctx, srcSize, tableType); + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration); + } } else { - if (inputSize < LZ4_64Klimit) - return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration); - else - return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue, acceleration); + if (srcSize < LZ4_64Klimit) { + const tableType_t tableType = byU16; + LZ4_prepareTable(ctx, srcSize, tableType); + if (ctx->currentOffset) { + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration); + } else { + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration); + } + } else { + const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > MAX_DISTANCE)) ? byPtr : byU32; + LZ4_prepareTable(ctx, srcSize, tableType); + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration); + } } } int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration) { + int result; #if (LZ4_HEAPMODE) - void* ctxPtr = ALLOCATOR(1, sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ + LZ4_stream_t* ctxPtr = ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ + if (ctxPtr == NULL) return 0; #else LZ4_stream_t ctx; - void* const ctxPtr = &ctx; + LZ4_stream_t* const ctxPtr = &ctx; #endif - - int const result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration); + result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration); #if (LZ4_HEAPMODE) FREEMEM(ctxPtr); @@ -785,172 +1095,15 @@ int LZ4_compress_fast_force(const char* source, char* dest, int inputSize, int m LZ4_resetStream(&ctx); if (inputSize < LZ4_64Klimit) - return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration); + return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration); else - return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, sizeof(void*)==8 ? 
byU32 : byPtr, noDict, noDictIssue, acceleration); -} - - -/*-****************************** -* *_destSize() variant -********************************/ - -static int LZ4_compress_destSize_generic( - LZ4_stream_t_internal* const ctx, - const char* const src, - char* const dst, - int* const srcSizePtr, - const int targetDstSize, - const tableType_t tableType) -{ - const BYTE* ip = (const BYTE*) src; - const BYTE* base = (const BYTE*) src; - const BYTE* lowLimit = (const BYTE*) src; - const BYTE* anchor = ip; - const BYTE* const iend = ip + *srcSizePtr; - const BYTE* const mflimit = iend - MFLIMIT; - const BYTE* const matchlimit = iend - LASTLITERALS; - - BYTE* op = (BYTE*) dst; - BYTE* const oend = op + targetDstSize; - BYTE* const oMaxLit = op + targetDstSize - 2 /* offset */ - 8 /* because 8+MINMATCH==MFLIMIT */ - 1 /* token */; - BYTE* const oMaxMatch = op + targetDstSize - (LASTLITERALS + 1 /* token */); - BYTE* const oMaxSeq = oMaxLit - 1 /* token */; - - U32 forwardH; - - - /* Init conditions */ - if (targetDstSize < 1) return 0; /* Impossible to store anything */ - if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size, too large (or negative) */ - if ((tableType == byU16) && (*srcSizePtr>=LZ4_64Klimit)) return 0; /* Size too large (not within 64K limit) */ - if (*srcSizePtrhashTable, tableType, base); - ip++; forwardH = LZ4_hashPosition(ip, tableType); - - /* Main Loop */ - for ( ; ; ) { - const BYTE* match; - BYTE* token; - - /* Find a match */ - { const BYTE* forwardIp = ip; - unsigned step = 1; - unsigned searchMatchNb = 1 << LZ4_skipTrigger; - - do { - U32 h = forwardH; - ip = forwardIp; - forwardIp += step; - step = (searchMatchNb++ >> LZ4_skipTrigger); - - if (unlikely(forwardIp > mflimit)) goto _last_literals; - - match = LZ4_getPositionOnHash(h, ctx->hashTable, tableType, base); - forwardH = LZ4_hashPosition(forwardIp, tableType); - LZ4_putPositionOnHash(ip, h, ctx->hashTable, tableType, base); - - } while ( ((tableType==byU16) ? 
0 : (match + MAX_DISTANCE < ip)) - || (LZ4_read32(match) != LZ4_read32(ip)) ); - } - - /* Catch up */ - while ((ip>anchor) && (match > lowLimit) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; } - - /* Encode Literal length */ - { unsigned litLength = (unsigned)(ip - anchor); - token = op++; - if (op + ((litLength+240)/255) + litLength > oMaxLit) { - /* Not enough space for a last match */ - op--; - goto _last_literals; - } - if (litLength>=RUN_MASK) { - unsigned len = litLength - RUN_MASK; - *token=(RUN_MASK<= 255 ; len-=255) *op++ = 255; - *op++ = (BYTE)len; - } - else *token = (BYTE)(litLength< oMaxMatch) { - /* Match description too long : reduce it */ - matchLength = (15-1) + (oMaxMatch-op) * 255; - } - ip += MINMATCH + matchLength; - - if (matchLength>=ML_MASK) { - *token += ML_MASK; - matchLength -= ML_MASK; - while (matchLength >= 255) { matchLength-=255; *op++ = 255; } - *op++ = (BYTE)matchLength; - } - else *token += (BYTE)(matchLength); - } - - anchor = ip; - - /* Test end of block */ - if (ip > mflimit) break; - if (op > oMaxSeq) break; - - /* Fill table */ - LZ4_putPosition(ip-2, ctx->hashTable, tableType, base); - - /* Test next position */ - match = LZ4_getPosition(ip, ctx->hashTable, tableType, base); - LZ4_putPosition(ip, ctx->hashTable, tableType, base); - if ( (match+MAX_DISTANCE>=ip) - && (LZ4_read32(match)==LZ4_read32(ip)) ) - { token=op++; *token=0; goto _next_match; } - - /* Prepare next loop */ - forwardH = LZ4_hashPosition(++ip, tableType); - } - -_last_literals: - /* Encode Last Literals */ - { size_t lastRunSize = (size_t)(iend - anchor); - if (op + 1 /* token */ + ((lastRunSize+240)/255) /* litLength */ + lastRunSize /* literals */ > oend) { - /* adapt lastRunSize to fill 'dst' */ - lastRunSize = (oend-op) - 1; - lastRunSize -= (lastRunSize+240)/255; - } - ip = anchor + lastRunSize; - - if (lastRunSize >= RUN_MASK) { - size_t accumulator = lastRunSize - RUN_MASK; - *op++ = RUN_MASK << ML_BITS; - for(; accumulator >= 255 ; accumulator-=255) *op++ = 255; - *op++ = (BYTE) accumulator; - } else { - *op++ = (BYTE)(lastRunSize<= LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */ return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1); } else { - if (*srcSizePtr < LZ4_64Klimit) - return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, byU16); - else - return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, sizeof(void*)==8 ? byU32 : byPtr); - } + if (*srcSizePtr < LZ4_64Klimit) { + return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, 1); + } else { + tableType_t const tableType = ((sizeof(void*)==4) && ((uptrval)src > MAX_DISTANCE)) ? 
byPtr : byU32; + return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, tableType, noDict, noDictIssue, 1); + } } } int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize) { #if (LZ4_HEAPMODE) - LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOCATOR(1, sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ + LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ + if (ctx == NULL) return 0; #else LZ4_stream_t ctxBody; LZ4_stream_t* ctx = &ctxBody; @@ -991,21 +1146,28 @@ int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targe LZ4_stream_t* LZ4_createStream(void) { - LZ4_stream_t* lz4s = (LZ4_stream_t*)ALLOCATOR(8, LZ4_STREAMSIZE_U64); + LZ4_stream_t* lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal)); /* A compilation error here means LZ4_STREAMSIZE is not large enough */ + DEBUGLOG(4, "LZ4_createStream %p", lz4s); + if (lz4s == NULL) return NULL; LZ4_resetStream(lz4s); return lz4s; } void LZ4_resetStream (LZ4_stream_t* LZ4_stream) { - DEBUGLOG(4, "LZ4_resetStream"); + DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream); MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t)); } +void LZ4_resetStream_fast(LZ4_stream_t* ctx) { + LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32); +} + int LZ4_freeStream (LZ4_stream_t* LZ4_stream) { if (!LZ4_stream) return 0; /* support free on NULL */ + DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream); FREEMEM(LZ4_stream); return (0); } @@ -1015,43 +1177,70 @@ int LZ4_freeStream (LZ4_stream_t* LZ4_stream) int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize) { LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse; + const tableType_t tableType = byU32; const BYTE* p = (const BYTE*)dictionary; const BYTE* const dictEnd = p + dictSize; const BYTE* base; - if ((dict->initCheck) || (dict->currentOffset > 1 GB)) /* Uninitialized structure, or reuse overflow */ - LZ4_resetStream(LZ4_dict); + DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict); - if (dictSize < (int)HASH_UNIT) { - dict->dictionary = NULL; - dict->dictSize = 0; - return 0; - } + /* It's necessary to reset the context, + * and not just continue it with prepareTable() + * to avoid any risk of generating overflowing matchIndex + * when compressing using this dictionary */ + LZ4_resetStream(LZ4_dict); + + /* We always increment the offset by 64 KB, since, if the dict is longer, + * we truncate it to the last 64k, and if it's shorter, we still want to + * advance by a whole window length so we can provide the guarantee that + * there are only valid offsets in the window, which allows an optimization + * in LZ4_compress_fast_continue() where it uses noDictIssue even when the + * dictionary isn't a full 64k. 
*/ if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB; - dict->currentOffset += 64 KB; - base = p - dict->currentOffset; + base = dictEnd - 64 KB - dict->currentOffset; dict->dictionary = p; dict->dictSize = (U32)(dictEnd - p); - dict->currentOffset += dict->dictSize; + dict->currentOffset += 64 KB; + dict->tableType = tableType; + + if (dictSize < (int)HASH_UNIT) { + return 0; + } while (p <= dictEnd-HASH_UNIT) { - LZ4_putPosition(p, dict->hashTable, byU32, base); + LZ4_putPosition(p, dict->hashTable, tableType, base); p+=3; } return dict->dictSize; } +void LZ4_attach_dictionary(LZ4_stream_t *working_stream, const LZ4_stream_t *dictionary_stream) { + if (dictionary_stream != NULL) { + /* If the current offset is zero, we will never look in the + * external dictionary context, since there is no value a table + * entry can take that indicate a miss. In that case, we need + * to bump the offset to something non-zero. + */ + if (working_stream->internal_donotuse.currentOffset == 0) { + working_stream->internal_donotuse.currentOffset = 64 KB; + } + working_stream->internal_donotuse.dictCtx = &(dictionary_stream->internal_donotuse); + } else { + working_stream->internal_donotuse.dictCtx = NULL; + } +} + -static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src) +static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize) { - if ((LZ4_dict->currentOffset > 0x80000000) || - ((uptrval)LZ4_dict->currentOffset > (uptrval)src)) { /* address space overflow */ + if (LZ4_dict->currentOffset + nextSize > 0x80000000) { /* potential ptrdiff_t overflow (32-bits mode) */ /* rescale hash table */ U32 const delta = LZ4_dict->currentOffset - 64 KB; const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize; int i; + DEBUGLOG(4, "LZ4_renormDictT"); for (i=0; ihashTable[i] < delta) LZ4_dict->hashTable[i]=0; else LZ4_dict->hashTable[i] -= delta; @@ -1065,15 +1254,25 @@ static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src) int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration) { + const tableType_t tableType = byU32; LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse; - const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize; + const BYTE* dictEnd = streamPtr->dictionary + streamPtr->dictSize; + + DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i)", inputSize); - const BYTE* smallest = (const BYTE*) source; if (streamPtr->initCheck) return 0; /* Uninitialized structure detected */ - if ((streamPtr->dictSize>0) && (smallest>dictEnd)) smallest = dictEnd; - LZ4_renormDictT(streamPtr, smallest); + LZ4_renormDictT(streamPtr, inputSize); /* avoid index overflow */ if (acceleration < 1) acceleration = ACCELERATION_DEFAULT; + /* invalidate tiny dictionaries */ + if ( (streamPtr->dictSize-1 < 4) /* intentional underflow */ + && (dictEnd != (const BYTE*)source) ) { + DEBUGLOG(5, "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small", streamPtr->dictSize, streamPtr->dictionary); + streamPtr->dictSize = 0; + streamPtr->dictionary = (const BYTE*)source; + dictEnd = (const BYTE*)source; + } + /* Check overlapping input/dictionary space */ { const BYTE* sourceEnd = (const BYTE*) source + inputSize; if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) { @@ -1086,46 +1285,61 @@ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, ch /* prefix mode : source data follows dictionary */ if (dictEnd == (const 
BYTE*)source) { - int result; if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) - result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, dictSmall, acceleration); + return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration); else - result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, noDictIssue, acceleration); - streamPtr->dictSize += (U32)inputSize; - streamPtr->currentOffset += (U32)inputSize; - return result; + return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration); } /* external dictionary mode */ { int result; - if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) - result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, dictSmall, acceleration); - else - result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, noDictIssue, acceleration); + if (streamPtr->dictCtx) { + /* We depend here on the fact that dictCtx'es (produced by + * LZ4_loadDict) guarantee that their tables contain no references + * to offsets between dictCtx->currentOffset - 64 KB and + * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe + * to use noDictIssue even when the dict isn't a full 64 KB. + */ + if (inputSize > 4 KB) { + /* For compressing large blobs, it is faster to pay the setup + * cost to copy the dictionary's tables into the active context, + * so that the compression loop is only looking into one table. 
+ */ + memcpy(streamPtr, streamPtr->dictCtx, sizeof(LZ4_stream_t)); + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration); + } else { + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration); + } + } else { + if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) { + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration); + } else { + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration); + } + } streamPtr->dictionary = (const BYTE*)source; streamPtr->dictSize = (U32)inputSize; - streamPtr->currentOffset += (U32)inputSize; return result; } } -/* Hidden debug function, to force external dictionary mode */ -int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int inputSize) +/* Hidden debug function, to force-test external dictionary mode */ +int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize) { LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse; int result; - const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize; - const BYTE* smallest = dictEnd; - if (smallest > (const BYTE*) source) smallest = (const BYTE*) source; - LZ4_renormDictT(streamPtr, smallest); + LZ4_renormDictT(streamPtr, srcSize); - result = LZ4_compress_generic(streamPtr, source, dest, inputSize, 0, notLimited, byU32, usingExtDict, noDictIssue, 1); + if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) { + result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, dictSmall, 1); + } else { + result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, noDictIssue, 1); + } streamPtr->dictionary = (const BYTE*)source; - streamPtr->dictSize = (U32)inputSize; - streamPtr->currentOffset += (U32)inputSize; + streamPtr->dictSize = (U32)srcSize; return result; } @@ -1196,41 +1410,72 @@ LZ4_FORCE_INLINE int LZ4_decompress_generic( const int safeDecode = (endOnInput==endOnInputSize); const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB))); + /* Set up the "end" pointers for the shortcut. */ + const BYTE* const shortiend = iend - (endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/; + const BYTE* const shortoend = oend - (endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/; + + DEBUGLOG(5, "LZ4_decompress_generic (srcSize:%i)", srcSize); /* Special cases */ if ((partialDecoding) && (oexit > oend-MFLIMIT)) oexit = oend-MFLIMIT; /* targetOutputSize too high => just decode everything */ if ((endOnInput) && (unlikely(outputSize==0))) return ((srcSize==1) && (*ip==0)) ? 0 : -1; /* Empty output buffer */ if ((!endOnInput) && (unlikely(outputSize==0))) return (*ip==0?1:-1); + if ((endOnInput) && unlikely(srcSize==0)) return -1; /* Main Loop : decode sequences */ while (1) { - size_t length; const BYTE* match; size_t offset; unsigned const token = *ip++; - - /* shortcut for common case : - * in most circumstances, we expect to decode small matches (<= 18 bytes) separated by few literals (<= 14 bytes). - * this shortcut was tested on x86 and x64, where it improves decoding speed. 
- * it has not yet been benchmarked on ARM, Power, mips, etc. */ - if (((ip + 14 /*maxLL*/ + 2 /*offset*/ <= iend) - & (op + 14 /*maxLL*/ + 18 /*maxML*/ <= oend)) - & ((token < (15<<ML_BITS)) & ((token & ML_MASK) != 15)) ) { - size_t const ll = token >> ML_BITS; - size_t const off = LZ4_readLE16(ip+ll); - const BYTE* const matchPtr = op + ll - off; /* pointer underflow risk ? */ - if ((off >= 18) /* do not deal with overlapping matches */ & (matchPtr >= lowPrefix)) { - size_t const ml = (token & ML_MASK) + MINMATCH; - memcpy(op, ip, 16); op += ll; ip += ll + 2 /*offset*/; - memcpy(op, matchPtr, 18); op += ml; + size_t length = token >> ML_BITS; /* literal length */ + + assert(!endOnInput || ip <= iend); /* ip < iend before the increment */ + + /* A two-stage shortcut for the most common case: + * 1) If the literal length is 0..14, and there is enough space, + * enter the shortcut and copy 16 bytes on behalf of the literals + * (in the fast mode, only 8 bytes can be safely copied this way). + * 2) Further if the match length is 4..18, copy 18 bytes in a similar + * manner; but we ensure that there's enough space in the output for + * those 18 bytes earlier, upon entering the shortcut (in other words, + * there is a combined check for both stages). + */ + if ( (endOnInput ? length != RUN_MASK : length <= 8) + /* strictly "less than" on input, to re-enter the loop with at least one byte */ + && likely((endOnInput ? ip < shortiend : 1) & (op <= shortoend)) ) { + /* Copy the literals */ + memcpy(op, ip, endOnInput ? 16 : 8); + op += length; ip += length; + + /* The second stage: prepare for match copying, decode full info. + * If it doesn't work out, the info won't be wasted. */ + length = token & ML_MASK; /* match length */ + offset = LZ4_readLE16(ip); ip += 2; + match = op - offset; + + /* Do not deal with overlapping matches. */ + if ( (length != ML_MASK) + && (offset >= 8) + && (dict==withPrefix64k || match >= lowPrefix) ) { + /* Copy the match. */ + memcpy(op + 0, match + 0, 8); + memcpy(op + 8, match + 8, 8); + memcpy(op +16, match +16, 2); + op += length + MINMATCH; + /* Both stages worked, load the next token. */ + continue; + } + + /* The second stage didn't work out, but the info is ready. + * Propel it right to the point of match copying. */ + goto _copy_match; } /* decode literal length */ - if ((length=(token>>ML_BITS)) == RUN_MASK) { + if (length == RUN_MASK) { unsigned s; + if (unlikely(endOnInput ?
ip >= iend-RUN_MASK : 0)) goto _output_error; /* overflow detection */ do { s = *ip++; length += s; @@ -1262,11 +1507,14 @@ LZ4_FORCE_INLINE int LZ4_decompress_generic( /* get offset */ offset = LZ4_readLE16(ip); ip+=2; match = op - offset; - if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error; /* Error : offset outside buffers */ - LZ4_write32(op, (U32)offset); /* costs ~1%; silence an msan warning when offset==0 */ /* get matchlength */ length = token & ML_MASK; + +_copy_match: + if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error; /* Error : offset outside buffers */ + LZ4_write32(op, (U32)offset); /* costs ~1%; silence an msan warning when offset==0 */ + if (length == ML_MASK) { unsigned s; do { @@ -1313,7 +1561,7 @@ LZ4_FORCE_INLINE int LZ4_decompress_generic( match += inc32table[offset]; memcpy(op+4, match, 4); match -= dec64table[offset]; - } else { LZ4_copy8(op, match); match+=8; } + } else { memcpy(op, match, 8); match+=8; } op += 8; if (unlikely(cpy>oend-12)) { @@ -1326,7 +1574,7 @@ LZ4_FORCE_INLINE int LZ4_decompress_generic( } while (op < cpy) *op++ = *match++; } else { - LZ4_copy8(op, match); + memcpy(op, match, 8); if (length > 16) LZ4_wildCopy(op+8, match+8, cpy); } op = cpy; /* correction */ @@ -1344,30 +1592,105 @@ LZ4_FORCE_INLINE int LZ4_decompress_generic( } +/*===== Instantiate the API decoding functions. =====*/ + LZ4_FORCE_O2_GCC_PPC64LE int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize) { - return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, full, 0, noDict, (BYTE*)dest, NULL, 0); + return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, + endOnInputSize, full, 0, noDict, + (BYTE*)dest, NULL, 0); } LZ4_FORCE_O2_GCC_PPC64LE int LZ4_decompress_safe_partial(const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize) { - return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, partial, targetOutputSize, noDict, (BYTE*)dest, NULL, 0); + return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, + endOnInputSize, partial, targetOutputSize, + noDict, (BYTE*)dest, NULL, 0); } LZ4_FORCE_O2_GCC_PPC64LE int LZ4_decompress_fast(const char* source, char* dest, int originalSize) { - return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)(dest - 64 KB), NULL, 64 KB); + return LZ4_decompress_generic(source, dest, 0, originalSize, + endOnOutputSize, full, 0, withPrefix64k, + (BYTE*)dest - 64 KB, NULL, 0); } +/*===== Instantiate a few more decoding cases, used more than once. =====*/ + +LZ4_FORCE_O2_GCC_PPC64LE /* Exported, an obsolete API function. */ +int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize) +{ + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, + endOnInputSize, full, 0, withPrefix64k, + (BYTE*)dest - 64 KB, NULL, 0); +} + +/* Another obsolete API function, paired with the previous one. */ +int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize) +{ + /* LZ4_decompress_fast doesn't validate match offsets, + * and thus serves well with any prefixed dictionary.
*/ + return LZ4_decompress_fast(source, dest, originalSize); +} + +LZ4_FORCE_O2_GCC_PPC64LE +static int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, int compressedSize, int maxOutputSize, + size_t prefixSize) +{ + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, + endOnInputSize, full, 0, noDict, + (BYTE*)dest-prefixSize, NULL, 0); +} + +LZ4_FORCE_O2_GCC_PPC64LE /* Exported under another name, for tests/fullbench.c */ +#define LZ4_decompress_safe_extDict LZ4_decompress_safe_forceExtDict +int LZ4_decompress_safe_extDict(const char* source, char* dest, int compressedSize, int maxOutputSize, + const void* dictStart, size_t dictSize) +{ + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, + endOnInputSize, full, 0, usingExtDict, + (BYTE*)dest, (const BYTE*)dictStart, dictSize); +} + +LZ4_FORCE_O2_GCC_PPC64LE +static int LZ4_decompress_fast_extDict(const char* source, char* dest, int originalSize, + const void* dictStart, size_t dictSize) +{ + return LZ4_decompress_generic(source, dest, 0, originalSize, + endOnOutputSize, full, 0, usingExtDict, + (BYTE*)dest, (const BYTE*)dictStart, dictSize); +} + +/* The "double dictionary" mode, for use with e.g. ring buffers: the first part + * of the dictionary is passed as prefix, and the second via dictStart + dictSize. + * These routines are used only once, in LZ4_decompress_*_continue(). + */ +LZ4_FORCE_INLINE +int LZ4_decompress_safe_doubleDict(const char* source, char* dest, int compressedSize, int maxOutputSize, + size_t prefixSize, const void* dictStart, size_t dictSize) +{ + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, + endOnInputSize, full, 0, usingExtDict, + (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize); +} + +LZ4_FORCE_INLINE +int LZ4_decompress_fast_doubleDict(const char* source, char* dest, int originalSize, + size_t prefixSize, const void* dictStart, size_t dictSize) +{ + return LZ4_decompress_generic(source, dest, 0, originalSize, + endOnOutputSize, full, 0, usingExtDict, + (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize); +} /*===== streaming decompression functions =====*/ LZ4_streamDecode_t* LZ4_createStreamDecode(void) { - LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOCATOR(1, sizeof(LZ4_streamDecode_t)); + LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t)); return lz4s; } @@ -1378,12 +1701,11 @@ int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream) return 0; } -/*! - * LZ4_setStreamDecode() : - * Use this function to instruct where to find the dictionary. - * This function is not necessary if previous data is still available where it was decoded. - * Loading a size of 0 is allowed (same effect as no dictionary). - * Return : 1 if OK, 0 if error +/*! LZ4_setStreamDecode() : + * Use this function to instruct where to find the dictionary. + * This function is not necessary if previous data is still available where it was decoded. + * Loading a size of 0 is allowed (same effect as no dictionary). + * @return : 1 if OK, 0 if error */ int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize) { @@ -1395,6 +1717,25 @@ int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dicti return 1; } +/*! 
LZ4_decoderRingBufferSize() : + * when setting a ring buffer for streaming decompression (optional scenario), + * provides the minimum size of this ring buffer + * to be compatible with any source respecting maxBlockSize condition. + * Note : in a ring buffer scenario, + * blocks are presumed decompressed next to each other. + * When not enough space remains for next block (remainingSize < maxBlockSize), + * decoding resumes from beginning of ring buffer. + * @return : minimum ring buffer size, + * or 0 if there is an error (invalid maxBlockSize). + */ +int LZ4_decoderRingBufferSize(int maxBlockSize) +{ + if (maxBlockSize < 0) return 0; + if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0; + if (maxBlockSize < 16) maxBlockSize = 16; + return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize); +} + /* *_continue() : These decoding functions allow decompression of multiple blocks in "streaming" mode. @@ -1408,19 +1749,32 @@ int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const ch LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse; int result; - if (lz4sd->prefixEnd == (BYTE*)dest) { - result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, - endOnInputSize, full, 0, - usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize); + if (lz4sd->prefixSize == 0) { + /* The first call, no dictionary yet. */ + assert(lz4sd->extDictSize == 0); + result = LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize); + if (result <= 0) return result; + lz4sd->prefixSize = result; + lz4sd->prefixEnd = (BYTE*)dest + result; + } else if (lz4sd->prefixEnd == (BYTE*)dest) { + /* They're rolling the current segment. */ + if (lz4sd->prefixSize >= 64 KB - 1) + result = LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize); + else if (lz4sd->extDictSize == 0) + result = LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, + lz4sd->prefixSize); + else + result = LZ4_decompress_safe_doubleDict(source, dest, compressedSize, maxOutputSize, + lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize); if (result <= 0) return result; lz4sd->prefixSize += result; lz4sd->prefixEnd += result; } else { + /* The buffer wraps around, or they're switching to another buffer. 
*/ lz4sd->extDictSize = lz4sd->prefixSize; lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; - result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, - endOnInputSize, full, 0, - usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize); + result = LZ4_decompress_safe_extDict(source, dest, compressedSize, maxOutputSize, + lz4sd->externalDict, lz4sd->extDictSize); if (result <= 0) return result; lz4sd->prefixSize = result; lz4sd->prefixEnd = (BYTE*)dest + result; @@ -1435,19 +1789,26 @@ int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const ch LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse; int result; - if (lz4sd->prefixEnd == (BYTE*)dest) { - result = LZ4_decompress_generic(source, dest, 0, originalSize, - endOnOutputSize, full, 0, - usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize); + if (lz4sd->prefixSize == 0) { + assert(lz4sd->extDictSize == 0); + result = LZ4_decompress_fast(source, dest, originalSize); + if (result <= 0) return result; + lz4sd->prefixSize = originalSize; + lz4sd->prefixEnd = (BYTE*)dest + originalSize; + } else if (lz4sd->prefixEnd == (BYTE*)dest) { + if (lz4sd->prefixSize >= 64 KB - 1 || lz4sd->extDictSize == 0) + result = LZ4_decompress_fast(source, dest, originalSize); + else + result = LZ4_decompress_fast_doubleDict(source, dest, originalSize, + lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize); if (result <= 0) return result; lz4sd->prefixSize += originalSize; lz4sd->prefixEnd += originalSize; } else { lz4sd->extDictSize = lz4sd->prefixSize; lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; - result = LZ4_decompress_generic(source, dest, 0, originalSize, - endOnOutputSize, full, 0, - usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize); + result = LZ4_decompress_fast_extDict(source, dest, originalSize, + lz4sd->externalDict, lz4sd->extDictSize); if (result <= 0) return result; lz4sd->prefixSize = originalSize; lz4sd->prefixEnd = (BYTE*)dest + originalSize; @@ -1464,36 +1825,23 @@ Advanced decoding functions : the dictionary must be explicitly provided within parameters */ -LZ4_FORCE_O2_GCC_PPC64LE -LZ4_FORCE_INLINE int LZ4_decompress_usingDict_generic(const char* source, char* dest, int compressedSize, int maxOutputSize, int safe, const char* dictStart, int dictSize) +int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize) { if (dictSize==0) - return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest, NULL, 0); + return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize); if (dictStart+dictSize == dest) { - if (dictSize >= (int)(64 KB - 1)) - return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, withPrefix64k, (BYTE*)dest-64 KB, NULL, 0); - return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest-dictSize, NULL, 0); + if (dictSize >= 64 KB - 1) + return LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize); + return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, dictSize); } - return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize); + return LZ4_decompress_safe_extDict(source, dest, compressedSize, 
maxOutputSize, dictStart, dictSize); } -LZ4_FORCE_O2_GCC_PPC64LE -int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize) -{ - return LZ4_decompress_usingDict_generic(source, dest, compressedSize, maxOutputSize, 1, dictStart, dictSize); -} - -LZ4_FORCE_O2_GCC_PPC64LE int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize) { - return LZ4_decompress_usingDict_generic(source, dest, 0, originalSize, 0, dictStart, dictSize); -} - -/* debug function */ -LZ4_FORCE_O2_GCC_PPC64LE -int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize) -{ - return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize); + if (dictSize==0 || dictStart+dictSize == dest) + return LZ4_decompress_fast(source, dest, originalSize); + return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, dictSize); } @@ -1501,64 +1849,67 @@ int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, int compres * Obsolete Functions ***************************************************/ /* obsolete compression functions */ -int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize) { return LZ4_compress_default(source, dest, inputSize, maxOutputSize); } -int LZ4_compress(const char* source, char* dest, int inputSize) { return LZ4_compress_default(source, dest, inputSize, LZ4_compressBound(inputSize)); } -int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1); } -int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1); } -int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, maxDstSize, 1); } -int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize) { return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1); } +int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize) +{ + return LZ4_compress_default(source, dest, inputSize, maxOutputSize); +} +int LZ4_compress(const char* source, char* dest, int inputSize) +{ + return LZ4_compress_default(source, dest, inputSize, LZ4_compressBound(inputSize)); +} +int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize) +{ + return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1); +} +int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize) +{ + return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1); +} +int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int dstCapacity) +{ + return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, dstCapacity, 1); +} +int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize) +{ + return LZ4_compress_fast_continue(LZ4_stream, source, dest, 
inputSize, LZ4_compressBound(inputSize), 1); +} /* -These function names are deprecated and should no longer be used. +These decompression functions are deprecated and should no longer be used. They are only provided here for compatibility with older user programs. - LZ4_uncompress is totally equivalent to LZ4_decompress_fast - LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe */ -int LZ4_uncompress (const char* source, char* dest, int outputSize) { return LZ4_decompress_fast(source, dest, outputSize); } -int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) { return LZ4_decompress_safe(source, dest, isize, maxOutputSize); } - +int LZ4_uncompress (const char* source, char* dest, int outputSize) +{ + return LZ4_decompress_fast(source, dest, outputSize); +} +int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) +{ + return LZ4_decompress_safe(source, dest, isize, maxOutputSize); +} /* Obsolete Streaming functions */ int LZ4_sizeofStreamState() { return LZ4_STREAMSIZE; } -static void LZ4_init(LZ4_stream_t* lz4ds, BYTE* base) -{ - MEM_INIT(lz4ds, 0, sizeof(LZ4_stream_t)); - lz4ds->internal_donotuse.bufferStart = base; -} - int LZ4_resetStreamState(void* state, char* inputBuffer) { - if ((((uptrval)state) & 3) != 0) return 1; /* Error : pointer is not aligned on 4-bytes boundary */ - LZ4_init((LZ4_stream_t*)state, (BYTE*)inputBuffer); + (void)inputBuffer; + LZ4_resetStream((LZ4_stream_t*)state); return 0; } void* LZ4_create (char* inputBuffer) { - LZ4_stream_t* lz4ds = (LZ4_stream_t*)ALLOCATOR(8, sizeof(LZ4_stream_t)); - LZ4_init (lz4ds, (BYTE*)inputBuffer); - return lz4ds; + (void)inputBuffer; + return LZ4_createStream(); } -char* LZ4_slideInputBuffer (void* LZ4_Data) -{ - LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)LZ4_Data)->internal_donotuse; - int dictSize = LZ4_saveDict((LZ4_stream_t*)LZ4_Data, (char*)ctx->bufferStart, 64 KB); - return (char*)(ctx->bufferStart + dictSize); -} - -/* Obsolete streaming decompression functions */ - -int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize) -{ - return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB); -} - -int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize) +char* LZ4_slideInputBuffer (void* state) { - return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB); + /* avoid const char * -> char * conversion warning */ + return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary; } #endif /* LZ4_COMMONDEFS_ONLY */ diff --git a/lz4libs/lz4.h b/lz4libs/lz4.h index a06b8a46..7d131221 100644 --- a/lz4libs/lz4.h +++ b/lz4libs/lz4.h @@ -93,7 +93,7 @@ extern "C" { /*------ Version ------*/ #define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */ #define LZ4_VERSION_MINOR 8 /* for new (non-breaking) interface capabilities */ -#define LZ4_VERSION_RELEASE 1 /* for tweaks, bug-fixes, or development */ +#define LZ4_VERSION_RELEASE 2 /* for tweaks, bug-fixes, or development */ #define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE) @@ -102,8 +102,8 @@ extern "C" { #define LZ4_EXPAND_AND_QUOTE(str) LZ4_QUOTE(str) #define LZ4_VERSION_STRING LZ4_EXPAND_AND_QUOTE(LZ4_LIB_VERSION) -LZ4LIB_API int LZ4_versionNumber 
(void); /**< library version number; to be used when checking dll version */ -LZ4LIB_API const char* LZ4_versionString (void); /**< library version string; to be used when checking dll version */ +LZ4LIB_API int LZ4_versionNumber (void); /**< library version number; useful to check dll version */ +LZ4LIB_API const char* LZ4_versionString (void); /**< library version string; useful to check dll version */ /*-************************************ @@ -113,7 +113,7 @@ LZ4LIB_API const char* LZ4_versionString (void); /**< library version string; * LZ4_MEMORY_USAGE : * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) * Increasing memory usage improves compression ratio - * Reduced memory usage can improve speed, due to cache effect + * Reduced memory usage may improve speed, thanks to cache effect * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ #ifndef LZ4_MEMORY_USAGE @@ -128,12 +128,12 @@ LZ4LIB_API const char* LZ4_versionString (void); /**< library version string; into already allocated 'dst' buffer of size 'dstCapacity'. Compression is guaranteed to succeed if 'dstCapacity' >= LZ4_compressBound(srcSize). It also runs faster, so it's a recommended setting. - If the function cannot compress 'src' into a limited 'dst' budget, + If the function cannot compress 'src' into a more limited 'dst' budget, compression stops *immediately*, and the function result is zero. - As a consequence, 'dst' content is not valid. - This function never writes outside 'dst' buffer, nor read outside 'source' buffer. - srcSize : supported max value is LZ4_MAX_INPUT_VALUE - dstCapacity : full or partial size of buffer 'dst' (which must be already allocated) + Note : as a consequence, 'dst' content is not valid. + Note 2 : This function is protected against buffer overflow scenarios (never writes outside 'dst' buffer, nor reads outside 'source' buffer). + srcSize : max supported value is LZ4_MAX_INPUT_SIZE. + dstCapacity : size of buffer 'dst' (which must be already allocated) return : the number of bytes written into buffer 'dst' (necessarily <= dstCapacity) or 0 if compression fails */ LZ4LIB_API int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity); @@ -144,8 +144,7 @@ LZ4LIB_API int LZ4_compress_default(const char* src, char* dst, int srcSize, int return : the number of bytes decompressed into destination buffer (necessarily <= dstCapacity) If destination buffer is not large enough, decoding will stop and output an error code (negative value). If the source stream is detected malformed, the function will stop decoding and return a negative result. - This function is protected against buffer overflow exploits, including malicious data packets. - It never writes outside output buffer, nor reads outside input buffer. + This function is protected against malicious data packets. */ LZ4LIB_API int LZ4_decompress_safe (const char* src, char* dst, int compressedSize, int dstCapacity); @@ -161,20 +160,20 @@ LZ4_compressBound() : Provides the maximum size that LZ4 compression may output in a "worst case" scenario (input data not compressible) This function is primarily useful for memory allocation purposes (destination buffer size). Macro LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack memory allocation for example).
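To make the allocation pattern described above concrete, here is a minimal sketch of pairing LZ4_compressBound() with LZ4_compress_default(); it is not part of this patch, and the helper name and buffers are illustrative:

    #include "lz4.h"
    #include <stdlib.h>

    /* Sketch: compress `src` into a freshly allocated worst-case buffer.
     * Returns the compressed size (> 0), or -1 on failure. */
    static int compress_whole(const char* src, int srcSize, char** dstOut)
    {
        int const bound = LZ4_compressBound(srcSize);
        if (bound == 0) return -1;            /* srcSize too large or negative */
        *dstOut = (char*)malloc((size_t)bound);
        if (*dstOut == NULL) return -1;
        /* with dstCapacity >= LZ4_compressBound(srcSize), compression is
         * guaranteed to succeed, and runs faster */
        return LZ4_compress_default(src, *dstOut, srcSize, bound);
    }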
- Note that LZ4_compress_default() compress faster when dest buffer size is >= LZ4_compressBound(srcSize) + Note that LZ4_compress_default() compresses faster when dstCapacity is >= LZ4_compressBound(srcSize) inputSize : max supported value is LZ4_MAX_INPUT_SIZE return : maximum output size in a "worst case" scenario - or 0, if input size is too large ( > LZ4_MAX_INPUT_SIZE) + or 0, if input size is incorrect (too large or negative) */ LZ4LIB_API int LZ4_compressBound(int inputSize); /*! LZ4_compress_fast() : - Same as LZ4_compress_default(), but allows to select an "acceleration" factor. + Same as LZ4_compress_default(), but allows selection of "acceleration" factor. The larger the acceleration value, the faster the algorithm, but also the lesser the compression. It's a trade-off. It can be fine-tuned, with each successive value providing roughly +~3% to speed. An acceleration value of "1" is the same as regular LZ4_compress_default() - Values <= 0 will be replaced by ACCELERATION_DEFAULT (see lz4.c), which is 1. + Values <= 0 will be replaced by ACCELERATION_DEFAULT (currently == 1, see lz4.c). */ LZ4LIB_API int LZ4_compress_fast (const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); @@ -205,15 +204,19 @@ LZ4LIB_API int LZ4_compress_destSize (const char* src, char* dst, int* srcSizePt /*! -LZ4_decompress_fast() : (unsafe!!) - originalSize : is the original uncompressed size - return : the number of bytes read from the source buffer (in other words, the compressed size) - If the source stream is detected malformed, the function will stop decoding and return a negative result. - Destination buffer must be already allocated. Its size must be >= 'originalSize' bytes. - note : This function respects memory boundaries for *properly formed* compressed data. - It is a bit faster than LZ4_decompress_safe(). - However, it does not provide any protection against intentionally modified data stream (malicious input). - Use this function in trusted environment only (data to decode comes from a trusted source). +LZ4_decompress_fast() : **unsafe!** +This function is a bit faster than LZ4_decompress_safe(), +but it may misbehave on malformed input because it doesn't perform full validation of compressed data. + originalSize : is the uncompressed size to regenerate + Destination buffer must be already allocated, and its size must be >= 'originalSize' bytes. + return : number of bytes read from source buffer (== compressed size). + If the source stream is detected malformed, the function stops decoding and returns a negative result. + note : This function is only usable if the originalSize of uncompressed data is known in advance. + The caller should also check that all the compressed input has been consumed properly, + i.e. that the return value matches the size of the buffer with compressed input. + The function never writes past the output buffer. However, since it doesn't know its 'src' size, + it may read past the intended input. Also, because match offsets are not validated during decoding, + reads from 'src' may underflow. Use this function in trusted environment **only**. */ LZ4LIB_API int LZ4_decompress_fast (const char* src, char* dst, int originalSize); @@ -222,12 +225,12 @@ LZ4_decompress_safe_partial() : This function decompresses a compressed block of size 'srcSize' at position 'src' into destination buffer 'dst' of size 'dstCapacity'. The function will decompress a minimum of 'targetOutputSize' bytes, and stop after that.
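As a sketch of how a caller is expected to use partial decoding under these rules (the helper is hypothetical, not from this patch):

    #include "lz4.h"

    /* Sketch: try to recover at least `want` bytes from one compressed
     * block, e.g. to inspect an embedded record header. The call may
     * decode more than `want` (up to dstCapacity), or less if the block
     * simply holds less data, so the return value must always be checked. */
    static int peek_block(const char* src, int srcSize,
                          char* dst, int dstCapacity, int want)
    {
        int const got = LZ4_decompress_safe_partial(src, dst, srcSize,
                                                    want, dstCapacity);
        if (got < 0) return got;   /* malformed input */
        return got;                /* may be < want; caller decides */
    }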
- However, it's not accurate, and may write more than 'targetOutputSize' (but <= dstCapacity). + However, it's not accurate, and may write more than 'targetOutputSize' (but always <= dstCapacity). @return : the number of bytes decoded in the destination buffer (necessarily <= dstCapacity) - Note : this number can be < 'targetOutputSize' should the compressed block contain less data. - Always control how many bytes were decoded. - If the source stream is detected malformed, the function will stop decoding and return a negative result. - This function never writes outside of output buffer, and never reads outside of input buffer. It is therefore protected against malicious data packets. + Note : this number can also be < targetOutputSize, if compressed block contains less data. + Therefore, always control how many bytes were decoded. + If source stream is detected malformed, function returns a negative result. + This function is protected against malicious data packets. */ LZ4LIB_API int LZ4_decompress_safe_partial (const char* src, char* dst, int srcSize, int targetOutputSize, int dstCapacity); @@ -235,7 +238,7 @@ LZ4LIB_API int LZ4_decompress_safe_partial (const char* src, char* dst, int srcS /*-********************************************* * Streaming Compression Functions ***********************************************/ -typedef union LZ4_stream_u LZ4_stream_t; /* incomplete type (defined later) */ +typedef union LZ4_stream_u LZ4_stream_t; /* incomplete type (defined later) */ /*! LZ4_createStream() and LZ4_freeStream() : * LZ4_createStream() will allocate and initialize an `LZ4_stream_t` structure. @@ -259,67 +262,93 @@ LZ4LIB_API void LZ4_resetStream (LZ4_stream_t* streamPtr); LZ4LIB_API int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, int dictSize); /*! LZ4_compress_fast_continue() : - * Compress content into 'src' using data from previously compressed blocks, improving compression ratio. + * Compress 'src' content using data from previously compressed blocks, for better compression ratio. * 'dst' buffer must be already allocated. * If dstCapacity >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster. * - * Important : Up to 64KB of previously compressed data is assumed to remain present and unmodified in memory ! - * Special 1 : If input buffer is a double-buffer, it can have any size, including < 64 KB. - * Special 2 : If input buffer is a ring-buffer, it can have any size, including < 64 KB. + * Important : The previous 64KB of compressed data is assumed to remain present and unmodified in memory! + * + * Special 1 : When input is a double-buffer, they can have any size, including < 64 KB. + * Make sure that buffers are separated by at least one byte. + * This way, each block only depends on previous block. + * Special 2 : If input buffer is a ring-buffer, it can have any size, including < 64 KB. * * @return : size of compressed block - * or 0 if there is an error (typically, compressed data cannot fit into 'dst') + * or 0 if there is an error (typically, cannot fit into 'dst'). * After an error, the stream status is invalid, it can only be reset or freed. */ LZ4LIB_API int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); /*! 
LZ4_saveDict() : - * If previously compressed data block is not guaranteed to remain available at its current memory location, + * If last 64KB data cannot be guaranteed to remain available at its current memory location, * save it into a safer place (char* safeBuffer). - * Note : it's not necessary to call LZ4_loadDict() after LZ4_saveDict(), dictionary is immediately usable. - * @return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error. + * This is schematically equivalent to a memcpy() followed by LZ4_loadDict(), + * but is much faster, because LZ4_saveDict() doesn't need to rebuild tables. + * @return : saved dictionary size in bytes (necessarily <= maxDictSize), or 0 if error. */ -LZ4LIB_API int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int dictSize); +LZ4LIB_API int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int maxDictSize); /*-********************************************** * Streaming Decompression Functions * Bufferless synchronous API ************************************************/ -typedef union LZ4_streamDecode_u LZ4_streamDecode_t; /* incomplete type (defined later) */ +typedef union LZ4_streamDecode_u LZ4_streamDecode_t; /* tracking context */ /*! LZ4_createStreamDecode() and LZ4_freeStreamDecode() : - * creation / destruction of streaming decompression tracking structure. - * A tracking structure can be re-used multiple times sequentially. */ + * creation / destruction of streaming decompression tracking context. + * A tracking context can be re-used multiple times. + */ LZ4LIB_API LZ4_streamDecode_t* LZ4_createStreamDecode(void); LZ4LIB_API int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream); /*! LZ4_setStreamDecode() : - * An LZ4_streamDecode_t structure can be allocated once and re-used multiple times. + * An LZ4_streamDecode_t context can be allocated once and re-used multiple times. * Use this function to start decompression of a new stream of blocks. - * A dictionary can optionnally be set. Use NULL or size 0 for a simple reset order. + * A dictionary can optionally be set. Use NULL or size 0 for a reset order. + * Dictionary is presumed stable : it must remain accessible and unmodified during next decompression. * @return : 1 if OK, 0 if error */ LZ4LIB_API int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize); +/*! LZ4_decoderRingBufferSize() : v1.8.2 + * Note : in a ring buffer scenario (optional), + * blocks are presumed decompressed next to each other + * up to the moment there is not enough remaining space for next block (remainingSize < maxBlockSize), + * at which stage it resumes from beginning of ring buffer. + * When setting such a ring buffer for streaming decompression, + * provides the minimum size of this ring buffer + * to be compatible with any source respecting maxBlockSize condition. + * @return : minimum ring buffer size, + * or 0 if there is an error (invalid maxBlockSize). + */ +LZ4LIB_API int LZ4_decoderRingBufferSize(int maxBlockSize); +#define LZ4_DECODER_RING_BUFFER_SIZE(mbs) (65536 + 14 + (mbs)) /* for static allocation; mbs presumed valid */ + /*! LZ4_decompress_*_continue() : * These decoding functions allow decompression of consecutive blocks in "streaming" mode. * A block is an unsplittable entity, it must be presented entirely to a decompression function. - * Decompression functions only accept one block at a time. - * Previously decoded blocks *must* remain available at the memory position where they were decoded (up to 64 KB).
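A sketch of how the new macro and function are meant to be used for a ring-buffer decoder (the I/O helpers and MAX_BLOCK_SIZE bound are assumptions, not part of the patch):

    #include "lz4.h"

    #define MAX_BLOCK_SIZE (8 * 1024)   /* assumed per-block bound agreed with the encoder */
    static char ring[LZ4_DECODER_RING_BUFFER_SIZE(MAX_BLOCK_SIZE)];

    extern int  read_next_block(char* dst);           /* hypothetical: returns compressed size, <= 0 at end */
    extern void consume(const char* data, int size);  /* hypothetical consumer */

    static void decode_stream(LZ4_streamDecode_t* sd) /* from LZ4_createStreamDecode() */
    {
        int offset = 0;
        for (;;) {
            char cBuf[LZ4_COMPRESSBOUND(MAX_BLOCK_SIZE)];
            int const cSize = read_next_block(cBuf);
            if (cSize <= 0) break;
            /* blocks are decoded next to each other; wrap to the start
             * of the ring when a full block no longer fits */
            if ((int)sizeof(ring) - offset < MAX_BLOCK_SIZE) offset = 0;
            { int const dSize = LZ4_decompress_safe_continue(sd, cBuf,
                                      ring + offset, cSize, MAX_BLOCK_SIZE);
              if (dSize <= 0) break;                  /* error or end of stream */
              consume(ring + offset, dSize);
              offset += dSize;
            }
        }
    }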
+ * Decompression functions only accept one block at a time. + * The last 64KB of previously decoded data *must* remain available and unmodified at the memory position where they were decoded. + * If less than 64KB of data has been decoded, all the data must be present. * - * Special : if application sets a ring buffer for decompression, it must respect one of the following conditions : - * - Exactly same size as encoding buffer, with same update rule (block boundaries at same positions) - * In which case, the decoding & encoding ring buffer can have any size, including very small ones ( < 64 KB). - * - Larger than encoding buffer, by a minimum of maxBlockSize more bytes. - * maxBlockSize is implementation dependent. It's the maximum size of any single block. + * Special : if decompression side sets a ring buffer, it must respect one of the following conditions : + * - Decompression buffer size is _at least_ LZ4_decoderRingBufferSize(maxBlockSize). + * maxBlockSize is the maximum size of any single block. It can have any value > 16 bytes. + * In which case, encoding and decoding buffers do not need to be synchronized. + * Actually, data can be produced by any source compliant with LZ4 format specification, and respecting maxBlockSize. + * - Synchronized mode : + * Decompression buffer size is _exactly_ the same as compression buffer size, + * and follows exactly same update rule (block boundaries at same positions), + * and decoding function is provided with exact decompressed size of each block (exception for last block of the stream), + * _then_ decoding & encoding ring buffer can have any size, including small ones ( < 64 KB). + * - Decompression buffer is larger than encoding buffer, by a minimum of maxBlockSize more bytes. * In which case, encoding and decoding buffers do not need to be synchronized, * and encoding ring buffer can have any size, including small ones ( < 64 KB). - * - _At least_ 64 KB + 8 bytes + maxBlockSize. - * In which case, encoding and decoding buffers do not need to be synchronized, - * and encoding ring buffer can have any size, including larger than decoding buffer. - * Whenever these conditions are not possible, save the last 64KB of decoded data into a safe buffer, - * and indicate where it is saved using LZ4_setStreamDecode() before decompressing next block. + * + * Whenever these conditions are not possible, + * save the last 64KB of decoded data into a safe buffer where it can't be modified during decompression, + * then indicate where this data is saved using LZ4_setStreamDecode(), before decompressing next block. */ LZ4LIB_API int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int srcSize, int dstCapacity); LZ4LIB_API int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int originalSize); @@ -329,6 +358,7 @@ LZ4LIB_API int LZ4_decompress_fast_continue (LZ4_streamDecod * These decoding functions work the same as * a combination of LZ4_setStreamDecode() followed by LZ4_decompress_*_continue() * They are stand-alone, and don't need an LZ4_streamDecode_t structure. + * Dictionary is presumed stable : it must remain accessible and unmodified during next decompression.
*/ LZ4LIB_API int LZ4_decompress_safe_usingDict (const char* src, char* dst, int srcSize, int dstCapacity, const char* dictStart, int dictSize); LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* src, char* dst, int originalSize, const char* dictStart, int dictSize); @@ -337,6 +367,94 @@ LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* src, char* dst, int or /*^********************************************** * !!!!!! STATIC LINKING ONLY !!!!!! ***********************************************/ + +/*-************************************ + * Unstable declarations + ************************************** + * Declarations in this section should be considered unstable. + * Use at your own peril, etc., etc. + * They may be removed in the future. + * Their signatures may change. + **************************************/ + +#ifdef LZ4_STATIC_LINKING_ONLY + +/*! LZ4_resetStream_fast() : + * Use this, like LZ4_resetStream(), to prepare a context for a new chain of + * calls to a streaming API (e.g., LZ4_compress_fast_continue()). + * + * Note: + * Using this in advance of a non-streaming-compression function is redundant, + * and potentially bad for performance, since they all perform their own custom + * reset internally. + * + * Differences from LZ4_resetStream(): + * When an LZ4_stream_t is known to be in an internally coherent state, + * it can often be prepared for a new compression with almost no work, only + * sometimes falling back to the full, expensive reset that is always required + * when the stream is in an indeterminate state (i.e., the reset performed by + * LZ4_resetStream()). + * + * LZ4_streams are guaranteed to be in a valid state when: + * - returned from LZ4_createStream() + * - reset by LZ4_resetStream() + * - memset(stream, 0, sizeof(LZ4_stream_t)), though this is discouraged + * - the stream was in a valid state and was reset by LZ4_resetStream_fast() + * - the stream was in a valid state and was then used in any compression call + * that returned success + * - the stream was in an indeterminate state and was used in a compression + * call that fully reset the state (e.g., LZ4_compress_fast_extState()) and + * that returned success + * + * When a stream isn't known to be in a valid state, it is not safe to pass to + * any fastReset or streaming function. It must first be cleansed by the full + * LZ4_resetStream(). + */ +LZ4LIB_API void LZ4_resetStream_fast (LZ4_stream_t* streamPtr); + +/*! LZ4_compress_fast_extState_fastReset() : + * A variant of LZ4_compress_fast_extState(). + * + * Using this variant avoids an expensive initialization step. It is only safe + * to call if the state buffer is known to be correctly initialized already + * (see above comment on LZ4_resetStream_fast() for a definition of "correctly + * initialized"). From a high level, the difference is that this function + * initializes the provided state with a call to something like + * LZ4_resetStream_fast() while LZ4_compress_fast_extState() starts with a + * call to LZ4_resetStream(). + */ +LZ4LIB_API int LZ4_compress_fast_extState_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); + +/*! LZ4_attach_dictionary() : + * This is an experimental API that allows for the efficient use of a + * static dictionary many times.
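The intended call pattern looks roughly like the sketch below (buffers, sizes, and counts are assumed to exist; this requires LZ4_STATIC_LINKING_ONLY and is not part of the patch itself):

    /* Prepare the dictionary stream once... */
    LZ4_stream_t* const dictStream = LZ4_createStream();
    LZ4_loadDict(dictStream, dictBuf, dictSize);        /* dictBuf/dictSize assumed */

    /* ...then attach it cheaply before each independent compression. */
    LZ4_stream_t* const workStream = LZ4_createStream();
    for (int i = 0; i < nBlocks; i++) {
        LZ4_resetStream_fast(workStream);
        LZ4_attach_dictionary(workStream, dictStream);  /* no copy of tables */
        LZ4_compress_fast_continue(workStream, srcs[i], dsts[i],
                                   srcSizes[i], dstCapacity, 1);
        /* the attachment is cleared at the end of this compression call */
    }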
+ * + * Rather than re-loading the dictionary buffer into a working context before + * each compression, or copying a pre-loaded dictionary's LZ4_stream_t into a + * working LZ4_stream_t, this function introduces a no-copy setup mechanism, + * in which the working stream references the dictionary stream in-place. + * + * Several assumptions are made about the state of the dictionary stream. + * Currently, only streams which have been prepared by LZ4_loadDict() should + * be expected to work. + * + * Alternatively, the provided dictionary stream pointer may be NULL, in which + * case any existing dictionary stream is unset. + * + * If a dictionary is provided, it replaces any pre-existing stream history. + * The dictionary contents are the only history that can be referenced and + * logically immediately precede the data compressed in the first subsequent + * compression call. + * + * The dictionary will only remain attached to the working stream through the + * first compression call, at the end of which it is cleared. The dictionary + * stream (and source buffer) must remain in-place / accessible / unchanged + * through the completion of the first compression call on the stream. + */ +LZ4LIB_API void LZ4_attach_dictionary(LZ4_stream_t *working_stream, const LZ4_stream_t *dictionary_stream); + +#endif + /*-************************************ * Private definitions ************************************** @@ -351,14 +469,16 @@ LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* src, char* dst, int or #if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) #include <stdint.h> -typedef struct { +typedef struct LZ4_stream_t_internal LZ4_stream_t_internal; +struct LZ4_stream_t_internal { uint32_t hashTable[LZ4_HASH_SIZE_U32]; uint32_t currentOffset; - uint32_t initCheck; + uint16_t initCheck; + uint16_t tableType; const uint8_t* dictionary; - uint8_t* bufferStart; /* obsolete, used for slideInputBuffer */ + const LZ4_stream_t_internal* dictCtx; uint32_t dictSize; -} LZ4_stream_t_internal; +}; typedef struct { const uint8_t* externalDict; @@ -369,14 +489,16 @@ typedef struct { #else -typedef struct { +typedef struct LZ4_stream_t_internal LZ4_stream_t_internal; +struct LZ4_stream_t_internal { unsigned int hashTable[LZ4_HASH_SIZE_U32]; unsigned int currentOffset; - unsigned int initCheck; + unsigned short initCheck; + unsigned short tableType; const unsigned char* dictionary; - unsigned char* bufferStart; /* obsolete, used for slideInputBuffer */ + const LZ4_stream_t_internal* dictCtx; unsigned int dictSize; -} LZ4_stream_t_internal; +}; typedef struct { const unsigned char* externalDict; @@ -433,11 +555,9 @@ union LZ4_streamDecode_u { # define LZ4_DEPRECATED(message) /* disable deprecation warnings */ #else # define LZ4_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) -# if defined(__clang__) /* clang doesn't handle mixed C++11 and CNU attributes */ -# define LZ4_DEPRECATED(message) __attribute__((deprecated(message))) -# elif defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ +# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ # define LZ4_DEPRECATED(message) [[deprecated(message)]] -# elif (LZ4_GCC_VERSION >= 405) +# elif (LZ4_GCC_VERSION >= 405) || defined(__clang__) # define LZ4_DEPRECATED(message) __attribute__((deprecated(message))) # elif (LZ4_GCC_VERSION >= 301) # define LZ4_DEPRECATED(message) __attribute__((deprecated)) @@ -450,26 +570,34 @@ union LZ4_streamDecode_u { #endif /*
LZ4_DISABLE_DEPRECATE_WARNINGS */ /* Obsolete compression functions */ -LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_default() instead") int LZ4_compress (const char* source, char* dest, int sourceSize); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_default() instead") int LZ4_compress_limitedOutput (const char* source, char* dest, int sourceSize, int maxOutputSize); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize); +LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress (const char* source, char* dest, int sourceSize); +LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress_limitedOutput (const char* source, char* dest, int sourceSize, int maxOutputSize); +LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize); +LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize); +LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize); +LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize); /* Obsolete decompression functions */ -LZ4LIB_API LZ4_DEPRECATED("use LZ4_decompress_fast() instead") int LZ4_uncompress (const char* source, char* dest, int outputSize); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_decompress_safe() instead") int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize); +LZ4_DEPRECATED("use LZ4_decompress_fast() instead") LZ4LIB_API int LZ4_uncompress (const char* source, char* dest, int outputSize); +LZ4_DEPRECATED("use LZ4_decompress_safe() instead") LZ4LIB_API int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize); -/* Obsolete streaming functions; use new streaming interface whenever possible */ -LZ4LIB_API LZ4_DEPRECATED("use LZ4_createStream() instead") void* LZ4_create (char* inputBuffer); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_createStream() instead") int LZ4_sizeofStreamState(void); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_resetStream() instead") int LZ4_resetStreamState(void* state, char* inputBuffer); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_saveDict() instead") char* LZ4_slideInputBuffer (void* state); +/* Obsolete streaming functions; degraded functionality; do not use! + * + * In order to perform streaming compression, these functions depended on data + * that is no longer tracked in the state. 
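For comparison, a sketch of the supported replacement pattern with the current streaming API (names are illustrative, not from the patch):

    LZ4_stream_t* const stream = LZ4_createStream();
    char dictBuf[64 * 1024];
    /* ... compress a block with LZ4_compress_fast_continue() from a
     * transient input buffer ... */
    int const dictSize = LZ4_saveDict(stream, dictBuf, (int)sizeof(dictBuf));
    /* the history now lives in dictBuf; the input buffer may be reused
     * or overwritten before compressing the next block */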
They have been preserved as well as + possible: using them will still produce a correct output. However, they don't + actually retain any history between compression calls. The compression ratio + achieved will therefore be no better than compressing each chunk + independently. + */ +LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API void* LZ4_create (char* inputBuffer); +LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API int LZ4_sizeofStreamState(void); +LZ4_DEPRECATED("Use LZ4_resetStream() instead") LZ4LIB_API int LZ4_resetStreamState(void* state, char* inputBuffer); +LZ4_DEPRECATED("Use LZ4_saveDict() instead") LZ4LIB_API char* LZ4_slideInputBuffer (void* state); /* Obsolete streaming decoding functions */ -LZ4LIB_API LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize); +LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") LZ4LIB_API int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize); +LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") LZ4LIB_API int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize); #endif /* LZ4_H_2983827168210 */ diff --git a/lz4libs/lz4frame.c b/lz4libs/lz4frame.c index 0b26f75a..e1d0b1d0 100644 --- a/lz4libs/lz4frame.c +++ b/lz4libs/lz4frame.c @@ -46,11 +46,25 @@ You can contact the author at : #endif +/*-************************************ +* Tuning parameters +**************************************/ +/* + * LZ4F_HEAPMODE : + * Select how default compression functions will allocate memory for their hash table, + * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()). + */ +#ifndef LZ4F_HEAPMODE +# define LZ4F_HEAPMODE 0 +#endif + + /*-************************************ * Memory routines **************************************/ #include <stdlib.h> /* malloc, calloc, free */ -#define ALLOCATOR(s) calloc(1,s) +#define ALLOC(s) malloc(s) +#define ALLOC_AND_ZERO(s) calloc(1,s) #define FREEMEM free #include <string.h> /* memset, memcpy, memmove */ #define MEM_INIT memset @@ -59,7 +73,9 @@ You can contact the author at : /*-************************************ * Includes **************************************/ -#include "lz4frame_static.h" +#define LZ4F_STATIC_LINKING_ONLY +#include "lz4frame.h" +#define LZ4_STATIC_LINKING_ONLY +#include "lz4.h" #define LZ4_HC_STATIC_LINKING_ONLY #include "lz4hc.h" @@ -80,6 +96,19 @@ You can contact the author at : #define LZ4F_STATIC_ASSERT(c) { enum { LZ4F_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ +#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2) && !defined(DEBUGLOG) +# include <stdio.h> +static int g_debuglog_enable = 1; +# define DEBUGLOG(l, ...) { \ + if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \ + fprintf(stderr, __FILE__ ": "); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, " \n"); \ + } } +#else +# define DEBUGLOG(l, ...)
{} /* disabled */ +#endif + /*-************************************ * Basic Types @@ -188,7 +217,8 @@ typedef struct LZ4F_cctx_s U64 totalInSize; XXH32_state_t xxh; void* lz4CtxPtr; - U32 lz4CtxLevel; /* 0: unallocated; 1: LZ4_stream_t; 3: LZ4_streamHC_t */ + U16 lz4CtxAlloc; /* sized for: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */ + U16 lz4CtxState; /* in use as: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */ } LZ4F_cctx_t; @@ -279,7 +309,7 @@ static size_t LZ4F_compressBound_internal(size_t srcSize, size_t alreadyBuffered) { LZ4F_preferences_t prefsNull; - memset(&prefsNull, 0, sizeof(prefsNull)); + MEM_INIT(&prefsNull, 0, sizeof(prefsNull)); prefsNull.frameInfo.contentChecksumFlag = LZ4F_contentChecksumEnabled; /* worst case */ { const LZ4F_preferences_t* const prefsPtr = (preferencesPtr==NULL) ? &prefsNull : preferencesPtr; U32 const flush = prefsPtr->autoFlush | (srcSize==0); @@ -308,7 +338,7 @@ size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* prefere size_t const headerSize = maxFHSize; /* max header size, including optional fields */ if (preferencesPtr!=NULL) prefs = *preferencesPtr; - else memset(&prefs, 0, sizeof(prefs)); + else MEM_INIT(&prefs, 0, sizeof(prefs)); prefs.autoFlush = 1; return headerSize + LZ4F_compressBound_internal(srcSize, &prefs, 0);; @@ -324,27 +354,22 @@ size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* prefere * @return : number of bytes written into dstBuffer, * or an error code if it fails (can be tested using LZ4F_isError()) */ -size_t LZ4F_compressFrame_usingCDict(void* dstBuffer, size_t dstCapacity, +size_t LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx, + void* dstBuffer, size_t dstCapacity, const void* srcBuffer, size_t srcSize, const LZ4F_CDict* cdict, const LZ4F_preferences_t* preferencesPtr) { - LZ4F_cctx_t cctxI; - LZ4_stream_t lz4ctx; /* pretty large on stack */ LZ4F_preferences_t prefs; LZ4F_compressOptions_t options; BYTE* const dstStart = (BYTE*) dstBuffer; BYTE* dstPtr = dstStart; BYTE* const dstEnd = dstStart + dstCapacity; - memset(&cctxI, 0, sizeof(cctxI)); - cctxI.version = LZ4F_VERSION; - cctxI.maxBufferSize = 5 MB; /* mess with real buffer size to prevent dynamic allocation; works only because autoflush==1 & stableSrc==1 */ - if (preferencesPtr!=NULL) prefs = *preferencesPtr; else - memset(&prefs, 0, sizeof(prefs)); + MEM_INIT(&prefs, 0, sizeof(prefs)); if (prefs.frameInfo.contentSize != 0) prefs.frameInfo.contentSize = (U64)srcSize; /* auto-correct content size if selected (!=0) */ @@ -353,32 +378,24 @@ size_t LZ4F_compressFrame_usingCDict(void* dstBuffer, size_t dstCapacity, if (srcSize <= LZ4F_getBlockSize(prefs.frameInfo.blockSizeID)) prefs.frameInfo.blockMode = LZ4F_blockIndependent; /* only one block => no need for inter-block link */ - if (prefs.compressionLevel < LZ4HC_CLEVEL_MIN) { - cctxI.lz4CtxPtr = &lz4ctx; - cctxI.lz4CtxLevel = 1; - } /* fast compression context pre-created on stack */ - - memset(&options, 0, sizeof(options)); + MEM_INIT(&options, 0, sizeof(options)); options.stableSrc = 1; if (dstCapacity < LZ4F_compressFrameBound(srcSize, &prefs)) /* condition to guarantee success */ return err0r(LZ4F_ERROR_dstMaxSize_tooSmall); - { size_t const headerSize = LZ4F_compressBegin_usingCDict(&cctxI, dstBuffer, dstCapacity, cdict, &prefs); /* write header */ + { size_t const headerSize = LZ4F_compressBegin_usingCDict(cctx, dstBuffer, dstCapacity, cdict, &prefs); /* write header */ if (LZ4F_isError(headerSize)) return headerSize; dstPtr += headerSize; /* header size */ } - { size_t const cSize 
= LZ4F_compressUpdate(&cctxI, dstPtr, dstEnd-dstPtr, srcBuffer, srcSize, &options); + { size_t const cSize = LZ4F_compressUpdate(cctx, dstPtr, dstEnd-dstPtr, srcBuffer, srcSize, &options); if (LZ4F_isError(cSize)) return cSize; dstPtr += cSize; } - { size_t const tailSize = LZ4F_compressEnd(&cctxI, dstPtr, dstEnd-dstPtr, &options); /* flush last block, and generate suffix */ + { size_t const tailSize = LZ4F_compressEnd(cctx, dstPtr, dstEnd-dstPtr, &options); /* flush last block, and generate suffix */ if (LZ4F_isError(tailSize)) return tailSize; dstPtr += tailSize; } - if (prefs.compressionLevel >= LZ4HC_CLEVEL_MIN) /* Ctx allocation only for lz4hc */ - FREEMEM(cctxI.lz4CtxPtr); - return (dstPtr - dstStart); } @@ -394,9 +411,44 @@ size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity, const void* srcBuffer, size_t srcSize, const LZ4F_preferences_t* preferencesPtr) { - return LZ4F_compressFrame_usingCDict(dstBuffer, dstCapacity, - srcBuffer, srcSize, - NULL, preferencesPtr); + size_t result; +#if (LZ4F_HEAPMODE) + LZ4F_cctx_t *cctxPtr; + result = LZ4F_createCompressionContext(&cctxPtr, LZ4F_VERSION); + if (LZ4F_isError(result)) return result; +#else + LZ4F_cctx_t cctx; + LZ4_stream_t lz4ctx; + LZ4F_cctx_t *cctxPtr = &cctx; + + DEBUGLOG(4, "LZ4F_compressFrame"); + MEM_INIT(&cctx, 0, sizeof(cctx)); + cctx.version = LZ4F_VERSION; + cctx.maxBufferSize = 5 MB; /* mess with real buffer size to prevent dynamic allocation; works only because autoflush==1 & stableSrc==1 */ + if (preferencesPtr == NULL || + preferencesPtr->compressionLevel < LZ4HC_CLEVEL_MIN) + { + LZ4_resetStream(&lz4ctx); + cctxPtr->lz4CtxPtr = &lz4ctx; + cctxPtr->lz4CtxAlloc = 1; + cctxPtr->lz4CtxState = 1; + } +#endif + + result = LZ4F_compressFrame_usingCDict(cctxPtr, dstBuffer, dstCapacity, + srcBuffer, srcSize, + NULL, preferencesPtr); + +#if (LZ4F_HEAPMODE) + LZ4F_freeCompressionContext(cctxPtr); +#else + if (preferencesPtr != NULL && + preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN) + { + FREEMEM(cctxPtr->lz4CtxPtr); + } +#endif + return result; } @@ -419,13 +471,14 @@ struct LZ4F_CDict_s { LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize) { const char* dictStart = (const char*)dictBuffer; - LZ4F_CDict* cdict = (LZ4F_CDict*) malloc(sizeof(*cdict)); + LZ4F_CDict* cdict = (LZ4F_CDict*) ALLOC(sizeof(*cdict)); + DEBUGLOG(4, "LZ4F_createCDict"); if (!cdict) return NULL; if (dictSize > 64 KB) { dictStart += dictSize - 64 KB; dictSize = 64 KB; } - cdict->dictContent = ALLOCATOR(dictSize); + cdict->dictContent = ALLOC(dictSize); cdict->fastCtx = LZ4_createStream(); cdict->HCCtx = LZ4_createStreamHC(); if (!cdict->dictContent || !cdict->fastCtx || !cdict->HCCtx) { @@ -433,9 +486,8 @@ LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize) return NULL; } memcpy(cdict->dictContent, dictStart, dictSize); - LZ4_resetStream(cdict->fastCtx); LZ4_loadDict (cdict->fastCtx, (const char*)cdict->dictContent, (int)dictSize); - LZ4_resetStreamHC(cdict->HCCtx, LZ4HC_CLEVEL_DEFAULT); + LZ4_setCompressionLevel(cdict->HCCtx, LZ4HC_CLEVEL_DEFAULT); LZ4_loadDictHC(cdict->HCCtx, (const char*)cdict->dictContent, (int)dictSize); return cdict; } @@ -464,7 +516,7 @@ void LZ4F_freeCDict(LZ4F_CDict* cdict) */ LZ4F_errorCode_t LZ4F_createCompressionContext(LZ4F_compressionContext_t* LZ4F_compressionContextPtr, unsigned version) { - LZ4F_cctx_t* const cctxPtr = (LZ4F_cctx_t*)ALLOCATOR(sizeof(LZ4F_cctx_t)); + LZ4F_cctx_t* const cctxPtr = (LZ4F_cctx_t*)ALLOC_AND_ZERO(sizeof(LZ4F_cctx_t)); if (cctxPtr==NULL) return 
err0r(LZ4F_ERROR_allocation_failed); cctxPtr->version = version; @@ -490,6 +542,36 @@ LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_compressionContext_t LZ4F_comp } +/** + * This function prepares the internal LZ4(HC) stream for a new compression, + * resetting the context and attaching the dictionary, if there is one. + * + * It needs to be called at the beginning of each independent compression + * stream (i.e., at the beginning of a frame in blockLinked mode, or at the + * beginning of each block in blockIndependent mode). + */ +static void LZ4F_initStream(void* ctx, + const LZ4F_CDict* cdict, + int level, + LZ4F_blockMode_t blockMode) { + if (level < LZ4HC_CLEVEL_MIN) { + if (cdict != NULL || blockMode == LZ4F_blockLinked) { + /* In these cases, we will call LZ4_compress_fast_continue(), + * which needs an already reset context. Otherwise, we'll call a + * one-shot API. The non-continued APIs internally perform their own + * resets at the beginning of their calls, where they know what + * tableType they need the context to be in. So in that case this + * would be misguided / wasted work. */ + LZ4_resetStream_fast((LZ4_stream_t*)ctx); + } + LZ4_attach_dictionary((LZ4_stream_t *)ctx, cdict ? cdict->fastCtx : NULL); + } else { + LZ4_resetStreamHC_fast((LZ4_streamHC_t*)ctx, level); + LZ4_attach_HC_dictionary((LZ4_streamHC_t *)ctx, cdict ? cdict->HCCtx : NULL); + } +} + + /*! LZ4F_compressBegin_usingCDict() : * init streaming compression and writes frame header into dstBuffer. * dstBuffer must be >= LZ4F_HEADER_SIZE_MAX bytes. @@ -507,21 +589,33 @@ size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr, BYTE* headerStart; if (dstCapacity < maxFHSize) return err0r(LZ4F_ERROR_dstMaxSize_tooSmall); - memset(&prefNull, 0, sizeof(prefNull)); + MEM_INIT(&prefNull, 0, sizeof(prefNull)); if (preferencesPtr == NULL) preferencesPtr = &prefNull; cctxPtr->prefs = *preferencesPtr; /* Ctx Management */ - { U32 const ctxTypeID = (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? 1 : 2; /* 0:nothing ; 1:LZ4 table ; 2:HC tables */ - if (cctxPtr->lz4CtxLevel < ctxTypeID) { + { U16 const ctxTypeID = (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? 
1 : 2; + if (cctxPtr->lz4CtxAlloc < ctxTypeID) { FREEMEM(cctxPtr->lz4CtxPtr); - if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) + if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) { cctxPtr->lz4CtxPtr = (void*)LZ4_createStream(); - else + } else { cctxPtr->lz4CtxPtr = (void*)LZ4_createStreamHC(); + } if (cctxPtr->lz4CtxPtr == NULL) return err0r(LZ4F_ERROR_allocation_failed); - cctxPtr->lz4CtxLevel = ctxTypeID; - } } + cctxPtr->lz4CtxAlloc = ctxTypeID; + cctxPtr->lz4CtxState = ctxTypeID; + } else if (cctxPtr->lz4CtxState != ctxTypeID) { + /* otherwise, a sufficient buffer is allocated, but we need to + * reset it to the correct context type */ + if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) { + LZ4_resetStream((LZ4_stream_t *) cctxPtr->lz4CtxPtr); + } else { + LZ4_resetStreamHC((LZ4_streamHC_t *) cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel); + } + cctxPtr->lz4CtxState = ctxTypeID; + } + } /* Buffer Management */ if (cctxPtr->prefs.frameInfo.blockSizeID == 0) @@ -535,7 +629,7 @@ size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr, if (cctxPtr->maxBufferSize < requiredBuffSize) { cctxPtr->maxBufferSize = 0; FREEMEM(cctxPtr->tmpBuff); - cctxPtr->tmpBuff = (BYTE*)ALLOCATOR(requiredBuffSize); + cctxPtr->tmpBuff = (BYTE*)ALLOC_AND_ZERO(requiredBuffSize); if (cctxPtr->tmpBuff == NULL) return err0r(LZ4F_ERROR_allocation_failed); cctxPtr->maxBufferSize = requiredBuffSize; } } @@ -547,19 +641,10 @@ size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr, cctxPtr->cdict = cdict; if (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) { /* frame init only for blockLinked : blockIndependent will be init at each block */ - if (cdict) { - if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) { - memcpy(cctxPtr->lz4CtxPtr, cdict->fastCtx, sizeof(*cdict->fastCtx)); - } else { - memcpy(cctxPtr->lz4CtxPtr, cdict->HCCtx, sizeof(*cdict->HCCtx)); - LZ4_setCompressionLevel((LZ4_streamHC_t*)cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel); - } - } else { - if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) - LZ4_resetStream((LZ4_stream_t*)(cctxPtr->lz4CtxPtr)); - else - LZ4_resetStreamHC((LZ4_streamHC_t*)(cctxPtr->lz4CtxPtr), cctxPtr->prefs.compressionLevel); - } + LZ4F_initStream(cctxPtr->lz4CtxPtr, cdict, cctxPtr->prefs.compressionLevel, LZ4F_blockLinked); + } + if (preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN) { + LZ4_favorDecompressionSpeed((LZ4_streamHC_t*)cctxPtr->lz4CtxPtr, (int)preferencesPtr->favorDecSpeed); } /* Magic Number */ @@ -654,11 +739,12 @@ static size_t LZ4F_makeBlock(void* dst, const void* src, size_t srcSize, static int LZ4F_compressBlock(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) { int const acceleration = (level < -1) ? 
-level : 1; + LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent); if (cdict) { - memcpy(ctx, cdict->fastCtx, sizeof(*cdict->fastCtx)); return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration); + } else { + return LZ4_compress_fast_extState_fastReset(ctx, src, dst, srcSize, dstCapacity, acceleration); } - return LZ4_compress_fast_extState(ctx, src, dst, srcSize, dstCapacity, acceleration); } static int LZ4F_compressBlock_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) @@ -670,12 +756,11 @@ static int LZ4F_compressBlock_continue(void* ctx, const char* src, char* dst, in static int LZ4F_compressBlockHC(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) { + LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent); if (cdict) { - memcpy(ctx, cdict->HCCtx, sizeof(*cdict->HCCtx)); - LZ4_setCompressionLevel((LZ4_streamHC_t*)ctx, level); return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity); } - return LZ4_compress_HC_extStateHC(ctx, src, dst, srcSize, dstCapacity, level); + return LZ4_compress_HC_extStateHC_fastReset(ctx, src, dst, srcSize, dstCapacity, level); } static int LZ4F_compressBlockHC_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict) @@ -724,10 +809,12 @@ size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr, LZ4F_lastBlockStatus lastBlockCompressed = notDone; compressFunc_t const compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel); + DEBUGLOG(4, "LZ4F_compressUpdate (srcSize=%zu)", srcSize); if (cctxPtr->cStage != 1) return err0r(LZ4F_ERROR_GENERIC); - if (dstCapacity < LZ4F_compressBound_internal(srcSize, &(cctxPtr->prefs), cctxPtr->tmpInSize)) return err0r(LZ4F_ERROR_dstMaxSize_tooSmall); - memset(&cOptionsNull, 0, sizeof(cOptionsNull)); + if (dstCapacity < LZ4F_compressBound_internal(srcSize, &(cctxPtr->prefs), cctxPtr->tmpInSize)) + return err0r(LZ4F_ERROR_dstMaxSize_tooSmall); + MEM_INIT(&cOptionsNull, 0, sizeof(cOptionsNull)); if (compressOptionsPtr == NULL) compressOptionsPtr = &cOptionsNull; /* complete tmp buffer */ @@ -931,7 +1018,7 @@ struct LZ4F_dctx_s { */ LZ4F_errorCode_t LZ4F_createDecompressionContext(LZ4F_dctx** LZ4F_decompressionContextPtr, unsigned versionNumber) { - LZ4F_dctx* const dctx = (LZ4F_dctx*)ALLOCATOR(sizeof(LZ4F_dctx)); + LZ4F_dctx* const dctx = (LZ4F_dctx*)ALLOC_AND_ZERO(sizeof(LZ4F_dctx)); if (dctx==NULL) return err0r(LZ4F_ERROR_GENERIC); dctx->version = versionNumber; @@ -1003,7 +1090,7 @@ static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize /* need to decode header to get frameInfo */ if (srcSize < minFHSize) return err0r(LZ4F_ERROR_frameHeader_incomplete); /* minimal frame header size */ - memset(&(dctx->frameInfo), 0, sizeof(dctx->frameInfo)); + MEM_INIT(&(dctx->frameInfo), 0, sizeof(dctx->frameInfo)); /* special case : skippable frames */ if ((LZ4F_readLE32(srcPtr) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START) { @@ -1136,24 +1223,31 @@ LZ4F_errorCode_t LZ4F_getFrameInfo(LZ4F_dctx* dctx, LZ4F_frameInfo_t* frameInfoP /* LZ4F_updateDict() : * only used for LZ4F_blockLinked mode */ -static void LZ4F_updateDict(LZ4F_dctx* dctx, const BYTE* dstPtr, size_t dstSize, const BYTE* dstPtr0, unsigned withinTmp) +static void LZ4F_updateDict(LZ4F_dctx* dctx, + const BYTE* dstPtr, size_t dstSize, const BYTE* dstBufferStart, + 
unsigned withinTmp) { if (dctx->dictSize==0) dctx->dict = (const BYTE*)dstPtr; /* priority to dictionary continuity */ - if (dctx->dict + dctx->dictSize == dstPtr) { /* dictionary continuity */ + if (dctx->dict + dctx->dictSize == dstPtr) { /* dictionary continuity, directly within dstBuffer */ dctx->dictSize += dstSize; return; } - if (dstPtr - dstPtr0 + dstSize >= 64 KB) { /* dstBuffer large enough to become dictionary */ - dctx->dict = (const BYTE*)dstPtr0; - dctx->dictSize = dstPtr - dstPtr0 + dstSize; + if (dstPtr - dstBufferStart + dstSize >= 64 KB) { /* history in dstBuffer becomes large enough to become dictionary */ + dctx->dict = (const BYTE*)dstBufferStart; + dctx->dictSize = dstPtr - dstBufferStart + dstSize; return; } - if ((withinTmp) && (dctx->dict == dctx->tmpOutBuffer)) { - /* assumption : dctx->dict + dctx->dictSize == dctx->tmpOut + dctx->tmpOutStart */ + assert(dstSize < 64 KB); /* if dstSize >= 64 KB, dictionary would be set into dstBuffer directly */ + + /* dstBuffer does not contain whole useful history (64 KB), so it must be saved within tmpOut */ + + if ((withinTmp) && (dctx->dict == dctx->tmpOutBuffer)) { /* continue history within tmpOutBuffer */ + /* withinTmp expectation : content of [dstPtr,dstSize] is same as [dict+dictSize,dstSize], so we just extend it */ + assert(dctx->dict + dctx->dictSize == dctx->tmpOut + dctx->tmpOutStart); dctx->dictSize += dstSize; return; } @@ -1174,7 +1268,7 @@ static void LZ4F_updateDict(LZ4F_dctx* dctx, const BYTE* dstPtr, size_t dstSize, if (dctx->dict == dctx->tmpOutBuffer) { /* copy dst into tmp to complete dict */ if (dctx->dictSize + dstSize > dctx->maxBufferSize) { /* tmp buffer not large enough */ - size_t const preserveSize = 64 KB - dstSize; /* note : dstSize < 64 KB */ + size_t const preserveSize = 64 KB - dstSize; memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize); dctx->dictSize = preserveSize; } @@ -1184,7 +1278,7 @@ static void LZ4F_updateDict(LZ4F_dctx* dctx, const BYTE* dstPtr, size_t dstSize, } /* join dict & dest into tmp */ - { size_t preserveSize = 64 KB - dstSize; /* note : dstSize < 64 KB */ + { size_t preserveSize = 64 KB - dstSize; if (preserveSize > dctx->dictSize) preserveSize = dctx->dictSize; memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize); memcpy(dctx->tmpOutBuffer + preserveSize, dstPtr, dstSize); @@ -1230,7 +1324,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx, size_t nextSrcSizeHint = 1; - memset(&optionsNull, 0, sizeof(optionsNull)); + MEM_INIT(&optionsNull, 0, sizeof(optionsNull)); if (decompressOptionsPtr==NULL) decompressOptionsPtr = &optionsNull; *srcSizePtr = 0; *dstSizePtr = 0; @@ -1251,7 +1345,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx, } dctx->tmpInSize = 0; if (srcEnd-srcPtr == 0) return minFHSize; /* 0-size input */ - dctx->tmpInTarget = minFHSize; /* minimum to attempt decode */ + dctx->tmpInTarget = minFHSize; /* minimum size to decode header */ dctx->dStage = dstage_storeFrameHeader; /* fall-through */ @@ -1279,11 +1373,11 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx, if (bufferNeeded > dctx->maxBufferSize) { /* tmp buffers too small */ dctx->maxBufferSize = 0; /* ensure allocation will be re-attempted on next entry*/ FREEMEM(dctx->tmpIn); - dctx->tmpIn = (BYTE*)ALLOCATOR(dctx->maxBlockSize + 4 /* block checksum */); + dctx->tmpIn = (BYTE*)ALLOC(dctx->maxBlockSize + 4 /* block checksum */); if (dctx->tmpIn == NULL) return err0r(LZ4F_ERROR_allocation_failed); FREEMEM(dctx->tmpOutBuffer); - dctx->tmpOutBuffer= 
(BYTE*)ALLOCATOR(bufferNeeded); + dctx->tmpOutBuffer= (BYTE*)ALLOC(bufferNeeded); if (dctx->tmpOutBuffer== NULL) return err0r(LZ4F_ERROR_allocation_failed); dctx->maxBufferSize = bufferNeeded; @@ -1408,8 +1502,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx, U32 const calcCRC = XXH32_digest(&dctx->blockChecksum); if (readCRC != calcCRC) return err0r(LZ4F_ERROR_blockChecksum_invalid); - } - } + } } dctx->dStage = dstage_getBlockHeader; /* new block */ break; @@ -1450,11 +1543,19 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx, } } if ((size_t)(dstEnd-dstPtr) >= dctx->maxBlockSize) { + const char* dict = (const char*)dctx->dict; + size_t dictSize = dctx->dictSize; + int decodedSize; + if (dict && dictSize > 1 GB) { + /* the dictSize param is an int, avoid truncation / sign issues */ + dict += dictSize - 64 KB; + dictSize = 64 KB; + } /* enough capacity in `dst` to decompress directly there */ - int const decodedSize = LZ4_decompress_safe_usingDict( + decodedSize = LZ4_decompress_safe_usingDict( (const char*)selectedIn, (char*)dstPtr, (int)dctx->tmpInTarget, (int)dctx->maxBlockSize, - (const char*)dctx->dict, (int)dctx->dictSize); + dict, (int)dictSize); if (decodedSize < 0) return err0r(LZ4F_ERROR_GENERIC); /* decompression failed */ if (dctx->frameInfo.contentChecksumFlag) XXH32_update(&(dctx->xxh), dstPtr, decodedSize); @@ -1482,14 +1583,21 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx, } else { /* dict not within tmp */ size_t const reservedDictSpace = MIN(dctx->dictSize, 64 KB); dctx->tmpOut = dctx->tmpOutBuffer + reservedDictSpace; - } - } + } } /* Decode block */ - { int const decodedSize = LZ4_decompress_safe_usingDict( + { const char* dict = (const char*)dctx->dict; + size_t dictSize = dctx->dictSize; + int decodedSize; + if (dict && dictSize > 1 GB) { + /* the dictSize param is an int, avoid truncation / sign issues */ + dict += dictSize - 64 KB; + dictSize = 64 KB; + } + decodedSize = LZ4_decompress_safe_usingDict( (const char*)selectedIn, (char*)dctx->tmpOut, (int)dctx->tmpInTarget, (int)dctx->maxBlockSize, - (const char*)dctx->dict, (int)dctx->dictSize); + dict, (int)dictSize); if (decodedSize < 0) /* decompression failed */ return err0r(LZ4F_ERROR_decompressionFailed); if (dctx->frameInfo.contentChecksumFlag) @@ -1507,8 +1615,8 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx, memcpy(dstPtr, dctx->tmpOut + dctx->tmpOutStart, sizeToCopy); /* dictionary management */ - if (dctx->frameInfo.blockMode==LZ4F_blockLinked) - LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 1); + if (dctx->frameInfo.blockMode == LZ4F_blockLinked) + LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 1 /*withinTmp*/); dctx->tmpOutStart += sizeToCopy; dstPtr += sizeToCopy; @@ -1517,8 +1625,9 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx, dctx->dStage = dstage_getBlockHeader; /* get next block */ break; } + /* could not flush everything : stop there, just request a block header */ + doAnotherStage = 0; nextSrcSizeHint = BHSize; - doAnotherStage = 0; /* still some data to flush */ break; } @@ -1555,7 +1664,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx, selectedIn = dctx->tmpIn; } /* if (dctx->dStage == dstage_storeSuffix) */ - /* case dstage_checkSuffix: */ /* no direct call, avoid scan-build warning */ + /* case dstage_checkSuffix: */ /* no direct entry, avoid initialization risks */ { U32 const readCRC = LZ4F_readLE32(selectedIn); U32 const resultCRC = XXH32_digest(&(dctx->xxh)); if (readCRC != resultCRC) @@ -1579,8 +1688,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx, if (dctx->dStage == dstage_storeSFrameSize) case 
dstage_storeSFrameSize: - { - size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize, + { size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize, (size_t)(srcEnd - srcPtr) ); memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy); srcPtr += sizeToCopy; @@ -1594,7 +1702,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx, selectedIn = dctx->header + 4; } /* if (dctx->dStage == dstage_storeSFrameSize) */ - /* case dstage_decodeSFrameSize: */ /* no direct access */ + /* case dstage_decodeSFrameSize: */ /* no direct entry */ { size_t const SFrameSize = LZ4F_readLE32(selectedIn); dctx->frameInfo.contentSize = SFrameSize; dctx->tmpInTarget = SFrameSize; @@ -1613,7 +1721,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx, LZ4F_resetDecompressionContext(dctx); break; } - } + } /* switch (dctx->dStage) */ } /* while (doAnotherStage) */ /* preserve history within tmp whenever necessary */ diff --git a/lz4libs/lz4frame.h b/lz4libs/lz4frame.h index eb55e45e..fb434ff7 100644 --- a/lz4libs/lz4frame.h +++ b/lz4libs/lz4frame.h @@ -93,8 +93,8 @@ extern "C" { **************************************/ typedef size_t LZ4F_errorCode_t; -LZ4FLIB_API unsigned LZ4F_isError(LZ4F_errorCode_t code); /**< tells if a `LZ4F_errorCode_t` function result is an error code */ -LZ4FLIB_API const char* LZ4F_getErrorName(LZ4F_errorCode_t code); /**< return error code string; useful for debugging */ +LZ4FLIB_API unsigned LZ4F_isError(LZ4F_errorCode_t code); /**< tells when a function result is an error code */ +LZ4FLIB_API const char* LZ4F_getErrorName(LZ4F_errorCode_t code); /**< return error code string; for debugging */ /*-************************************ @@ -162,24 +162,25 @@ typedef LZ4F_contentChecksum_t contentChecksum_t; * It's not required to set all fields, as long as the structure was initially memset() to zero. * For all fields, 0 sets it to default value */ typedef struct { - LZ4F_blockSizeID_t blockSizeID; /* max64KB, max256KB, max1MB, max4MB ; 0 == default */ - LZ4F_blockMode_t blockMode; /* LZ4F_blockLinked, LZ4F_blockIndependent ; 0 == default */ - LZ4F_contentChecksum_t contentChecksumFlag; /* if enabled, frame is terminated with a 32-bits checksum of decompressed data ; 0 == disabled (default) */ - LZ4F_frameType_t frameType; /* read-only field : LZ4F_frame or LZ4F_skippableFrame */ - unsigned long long contentSize; /* Size of uncompressed content ; 0 == unknown */ - unsigned dictID; /* Dictionary ID, sent by the compressor to help decoder select the correct dictionary; 0 == no dictID provided */ - LZ4F_blockChecksum_t blockChecksumFlag; /* if enabled, each block is followed by a checksum of block's compressed data ; 0 == disabled (default) */ + LZ4F_blockSizeID_t blockSizeID; /* max64KB, max256KB, max1MB, max4MB; 0 == default */ + LZ4F_blockMode_t blockMode; /* LZ4F_blockLinked, LZ4F_blockIndependent; 0 == default */ + LZ4F_contentChecksum_t contentChecksumFlag; /* 1: frame terminated with 32-bit checksum of decompressed data; 0: disabled (default) */ + LZ4F_frameType_t frameType; /* read-only field : LZ4F_frame or LZ4F_skippableFrame */ + unsigned long long contentSize; /* Size of uncompressed content ; 0 == unknown */ + unsigned dictID; /* Dictionary ID, sent by compressor to help decoder select correct dictionary; 0 == no dictID provided */ + LZ4F_blockChecksum_t blockChecksumFlag; /* 1: each block followed by a checksum of block's compressed data; 0: disabled (default) */ } LZ4F_frameInfo_t; /*! 
LZ4F_preferences_t : * makes it possible to supply detailed compression parameters to the stream interface. - * It's not required to set all fields, as long as the structure was initially memset() to zero. + * Structure is presumed initially memset() to zero, representing default settings. * All reserved fields must be set to zero. */ typedef struct { LZ4F_frameInfo_t frameInfo; - int compressionLevel; /* 0 == default (fast mode); values above LZ4HC_CLEVEL_MAX count as LZ4HC_CLEVEL_MAX; values below 0 trigger "fast acceleration", proportional to value */ - unsigned autoFlush; /* 1 == always flush, to reduce usage of internal buffers */ - unsigned reserved[4]; /* must be zero for forward compatibility */ + int compressionLevel; /* 0: default (fast mode); values > LZ4HC_CLEVEL_MAX count as LZ4HC_CLEVEL_MAX; values < 0 trigger "fast acceleration" */ + unsigned autoFlush; /* 1: always flush, to reduce usage of internal buffers */ + unsigned favorDecSpeed; /* 1: parser favors decompression speed vs compression ratio. Only works for high compression modes (>= LZ4HC_CLEVEL_OPT_MIN) */ /* >= v1.8.2 */ + unsigned reserved[3]; /* must be zero for forward compatibility */ } LZ4F_preferences_t; LZ4FLIB_API int LZ4F_compressionLevel_max(void); @@ -189,8 +190,10 @@ LZ4FLIB_API int LZ4F_compressionLevel_max(void); * Simple compression function ***********************************/ /*! LZ4F_compressFrameBound() : - * Returns the maximum possible size of a frame compressed with LZ4F_compressFrame() given srcSize content and preferences. - * Note : this result is only usable with LZ4F_compressFrame(), not with multi-segments compression. + * Returns the maximum possible compressed size with LZ4F_compressFrame() given srcSize and preferences. + * `preferencesPtr` is optional. It can be replaced by NULL, in which case, the function will assume default preferences. + * Note : this result is only usable with LZ4F_compressFrame(). + * It may also be used with LZ4F_compressUpdate() _if no flush() operation_ is performed. */ LZ4FLIB_API size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr); @@ -235,7 +238,7 @@ LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctx); /*---- Compression ----*/ -#define LZ4F_HEADER_SIZE_MAX 19 +#define LZ4F_HEADER_SIZE_MAX 19 /* LZ4 Frame header size can vary from 7 to 19 bytes */ /*! LZ4F_compressBegin() : * will write the frame header into dstBuffer. * dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes. @@ -248,8 +251,13 @@ LZ4FLIB_API size_t LZ4F_compressBegin(LZ4F_cctx* cctx, const LZ4F_preferences_t* prefsPtr); /*! LZ4F_compressBound() : - * Provides minimum dstCapacity for a given srcSize to guarantee operation success in worst case situations. + * Provides minimum dstCapacity required to guarantee compression success + * given a srcSize and preferences, covering worst case scenario. * prefsPtr is optional : when NULL is provided, preferences will be set to cover worst case scenario. + * Estimation is valid for either LZ4F_compressUpdate(), LZ4F_flush() or LZ4F_compressEnd(), + * Estimation includes the possibility that internal buffer might already be filled by up to (blockSize-1) bytes. + * It also includes frame footer (ending + checksum), which would have to be generated by LZ4F_compressEnd(). + * Estimation doesn't include frame header, as it was already generated by LZ4F_compressBegin(). * Result is always the same for a srcSize and prefsPtr, so it can be trusted to size reusable buffers.
* When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() operations. */ @@ -257,36 +265,44 @@ LZ4FLIB_API size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* /*! LZ4F_compressUpdate() : * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary. - * An important rule is that dstCapacity MUST be large enough to ensure operation success even in worst case situations. + * Important rule: dstCapacity MUST be large enough to ensure operation success even in worst case situations. * This value is provided by LZ4F_compressBound(). * If this condition is not respected, LZ4F_compress() will fail (result is an errorCode). - * LZ4F_compressUpdate() doesn't guarantee error recovery. When an error occurs, compression context must be freed or resized. + * LZ4F_compressUpdate() doesn't guarantee error recovery. + * When an error occurs, compression context must be freed or resized. * `cOptPtr` is optional : NULL can be provided, in which case all options are set to default. * @return : number of bytes written into `dstBuffer` (it can be zero, meaning input data was just buffered). * or an error code if it fails (which can be tested using LZ4F_isError()) */ -LZ4FLIB_API size_t LZ4F_compressUpdate(LZ4F_cctx* cctx, void* dstBuffer, size_t dstCapacity, const void* srcBuffer, size_t srcSize, const LZ4F_compressOptions_t* cOptPtr); +LZ4FLIB_API size_t LZ4F_compressUpdate(LZ4F_cctx* cctx, + void* dstBuffer, size_t dstCapacity, + const void* srcBuffer, size_t srcSize, + const LZ4F_compressOptions_t* cOptPtr); /*! LZ4F_flush() : * When data must be generated and sent immediately, without waiting for a block to be completely filled, * it's possible to call LZ4_flush(). It will immediately compress any data buffered within cctx. * `dstCapacity` must be large enough to ensure the operation will be successful. * `cOptPtr` is optional : it's possible to provide NULL, all options will be set to default. - * @return : number of bytes written into dstBuffer (it can be zero, which means there was no data stored within cctx) + * @return : nb of bytes written into dstBuffer (can be zero, when there is no data stored within cctx) * or an error code if it fails (which can be tested using LZ4F_isError()) */ -LZ4FLIB_API size_t LZ4F_flush(LZ4F_cctx* cctx, void* dstBuffer, size_t dstCapacity, const LZ4F_compressOptions_t* cOptPtr); +LZ4FLIB_API size_t LZ4F_flush(LZ4F_cctx* cctx, + void* dstBuffer, size_t dstCapacity, + const LZ4F_compressOptions_t* cOptPtr); /*! LZ4F_compressEnd() : * To properly finish an LZ4 frame, invoke LZ4F_compressEnd(). * It will flush whatever data remained within `cctx` (like LZ4_flush()) * and properly finalize the frame, with an endMark and a checksum. * `cOptPtr` is optional : NULL can be provided, in which case all options will be set to default. - * @return : number of bytes written into dstBuffer (necessarily >= 4 (endMark), or 8 if optional frame checksum is enabled) + * @return : nb of bytes written into dstBuffer, necessarily >= 4 (endMark), * or an error code if it fails (which can be tested using LZ4F_isError()) * A successful call to LZ4F_compressEnd() makes `cctx` available again for another compression task. 
*/ -LZ4FLIB_API size_t LZ4F_compressEnd(LZ4F_cctx* cctx, void* dstBuffer, size_t dstCapacity, const LZ4F_compressOptions_t* cOptPtr); +LZ4FLIB_API size_t LZ4F_compressEnd(LZ4F_cctx* cctx, + void* dstBuffer, size_t dstCapacity, + const LZ4F_compressOptions_t* cOptPtr); /*-********************************* @@ -296,7 +312,7 @@ typedef struct LZ4F_dctx_s LZ4F_dctx; /* incomplete type */ typedef LZ4F_dctx* LZ4F_decompressionContext_t; /* compatibility with previous API versions */ typedef struct { - unsigned stableDst; /* pledge that at least 64KB+64Bytes of previously decompressed data remain unmodifed where it was decoded. This optimization skips storage operations in tmp buffers */ + unsigned stableDst; /* pledges that last 64KB decompressed data will remain available unmodified. This optimization skips storage operations in tmp buffers. */ unsigned reserved[3]; /* must be set to zero for forward compatibility */ } LZ4F_decompressOptions_t; @@ -309,7 +325,7 @@ typedef struct { * The function provides a pointer to an allocated and initialized LZ4F_dctx object. * The result is an errorCode, which can be tested using LZ4F_isError(). * dctx memory can be released using LZ4F_freeDecompressionContext(); - * The result of LZ4F_freeDecompressionContext() is indicative of the current state of decompressionContext when being released. + * Result of LZ4F_freeDecompressionContext() indicates current state of decompressionContext when being released. * That is, it should be == 0 if decompression has been completed fully and correctly. */ LZ4FLIB_API LZ4F_errorCode_t LZ4F_createDecompressionContext(LZ4F_dctx** dctxPtr, unsigned version); @@ -350,8 +366,8 @@ LZ4FLIB_API size_t LZ4F_getFrameInfo(LZ4F_dctx* dctx, * The function will read up to *srcSizePtr bytes from srcBuffer, * and decompress data into dstBuffer, of capacity *dstSizePtr. * - * The number of bytes consumed from srcBuffer will be written into *srcSizePtr (necessarily <= original value). - * The number of bytes decompressed into dstBuffer will be written into *dstSizePtr (necessarily <= original value). + * The nb of bytes consumed from srcBuffer will be written into *srcSizePtr (necessarily <= original value). + * The nb of bytes decompressed into dstBuffer will be written into *dstSizePtr (necessarily <= original value). * * The function does not necessarily read all input bytes, so always check value in *srcSizePtr. * Unconsumed source data must be presented again in subsequent invocations. @@ -394,3 +410,123 @@ LZ4FLIB_API void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx); /* always su #endif #endif /* LZ4F_H_09782039843 */ + +#if defined(LZ4F_STATIC_LINKING_ONLY) && !defined(LZ4F_H_STATIC_09782039843) +#define LZ4F_H_STATIC_09782039843 + +#if defined (__cplusplus) +extern "C" { +#endif + +/* These declarations are not stable and may change in the future. They are + * therefore only safe to depend on when the caller is statically linked + * against the library. To access their declarations, define + * LZ4F_STATIC_LINKING_ONLY. + * + * There is a further protection mechanism where these symbols aren't published + * into shared/dynamic libraries. You can override this behavior and force + * them to be published by defining LZ4F_PUBLISH_STATIC_FUNCTIONS. Use at + * your own risk. 
+ */ +#ifdef LZ4F_PUBLISH_STATIC_FUNCTIONS +#define LZ4FLIB_STATIC_API LZ4FLIB_API +#else +#define LZ4FLIB_STATIC_API +#endif + + +/* --- Error List --- */ +#define LZ4F_LIST_ERRORS(ITEM) \ + ITEM(OK_NoError) \ + ITEM(ERROR_GENERIC) \ + ITEM(ERROR_maxBlockSize_invalid) \ + ITEM(ERROR_blockMode_invalid) \ + ITEM(ERROR_contentChecksumFlag_invalid) \ + ITEM(ERROR_compressionLevel_invalid) \ + ITEM(ERROR_headerVersion_wrong) \ + ITEM(ERROR_blockChecksum_invalid) \ + ITEM(ERROR_reservedFlag_set) \ + ITEM(ERROR_allocation_failed) \ + ITEM(ERROR_srcSize_tooLarge) \ + ITEM(ERROR_dstMaxSize_tooSmall) \ + ITEM(ERROR_frameHeader_incomplete) \ + ITEM(ERROR_frameType_unknown) \ + ITEM(ERROR_frameSize_wrong) \ + ITEM(ERROR_srcPtr_wrong) \ + ITEM(ERROR_decompressionFailed) \ + ITEM(ERROR_headerChecksum_invalid) \ + ITEM(ERROR_contentChecksum_invalid) \ + ITEM(ERROR_frameDecoding_alreadyStarted) \ + ITEM(ERROR_maxCode) + +#define LZ4F_GENERATE_ENUM(ENUM) LZ4F_##ENUM, + +/* enum list is exposed, to handle specific errors */ +typedef enum { LZ4F_LIST_ERRORS(LZ4F_GENERATE_ENUM) } LZ4F_errorCodes; + +LZ4FLIB_STATIC_API LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult); + + + +/********************************** + * Bulk processing dictionary API + *********************************/ +typedef struct LZ4F_CDict_s LZ4F_CDict; + +/*! LZ4_createCDict() : + * When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once. + * LZ4_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay. + * LZ4_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only. + * `dictBuffer` can be released after LZ4_CDict creation, since its content is copied within CDict */ +LZ4FLIB_STATIC_API LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize); +LZ4FLIB_STATIC_API void LZ4F_freeCDict(LZ4F_CDict* CDict); + + +/*! LZ4_compressFrame_usingCDict() : + * Compress an entire srcBuffer into a valid LZ4 frame using a digested Dictionary. + * cctx must point to a context created by LZ4F_createCompressionContext(). + * If cdict==NULL, compress without a dictionary. + * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr). + * If this condition is not respected, function will fail (@return an errorCode). + * The LZ4F_preferences_t structure is optional : you may provide NULL as argument, + * but it's not recommended, as it's the only way to provide dictID in the frame header. + * @return : number of bytes written into dstBuffer. + * or an error code if it fails (can be tested using LZ4F_isError()) */ +LZ4FLIB_STATIC_API size_t LZ4F_compressFrame_usingCDict( + LZ4F_cctx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const LZ4F_CDict* cdict, + const LZ4F_preferences_t* preferencesPtr); + + +/*! LZ4F_compressBegin_usingCDict() : + * Inits streaming dictionary compression, and writes the frame header into dstBuffer. + * dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes. + * `prefsPtr` is optional : you may provide NULL as argument, + * however, it's the only way to provide dictID in the frame header. + * @return : number of bytes written into dstBuffer for the header, + * or an error code (which can be tested using LZ4F_isError()) */ +LZ4FLIB_STATIC_API size_t LZ4F_compressBegin_usingCDict( + LZ4F_cctx* cctx, + void* dstBuffer, size_t dstCapacity, + const LZ4F_CDict* cdict, + const LZ4F_preferences_t* prefsPtr); + + +/*! 
LZ4F_decompress_usingDict() : + * Same as LZ4F_decompress(), using a predefined dictionary. + * Dictionary is used "in place", without any preprocessing. + * It must remain accessible throughout the entire frame decoding. */ +LZ4FLIB_STATIC_API size_t LZ4F_decompress_usingDict( + LZ4F_dctx* dctxPtr, + void* dstBuffer, size_t* dstSizePtr, + const void* srcBuffer, size_t* srcSizePtr, + const void* dict, size_t dictSize, + const LZ4F_decompressOptions_t* decompressOptionsPtr); + +#if defined (__cplusplus) +} +#endif + +#endif /* defined(LZ4F_STATIC_LINKING_ONLY) && !defined(LZ4F_H_STATIC_09782039843) */ diff --git a/lz4libs/lz4frame_static.h b/lz4libs/lz4frame_static.h index a59b94b5..925a2c5c 100644 --- a/lz4libs/lz4frame_static.h +++ b/lz4libs/lz4frame_static.h @@ -36,119 +36,12 @@ #ifndef LZ4FRAME_STATIC_H_0398209384 #define LZ4FRAME_STATIC_H_0398209384 -#if defined (__cplusplus) -extern "C" { -#endif - -/* lz4frame_static.h should be used solely in the context of static linking. - * It contains definitions which are not stable and may change in the future. - * Never use it in the context of DLL linking. - * - * Defining LZ4F_PUBLISH_STATIC_FUNCTIONS allows one to override this. Use at - * your own risk. +/* The declarations that formerly were made here have been merged into + * lz4frame.h, protected by the LZ4F_STATIC_LINKING_ONLY macro. Going forward, + * it is recommended to simply include that header directly. */ -#ifdef LZ4F_PUBLISH_STATIC_FUNCTIONS -#define LZ4FLIB_STATIC_API LZ4FLIB_API -#else -#define LZ4FLIB_STATIC_API -#endif - -/* --- Dependency --- */ +#define LZ4F_STATIC_LINKING_ONLY #include "lz4frame.h" - -/* --- Error List --- */ -#define LZ4F_LIST_ERRORS(ITEM) \ - ITEM(OK_NoError) \ - ITEM(ERROR_GENERIC) \ - ITEM(ERROR_maxBlockSize_invalid) \ - ITEM(ERROR_blockMode_invalid) \ - ITEM(ERROR_contentChecksumFlag_invalid) \ - ITEM(ERROR_compressionLevel_invalid) \ - ITEM(ERROR_headerVersion_wrong) \ - ITEM(ERROR_blockChecksum_invalid) \ - ITEM(ERROR_reservedFlag_set) \ - ITEM(ERROR_allocation_failed) \ - ITEM(ERROR_srcSize_tooLarge) \ - ITEM(ERROR_dstMaxSize_tooSmall) \ - ITEM(ERROR_frameHeader_incomplete) \ - ITEM(ERROR_frameType_unknown) \ - ITEM(ERROR_frameSize_wrong) \ - ITEM(ERROR_srcPtr_wrong) \ - ITEM(ERROR_decompressionFailed) \ - ITEM(ERROR_headerChecksum_invalid) \ - ITEM(ERROR_contentChecksum_invalid) \ - ITEM(ERROR_frameDecoding_alreadyStarted) \ - ITEM(ERROR_maxCode) - -#define LZ4F_GENERATE_ENUM(ENUM) LZ4F_##ENUM, - -/* enum list is exposed, to handle specific errors */ -typedef enum { LZ4F_LIST_ERRORS(LZ4F_GENERATE_ENUM) } LZ4F_errorCodes; - -LZ4FLIB_STATIC_API LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult); - - - -/********************************** - * Bulk processing dictionary API - *********************************/ -typedef struct LZ4F_CDict_s LZ4F_CDict; - -/*! LZ4_createCDict() : - * When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once. - * LZ4_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay. - * LZ4_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only. - * `dictBuffer` can be released after LZ4_CDict creation, since its content is copied within CDict */ -LZ4FLIB_STATIC_API LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize); -LZ4FLIB_STATIC_API void LZ4F_freeCDict(LZ4F_CDict* CDict); - - -/*! 
LZ4_compressFrame_usingCDict() : - * Compress an entire srcBuffer into a valid LZ4 frame using a digested Dictionary. - * If cdict==NULL, compress without a dictionary. - * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr). - * If this condition is not respected, function will fail (@return an errorCode). - * The LZ4F_preferences_t structure is optional : you may provide NULL as argument, - * but it's not recommended, as it's the only way to provide dictID in the frame header. - * @return : number of bytes written into dstBuffer. - * or an error code if it fails (can be tested using LZ4F_isError()) */ -LZ4FLIB_STATIC_API size_t LZ4F_compressFrame_usingCDict( - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - const LZ4F_CDict* cdict, - const LZ4F_preferences_t* preferencesPtr); - - -/*! LZ4F_compressBegin_usingCDict() : - * Inits streaming dictionary compression, and writes the frame header into dstBuffer. - * dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes. - * `prefsPtr` is optional : you may provide NULL as argument, - * however, it's the only way to provide dictID in the frame header. - * @return : number of bytes written into dstBuffer for the header, - * or an error code (which can be tested using LZ4F_isError()) */ -LZ4FLIB_STATIC_API size_t LZ4F_compressBegin_usingCDict( - LZ4F_cctx* cctx, - void* dstBuffer, size_t dstCapacity, - const LZ4F_CDict* cdict, - const LZ4F_preferences_t* prefsPtr); - - -/*! LZ4F_decompress_usingDict() : - * Same as LZ4F_decompress(), using a predefined dictionary. - * Dictionary is used "in place", without any preprocessing. - * It must remain accessible throughout the entire frame decoding. */ -LZ4FLIB_STATIC_API size_t LZ4F_decompress_usingDict( - LZ4F_dctx* dctxPtr, - void* dstBuffer, size_t* dstSizePtr, - const void* srcBuffer, size_t* srcSizePtr, - const void* dict, size_t dictSize, - const LZ4F_decompressOptions_t* decompressOptionsPtr); - - -#if defined (__cplusplus) -} -#endif - #endif /* LZ4FRAME_STATIC_H_0398209384 */ diff --git a/lz4libs/lz4hc.c b/lz4libs/lz4hc.c index f2c2566f..8108ea01 100644 --- a/lz4libs/lz4hc.c +++ b/lz4libs/lz4hc.c @@ -67,6 +67,7 @@ /*=== Constants ===*/ #define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH) +#define LZ4_OPT_NUM (1<<12) /*=== Macros ===*/ @@ -78,21 +79,33 @@ static U32 LZ4HC_hashPtr(const void* ptr) { return HASH_FUNCTION(LZ4_read32(ptr)); } +/*=== Enums ===*/ +typedef enum { noDictCtx, usingDictCtx } dictCtx_directive; /************************************** * HC Compression **************************************/ -static void LZ4HC_init (LZ4HC_CCtx_internal* hc4, const BYTE* start) +static void LZ4HC_clearTables (LZ4HC_CCtx_internal* hc4) { MEM_INIT((void*)hc4->hashTable, 0, sizeof(hc4->hashTable)); MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable)); - hc4->nextToUpdate = 64 KB; - hc4->base = start - 64 KB; +} + +static void LZ4HC_init (LZ4HC_CCtx_internal* hc4, const BYTE* start) +{ + uptrval startingOffset = hc4->end - hc4->base; + if (startingOffset > 1 GB) { + LZ4HC_clearTables(hc4); + startingOffset = 0; + } + startingOffset += 64 KB; + hc4->nextToUpdate = (U32) startingOffset; + hc4->base = start - startingOffset; hc4->end = start; - hc4->dictBase = start - 64 KB; - hc4->dictLimit = 64 KB; - hc4->lowLimit = 64 KB; + hc4->dictBase = start - startingOffset; + hc4->dictLimit = (U32) startingOffset; + hc4->lowLimit = (U32) startingOffset; } @@ -123,17 +136,21 @@ LZ4_FORCE_INLINE int LZ4HC_countBack(const BYTE* const ip, const BYTE* const match, const BYTE* const 
iMin, const BYTE* const mMin) { - int back=0; - while ( (ip+back > iMin) - && (match+back > mMin) - && (ip[back-1] == match[back-1])) + int back = 0; + int const min = (int)MAX(iMin - ip, mMin - match); + assert(min <= 0); + assert(ip >= iMin); assert((size_t)(ip-iMin) < (1U<<31)); + assert(match >= mMin); assert((size_t)(match - mMin) < (1U<<31)); + while ( (back > min) + && (ip[back-1] == match[back-1]) ) back--; return back; } /* LZ4HC_countPattern() : * pattern32 must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!) */ -static unsigned LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32) +static unsigned +LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32) { const BYTE* const iStart = ip; reg_t const pattern = (sizeof(pattern)==8) ? (reg_t)pattern32 + (((reg_t)pattern32) << 32) : pattern32; @@ -165,7 +182,8 @@ static unsigned LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 c /* LZ4HC_reverseCountPattern() : * pattern must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!) * read using natural platform endianess */ -static unsigned LZ4HC_reverseCountPattern(const BYTE* ip, const BYTE* const iLow, U32 pattern) +static unsigned +LZ4HC_reverseCountPattern(const BYTE* ip, const BYTE* const iLow, U32 pattern) { const BYTE* const iStart = ip; @@ -182,8 +200,10 @@ static unsigned LZ4HC_reverseCountPattern(const BYTE* ip, const BYTE* const iLow } typedef enum { rep_untested, rep_not, rep_confirmed } repeat_state_e; +typedef enum { favorCompressionRatio=0, favorDecompressionSpeed } HCfavor_e; -LZ4_FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch ( +LZ4_FORCE_INLINE int +LZ4HC_InsertAndGetWiderMatch ( LZ4HC_CCtx_internal* hc4, const BYTE* const ip, const BYTE* const iLowLimit, @@ -192,19 +212,26 @@ LZ4_FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch ( const BYTE** matchpos, const BYTE** startpos, const int maxNbAttempts, - const int patternAnalysis) + const int patternAnalysis, + const int chainSwap, + const dictCtx_directive dict, + const HCfavor_e favorDecSpeed) { U16* const chainTable = hc4->chainTable; U32* const HashTable = hc4->hashTable; + const LZ4HC_CCtx_internal * const dictCtx = hc4->dictCtx; const BYTE* const base = hc4->base; const U32 dictLimit = hc4->dictLimit; const BYTE* const lowPrefixPtr = base + dictLimit; - const U32 lowLimit = (hc4->lowLimit + 64 KB > (U32)(ip-base)) ? hc4->lowLimit : (U32)(ip - base) - MAX_DISTANCE; + const U32 ipIndex = (U32)(ip - base); + const U32 lowestMatchIndex = (hc4->lowLimit + 64 KB > ipIndex) ? 
hc4->lowLimit : ipIndex - MAX_DISTANCE; const BYTE* const dictBase = hc4->dictBase; - int const delta = (int)(ip-iLowLimit); + int const lookBackLength = (int)(ip-iLowLimit); int nbAttempts = maxNbAttempts; + int matchChainPos = 0; U32 const pattern = LZ4_read32(ip); U32 matchIndex; + U32 dictMatchIndex; repeat_state_e repeat = rep_untested; size_t srcPatternLength = 0; @@ -212,86 +239,141 @@ LZ4_FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch ( /* First Match */ LZ4HC_Insert(hc4, ip); matchIndex = HashTable[LZ4HC_hashPtr(ip)]; - DEBUGLOG(7, "First match at index %u / %u (lowLimit)", - matchIndex, lowLimit); + DEBUGLOG(7, "First match at index %u / %u (lowestMatchIndex)", + matchIndex, lowestMatchIndex); - while ((matchIndex>=lowLimit) && (nbAttempts)) { - DEBUGLOG(7, "remaining attempts : %i", nbAttempts); + while ((matchIndex>=lowestMatchIndex) && (nbAttempts)) { + int matchLength=0; nbAttempts--; - if (matchIndex >= dictLimit) { + assert(matchIndex < ipIndex); + if (favorDecSpeed && (ipIndex - matchIndex < 8)) { + /* do nothing */ + } else if (matchIndex >= dictLimit) { /* within current Prefix */ const BYTE* const matchPtr = base + matchIndex; - if (*(iLowLimit + longest) == *(matchPtr - delta + longest)) { + assert(matchPtr >= lowPrefixPtr); + assert(matchPtr < ip); + assert(longest >= 1); + if (LZ4_read16(iLowLimit + longest - 1) == LZ4_read16(matchPtr - lookBackLength + longest - 1)) { if (LZ4_read32(matchPtr) == pattern) { - int mlt = MINMATCH + LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, iHighLimit); - #if 0 - /* more generic but unfortunately slower on clang */ - int const back = LZ4HC_countBack(ip, matchPtr, iLowLimit, lowPrefixPtr); - #else - int back = 0; - while ( (ip+back > iLowLimit) - && (matchPtr+back > lowPrefixPtr) - && (ip[back-1] == matchPtr[back-1])) { - back--; - } - #endif - mlt -= back; - - if (mlt > longest) { - longest = mlt; - *matchpos = matchPtr+back; - *startpos = ip+back; - } } - } - } else { /* matchIndex < dictLimit */ + int const back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, lowPrefixPtr) : 0; + matchLength = MINMATCH + LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, iHighLimit); + matchLength -= back; + if (matchLength > longest) { + longest = matchLength; + *matchpos = matchPtr + back; + *startpos = ip + back; + } } } + } else { /* lowestMatchIndex <= matchIndex < dictLimit */ const BYTE* const matchPtr = dictBase + matchIndex; if (LZ4_read32(matchPtr) == pattern) { - int mlt; + const BYTE* const dictStart = dictBase + hc4->lowLimit; int back = 0; const BYTE* vLimit = ip + (dictLimit - matchIndex); if (vLimit > iHighLimit) vLimit = iHighLimit; - mlt = LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH; - if ((ip+mlt == vLimit) && (vLimit < iHighLimit)) - mlt += LZ4_count(ip+mlt, base+dictLimit, iHighLimit); - while ( (ip+back > iLowLimit) - && (matchIndex+back > lowLimit) - && (ip[back-1] == matchPtr[back-1])) - back--; - mlt -= back; - if (mlt > longest) { - longest = mlt; - *matchpos = base + matchIndex + back; + matchLength = LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH; + if ((ip+matchLength == vLimit) && (vLimit < iHighLimit)) + matchLength += LZ4_count(ip+matchLength, lowPrefixPtr, iHighLimit); + back = lookBackLength ? 
LZ4HC_countBack(ip, matchPtr, iLowLimit, dictStart) : 0; + matchLength -= back; + if (matchLength > longest) { + longest = matchLength; + *matchpos = base + matchIndex + back; /* virtual pos, relative to ip, to retrieve offset */ *startpos = ip + back; } } } - { U32 const nextOffset = DELTANEXTU16(chainTable, matchIndex); - matchIndex -= nextOffset; - if (patternAnalysis && nextOffset==1) { + if (chainSwap && matchLength==longest) { /* better match => select a better chain */ + assert(lookBackLength==0); /* search forward only */ + if (matchIndex + longest <= ipIndex) { + U32 distanceToNextMatch = 1; + int pos; + for (pos = 0; pos <= longest - MINMATCH; pos++) { + U32 const candidateDist = DELTANEXTU16(chainTable, matchIndex + pos); + if (candidateDist > distanceToNextMatch) { + distanceToNextMatch = candidateDist; + matchChainPos = pos; + } } + if (distanceToNextMatch > 1) { + if (distanceToNextMatch > matchIndex) break; /* avoid overflow */ + matchIndex -= distanceToNextMatch; + continue; + } } } + + { U32 const distNextMatch = DELTANEXTU16(chainTable, matchIndex); + if (patternAnalysis && distNextMatch==1 && matchChainPos==0) { + U32 const matchCandidateIdx = matchIndex-1; /* may be a repeated pattern */ if (repeat == rep_untested) { if ( ((pattern & 0xFFFF) == (pattern >> 16)) & ((pattern & 0xFF) == (pattern >> 24)) ) { repeat = rep_confirmed; - srcPatternLength = LZ4HC_countPattern(ip+4, iHighLimit, pattern) + 4; + srcPatternLength = LZ4HC_countPattern(ip+sizeof(pattern), iHighLimit, pattern) + sizeof(pattern); } else { repeat = rep_not; } } if ( (repeat == rep_confirmed) - && (matchIndex >= dictLimit) ) { /* same segment only */ - const BYTE* const matchPtr = base + matchIndex; + && (matchCandidateIdx >= dictLimit) ) { /* same segment only */ + const BYTE* const matchPtr = base + matchCandidateIdx; if (LZ4_read32(matchPtr) == pattern) { /* good candidate */ size_t const forwardPatternLength = LZ4HC_countPattern(matchPtr+sizeof(pattern), iHighLimit, pattern) + sizeof(pattern); - const BYTE* const maxLowPtr = (lowPrefixPtr + MAX_DISTANCE >= ip) ? lowPrefixPtr : ip - MAX_DISTANCE; - size_t const backLength = LZ4HC_reverseCountPattern(matchPtr, maxLowPtr, pattern); + const BYTE* const lowestMatchPtr = (lowPrefixPtr + MAX_DISTANCE >= ip) ? 
lowPrefixPtr : ip - MAX_DISTANCE; + size_t const backLength = LZ4HC_reverseCountPattern(matchPtr, lowestMatchPtr, pattern); size_t const currentSegmentLength = backLength + forwardPatternLength; if ( (currentSegmentLength >= srcPatternLength) /* current pattern segment large enough to contain full srcPatternLength */ && (forwardPatternLength <= srcPatternLength) ) { /* haven't reached this position yet */ - matchIndex += (U32)forwardPatternLength - (U32)srcPatternLength; /* best position, full pattern, might be followed by more match */ + matchIndex = matchCandidateIdx + (U32)forwardPatternLength - (U32)srcPatternLength; /* best position, full pattern, might be followed by more match */ } else { - matchIndex -= (U32)backLength; /* let's go to farthest segment position, will find a match of length currentSegmentLength + maybe some back */ - } - } } } } - } /* while ((matchIndex>=lowLimit) && (nbAttempts)) */ + matchIndex = matchCandidateIdx - (U32)backLength; /* farthest position in current segment, will find a match of length currentSegmentLength + maybe some back */ + if (lookBackLength==0) { /* no back possible */ + size_t const maxML = MIN(currentSegmentLength, srcPatternLength); + if ((size_t)longest < maxML) { + assert(maxML < 2 GB); + longest = (int)maxML; + *matchpos = base + matchIndex; /* virtual pos, relative to ip, to retrieve offset */ + *startpos = ip; + } + { U32 const distToNextPattern = DELTANEXTU16(chainTable, matchIndex); + if (distToNextPattern > matchIndex) break; /* avoid overflow */ + matchIndex -= distToNextPattern; + } } } + continue; + } } + } } /* PA optimization */ + + /* follow current chain */ + matchIndex -= DELTANEXTU16(chainTable, matchIndex+matchChainPos); + + } /* while ((matchIndex>=lowestMatchIndex) && (nbAttempts)) */ + + if (dict == usingDictCtx && nbAttempts && ipIndex - lowestMatchIndex < MAX_DISTANCE) { + size_t const dictEndOffset = dictCtx->end - dictCtx->base; + assert(dictEndOffset <= 1 GB); + dictMatchIndex = dictCtx->hashTable[LZ4HC_hashPtr(ip)]; + matchIndex = dictMatchIndex + lowestMatchIndex - (U32)dictEndOffset; + while (ipIndex - matchIndex <= MAX_DISTANCE && nbAttempts--) { + const BYTE* const matchPtr = dictCtx->base + dictMatchIndex; + + if (LZ4_read32(matchPtr) == pattern) { + int mlt; + int back = 0; + const BYTE* vLimit = ip + (dictEndOffset - dictMatchIndex); + if (vLimit > iHighLimit) vLimit = iHighLimit; + mlt = LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH; + back = lookBackLength ? 
LZ4HC_countBack(ip, matchPtr, iLowLimit, dictCtx->base + dictCtx->dictLimit) : 0; + mlt -= back; + if (mlt > longest) { + longest = mlt; + *matchpos = base + matchIndex + back; + *startpos = ip + back; + } + } + + { U32 const nextOffset = DELTANEXTU16(dictCtx->chainTable, dictMatchIndex); + dictMatchIndex -= nextOffset; + matchIndex -= nextOffset; + } + } + } return longest; } @@ -301,13 +383,14 @@ int LZ4HC_InsertAndFindBestMatch(LZ4HC_CCtx_internal* const hc4, /* Index tabl const BYTE* const ip, const BYTE* const iLimit, const BYTE** matchpos, const int maxNbAttempts, - const int patternAnalysis) + const int patternAnalysis, + const dictCtx_directive dict) { const BYTE* uselessPtr = ip; /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos), * but this won't be the case here, as we define iLowLimit==ip, * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */ - return LZ4HC_InsertAndGetWiderMatch(hc4, ip, ip, iLimit, MINMATCH-1, matchpos, &uselessPtr, maxNbAttempts, patternAnalysis); + return LZ4HC_InsertAndGetWiderMatch(hc4, ip, ip, iLimit, MINMATCH-1, matchpos, &uselessPtr, maxNbAttempts, patternAnalysis, 0 /*chainSwap*/, dict, favorCompressionRatio); } @@ -333,7 +416,7 @@ LZ4_FORCE_INLINE int LZ4HC_encodeSequence ( size_t length; BYTE* const token = (*op)++; -#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 2) +#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 6) static const BYTE* start = NULL; static U32 totalCost = 0; U32 const pos = (start==NULL) ? 0 : (U32)(*anchor - start); @@ -342,8 +425,8 @@ LZ4_FORCE_INLINE int LZ4HC_encodeSequence ( U32 const mlAdd = (matchLength>=19) ? ((matchLength-19) / 255) + 1 : 0; U32 const cost = 1 + llAdd + ll + 2 + mlAdd; if (start==NULL) start = *anchor; /* only works for single segment */ - //g_debuglog_enable = (pos >= 2228) & (pos <= 2262); - DEBUGLOG(2, "pos:%7u -- literals:%3u, match:%4i, offset:%5u, cost:%3u + %u", + /* g_debuglog_enable = (pos >= 2228) & (pos <= 2262); */ + DEBUGLOG(6, "pos:%7u -- literals:%3u, match:%4i, offset:%5u, cost:%3u + %u", pos, (U32)(*ip - *anchor), matchLength, (U32)(*ip-match), cost, totalCost); @@ -390,22 +473,19 @@ LZ4_FORCE_INLINE int LZ4HC_encodeSequence ( return 0; } -/* btopt */ -#include "lz4opt.h" - - -static int LZ4HC_compress_hashChain ( +LZ4_FORCE_INLINE int LZ4HC_compress_hashChain ( LZ4HC_CCtx_internal* const ctx, const char* const source, char* const dest, int* srcSizePtr, int const maxOutputSize, unsigned maxNbAttempts, - limitedOutput_directive limit + const limitedOutput_directive limit, + const dictCtx_directive dict ) { const int inputSize = *srcSizePtr; - const int patternAnalysis = (maxNbAttempts > 64); /* levels 8+ */ + const int patternAnalysis = (maxNbAttempts > 128); /* levels 9+ */ const BYTE* ip = (const BYTE*) source; const BYTE* anchor = ip; @@ -417,14 +497,14 @@ static int LZ4HC_compress_hashChain ( BYTE* op = (BYTE*) dest; BYTE* oend = op + maxOutputSize; - int ml, ml2, ml3, ml0; + int ml0, ml, ml2, ml3; + const BYTE* start0; + const BYTE* ref0; const BYTE* ref = NULL; const BYTE* start2 = NULL; const BYTE* ref2 = NULL; const BYTE* start3 = NULL; const BYTE* ref3 = NULL; - const BYTE* start0; - const BYTE* ref0; /* init */ *srcSizePtr = 0; @@ -432,36 +512,32 @@ static int LZ4HC_compress_hashChain ( if (inputSize < LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */ /* Main Loop */ - while (ip < mflimit) { - ml = LZ4HC_InsertAndFindBestMatch (ctx, ip, matchlimit, &ref, maxNbAttempts, patternAnalysis); 
+ while (ip <= mflimit) { + ml = LZ4HC_InsertAndFindBestMatch (ctx, ip, matchlimit, &ref, maxNbAttempts, patternAnalysis, dict); if (ml<MINMATCH) { ip++; continue; } /* saved, in case we would skip too much */ - start0 = ip; - ref0 = ref; - ml0 = ml; + start0 = ip; ref0 = ref; ml0 = ml; _Search2: - if (ip+ml < mflimit) + if (ip+ml <= mflimit) { ml2 = LZ4HC_InsertAndGetWiderMatch(ctx, ip + ml - 2, ip + 0, matchlimit, ml, &ref2, &start2, - maxNbAttempts, patternAnalysis); - else + maxNbAttempts, patternAnalysis, 0, dict, favorCompressionRatio); + } else { ml2 = ml; + } - if (ml2 == ml) { /* No better match */ + if (ml2 == ml) { /* No better match => encode ML1 */ optr = op; if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ref, limit, oend)) goto _dest_overflow; continue; } - if (start0 < ip) { - if (start2 < ip + ml0) { /* empirical */ - ip = start0; - ref = ref0; - ml = ml0; - } - } + if (start0 < ip) { /* first match was skipped at least once */ + if (start2 < ip + ml0) { /* squeezing ML1 between ML0(original ML1) and ML2 */ + ip = start0; ref = ref0; ml = ml0; /* restore initial ML1 */ + } } /* Here, start0==ip */ if ((start2 - ip) < 3) { /* First Match too small : removed */ @@ -489,14 +565,15 @@ static int LZ4HC_compress_hashChain ( } /* Now, we have start2 = ip+new_ml, with new_ml = min(ml, OPTIMAL_ML=18) */ - if (start2 + ml2 < mflimit) + if (start2 + ml2 <= mflimit) { ml3 = LZ4HC_InsertAndGetWiderMatch(ctx, start2 + ml2 - 3, start2, matchlimit, ml2, &ref3, &start3, - maxNbAttempts, patternAnalysis); - else + maxNbAttempts, patternAnalysis, 0, dict, favorCompressionRatio); + } else { ml3 = ml2; + } - if (ml3 == ml2) { /* No better match : 2 sequences to encode */ + if (ml3 == ml2) { /* No better match => encode ML1 and ML2 */ /* ip & ref are known; Now for ml */ if (start2 < ip+ml) ml = (int)(start2 - ip); /* Now, encode 2 sequences */ @@ -541,11 +618,12 @@ static int LZ4HC_compress_hashChain ( } /* - * OK, now we have 3 ascending matches; let's write at least the first one - * ip & ref are known; Now for ml + * OK, now we have 3 ascending matches; + * let's write the first one ML1. + * ip & ref are known; Now decide ml. */ if (start2 < ip+ml) { - if ((start2 - ip) < (int)ML_MASK) { + if ((start2 - ip) < OPTIMAL_ML) { int correction; if (ml > OPTIMAL_ML) ml = OPTIMAL_ML; if (ip + ml > start2 + ml2 - MINMATCH) ml = (int)(start2 - ip) + ml2 - MINMATCH; @@ -562,14 +640,13 @@ static int LZ4HC_compress_hashChain ( optr = op; if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ref, limit, oend)) goto _dest_overflow; - ip = start2; - ref = ref2; - ml = ml2; + /* ML2 becomes ML1 */ + ip = start2; ref = ref2; ml = ml2; - start2 = start3; - ref2 = ref3; - ml2 = ml3; + /* ML3 becomes ML2 */ + start2 = start3; ref2 = ref3; ml2 = ml3; + /* let's find a new ML3 */ goto _Search3; } @@ -613,14 +690,24 @@ static int LZ4HC_compress_hashChain ( } -static int LZ4HC_compress_generic ( +static int LZ4HC_compress_optimal( LZ4HC_CCtx_internal* ctx, + const char* const source, char* dst, + int* srcSizePtr, int dstCapacity, + int const nbSearches, size_t sufficient_len, + const limitedOutput_directive limit, int const fullUpdate, + const dictCtx_directive dict, + HCfavor_e favorDecSpeed); + + +LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal ( LZ4HC_CCtx_internal* const ctx, const char* const src, char* const dst, int* const srcSizePtr, int const dstCapacity, int cLevel, - limitedOutput_directive limit + const limitedOutput_directive limit, + const dictCtx_directive dict ) { typedef enum { lz4hc, lz4opt } lz4hc_strat_e; @@ -630,49 +717,110 @@ static int LZ4HC_compress_generic ( U32 targetLength; } cParams_t; static const cParams_t clTable[LZ4HC_CLEVEL_MAX+1] = { - { lz4hc, 2, 16 }, /* 0, unused */ - { lz4hc, 2, 16 }, /* 1, unused */ - { lz4hc, 2, 16 }, /* 2, unused */ - { lz4hc, 4, 16 }, /* 3 */ - { lz4hc, 8, 16 }, /* 4 */ - { lz4hc, 16, 16 }, /* 5 */ - { lz4hc, 32, 16 }, /* 6 */ - { lz4hc, 64, 16 }, /* 7 */ - { lz4hc, 128, 16 }, /* 8 */ - { lz4hc, 256, 16 }, /* 9 */ - { lz4opt, 96, 64 }, /*10==LZ4HC_CLEVEL_OPT_MIN*/ - {
lz4opt, 512,128 }, /*11 */ - { lz4opt,8192, LZ4_OPT_NUM }, /* 12==LZ4HC_CLEVEL_MAX */ + { lz4hc, 2, 16 }, /* 0, unused */ + { lz4hc, 2, 16 }, /* 1, unused */ + { lz4hc, 2, 16 }, /* 2, unused */ + { lz4hc, 4, 16 }, /* 3 */ + { lz4hc, 8, 16 }, /* 4 */ + { lz4hc, 16, 16 }, /* 5 */ + { lz4hc, 32, 16 }, /* 6 */ + { lz4hc, 64, 16 }, /* 7 */ + { lz4hc, 128, 16 }, /* 8 */ + { lz4hc, 256, 16 }, /* 9 */ + { lz4opt, 96, 64 }, /*10==LZ4HC_CLEVEL_OPT_MIN*/ + { lz4opt, 512,128 }, /*11 */ + { lz4opt,16384,LZ4_OPT_NUM }, /* 12==LZ4HC_CLEVEL_MAX */ }; + DEBUGLOG(4, "LZ4HC_compress_generic(%p, %p, %d)", ctx, src, *srcSizePtr); + if (limit == limitedDestSize && dstCapacity < 1) return 0; /* Impossible to store anything */ if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size (too large or negative) */ ctx->end += *srcSizePtr; if (cLevel < 1) cLevel = LZ4HC_CLEVEL_DEFAULT; /* note : convention is different from lz4frame, maybe something to review */ cLevel = MIN(LZ4HC_CLEVEL_MAX, cLevel); - assert(cLevel >= 0); - assert(cLevel <= LZ4HC_CLEVEL_MAX); { cParams_t const cParam = clTable[cLevel]; + HCfavor_e const favor = ctx->favorDecSpeed ? favorDecompressionSpeed : favorCompressionRatio; if (cParam.strat == lz4hc) return LZ4HC_compress_hashChain(ctx, src, dst, srcSizePtr, dstCapacity, - cParam.nbSearches, limit); + cParam.nbSearches, limit, dict); assert(cParam.strat == lz4opt); return LZ4HC_compress_optimal(ctx, src, dst, srcSizePtr, dstCapacity, cParam.nbSearches, cParam.targetLength, limit, - cLevel == LZ4HC_CLEVEL_MAX); /* ultra mode */ + cLevel == LZ4HC_CLEVEL_MAX, /* ultra mode */ + dict, favor); + } +} + +static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock); + +static int LZ4HC_compress_generic_noDictCtx ( + LZ4HC_CCtx_internal* const ctx, + const char* const src, + char* const dst, + int* const srcSizePtr, + int const dstCapacity, + int cLevel, + limitedOutput_directive limit + ) +{ + assert(ctx->dictCtx == NULL); + return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, noDictCtx); +} + +static int LZ4HC_compress_generic_dictCtx ( + LZ4HC_CCtx_internal* const ctx, + const char* const src, + char* const dst, + int* const srcSizePtr, + int const dstCapacity, + int cLevel, + limitedOutput_directive limit + ) +{ + const size_t position = ctx->end - ctx->base - ctx->lowLimit; + assert(ctx->dictCtx != NULL); + if (position >= 64 KB) { + ctx->dictCtx = NULL; + return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit); + } else if (position == 0 && *srcSizePtr > 4 KB) { + memcpy(ctx, ctx->dictCtx, sizeof(LZ4HC_CCtx_internal)); + LZ4HC_setExternalDict(ctx, (const BYTE *)src); + ctx->compressionLevel = (short)cLevel; + return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit); + } else { + return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, usingDictCtx); + } +} + +static int LZ4HC_compress_generic ( + LZ4HC_CCtx_internal* const ctx, + const char* const src, + char* const dst, + int* const srcSizePtr, + int const dstCapacity, + int cLevel, + limitedOutput_directive limit + ) +{ + if (ctx->dictCtx == NULL) { + return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit); + } else { + return LZ4HC_compress_generic_dictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit); } } int LZ4_sizeofStateHC(void) { return sizeof(LZ4_streamHC_t); } -int 
LZ4_compress_HC_extStateHC (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel) +int LZ4_compress_HC_extStateHC_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel) { LZ4HC_CCtx_internal* const ctx = &((LZ4_streamHC_t*)state)->internal_donotuse; if (((size_t)(state)&(sizeof(void*)-1)) != 0) return 0; /* Error : state is not aligned for pointers (32 or 64 bits) */ + LZ4_resetStreamHC_fast((LZ4_streamHC_t*)state, compressionLevel); LZ4HC_init (ctx, (const BYTE*)src); if (dstCapacity < LZ4_compressBound(srcSize)) return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, limitedOutput); @@ -680,10 +828,17 @@ int LZ4_compress_HC_extStateHC (void* state, const char* src, char* dst, int src return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, noLimit); } +int LZ4_compress_HC_extStateHC (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel) +{ + if (((size_t)(state)&(sizeof(void*)-1)) != 0) return 0; /* Error : state is not aligned for pointers (32 or 64 bits) */ + LZ4_resetStreamHC ((LZ4_streamHC_t*)state, compressionLevel); + return LZ4_compress_HC_extStateHC_fastReset(state, src, dst, srcSize, dstCapacity, compressionLevel); +} + int LZ4_compress_HC(const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel) { #if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1 - LZ4_streamHC_t* const statePtr = (LZ4_streamHC_t*)malloc(sizeof(LZ4_streamHC_t)); + LZ4_streamHC_t* const statePtr = (LZ4_streamHC_t*)ALLOC(sizeof(LZ4_streamHC_t)); #else LZ4_streamHC_t state; LZ4_streamHC_t* const statePtr = &state; @@ -700,6 +855,7 @@ int LZ4_compress_HC(const char* src, char* dst, int srcSize, int dstCapacity, in int LZ4_compress_HC_destSize(void* LZ4HC_Data, const char* source, char* dest, int* sourceSizePtr, int targetDestSize, int cLevel) { LZ4HC_CCtx_internal* const ctx = &((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse; + LZ4_resetStreamHC((LZ4_streamHC_t*)LZ4HC_Data, cLevel); LZ4HC_init(ctx, (const BYTE*) source); return LZ4HC_compress_generic(ctx, source, dest, sourceSizePtr, targetDestSize, cLevel, limitedDestSize); } @@ -710,8 +866,15 @@ int LZ4_compress_HC_destSize(void* LZ4HC_Data, const char* source, char* dest, i * Streaming Functions **************************************/ /* allocation */ -LZ4_streamHC_t* LZ4_createStreamHC(void) { return (LZ4_streamHC_t*)malloc(sizeof(LZ4_streamHC_t)); } -int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr) { +LZ4_streamHC_t* LZ4_createStreamHC(void) { + LZ4_streamHC_t* const LZ4_streamHCPtr = (LZ4_streamHC_t*)ALLOC(sizeof(LZ4_streamHC_t)); + if (LZ4_streamHCPtr==NULL) return NULL; + LZ4_resetStreamHC(LZ4_streamHCPtr, LZ4HC_CLEVEL_DEFAULT); + return LZ4_streamHCPtr; +} + +int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr) { + DEBUGLOG(4, "LZ4_freeStreamHC(%p)", LZ4_streamHCPtr); if (!LZ4_streamHCPtr) return 0; /* support free on NULL */ free(LZ4_streamHCPtr); return 0; @@ -722,36 +885,61 @@ int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr) { void LZ4_resetStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel) { LZ4_STATIC_ASSERT(sizeof(LZ4HC_CCtx_internal) <= sizeof(size_t) * LZ4_STREAMHCSIZE_SIZET); /* if compilation fails here, LZ4_STREAMHCSIZE must be increased */ + DEBUGLOG(4, "LZ4_resetStreamHC(%p, %d)", LZ4_streamHCPtr, compressionLevel); + LZ4_streamHCPtr->internal_donotuse.end = (const BYTE *)(ptrdiff_t)-1; 
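/* A minimal usage sketch for the pair of state-based entry points above
 * (the helper name, buffers, and loop are invented; error handling is
 * elided). It assumes LZ4_HC_STATIC_LINKING_ONLY is defined, since
 * LZ4_compress_HC_extStateHC_fastReset() is only declared in the
 * static-linking-only section of lz4hc.h. The first call pays for the full
 * reset; afterwards the state is known to be coherent, so the cheap reset
 * suffices. */
#define LZ4_HC_STATIC_LINKING_ONLY
#include <stdlib.h>
#include "lz4hc.h"

static int compress_chunks(const char** srcs, const int* srcSizes, int nbChunks,
                           char* dst, int dstCapacity, int cLevel)
{
    void* const state = malloc((size_t)LZ4_sizeofStateHC());  /* malloc() returns pointer-aligned memory */
    int i, cSize = 0;
    if (state == NULL) return 0;
    for (i = 0; i < nbChunks; i++) {
        /* each call overwrites dst; a real caller would consume it per iteration */
        cSize = (i == 0)
              ? LZ4_compress_HC_extStateHC(state, srcs[i], dst, srcSizes[i], dstCapacity, cLevel)
              : LZ4_compress_HC_extStateHC_fastReset(state, srcs[i], dst, srcSizes[i], dstCapacity, cLevel);
        if (cSize == 0) break;  /* dst too small for this chunk */
    }
    free(state);
    return cSize;  /* compressed size of the last chunk, 0 on error */
}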
LZ4_streamHCPtr->internal_donotuse.base = NULL; + LZ4_streamHCPtr->internal_donotuse.dictCtx = NULL; + LZ4_streamHCPtr->internal_donotuse.favorDecSpeed = 0; + LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel); +} + +void LZ4_resetStreamHC_fast (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel) +{ + DEBUGLOG(4, "LZ4_resetStreamHC_fast(%p, %d)", LZ4_streamHCPtr, compressionLevel); + LZ4_streamHCPtr->internal_donotuse.end -= (uptrval)LZ4_streamHCPtr->internal_donotuse.base; + LZ4_streamHCPtr->internal_donotuse.base = NULL; + LZ4_streamHCPtr->internal_donotuse.dictCtx = NULL; LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel); } void LZ4_setCompressionLevel(LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel) { - if (compressionLevel < 1) compressionLevel = 1; + if (compressionLevel < 1) compressionLevel = LZ4HC_CLEVEL_DEFAULT; if (compressionLevel > LZ4HC_CLEVEL_MAX) compressionLevel = LZ4HC_CLEVEL_MAX; - LZ4_streamHCPtr->internal_donotuse.compressionLevel = compressionLevel; + LZ4_streamHCPtr->internal_donotuse.compressionLevel = (short)compressionLevel; +} + +void LZ4_favorDecompressionSpeed(LZ4_streamHC_t* LZ4_streamHCPtr, int favor) +{ + LZ4_streamHCPtr->internal_donotuse.favorDecSpeed = (favor!=0); } int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, const char* dictionary, int dictSize) { LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse; + DEBUGLOG(4, "LZ4_loadDictHC(%p, %p, %d)", LZ4_streamHCPtr, dictionary, dictSize); if (dictSize > 64 KB) { dictionary += dictSize - 64 KB; dictSize = 64 KB; } + LZ4_resetStreamHC(LZ4_streamHCPtr, ctxPtr->compressionLevel); LZ4HC_init (ctxPtr, (const BYTE*)dictionary); ctxPtr->end = (const BYTE*)dictionary + dictSize; if (dictSize >= 4) LZ4HC_Insert (ctxPtr, ctxPtr->end-3); return dictSize; } +void LZ4_attach_HC_dictionary(LZ4_streamHC_t *working_stream, const LZ4_streamHC_t *dictionary_stream) { + working_stream->internal_donotuse.dictCtx = dictionary_stream != NULL ? 
&(dictionary_stream->internal_donotuse) : NULL; +} /* compression */ static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock) { - if (ctxPtr->end >= ctxPtr->base + 4) LZ4HC_Insert (ctxPtr, ctxPtr->end-3); /* Referencing remaining dictionary content */ + DEBUGLOG(4, "LZ4HC_setExternalDict(%p, %p)", ctxPtr, newBlock); + if (ctxPtr->end >= ctxPtr->base + ctxPtr->dictLimit + 4) + LZ4HC_Insert (ctxPtr, ctxPtr->end-3); /* Referencing remaining dictionary content */ /* Only one memory segment for extDict, so any previous extDict is lost at this stage */ ctxPtr->lowLimit = ctxPtr->dictLimit; @@ -768,6 +956,7 @@ static int LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr, limitedOutput_directive limit) { LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse; + DEBUGLOG(4, "LZ4_compressHC_continue_generic(%p, %p, %d)", LZ4_streamHCPtr, src, *srcSizePtr); /* auto-init if forgotten */ if (ctxPtr->base == NULL) LZ4HC_init (ctxPtr, (const BYTE*) src); @@ -816,6 +1005,7 @@ int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictS { LZ4HC_CCtx_internal* const streamPtr = &LZ4_streamHCPtr->internal_donotuse; int const prefixSize = (int)(streamPtr->end - (streamPtr->base + streamPtr->dictLimit)); + DEBUGLOG(4, "LZ4_saveDictHC(%p, %p, %d)", LZ4_streamHCPtr, safeBuffer, dictSize); if (dictSize > 64 KB) dictSize = 64 KB; if (dictSize < 4) dictSize = 0; if (dictSize > prefixSize) dictSize = prefixSize; @@ -855,17 +1045,17 @@ int LZ4_resetStreamStateHC(void* state, char* inputBuffer) { LZ4HC_CCtx_internal *ctx = &((LZ4_streamHC_t*)state)->internal_donotuse; if ((((size_t)state) & (sizeof(void*)-1)) != 0) return 1; /* Error : pointer is not aligned for pointer (32 or 64 bits) */ + LZ4_resetStreamHC((LZ4_streamHC_t*)state, ((LZ4_streamHC_t*)state)->internal_donotuse.compressionLevel); LZ4HC_init(ctx, (const BYTE*)inputBuffer); - ctx->inputBuffer = (BYTE*)inputBuffer; return 0; } -void* LZ4_createHC (char* inputBuffer) +void* LZ4_createHC (const char* inputBuffer) { - LZ4_streamHC_t* hc4 = (LZ4_streamHC_t*)ALLOCATOR(1, sizeof(LZ4_streamHC_t)); + LZ4_streamHC_t* hc4 = (LZ4_streamHC_t*)ALLOC(sizeof(LZ4_streamHC_t)); if (hc4 == NULL) return NULL; /* not enough memory */ + LZ4_resetStreamHC(hc4, 0 /* compressionLevel */); LZ4HC_init (&hc4->internal_donotuse, (const BYTE*)inputBuffer); - hc4->internal_donotuse.inputBuffer = (BYTE*)inputBuffer; return hc4; } @@ -887,7 +1077,333 @@ int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* src, c char* LZ4_slideInputBufferHC(void* LZ4HC_Data) { - LZ4HC_CCtx_internal* const hc4 = &((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse; - int const dictSize = LZ4_saveDictHC((LZ4_streamHC_t*)LZ4HC_Data, (char*)(hc4->inputBuffer), 64 KB); - return (char*)(hc4->inputBuffer + dictSize); + LZ4_streamHC_t *ctx = (LZ4_streamHC_t*)LZ4HC_Data; + const BYTE *bufferStart = ctx->internal_donotuse.base + ctx->internal_donotuse.lowLimit; + LZ4_resetStreamHC_fast(ctx, ctx->internal_donotuse.compressionLevel); + /* avoid const char * -> char * conversion warning :( */ + return (char *)(uptrval)bufferStart; +} + + +/* ================================================ + * LZ4 Optimal parser (levels 10-12) + * ===============================================*/ +typedef struct { + int price; + int off; + int mlen; + int litlen; +} LZ4HC_optimal_t; + +/* price in bytes */ +LZ4_FORCE_INLINE int LZ4HC_literalsPrice(int const litlen) +{ + int price = litlen; + if (litlen >= (int)RUN_MASK) + price += 1 + 
(litlen-RUN_MASK)/255; + return price; +} + + +/* requires mlen >= MINMATCH */ +LZ4_FORCE_INLINE int LZ4HC_sequencePrice(int litlen, int mlen) +{ + int price = 1 + 2 ; /* token + 16-bit offset */ + + price += LZ4HC_literalsPrice(litlen); + + if (mlen >= (int)(ML_MASK+MINMATCH)) + price += 1 + (mlen-(ML_MASK+MINMATCH))/255; + + return price; +} + + +typedef struct { + int off; + int len; +} LZ4HC_match_t; + +LZ4_FORCE_INLINE LZ4HC_match_t +LZ4HC_FindLongerMatch(LZ4HC_CCtx_internal* const ctx, + const BYTE* ip, const BYTE* const iHighLimit, + int minLen, int nbSearches, + const dictCtx_directive dict, + const HCfavor_e favorDecSpeed) +{ + LZ4HC_match_t match = { 0 , 0 }; + const BYTE* matchPtr = NULL; + /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos), + * but this won't be the case here, as we define iLowLimit==ip, + * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */ + int matchLength = LZ4HC_InsertAndGetWiderMatch(ctx, ip, ip, iHighLimit, minLen, &matchPtr, &ip, nbSearches, 1 /*patternAnalysis*/, 1 /*chainSwap*/, dict, favorDecSpeed); + if (matchLength <= minLen) return match; + if (favorDecSpeed) { + if ((matchLength>18) & (matchLength<=36)) matchLength=18; /* favor shortcut */ + } + match.len = matchLength; + match.off = (int)(ip-matchPtr); + return match; } + + +static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx, + const char* const source, + char* dst, + int* srcSizePtr, + int dstCapacity, + int const nbSearches, + size_t sufficient_len, + const limitedOutput_directive limit, + int const fullUpdate, + const dictCtx_directive dict, + const HCfavor_e favorDecSpeed) +{ +#define TRAILING_LITERALS 3 + LZ4HC_optimal_t opt[LZ4_OPT_NUM + TRAILING_LITERALS]; /* ~64 KB, which is a bit large for stack... 
*/ + + const BYTE* ip = (const BYTE*) source; + const BYTE* anchor = ip; + const BYTE* const iend = ip + *srcSizePtr; + const BYTE* const mflimit = iend - MFLIMIT; + const BYTE* const matchlimit = iend - LASTLITERALS; + BYTE* op = (BYTE*) dst; + BYTE* opSaved = (BYTE*) dst; + BYTE* oend = op + dstCapacity; + + /* init */ + DEBUGLOG(5, "LZ4HC_compress_optimal"); + *srcSizePtr = 0; + if (limit == limitedDestSize) oend -= LASTLITERALS; /* Hack for support LZ4 format restriction */ + if (sufficient_len >= LZ4_OPT_NUM) sufficient_len = LZ4_OPT_NUM-1; + + /* Main Loop */ + assert(ip - anchor < LZ4_MAX_INPUT_SIZE); + while (ip <= mflimit) { + int const llen = (int)(ip - anchor); + int best_mlen, best_off; + int cur, last_match_pos = 0; + + LZ4HC_match_t const firstMatch = LZ4HC_FindLongerMatch(ctx, ip, matchlimit, MINMATCH-1, nbSearches, dict, favorDecSpeed); + if (firstMatch.len==0) { ip++; continue; } + + if ((size_t)firstMatch.len > sufficient_len) { + /* good enough solution : immediate encoding */ + int const firstML = firstMatch.len; + const BYTE* const matchPos = ip - firstMatch.off; + opSaved = op; + if ( LZ4HC_encodeSequence(&ip, &op, &anchor, firstML, matchPos, limit, oend) ) /* updates ip, op and anchor */ + goto _dest_overflow; + continue; + } + + /* set prices for first positions (literals) */ + { int rPos; + for (rPos = 0 ; rPos < MINMATCH ; rPos++) { + int const cost = LZ4HC_literalsPrice(llen + rPos); + opt[rPos].mlen = 1; + opt[rPos].off = 0; + opt[rPos].litlen = llen + rPos; + opt[rPos].price = cost; + DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup", + rPos, cost, opt[rPos].litlen); + } } + /* set prices using initial match */ + { int mlen = MINMATCH; + int const matchML = firstMatch.len; /* necessarily < sufficient_len < LZ4_OPT_NUM */ + int const offset = firstMatch.off; + assert(matchML < LZ4_OPT_NUM); + for ( ; mlen <= matchML ; mlen++) { + int const cost = LZ4HC_sequencePrice(llen, mlen); + opt[mlen].mlen = mlen; + opt[mlen].off = offset; + opt[mlen].litlen = llen; + opt[mlen].price = cost; + DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i) -- initial setup", + mlen, cost, mlen); + } } + last_match_pos = firstMatch.len; + { int addLit; + for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) { + opt[last_match_pos+addLit].mlen = 1; /* literal */ + opt[last_match_pos+addLit].off = 0; + opt[last_match_pos+addLit].litlen = addLit; + opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit); + DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup", + last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit); + } } + + /* check further positions */ + for (cur = 1; cur < last_match_pos; cur++) { + const BYTE* const curPtr = ip + cur; + LZ4HC_match_t newMatch; + + if (curPtr > mflimit) break; + DEBUGLOG(7, "rPos:%u[%u] vs [%u]%u", + cur, opt[cur].price, opt[cur+1].price, cur+1); + if (fullUpdate) { + /* not useful to search here if next position has same (or lower) cost */ + if ( (opt[cur+1].price <= opt[cur].price) + /* in some cases, next position has same cost, but cost rises sharply after, so a small match would still be beneficial */ + && (opt[cur+MINMATCH].price < opt[cur].price + 3/*min seq price*/) ) + continue; + } else { + /* not useful to search here if next position has same (or lower) cost */ + if (opt[cur+1].price <= opt[cur].price) continue; + } + + DEBUGLOG(7, "search at rPos:%u", cur); + if (fullUpdate) + newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, MINMATCH-1, nbSearches, dict, 
favorDecSpeed); + else + /* only test matches of minimum length; slightly faster, but misses a few bytes */ + newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, last_match_pos - cur, nbSearches, dict, favorDecSpeed); + if (!newMatch.len) continue; + + if ( ((size_t)newMatch.len > sufficient_len) + || (newMatch.len + cur >= LZ4_OPT_NUM) ) { + /* immediate encoding */ + best_mlen = newMatch.len; + best_off = newMatch.off; + last_match_pos = cur + 1; + goto encode; + } + + /* before match : set price with literals at beginning */ + { int const baseLitlen = opt[cur].litlen; + int litlen; + for (litlen = 1; litlen < MINMATCH; litlen++) { + int const price = opt[cur].price - LZ4HC_literalsPrice(baseLitlen) + LZ4HC_literalsPrice(baseLitlen+litlen); + int const pos = cur + litlen; + if (price < opt[pos].price) { + opt[pos].mlen = 1; /* literal */ + opt[pos].off = 0; + opt[pos].litlen = baseLitlen+litlen; + opt[pos].price = price; + DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)", + pos, price, opt[pos].litlen); + } } } + + /* set prices using match at position = cur */ + { int const matchML = newMatch.len; + int ml = MINMATCH; + + assert(cur + newMatch.len < LZ4_OPT_NUM); + for ( ; ml <= matchML ; ml++) { + int const pos = cur + ml; + int const offset = newMatch.off; + int price; + int ll; + DEBUGLOG(7, "testing price rPos %i (last_match_pos=%i)", + pos, last_match_pos); + if (opt[cur].mlen == 1) { + ll = opt[cur].litlen; + price = ((cur > ll) ? opt[cur - ll].price : 0) + + LZ4HC_sequencePrice(ll, ml); + } else { + ll = 0; + price = opt[cur].price + LZ4HC_sequencePrice(0, ml); + } + + assert((U32)favorDecSpeed <= 1); + if (pos > last_match_pos+TRAILING_LITERALS + || price <= opt[pos].price - (int)favorDecSpeed) { + DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i)", + pos, price, ml); + assert(pos < LZ4_OPT_NUM); + if ( (ml == matchML) /* last pos of last match */ + && (last_match_pos < pos) ) + last_match_pos = pos; + opt[pos].mlen = ml; + opt[pos].off = offset; + opt[pos].litlen = ll; + opt[pos].price = price; + } } } + /* complete following positions with literals */ + { int addLit; + for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) { + opt[last_match_pos+addLit].mlen = 1; /* literal */ + opt[last_match_pos+addLit].off = 0; + opt[last_match_pos+addLit].litlen = addLit; + opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit); + DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)", last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit); + } } + } /* for (cur = 1; cur <= last_match_pos; cur++) */ + + best_mlen = opt[last_match_pos].mlen; + best_off = opt[last_match_pos].off; + cur = last_match_pos - best_mlen; + + encode: /* cur, last_match_pos, best_mlen, best_off must be set */ + assert(cur < LZ4_OPT_NUM); + assert(last_match_pos >= 1); /* == 1 when only one candidate */ + DEBUGLOG(6, "reverse traversal, looking for shortest path (last_match_pos=%i)", last_match_pos); + { int candidate_pos = cur; + int selected_matchLength = best_mlen; + int selected_offset = best_off; + while (1) { /* from end to beginning */ + int const next_matchLength = opt[candidate_pos].mlen; /* can be 1, means literal */ + int const next_offset = opt[candidate_pos].off; + DEBUGLOG(7, "pos %i: sequence length %i", candidate_pos, selected_matchLength); + opt[candidate_pos].mlen = selected_matchLength; + opt[candidate_pos].off = selected_offset; + selected_matchLength = next_matchLength; + selected_offset = next_offset; + if (next_matchLength > candidate_pos) break; /* 
last match elected, first match to encode */
+            assert(next_matchLength > 0);  /* can be 1, means literal */
+            candidate_pos -= next_matchLength;
+    }   }
+
+    /* encode all recorded sequences in order */
+    {   int rPos = 0;  /* relative position (to ip) */
+        while (rPos < last_match_pos) {
+            int const ml = opt[rPos].mlen;
+            int const offset = opt[rPos].off;
+            if (ml == 1) { ip++; rPos++; continue; }  /* literal; note: can end up with several literals, in which case, skip them */
+            rPos += ml;
+            assert(ml >= MINMATCH);
+            assert((offset >= 1) && (offset <= MAX_DISTANCE));
+            opSaved = op;
+            if ( LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ip - offset, limit, oend) )  /* updates ip, op and anchor */
+                goto _dest_overflow;
+    }   }
+    }  /* while (ip <= mflimit) */
+
+_last_literals:
+    /* Encode Last Literals */
+    {   size_t lastRunSize = (size_t)(iend - anchor);  /* literals */
+        size_t litLength = (lastRunSize + 255 - RUN_MASK) / 255;
+        size_t const totalSize = 1 + litLength + lastRunSize;
+        if (limit == limitedDestSize) oend += LASTLITERALS;  /* restore correct value */
+        if (limit && (op + totalSize > oend)) {
+            if (limit == limitedOutput) return 0;  /* Check output limit */
+            /* adapt lastRunSize to fill 'dst' */
+            lastRunSize = (size_t)(oend - op) - 1;
+            litLength = (lastRunSize + 255 - RUN_MASK) / 255;
+            lastRunSize -= litLength;
+        }
+        ip = anchor + lastRunSize;
+
+        if (lastRunSize >= RUN_MASK) {
+            size_t accumulator = lastRunSize - RUN_MASK;
+            *op++ = (RUN_MASK << ML_BITS);
+            for(; accumulator >= 255 ; accumulator -= 255) *op++ = 255;
+            *op++ = (BYTE) accumulator;
+        } else {
+            *op++ = (BYTE)(lastRunSize << ML_BITS);
+        }
+        memcpy(op, anchor, lastRunSize);
+        op += lastRunSize;
+    }
+
+    /* End */
+    *srcSizePtr = (int) (((const char*)ip) - source);
+    return (int) ((char*)op-dst);
+
+_dest_overflow:
+    if (limit == limitedDestSize) {
+        op = opSaved;  /* restore correct out pointer */
+        goto _last_literals;
+    }
+    return 0;
+}
diff --git a/lz4libs/lz4hc.h b/lz4libs/lz4hc.h
index d41bf420..bb5e0737 100644
--- a/lz4libs/lz4hc.h
+++ b/lz4libs/lz4hc.h
@@ -141,35 +141,39 @@ LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, in
 #if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
 #include <stdint.h>
 
-typedef struct
+typedef struct LZ4HC_CCtx_internal LZ4HC_CCtx_internal;
+struct LZ4HC_CCtx_internal
 {
     uint32_t hashTable[LZ4HC_HASHTABLESIZE];
     uint16_t chainTable[LZ4HC_MAXD];
     const uint8_t* end;        /* next block here to continue on current prefix */
     const uint8_t* base;       /* All index relative to this position */
     const uint8_t* dictBase;   /* alternate base for extDict */
-    uint8_t* inputBuffer;      /* deprecated */
     uint32_t dictLimit;        /* below that point, need extDict */
     uint32_t lowLimit;         /* below that point, no more dict */
     uint32_t nextToUpdate;     /* index from which to continue dictionary update */
-    int compressionLevel;
-} LZ4HC_CCtx_internal;
+    short compressionLevel;
+    short favorDecSpeed;
+    const LZ4HC_CCtx_internal* dictCtx;
+};
 
 #else
 
-typedef struct
+typedef struct LZ4HC_CCtx_internal LZ4HC_CCtx_internal;
+struct LZ4HC_CCtx_internal
 {
     unsigned int hashTable[LZ4HC_HASHTABLESIZE];
     unsigned short chainTable[LZ4HC_MAXD];
     const unsigned char* end;        /* next block here to continue on current prefix */
     const unsigned char* base;       /* All index relative to this position */
     const unsigned char* dictBase;   /* alternate base for extDict */
-    unsigned char* inputBuffer;      /* deprecated */
     unsigned int dictLimit;          /* below that point, need extDict */
     unsigned int lowLimit;           /* below
that point, no more dict */ unsigned int nextToUpdate; /* index from which to continue dictionary update */ - int compressionLevel; -} LZ4HC_CCtx_internal; + short compressionLevel; + short favorDecSpeed; + const LZ4HC_CCtx_internal* dictCtx; +}; #endif @@ -195,25 +199,32 @@ union LZ4_streamHC_u { /* see lz4.h LZ4_DISABLE_DEPRECATE_WARNINGS to turn off deprecation warnings */ /* deprecated compression functions */ -LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC() instead") int LZ4_compressHC (const char* source, char* dest, int inputSize); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC() instead") int LZ4_compressHC_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC() instead") int LZ4_compressHC2 (const char* source, char* dest, int inputSize, int compressionLevel); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC() instead") int LZ4_compressHC2_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") int LZ4_compressHC_withStateHC (void* state, const char* source, char* dest, int inputSize); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* source, char* dest, int inputSize, int maxOutputSize); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") int LZ4_compressHC2_withStateHC (void* state, const char* source, char* dest, int inputSize, int compressionLevel); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") int LZ4_compressHC2_limitedOutput_withStateHC(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") int LZ4_compressHC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize, int maxOutputSize); - -/* Deprecated Streaming functions using older model; should no longer be used */ -LZ4LIB_API LZ4_DEPRECATED("use LZ4_createStreamHC() instead") void* LZ4_createHC (char* inputBuffer); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_saveDictHC() instead") char* LZ4_slideInputBufferHC (void* LZ4HC_Data); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_freeStreamHC() instead") int LZ4_freeHC (void* LZ4HC_Data); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int compressionLevel); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_createStreamHC() instead") int LZ4_sizeofStreamStateHC(void); -LZ4LIB_API LZ4_DEPRECATED("use LZ4_resetStreamHC() instead") int LZ4_resetStreamStateHC(void* state, char* inputBuffer); +LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC (const char* source, char* dest, int inputSize); +LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize); +LZ4_DEPRECATED("use LZ4_compress_HC() 
instead") LZ4LIB_API int LZ4_compressHC2 (const char* source, char* dest, int inputSize, int compressionLevel); +LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC2_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel); +LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC_withStateHC (void* state, const char* source, char* dest, int inputSize); +LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* source, char* dest, int inputSize, int maxOutputSize); +LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC2_withStateHC (void* state, const char* source, char* dest, int inputSize, int compressionLevel); +LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC2_limitedOutput_withStateHC(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel); +LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize); +LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize, int maxOutputSize); + +/* Obsolete streaming functions; degraded functionality; do not use! + * + * In order to perform streaming compression, these functions depended on data + * that is no longer tracked in the state. They have been preserved as well as + * possible: using them will still produce a correct output. However, use of + * LZ4_slideInputBufferHC() will truncate the history of the stream, rather + * than preserve a window-sized chunk of history. + */ +LZ4_DEPRECATED("use LZ4_createStreamHC() instead") LZ4LIB_API void* LZ4_createHC (const char* inputBuffer); +LZ4_DEPRECATED("use LZ4_saveDictHC() instead") LZ4LIB_API char* LZ4_slideInputBufferHC (void* LZ4HC_Data); +LZ4_DEPRECATED("use LZ4_freeStreamHC() instead") LZ4LIB_API int LZ4_freeHC (void* LZ4HC_Data); +LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int compressionLevel); +LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel); +LZ4_DEPRECATED("use LZ4_createStreamHC() instead") LZ4LIB_API int LZ4_sizeofStreamStateHC(void); +LZ4_DEPRECATED("use LZ4_resetStreamHC() instead") LZ4LIB_API int LZ4_resetStreamStateHC(void* state, char* inputBuffer); #if defined (__cplusplus) @@ -244,9 +255,9 @@ LZ4LIB_API LZ4_DEPRECATED("use LZ4_resetStreamHC() instead") int LZ4_resetStr * `srcSizePtr` : value will be updated to indicate how much bytes were read from `src` */ int LZ4_compress_HC_destSize(void* LZ4HC_Data, - const char* src, char* dst, - int* srcSizePtr, int targetDstSize, - int compressionLevel); + const char* src, char* dst, + int* srcSizePtr, int targetDstSize, + int compressionLevel); /*! 
LZ4_compress_HC_continue_destSize() : v1.8.0 (experimental)
 *  Similar to LZ4_compress_HC_continue(),
@@ -266,7 +277,71 @@ int LZ4_compress_HC_continue_destSize(LZ4_streamHC_t* LZ4_streamHCPtr,
  */
 void LZ4_setCompressionLevel(LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel);
 
-
+/*! LZ4_favorDecompressionSpeed() : v1.8.2 (experimental)
+ *  Parser will select decisions favoring decompression speed over compression ratio.
+ *  Only works at the highest compression settings (level >= LZ4HC_CLEVEL_OPT_MIN).
+ */
+void LZ4_favorDecompressionSpeed(LZ4_streamHC_t* LZ4_streamHCPtr, int favor);
+
+/*! LZ4_resetStreamHC_fast() :
+ *  When an LZ4_streamHC_t is known to be in an internally coherent state,
+ *  it can often be prepared for a new compression with almost no work, only
+ *  sometimes falling back to the full, expensive reset that is always required
+ *  when the stream is in an indeterminate state (i.e., the reset performed by
+ *  LZ4_resetStreamHC()).
+ *
+ *  LZ4_streamHCs are guaranteed to be in a valid state when:
+ *  - returned from LZ4_createStreamHC()
+ *  - reset by LZ4_resetStreamHC()
+ *  - memset(stream, 0, sizeof(LZ4_streamHC_t))
+ *  - the stream was in a valid state and was reset by LZ4_resetStreamHC_fast()
+ *  - the stream was in a valid state and was then used in any compression call
+ *    that returned success
+ *  - the stream was in an indeterminate state and was used in a compression
+ *    call that fully reset the state (LZ4_compress_HC_extStateHC()) and that
+ *    returned success
+ */
+void LZ4_resetStreamHC_fast(LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel);
+
+/*! LZ4_compress_HC_extStateHC_fastReset() :
+ *  A variant of LZ4_compress_HC_extStateHC().
+ *
+ *  Using this variant avoids an expensive initialization step. It is only safe
+ *  to call if the state buffer is known to be correctly initialized already
+ *  (see above comment on LZ4_resetStreamHC_fast() for a definition of
+ *  "correctly initialized"). From a high level, the difference is that this
+ *  function initializes the provided state with a call to
+ *  LZ4_resetStreamHC_fast() while LZ4_compress_HC_extStateHC() starts with a
+ *  call to LZ4_resetStreamHC().
+ */
+int LZ4_compress_HC_extStateHC_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel);
+
+/*! LZ4_attach_HC_dictionary() :
+ *  This is an experimental API that allows for the efficient use of a
+ *  static dictionary many times.
+ *
+ *  Rather than re-loading the dictionary buffer into a working context before
+ *  each compression, or copying a pre-loaded dictionary's LZ4_streamHC_t into a
+ *  working LZ4_streamHC_t, this function introduces a no-copy setup mechanism,
+ *  in which the working stream references the dictionary stream in-place.
+ *
+ *  Several assumptions are made about the state of the dictionary stream.
+ *  Currently, only streams which have been prepared by LZ4_loadDictHC() should
+ *  be expected to work.
+ *
+ *  Alternatively, the provided dictionary stream pointer may be NULL, in which
+ *  case any existing dictionary stream is unset.
+ *
+ *  A dictionary should only be attached to a stream without any history (i.e.,
+ *  a stream that has just been reset).
+ *
+ *  The dictionary will remain attached to the working stream only for the
+ *  current stream session. Calls to LZ4_resetStreamHC(_fast) will remove the
+ *  dictionary context association from the working stream. The dictionary
+ *  stream (and source buffer) must remain in-place / accessible / unchanged
+ *  through the lifetime of the stream session.
+ */
+LZ4LIB_API void LZ4_attach_HC_dictionary(LZ4_streamHC_t *working_stream, const LZ4_streamHC_t *dictionary_stream);
 
 #endif   /* LZ4_HC_SLO_098092834 */
 #endif   /* LZ4_HC_STATIC_LINKING_ONLY */
diff --git a/lz4libs/lz4opt.h b/lz4libs/lz4opt.h
deleted file mode 100644
index 5a8438c1..00000000
--- a/lz4libs/lz4opt.h
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
-    lz4opt.h - Optimal Mode of LZ4
-    Copyright (C) 2015-2017, Przemyslaw Skibinski <inikep@gmail.com>
-    Note : this file is intended to be included within lz4hc.c
-
-   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are
-   met:
-
-       * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following disclaimer
-   in the documentation and/or other materials provided with the
-   distribution.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
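
/* A minimal sketch of the attach workflow documented above (the function name
 * and parameters are invented; the dictionary stream is assumed to have been
 * prepared once via LZ4_createStreamHC() + LZ4_loadDictHC(), and the working
 * stream to be in a valid state, e.g. fresh from LZ4_createStreamHC()): */
#define LZ4_HC_STATIC_LINKING_ONLY
#include "lz4hc.h"

int compress_with_shared_dict(const LZ4_streamHC_t* dictStream,
                              LZ4_streamHC_t* workStream,
                              const char* src, char* dst,
                              int srcSize, int dstCapacity, int cLevel)
{
    LZ4_resetStreamHC_fast(workStream, cLevel);        /* attach requires a stream without history */
    LZ4_attach_HC_dictionary(workStream, dictStream);  /* no-copy: dictStream is referenced in-place */
    return LZ4_compress_HC_continue(workStream, src, dst, srcSize, dstCapacity);  /* 0 if dst too small */
}
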
- - You can contact the author at : - - LZ4 source repository : https://github.com/lz4/lz4 - - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c -*/ - -#define LZ4_OPT_NUM (1<<12) - -typedef struct { - int price; - int off; - int mlen; - int litlen; -} LZ4HC_optimal_t; - - -/* price in bytes */ -LZ4_FORCE_INLINE int LZ4HC_literalsPrice(int const litlen) -{ - int price = litlen; - if (litlen >= (int)RUN_MASK) - price += 1 + (litlen-RUN_MASK)/255; - return price; -} - - -/* requires mlen >= MINMATCH */ -LZ4_FORCE_INLINE int LZ4HC_sequencePrice(int litlen, int mlen) -{ - int price = 1 + 2 ; /* token + 16-bit offset */ - - price += LZ4HC_literalsPrice(litlen); - - if (mlen >= (int)(ML_MASK+MINMATCH)) - price += 1 + (mlen-(ML_MASK+MINMATCH))/255; - - return price; -} - - -/*-************************************* -* Match finder -***************************************/ -typedef struct { - int off; - int len; -} LZ4HC_match_t; - -LZ4_FORCE_INLINE -LZ4HC_match_t LZ4HC_FindLongerMatch(LZ4HC_CCtx_internal* const ctx, - const BYTE* ip, const BYTE* const iHighLimit, - int minLen, int nbSearches) -{ - LZ4HC_match_t match = { 0 , 0 }; - const BYTE* matchPtr = NULL; - /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos), - * but this won't be the case here, as we define iLowLimit==ip, - * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */ - int const matchLength = LZ4HC_InsertAndGetWiderMatch(ctx, - ip, ip, iHighLimit, minLen, &matchPtr, &ip, - nbSearches, 1 /* patternAnalysis */); - if (matchLength <= minLen) return match; - match.len = matchLength; - match.off = (int)(ip-matchPtr); - return match; -} - - -static int LZ4HC_compress_optimal ( - LZ4HC_CCtx_internal* ctx, - const char* const source, - char* dst, - int* srcSizePtr, - int dstCapacity, - int const nbSearches, - size_t sufficient_len, - limitedOutput_directive limit, - int const fullUpdate - ) -{ -#define TRAILING_LITERALS 3 - LZ4HC_optimal_t opt[LZ4_OPT_NUM + TRAILING_LITERALS]; /* this uses a bit too much stack memory to my taste ... 
*/ - - const BYTE* ip = (const BYTE*) source; - const BYTE* anchor = ip; - const BYTE* const iend = ip + *srcSizePtr; - const BYTE* const mflimit = iend - MFLIMIT; - const BYTE* const matchlimit = iend - LASTLITERALS; - BYTE* op = (BYTE*) dst; - BYTE* opSaved = (BYTE*) dst; - BYTE* oend = op + dstCapacity; - - /* init */ - DEBUGLOG(5, "LZ4HC_compress_optimal"); - *srcSizePtr = 0; - if (limit == limitedDestSize) oend -= LASTLITERALS; /* Hack for support LZ4 format restriction */ - if (sufficient_len >= LZ4_OPT_NUM) sufficient_len = LZ4_OPT_NUM-1; - - /* Main Loop */ - assert(ip - anchor < LZ4_MAX_INPUT_SIZE); - while (ip < mflimit) { - int const llen = (int)(ip - anchor); - int best_mlen, best_off; - int cur, last_match_pos = 0; - - LZ4HC_match_t const firstMatch = LZ4HC_FindLongerMatch(ctx, ip, matchlimit, MINMATCH-1, nbSearches); - if (firstMatch.len==0) { ip++; continue; } - - if ((size_t)firstMatch.len > sufficient_len) { - /* good enough solution : immediate encoding */ - int const firstML = firstMatch.len; - const BYTE* const matchPos = ip - firstMatch.off; - opSaved = op; - if ( LZ4HC_encodeSequence(&ip, &op, &anchor, firstML, matchPos, limit, oend) ) /* updates ip, op and anchor */ - goto _dest_overflow; - continue; - } - - /* set prices for first positions (literals) */ - { int rPos; - for (rPos = 0 ; rPos < MINMATCH ; rPos++) { - int const cost = LZ4HC_literalsPrice(llen + rPos); - opt[rPos].mlen = 1; - opt[rPos].off = 0; - opt[rPos].litlen = llen + rPos; - opt[rPos].price = cost; - DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup", - rPos, cost, opt[rPos].litlen); - } } - /* set prices using initial match */ - { int mlen = MINMATCH; - int const matchML = firstMatch.len; /* necessarily < sufficient_len < LZ4_OPT_NUM */ - int const offset = firstMatch.off; - assert(matchML < LZ4_OPT_NUM); - for ( ; mlen <= matchML ; mlen++) { - int const cost = LZ4HC_sequencePrice(llen, mlen); - opt[mlen].mlen = mlen; - opt[mlen].off = offset; - opt[mlen].litlen = llen; - opt[mlen].price = cost; - DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i) -- initial setup", - mlen, cost, mlen); - } } - last_match_pos = firstMatch.len; - { int addLit; - for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) { - opt[last_match_pos+addLit].mlen = 1; /* literal */ - opt[last_match_pos+addLit].off = 0; - opt[last_match_pos+addLit].litlen = addLit; - opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit); - DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup", - last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit); - } } - - /* check further positions */ - for (cur = 1; cur < last_match_pos; cur++) { - const BYTE* const curPtr = ip + cur; - LZ4HC_match_t newMatch; - - if (curPtr >= mflimit) break; - DEBUGLOG(7, "rPos:%u[%u] vs [%u]%u", - cur, opt[cur].price, opt[cur+1].price, cur+1); - if (fullUpdate) { - /* not useful to search here if next position has same (or lower) cost */ - if ( (opt[cur+1].price <= opt[cur].price) - /* in some cases, next position has same cost, but cost rises sharply after, so a small match would still be beneficial */ - && (opt[cur+MINMATCH].price < opt[cur].price + 3/*min seq price*/) ) - continue; - } else { - /* not useful to search here if next position has same (or lower) cost */ - if (opt[cur+1].price <= opt[cur].price) continue; - } - - DEBUGLOG(7, "search at rPos:%u", cur); - if (fullUpdate) - newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, MINMATCH-1, nbSearches); - else - /* only test matches 
of minimum length; slightly faster, but misses a few bytes */ - newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, last_match_pos - cur, nbSearches); - if (!newMatch.len) continue; - - if ( ((size_t)newMatch.len > sufficient_len) - || (newMatch.len + cur >= LZ4_OPT_NUM) ) { - /* immediate encoding */ - best_mlen = newMatch.len; - best_off = newMatch.off; - last_match_pos = cur + 1; - goto encode; - } - - /* before match : set price with literals at beginning */ - { int const baseLitlen = opt[cur].litlen; - int litlen; - for (litlen = 1; litlen < MINMATCH; litlen++) { - int const price = opt[cur].price - LZ4HC_literalsPrice(baseLitlen) + LZ4HC_literalsPrice(baseLitlen+litlen); - int const pos = cur + litlen; - if (price < opt[pos].price) { - opt[pos].mlen = 1; /* literal */ - opt[pos].off = 0; - opt[pos].litlen = baseLitlen+litlen; - opt[pos].price = price; - DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)", - pos, price, opt[pos].litlen); - } } } - - /* set prices using match at position = cur */ - { int const matchML = newMatch.len; - int ml = MINMATCH; - - assert(cur + newMatch.len < LZ4_OPT_NUM); - for ( ; ml <= matchML ; ml++) { - int const pos = cur + ml; - int const offset = newMatch.off; - int price; - int ll; - DEBUGLOG(7, "testing price rPos %i (last_match_pos=%i)", - pos, last_match_pos); - if (opt[cur].mlen == 1) { - ll = opt[cur].litlen; - price = ((cur > ll) ? opt[cur - ll].price : 0) - + LZ4HC_sequencePrice(ll, ml); - } else { - ll = 0; - price = opt[cur].price + LZ4HC_sequencePrice(0, ml); - } - - if (pos > last_match_pos+TRAILING_LITERALS || price <= opt[pos].price) { - DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i)", - pos, price, ml); - assert(pos < LZ4_OPT_NUM); - if ( (ml == matchML) /* last pos of last match */ - && (last_match_pos < pos) ) - last_match_pos = pos; - opt[pos].mlen = ml; - opt[pos].off = offset; - opt[pos].litlen = ll; - opt[pos].price = price; - } } } - /* complete following positions with literals */ - { int addLit; - for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) { - opt[last_match_pos+addLit].mlen = 1; /* literal */ - opt[last_match_pos+addLit].off = 0; - opt[last_match_pos+addLit].litlen = addLit; - opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit); - DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)", last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit); - } } - } /* for (cur = 1; cur <= last_match_pos; cur++) */ - - best_mlen = opt[last_match_pos].mlen; - best_off = opt[last_match_pos].off; - cur = last_match_pos - best_mlen; - -encode: /* cur, last_match_pos, best_mlen, best_off must be set */ - assert(cur < LZ4_OPT_NUM); - assert(last_match_pos >= 1); /* == 1 when only one candidate */ - DEBUGLOG(6, "reverse traversal, looking for shortest path") - DEBUGLOG(6, "last_match_pos = %i", last_match_pos); - { int candidate_pos = cur; - int selected_matchLength = best_mlen; - int selected_offset = best_off; - while (1) { /* from end to beginning */ - int const next_matchLength = opt[candidate_pos].mlen; /* can be 1, means literal */ - int const next_offset = opt[candidate_pos].off; - DEBUGLOG(6, "pos %i: sequence length %i", candidate_pos, selected_matchLength); - opt[candidate_pos].mlen = selected_matchLength; - opt[candidate_pos].off = selected_offset; - selected_matchLength = next_matchLength; - selected_offset = next_offset; - if (next_matchLength > candidate_pos) break; /* last match elected, first match to encode */ - assert(next_matchLength > 0); /* can be 1, means literal */ - 
candidate_pos -= next_matchLength; - } } - - /* encode all recorded sequences in order */ - { int rPos = 0; /* relative position (to ip) */ - while (rPos < last_match_pos) { - int const ml = opt[rPos].mlen; - int const offset = opt[rPos].off; - if (ml == 1) { ip++; rPos++; continue; } /* literal; note: can end up with several literals, in which case, skip them */ - rPos += ml; - assert(ml >= MINMATCH); - assert((offset >= 1) && (offset <= MAX_DISTANCE)); - opSaved = op; - if ( LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ip - offset, limit, oend) ) /* updates ip, op and anchor */ - goto _dest_overflow; - } } - } /* while (ip < mflimit) */ - -_last_literals: - /* Encode Last Literals */ - { size_t lastRunSize = (size_t)(iend - anchor); /* literals */ - size_t litLength = (lastRunSize + 255 - RUN_MASK) / 255; - size_t const totalSize = 1 + litLength + lastRunSize; - if (limit == limitedDestSize) oend += LASTLITERALS; /* restore correct value */ - if (limit && (op + totalSize > oend)) { - if (limit == limitedOutput) return 0; /* Check output limit */ - /* adapt lastRunSize to fill 'dst' */ - lastRunSize = (size_t)(oend - op) - 1; - litLength = (lastRunSize + 255 - RUN_MASK) / 255; - lastRunSize -= litLength; - } - ip = anchor + lastRunSize; - - if (lastRunSize >= RUN_MASK) { - size_t accumulator = lastRunSize - RUN_MASK; - *op++ = (RUN_MASK << ML_BITS); - for(; accumulator >= 255 ; accumulator -= 255) *op++ = 255; - *op++ = (BYTE) accumulator; - } else { - *op++ = (BYTE)(lastRunSize << ML_BITS); - } - memcpy(op, anchor, lastRunSize); - op += lastRunSize; - } - - /* End */ - *srcSizePtr = (int) (((const char*)ip) - source); - return (int) ((char*)op-dst); - -_dest_overflow: - if (limit == limitedDestSize) { - op = opSaved; /* restore correct out pointer */ - goto _last_literals; - } - return 0; -} diff --git a/lz4libs/xxhash.c b/lz4libs/xxhash.c index bcf1f1db..3fc97fd2 100644 --- a/lz4libs/xxhash.c +++ b/lz4libs/xxhash.c @@ -52,7 +52,7 @@ #ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ # if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) # define XXH_FORCE_MEMORY_ACCESS 2 -# elif defined(__INTEL_COMPILER) || \ +# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \ (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) # define XXH_FORCE_MEMORY_ACCESS 1 # endif diff --git a/setup.py b/setup.py index 7dbc2331..73a8e2ae 100644 --- a/setup.py +++ b/setup.py @@ -1,7 +1,5 @@ #!/usr/bin/env python from setuptools import setup, find_packages, Extension -import subprocess -import os import sys from distutils import ccompiler @@ -104,32 +102,39 @@ extra_compile_args=extra_compile_args, extra_link_args=extra_link_args, libraries=libraries, - include_dirs=include_dirs, -) + include_dirs=include_dirs) lz4block = Extension('lz4.block._block', lz4block_sources, extra_compile_args=extra_compile_args, extra_link_args=extra_link_args, libraries=libraries, - include_dirs=include_dirs, -) + include_dirs=include_dirs) lz4frame = Extension('lz4.frame._frame', lz4frame_sources, extra_compile_args=extra_compile_args, extra_link_args=extra_link_args, libraries=libraries, - include_dirs=include_dirs, -) + include_dirs=include_dirs) -install_requires=[] +install_requires = [] # On Python earlier than 3.0 the builtins 
package isn't included, but it is
# provided by the future package
if sys.version_info < (3, 0):
    install_requires.append('future')
+
+# Dependencies for testing. We define a list here, so that we can
+# refer to it for the tests_require and the extras_require arguments
+# to setup below. The latter enables us to use pip install .[tests] to
+# install testing dependencies.
+tests_require = [
+    'pytest',
+    'psutil',
+]
+
 # Finally call setup with the extension modules as defined above.
 setup(
     name='lz4',
@@ -154,9 +159,13 @@
         lz4block,
         lz4frame
     ],
-    tests_require=[
-        'pytest',
-    ],
+    tests_require=tests_require,
+    extras_require={
+        'tests': tests_require,
+        'flake8': [
+            'flake8',
+        ]
+    },
     classifiers=[
         'Development Status :: 5 - Production/Stable',
         'License :: OSI Approved :: BSD License',
diff --git a/tests/bench.py b/tests/bench.py
deleted file mode 100644
index f99151ba..00000000
--- a/tests/bench.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import uuid
-import timeit
-import lz4
-import os
-from timeit import Timer
-import sys
-import blosc
-
-DATA = open(sys.argv[1], "rb").read()
-LZ4_DATA = lz4.block.compress(DATA)
-BLOSC_DATA = blosc.compress(DATA, cname='lz4', clevel=5, shuffle=True)
-LOOPS = 100
-
-print("Data Size:")
-print("  Input: %d" % len(DATA))
-print("  LZ4: %d (%.2f)" % (len(LZ4_DATA), len(LZ4_DATA) / float(len(DATA))))
-print("  Blosc: %d (%.2f)" % (len(BLOSC_DATA), len(BLOSC_DATA) / float(len(DATA))))
-print("  LZ4 / Blosc: %f" % (float(len(LZ4_DATA)) / float(len(BLOSC_DATA))))
-
-print("Benchmark: %d calls" % LOOPS)
-print("  LZ4 Compression: %fs" % (Timer("lz4.block.compress(DATA)", "from __main__ import DATA; import lz4").timeit(number=LOOPS)/LOOPS))
-print("  Blosc Compression: %fs" % (Timer("blosc.compress(DATA, cname='lz4', clevel=5, shuffle=True)", "from __main__ import DATA; import blosc").timeit(number=LOOPS)/LOOPS))
-print("  LZ4 Decompression: %fs" % (Timer("lz4.block.decompress(LZ4_DATA)", "from __main__ import LZ4_DATA; import lz4").timeit(number=LOOPS)/LOOPS))
-print("  Blosc Decompression : %fs" % (Timer("blosc.decompress(BLOSC_DATA)", "from __main__ import BLOSC_DATA; import blosc").timeit(number=LOOPS)/LOOPS))
diff --git a/tests/block/conftest.py b/tests/block/conftest.py
index d95a96e8..8eab729d 100644
--- a/tests/block/conftest.py
+++ b/tests/block/conftest.py
@@ -2,7 +2,8 @@
 import os
 import sys
 
-test_data=[
+
+test_data = [
     (b''),
     (os.urandom(8 * 1024)),
     (b'0' * 8 * 1024),
@@ -15,6 +16,7 @@
     (memoryview(os.urandom(8 * 1024)))
 ]
 
+
 @pytest.fixture(
     params=test_data,
     ids=[
@@ -24,6 +26,7 @@
 def data(request):
     return request.param
 
+
 @pytest.fixture(
     params=[
         (
@@ -41,6 +44,7 @@ def data(request):
 def store_size(request):
     return request.param
 
+
 @pytest.fixture(
     params=[
         (
@@ -58,14 +62,17 @@ def store_size(request):
 def return_bytearray(request):
     return request.param
 
+
 @pytest.fixture
 def c_return_bytearray(return_bytearray):
     return return_bytearray
 
+
 @pytest.fixture
 def d_return_bytearray(return_bytearray):
     return return_bytearray
 
+
 @pytest.fixture(
     params=[
         ('fast', None)
@@ -81,3 +88,16 @@ def d_return_bytearray(return_bytearray):
 )
 def mode(request):
     return request.param
+
+
+@pytest.fixture(
+    params=[
+        None,
+        (0, 0),
+        (100, 200),
+        (0, 8 * 1024),
+        os.urandom(8 * 1024)
+    ]
+)
+def dictionary(request):
+    return request.param
diff --git a/tests/block/test_block.py b/tests/block/test_block.py
deleted file mode 100644
index 658d94a3..00000000
--- a/tests/block/test_block.py
+++ /dev/null
@@ -1,159 +0,0 @@
-import lz4.block
-import sys
-from multiprocessing.pool import ThreadPool
-import os
-import pytest
-if sys.version_info <= (3, 2):
-    import struct
-
-
-def get_stored_size(buff):
-    if sys.version_info > (2, 7):
-        if isinstance(buff, memoryview):
-            b = buff.tobytes()
-        else:
-            b = bytes(buff)
-    else:
-        b = bytes(buff)
-
-    if len(b) < 4:
-        return None
-
-    if sys.version_info > (3, 2):
-        return int.from_bytes(b[:4], 'little')
-    else:
-        # This would not work on a memoryview object, hence buff.tobytes call
-        # above
-        return struct.unpack('<I', b[:4])[0]
+def get_stored_size(buff):
+    if sys.version_info > (2, 7):
+        if isinstance(buff, memoryview):
+            b = buff.tobytes()
+        else:
+            b = bytes(buff)
+    else:
+        b = bytes(buff)
+
+    if len(b) < 4:
+        return None
+
+    if sys.version_info > (3, 2):
+        return int.from_bytes(b[:4], 'little')
+    else:
+        # This would not work on a memoryview object, hence buff.tobytes call
+        # above
+        return struct.unpack('<I', b[:4])[0]
 if lz4.library_version_number() >= 10800:
     p = [True, False]
 else:
     p = [False, ]
+
+
 @pytest.fixture(
     params=[
         (pp) for pp in p
-        ]
+    ]
 )
 def block_checksum(request):
     return request.param
+
 compression_levels = [
-    (lz4frame.COMPRESSIONLEVEL_MIN),
-    (lz4frame.COMPRESSIONLEVEL_MINHC),
-    (lz4frame.COMPRESSIONLEVEL_MAX),
-    ]
+    (lz4frame.COMPRESSIONLEVEL_MIN),
+    (lz4frame.COMPRESSIONLEVEL_MINHC),
+    (lz4frame.COMPRESSIONLEVEL_MAX),
+]
+
+
 @pytest.fixture(
     params=compression_levels
 )
 def compression_level(request):
     return request.param
+
 @pytest.fixture(
     params=[
         (True),
@@ -64,6 +74,7 @@ def compression_level(request):
 def auto_flush(request):
     return request.param
+
 @pytest.fixture(
     params=[
         (True),
@@ -73,6 +84,7 @@ def auto_flush(request):
 def store_size(request):
     return request.param
+
 @pytest.fixture(
     params=[
         (True),
diff --git a/tests/frame/helpers.py b/tests/frame/helpers.py
index 11cf622b..e6cb0c9e 100644
--- a/tests/frame/helpers.py
+++ b/tests/frame/helpers.py
@@ -1,5 +1,4 @@
 import lz4.frame as lz4frame
-import math
 
 
 def get_frame_info_check(compressed_data,
@@ -15,7 +14,7 @@ def get_frame_info_check(compressed_data,
 
     assert frame_info["content_checksum"] == content_checksum
     assert frame_info["block_checksum"] == block_checksum
 
-    assert frame_info["skippable"] == False
+    assert frame_info["skippable"] is False
 
     if store_size is True:
         assert frame_info["content_size"] == source_size
@@ -34,7 +33,8 @@ def get_frame_info_check(compressed_data,
 
 def get_chunked(data, nchunks):
     size = len(data)
-    stride = int(math.ceil(float(size)/nchunks))  # no // on py 2.6
+    # stride = int(math.ceil(float(size)/nchunks))  # no // on py 2.6
+    stride = size // nchunks
     start = 0
     end = start + stride
     while end < size:
diff --git a/tests/frame/test_frame_0.py b/tests/frame/test_frame_0.py
index 7706a5c4..f03431d4 100644
--- a/tests/frame/test_frame_0.py
+++ b/tests/frame/test_frame_0.py
@@ -2,26 +2,31 @@
 import lz4
 import re
 
+
 def test_library_version_number():
     v = lz4.library_version_number()
     assert isinstance(v, int)
     assert v > 10000
 
+
 def test_library_version_string():
     v = lz4.library_version_string()
     assert isinstance(v, str)
     assert v.count('.') == 2
-    r=re.compile(r'^[0-9]*\.[0-9]*\.[0-9]*$')
+    r = re.compile(r'^[0-9]*\.[0-9]*\.[0-9]*$')
     assert r.match(v) is not None
 
+
 def test_create_compression_context():
     context = lz4frame.create_compression_context()
     assert context is not None
 
+
 def test_create_decompression_context():
     context = lz4frame.create_decompression_context()
     assert context is not None
 
+
 def test_reset_decompression_context_1():
     if lz4.library_version_number() >= 10800:
         context = lz4frame.create_decompression_context()
@@ -30,14 +35,15 @@
     else:
         pass
 
+
 def
 def test_reset_decompression_context_2():
     if lz4.library_version_number() >= 10800:
         c = lz4frame.compress(b'1234', return_bytearray=False)
         context = lz4frame.create_decompression_context()
         try:
             # Simulate an error by passing junk to decompress
-            d = lz4frame.decompress_chunk(context, c[1:3])
-        except:
+            d = lz4frame.decompress_chunk(context, c[4:])
+        except RuntimeError:
             pass
         r = lz4frame.reset_decompression_context(context)
         assert r is None
@@ -45,18 +51,21 @@ def test_reset_decompression_context_2():
         d, bytes_read, eof = lz4frame.decompress_chunk(context, c)
         assert d == b'1234'
         assert bytes_read == len(c)
-        assert eof == True
+        assert eof is True
     else:
         pass
 
+
 def test_compress_return_type_1():
     r = lz4frame.compress(b'', return_bytearray=False)
     assert isinstance(r, bytes)
 
+
 def test_compress_return_type_2():
     r = lz4frame.compress(b'', return_bytearray=True)
     assert isinstance(r, bytearray)
 
+
 def test_decompress_return_type_1():
     c = lz4frame.compress(b'', return_bytearray=False)
     r = lz4frame.decompress(
@@ -66,6 +75,7 @@
     )
     assert isinstance(r, bytes)
 
+
 def test_decompress_return_type_2():
     c = lz4frame.compress(b'', return_bytearray=False)
     r = lz4frame.decompress(
@@ -75,6 +85,7 @@
     )
     assert isinstance(r, bytearray)
 
+
 def test_decompress_return_type_3():
     c = lz4frame.compress(b'', return_bytearray=False)
     r = lz4frame.decompress(
@@ -86,6 +97,7 @@
     assert isinstance(r[0], bytes)
     assert isinstance(r[1], int)
 
+
 def test_decompress_return_type_4():
     c = lz4frame.compress(b'', return_bytearray=False)
     r = lz4frame.decompress(
@@ -97,6 +109,7 @@
     assert isinstance(r[0], bytearray)
     assert isinstance(r[1], int)
 
+
 def test_decompress_chunk_return_type_1():
     c = lz4frame.compress(b'', return_bytearray=False)
     d = lz4frame.create_decompression_context()
@@ -109,6 +122,7 @@
     assert isinstance(b, int)
     assert isinstance(e, bool)
 
+
 def test_decompress_chunk_return_type_2():
     c = lz4frame.compress(b'', return_bytearray=False)
     d = lz4frame.create_decompression_context()
@@ -121,6 +135,7 @@
     assert isinstance(b, int)
     assert isinstance(e, bool)
 
+
 def test_decompress_chunk_return_type_3():
     c = lz4frame.compress(b'', return_bytearray=False)
     d = lz4frame.create_decompression_context()
@@ -134,6 +149,7 @@
     assert isinstance(r[1], int)
     assert isinstance(r[2], bool)
 
+
 def test_decompress_chunk_return_type_4():
     c = lz4frame.compress(b'', return_bytearray=False)
     d = lz4frame.create_decompression_context()
@@ -147,6 +163,7 @@
     assert isinstance(r[1], int)
     assert isinstance(r[2], bool)
 
+
 def test_block_size_constants():
     assert lz4frame.BLOCKSIZE_DEFAULT == 0
     assert lz4frame.BLOCKSIZE_MAX64KB == 4
diff --git a/tests/frame/test_frame_1.py b/tests/frame/test_frame_1.py
index 444b6e8c..35110c44 100644
--- a/tests/frame/test_frame_1.py
+++ b/tests/frame/test_frame_1.py
@@ -5,7 +5,7 @@
 from .helpers import get_frame_info_check
 
 
-test_data=[
+test_data = [
     (b''),
     (os.urandom(8 * 1024)),
     (b'0' * 8 * 1024),
@@ -21,6 +21,7 @@
     (memoryview(os.urandom(8 * 1024)))
 ]
 
+
 @pytest.fixture(
     params=test_data,
     ids=[
@@ -59,7 +60,8 @@ def test_roundtrip_1(
         content_checksum,
         block_checksum,
     )
-    decompressed, bytes_read = lz4frame.decompress(compressed, return_bytes_read=True)
+    decompressed, bytes_read = lz4frame.decompress(
+        compressed, return_bytes_read=True)
     assert bytes_read == len(compressed)
     assert decompressed == data
 
@@ -103,6 +105,7 @@ def test_roundtrip_2(data,
         content_checksum,
         block_checksum,
     )
-    decompressed, bytes_read = lz4frame.decompress(compressed, return_bytes_read=True)
+    decompressed, bytes_read = lz4frame.decompress(
+        compressed, return_bytes_read=True)
     assert bytes_read == len(compressed)
     assert decompressed == data
diff --git a/tests/frame/test_frame_2.py b/tests/frame/test_frame_2.py
index 3a0e6d6c..80b44b87 100644
--- a/tests/frame/test_frame_2.py
+++ b/tests/frame/test_frame_2.py
@@ -8,7 +8,7 @@
 )
 
 
-test_data=[
+test_data = [
     (b'', 1, 1),
     (os.urandom(8 * 1024), 8, 1),
     (os.urandom(8 * 1024), 1, 8),
@@ -103,5 +103,5 @@ def test_roundtrip_chunked(data, block_size, block_linked,
     assert bytes_read == len(compressed)
     assert decompressed == data
 
-    assert eofs[-1] == True
-    assert (True in eofs[:-2]) == False
+    assert eofs[-1] is True
+    assert (True in eofs[:-2]) is False
diff --git a/tests/frame/test_frame_3.py b/tests/frame/test_frame_3.py
index ee1a554a..aa7c782d 100644
--- a/tests/frame/test_frame_3.py
+++ b/tests/frame/test_frame_3.py
@@ -3,10 +3,11 @@
 import os
 import struct
 
-test_data=[
+test_data = [
     (os.urandom(256 * 1024)),
 ]
 
+
 @pytest.fixture(
     params=test_data,
     ids=[
@@ -24,8 +25,9 @@ def test_decompress_truncated(data):
     with pytest.raises(RuntimeError, message=message):
         lz4frame.decompress(compressed[:6])
 
-    for i in range(16, len(compressed) - 1, 5): # 15 is the max size of the header
-        message = r'^Frame incomplete. LZ4F_decompress returned: {0}'.format(len(compressed) - i)
+    for i in range(16, len(compressed) - 1, 5):  # 15 is the max size of the header
+        message = r'^Frame incomplete. LZ4F_decompress returned: {0}'.format(
+            len(compressed) - i)
         try:
             lz4frame.decompress(compressed[:i])
         except RuntimeError as r:
diff --git a/tests/frame/test_frame_4.py b/tests/frame/test_frame_4.py
index 490f6376..7fa16547 100644
--- a/tests/frame/test_frame_4.py
+++ b/tests/frame/test_frame_4.py
@@ -6,7 +6,7 @@
     get_chunked,
 )
 
-test_data=[
+test_data = [
     b'',
     (128 * (32 * os.urandom(32))),
     (256 * (32 * os.urandom(32))),
@@ -14,6 +14,7 @@
     (1024 * (32 * os.urandom(32))),
 ]
 
+
 @pytest.fixture(
     params=test_data,
     ids=[
@@ -23,6 +24,7 @@
 def data(request):
     return request.param
 
+
 @pytest.fixture(
     params=[
         (True),
@@ -32,6 +34,7 @@ def data(request):
 def reset(request):
     return request.param
 
+
 @pytest.fixture(
     params=[
         (1),
@@ -86,7 +89,8 @@ def do_compress():
         block_checksum,
     )
 
-    decompressed, bytes_read = lz4frame.decompress(compressed, return_bytes_read=True)
+    decompressed, bytes_read = lz4frame.decompress(
+        compressed, return_bytes_read=True)
 
     assert data == decompressed
     assert bytes_read == len(compressed)
diff --git a/tests/frame/test_frame_5.py b/tests/frame/test_frame_5.py
index 9d5c72dc..b61aec73 100644
--- a/tests/frame/test_frame_5.py
+++ b/tests/frame/test_frame_5.py
@@ -1,11 +1,11 @@
 import lz4.frame
-import time
 import pytest
 
-test_data=[
+test_data = [
     (b'a' * 1024 * 1024),
 ]
 
+
 @pytest.fixture(
     params=test_data,
     ids=[
@@ -25,7 +25,7 @@ def test_frame_decompress_mem_usage(data):
     prev_snapshot = None
 
     for i in range(1000):
-        decompressed = lz4.frame.decompress(compressed)
+        decompressed = lz4.frame.decompress(compressed)  # noqa: F841
 
         if i % 100 == 0:
             snapshot = tracemalloc.take_snapshot()
@@ -47,7 +47,9 @@ def test_frame_decompress_chunk_mem_usage(data):
 
     for i in range(1000):
         context = lz4.frame.create_decompression_context()
-        decompressed = lz4.frame.decompress_chunk(context, compressed)
+        decompressed = lz4.frame.decompress_chunk(  # noqa: F841
+            context, compressed
+        )
 
         if i % 100 == 0:
             snapshot = tracemalloc.take_snapshot()
@@ -70,7 +72,7 @@
 
     for i in range(1000):
         with lz4.frame.open('test.lz4', 'r') as f:
-            decompressed = f.read()
+            decompressed = f.read()  # noqa: F841
 
         if i % 100 == 0:
             snapshot = tracemalloc.take_snapshot()
@@ -82,11 +84,10 @@
 
         prev_snapshot = snapshot
 
-# TODO: add many more memory usage tests along the lines of this one for other funcs
+# TODO: add many more memory usage tests along the lines of this one
+# for other funcs
 
 
 def test_dummy_always_pass():
     # If pytest finds all tests are skipped, then it exits with code 5 rather
     # than 0, which tox sees as an error. Here we add a dummy test that always passes.
     assert True
-
-
diff --git a/tests/frame/test_frame_6.py b/tests/frame/test_frame_6.py
index 8608f675..335d09e4 100644
--- a/tests/frame/test_frame_6.py
+++ b/tests/frame/test_frame_6.py
@@ -2,12 +2,13 @@
 import pytest
 import lz4.frame as lz4frame
 
-test_data=[
+test_data = [
     b'',
     (128 * (32 * os.urandom(32))),
     (5 * 128 * os.urandom(1024)),
 ]
 
+
 @pytest.fixture(
     params=test_data,
     ids=[
@@ -17,11 +18,14 @@
 def data(request):
     return request.param
 
+
 compression_levels = [
-    (lz4frame.COMPRESSIONLEVEL_MIN),
-    # (lz4frame.COMPRESSIONLEVEL_MINHC),
-    # (lz4frame.COMPRESSIONLEVEL_MAX),
-    ]
+    (lz4frame.COMPRESSIONLEVEL_MIN),
+    # (lz4frame.COMPRESSIONLEVEL_MINHC),
+    # (lz4frame.COMPRESSIONLEVEL_MAX),
+]
+
+
 @pytest.fixture(
     params=compression_levels
 )
@@ -33,6 +37,7 @@ def compression_level(request):
 def test_lz4frame_open_write(data):
     with lz4frame.open('testfile', mode='wb') as fp:
         fp.write(data)
 
+
 def test_lz4frame_open_write_read_defaults(data):
     with lz4frame.open('testfile', mode='wb') as fp:
         fp.write(data)
@@ -40,6 +45,7 @@ def test_lz4frame_open_write_read_defaults(data):
         data_out = fp.read()
     assert data_out == data
 
+
 def test_lz4frame_open_write_read_text():
     data = u'This is a test string'
     with lz4frame.open('testfile', mode='wt') as fp:
@@ -48,6 +54,18 @@ def test_lz4frame_open_write_read_text():
         data_out = fp.read()
     assert data_out == data
 
+
+def test_lz4frame_open_write_read_text_iter():
+    data = u'This is a test string'
+    with lz4frame.open('testfile', mode='wt') as fp:
+        fp.write(data)
+    data_out = ''
+    with lz4frame.open('testfile', mode='rt') as fp:
+        for line in fp:
+            data_out += line
+    assert data_out == data
+
+
 def test_lz4frame_open_write_read(
         data,
         compression_level,
diff --git a/tests/frame/test_frame_7.py b/tests/frame/test_frame_7.py
index 1ec48942..583f3fbb 100644
--- a/tests/frame/test_frame_7.py
+++ b/tests/frame/test_frame_7.py
@@ -2,10 +2,11 @@
 import pytest
 import os
 
-test_data=[
+test_data = [
     (os.urandom(32) * 256),
 ]
 
+
 @pytest.fixture(
     params=test_data,
     ids=[
@@ -15,6 +16,7 @@
 def data(request):
     return request.param
 
+
 def test_roundtrip_multiframe_1(data):
     nframes = 4
 
@@ -29,6 +31,7 @@
     assert len(decompressed) == nframes * len(data)
     assert data * nframes == decompressed
 
+
 def test_roundtrip_multiframe_2(data):
     nframes = 4
 
@@ -46,6 +49,7 @@
     assert len(decompressed) == nframes * len(data)
     assert data * nframes == decompressed
 
+
 def test_roundtrip_multiframe_3(data):
     nframes = 4
 
@@ -61,12 +65,13 @@
     for _ in range(nframes):
         d, bytes_read, eof = lz4frame.decompress_chunk(ctx, compressed)
         decompressed += d
-        assert eof == True
+        assert eof is True
         assert bytes_read == len(compressed) // nframes
 
     assert len(decompressed) == nframes * len(data)
     assert data * nframes == decompressed
 
+
 def test_roundtrip_multiframe_4(data):
     nframes = 4
 
@@ -85,12 +90,13 @@
         else:
             d = decompressor.unused_data
         decompressed += decompressor.decompress(d)
-        assert decompressor.eof == True
-        assert decompressor.needs_input == True
+        assert decompressor.eof is True
+        assert decompressor.needs_input is True
         if i == nframes - 1:
-            assert decompressor.unused_data == None
+            assert decompressor.unused_data is None
         else:
-            assert len(decompressor.unused_data) == len(compressed) * (nframes - i - 1) / nframes
+            assert len(decompressor.unused_data) == len(
+                compressed) * (nframes - i - 1) / nframes
 
     assert len(decompressed) == nframes * len(data)
     assert data * nframes == decompressed
diff --git a/tox.ini b/tox.ini
index a5d50251..43c08a91 100644
--- a/tox.ini
+++ b/tox.ini
@@ -2,16 +2,19 @@
 envlist = py
 
 [testenv]
-deps=pytest
+deps=.[tests]
+passenv = *
 # setenv =
 #    PYTHONMALLOC = pymalloc
 #    PYTHONMALLOCSTATS = 'yes'
 commands=
-    pytest tests/block/test_block.py
-    pytest tests/frame/test_frame_0.py
-    pytest tests/frame/test_frame_1.py
-    pytest tests/frame/test_frame_2.py
-    pytest tests/frame/test_frame_3.py
-    pytest tests/frame/test_frame_4.py
-    pytest tests/frame/test_frame_5.py
-    pytest tests/frame/test_frame_6.py
-    pytest tests/frame/test_frame_7.py
+    pytest --tb=long -l tests/block/test_block_0.py
+    pytest --tb=long -l tests/block/test_block_1.py
+    pytest --tb=long -l tests/block/test_block_2.py
+    pytest --tb=long -l tests/frame/test_frame_0.py
+    pytest --tb=long -l tests/frame/test_frame_1.py
+    pytest --tb=long -l tests/frame/test_frame_2.py
+    pytest --tb=long -l tests/frame/test_frame_3.py
+    pytest --tb=long -l tests/frame/test_frame_4.py
+    pytest --tb=long -l tests/frame/test_frame_5.py
+    pytest --tb=long -l tests/frame/test_frame_6.py
+    pytest --tb=long -l tests/frame/test_frame_7.py