| @@ -0,0 +1,328 @@ | ||
| /* ****************************************************************** | ||
| FSE : Finite State Entropy decoder | ||
| Copyright (C) 2013-2015, Yann Collet. | ||
| BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) | ||
| Redistribution and use in source and binary forms, with or without | ||
| modification, are permitted provided that the following conditions are | ||
| met: | ||
| * Redistributions of source code must retain the above copyright | ||
| notice, this list of conditions and the following disclaimer. | ||
| * Redistributions in binary form must reproduce the above | ||
| copyright notice, this list of conditions and the following disclaimer | ||
| in the documentation and/or other materials provided with the | ||
| distribution. | ||
| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
| A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
| SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
| LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
| DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
| THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
| OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| You can contact the author at : | ||
| - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy | ||
| - Public forum : https://groups.google.com/forum/#!forum/lz4c | ||
| ****************************************************************** */ | ||
|
|
||
|
|
||
| /* ************************************************************** | ||
| * Compiler specifics | ||
| ****************************************************************/ | ||
| #ifdef _MSC_VER /* Visual Studio */ | ||
| # define FORCE_INLINE static __forceinline | ||
| # include <intrin.h> /* For Visual 2005 */ | ||
| # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ | ||
| # pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */ | ||
| #else | ||
| # if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ | ||
| # ifdef __GNUC__ | ||
| # define FORCE_INLINE static inline __attribute__((always_inline)) | ||
| # else | ||
| # define FORCE_INLINE static inline | ||
| # endif | ||
| # else | ||
| # define FORCE_INLINE static | ||
| # endif /* __STDC_VERSION__ */ | ||
| #endif | ||
|
|
||
|
|
||
| /* ************************************************************** | ||
| * Includes | ||
| ****************************************************************/ | ||
| #include <stdlib.h> /* malloc, free, qsort */ | ||
| #include <string.h> /* memcpy, memset */ | ||
| #include "bitstream.h" | ||
| #define FSE_STATIC_LINKING_ONLY | ||
| #include "fse.h" | ||
|
|
||
|
|
||
| /* ************************************************************** | ||
| * Error Management | ||
| ****************************************************************/ | ||
| #define FSE_isError ERR_isError | ||
| #define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ | ||
|
|
||
| /* check and forward error code */ | ||
| #define CHECK_F(f) { size_t const e = f; if (FSE_isError(e)) return e; } | ||
|
|
||
|
|
||
| /* ************************************************************** | ||
| * Templates | ||
| ****************************************************************/ | ||
/*
   This file is designed to be included,
   to generate type-specific functions (template emulation in C).
   The objective is to write these functions only once, for improved maintenance.
*/
|
|
||
| /* safety checks */ | ||
| #ifndef FSE_FUNCTION_EXTENSION | ||
| # error "FSE_FUNCTION_EXTENSION must be defined" | ||
| #endif | ||
| #ifndef FSE_FUNCTION_TYPE | ||
| # error "FSE_FUNCTION_TYPE must be defined" | ||
| #endif | ||
|
|
||
| /* Function names */ | ||
| #define FSE_CAT(X,Y) X##Y | ||
| #define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) | ||
| #define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) | ||
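/* Illustrative expansion (not part of the original source) : assuming the including
   build defines, for example,
       #define FSE_FUNCTION_TYPE       BYTE
       #define FSE_FUNCTION_EXTENSION
   then FSE_FUNCTION_NAME(FSE_buildDTable, FSE_FUNCTION_EXTENSION) first expands its
   arguments, then pastes them with FSE_CAT, resolving to the plain name FSE_buildDTable ;
   a non-empty extension (e.g. _byte) would instead produce FSE_buildDTable_byte. */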
|
|
||
|
|
||
| /* Function templates */ | ||
| FSE_DTable* FSE_createDTable (unsigned tableLog) | ||
| { | ||
| if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX; | ||
| return (FSE_DTable*)malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) ); | ||
| } | ||
|
|
||
| void FSE_freeDTable (FSE_DTable* dt) | ||
| { | ||
| free(dt); | ||
| } | ||
|
|
||
| size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) | ||
| { | ||
void* const tdPtr = dt+1;   /* because *dt is unsigned, 32-bits aligned on 32-bits ; dt+1 skips the 32-bit DTable header */
| FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr); | ||
| U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1]; | ||
|
|
||
| U32 const maxSV1 = maxSymbolValue + 1; | ||
| U32 const tableSize = 1 << tableLog; | ||
| U32 highThreshold = tableSize-1; | ||
|
|
||
| /* Sanity Checks */ | ||
| if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge); | ||
| if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); | ||
|
|
||
| /* Init, lay down lowprob symbols */ | ||
| { FSE_DTableHeader DTableH; | ||
| DTableH.tableLog = (U16)tableLog; | ||
| DTableH.fastMode = 1; | ||
| { S16 const largeLimit= (S16)(1 << (tableLog-1)); | ||
| U32 s; | ||
| for (s=0; s<maxSV1; s++) { | ||
| if (normalizedCounter[s]==-1) { | ||
| tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s; | ||
| symbolNext[s] = 1; | ||
| } else { | ||
| if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0; | ||
| symbolNext[s] = normalizedCounter[s]; | ||
| } } } | ||
| memcpy(dt, &DTableH, sizeof(DTableH)); | ||
| } | ||
|
|
||
| /* Spread symbols */ | ||
| { U32 const tableMask = tableSize-1; | ||
| U32 const step = FSE_TABLESTEP(tableSize); | ||
| U32 s, position = 0; | ||
| for (s=0; s<maxSV1; s++) { | ||
| int i; | ||
| for (i=0; i<normalizedCounter[s]; i++) { | ||
| tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s; | ||
| position = (position + step) & tableMask; | ||
| while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */ | ||
| } } | ||
| if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */ | ||
| } | ||
|
|
||
| /* Build Decoding table */ | ||
| { U32 u; | ||
| for (u=0; u<tableSize; u++) { | ||
| FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol); | ||
| U16 nextState = symbolNext[symbol]++; | ||
| tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32 ((U32)nextState) ); | ||
| tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize); | ||
| } } | ||
|
|
||
| return 0; | ||
| } | ||
|
|
||
|
|
||
| #ifndef FSE_COMMONDEFS_ONLY | ||
|
|
||
| /*-******************************************************* | ||
| * Decompression (Byte symbols) | ||
| *********************************************************/ | ||
| size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue) | ||
| { | ||
| void* ptr = dt; | ||
| FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; | ||
| void* dPtr = dt + 1; | ||
| FSE_decode_t* const cell = (FSE_decode_t*)dPtr; | ||
|
|
||
| DTableH->tableLog = 0; | ||
| DTableH->fastMode = 0; | ||
|
|
||
| cell->newState = 0; | ||
| cell->symbol = symbolValue; | ||
| cell->nbBits = 0; | ||
|
|
||
| return 0; | ||
| } | ||
|
|
||
|
|
||
| size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits) | ||
| { | ||
| void* ptr = dt; | ||
| FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; | ||
| void* dPtr = dt + 1; | ||
| FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr; | ||
| const unsigned tableSize = 1 << nbBits; | ||
| const unsigned tableMask = tableSize - 1; | ||
| const unsigned maxSV1 = tableMask+1; | ||
| unsigned s; | ||
|
|
||
| /* Sanity checks */ | ||
| if (nbBits < 1) return ERROR(GENERIC); /* min size */ | ||
|
|
||
| /* Build Decoding Table */ | ||
| DTableH->tableLog = (U16)nbBits; | ||
| DTableH->fastMode = 1; | ||
| for (s=0; s<maxSV1; s++) { | ||
| dinfo[s].newState = 0; | ||
| dinfo[s].symbol = (BYTE)s; | ||
| dinfo[s].nbBits = (BYTE)nbBits; | ||
| } | ||
|
|
||
| return 0; | ||
| } | ||
|
|
||
| FORCE_INLINE size_t FSE_decompress_usingDTable_generic( | ||
| void* dst, size_t maxDstSize, | ||
| const void* cSrc, size_t cSrcSize, | ||
| const FSE_DTable* dt, const unsigned fast) | ||
| { | ||
| BYTE* const ostart = (BYTE*) dst; | ||
| BYTE* op = ostart; | ||
| BYTE* const omax = op + maxDstSize; | ||
| BYTE* const olimit = omax-3; | ||
|
|
||
| BIT_DStream_t bitD; | ||
| FSE_DState_t state1; | ||
| FSE_DState_t state2; | ||
|
|
||
| /* Init */ | ||
| CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize)); | ||
|
|
||
| FSE_initDState(&state1, &bitD, dt); | ||
| FSE_initDState(&state2, &bitD, dt); | ||
|
|
||
| #define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD) | ||
|
|
||
| /* 4 symbols per loop */ | ||
| for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) & (op<olimit) ; op+=4) { | ||
| op[0] = FSE_GETSYMBOL(&state1); | ||
|
|
||
| if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ | ||
| BIT_reloadDStream(&bitD); | ||
|
|
||
| op[1] = FSE_GETSYMBOL(&state2); | ||
|
|
||
| if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ | ||
| { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } } | ||
|
|
||
| op[2] = FSE_GETSYMBOL(&state1); | ||
|
|
||
| if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ | ||
| BIT_reloadDStream(&bitD); | ||
|
|
||
| op[3] = FSE_GETSYMBOL(&state2); | ||
| } | ||
|
|
||
| /* tail */ | ||
| /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */ | ||
| while (1) { | ||
| if (op>(omax-2)) return ERROR(dstSize_tooSmall); | ||
| *op++ = FSE_GETSYMBOL(&state1); | ||
| if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) { | ||
| *op++ = FSE_GETSYMBOL(&state2); | ||
| break; | ||
| } | ||
|
|
||
| if (op>(omax-2)) return ERROR(dstSize_tooSmall); | ||
| *op++ = FSE_GETSYMBOL(&state2); | ||
| if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) { | ||
| *op++ = FSE_GETSYMBOL(&state1); | ||
| break; | ||
| } } | ||
|
|
||
| return op-ostart; | ||
| } | ||
|
|
||
|
|
||
| size_t FSE_decompress_usingDTable(void* dst, size_t originalSize, | ||
| const void* cSrc, size_t cSrcSize, | ||
| const FSE_DTable* dt) | ||
| { | ||
| const void* ptr = dt; | ||
| const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr; | ||
| const U32 fastMode = DTableH->fastMode; | ||
|
|
||
| /* select fast mode (static) */ | ||
| if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1); | ||
| return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0); | ||
| } | ||
|
|
||
|
|
||
| size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog) | ||
| { | ||
| const BYTE* const istart = (const BYTE*)cSrc; | ||
| const BYTE* ip = istart; | ||
| short counting[FSE_MAX_SYMBOL_VALUE+1]; | ||
| unsigned tableLog; | ||
| unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE; | ||
|
|
||
| /* normal FSE decoding mode */ | ||
| size_t const NCountLength = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize); | ||
| if (FSE_isError(NCountLength)) return NCountLength; | ||
| //if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size; supposed to be already checked in NCountLength, only remaining case : NCountLength==cSrcSize */ | ||
| if (tableLog > maxLog) return ERROR(tableLog_tooLarge); | ||
| ip += NCountLength; | ||
| cSrcSize -= NCountLength; | ||
|
|
||
| CHECK_F( FSE_buildDTable (workSpace, counting, maxSymbolValue, tableLog) ); | ||
|
|
||
| return FSE_decompress_usingDTable (dst, dstCapacity, ip, cSrcSize, workSpace); /* always return, even if it is an error code */ | ||
| } | ||
|
|
||
|
|
||
| typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)]; | ||
|
|
||
| size_t FSE_decompress(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize) | ||
| { | ||
| DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */ | ||
| return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, dt, FSE_MAX_TABLELOG); | ||
| } | ||
|
|
||
|
|
||
|
|
||
| #endif /* FSE_COMMONDEFS_ONLY */ |
| @@ -0,0 +1,260 @@ | ||
| /* ****************************************************************** | ||
| Huffman coder, part of New Generation Entropy library | ||
| header file | ||
| Copyright (C) 2013-2016, Yann Collet. | ||
| BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) | ||
| Redistribution and use in source and binary forms, with or without | ||
| modification, are permitted provided that the following conditions are | ||
| met: | ||
| * Redistributions of source code must retain the above copyright | ||
| notice, this list of conditions and the following disclaimer. | ||
| * Redistributions in binary form must reproduce the above | ||
| copyright notice, this list of conditions and the following disclaimer | ||
| in the documentation and/or other materials provided with the | ||
| distribution. | ||
| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
| A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
| SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
| LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
| DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
| THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
| OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| You can contact the author at : | ||
| - Source repository : https://github.com/Cyan4973/FiniteStateEntropy | ||
| ****************************************************************** */ | ||
| #ifndef HUF_H_298734234 | ||
| #define HUF_H_298734234 | ||
|
|
||
| #if defined (__cplusplus) | ||
| extern "C" { | ||
| #endif | ||
|
|
||
|
|
||
| /* *** Dependencies *** */ | ||
| #include <stddef.h> /* size_t */ | ||
|
|
||
|
|
||
| /* *** simple functions *** */ | ||
| /** | ||
| HUF_compress() : | ||
| Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'. | ||
| 'dst' buffer must be already allocated. | ||
| Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize). | ||
| `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB. | ||
| @return : size of compressed data (<= `dstCapacity`). | ||
| Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!! | ||
| if return == 1, srcData is a single repeated byte symbol (RLE compression). | ||
| if HUF_isError(return), compression failed (more details using HUF_getErrorName()) | ||
| */ | ||
| size_t HUF_compress(void* dst, size_t dstCapacity, | ||
| const void* src, size_t srcSize); | ||
|
|
||
| /** | ||
| HUF_decompress() : | ||
| Decompress HUF data from buffer 'cSrc', of size 'cSrcSize', | ||
into already allocated buffer 'dst', of minimum size 'originalSize'.
| `originalSize` : **must** be the ***exact*** size of original (uncompressed) data. | ||
| Note : in contrast with FSE, HUF_decompress can regenerate | ||
| RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data, | ||
| because it knows size to regenerate. | ||
| @return : size of regenerated data (== originalSize), | ||
| or an error code, which can be tested using HUF_isError() | ||
| */ | ||
| size_t HUF_decompress(void* dst, size_t originalSize, | ||
| const void* cSrc, size_t cSrcSize); | ||
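/* Illustrative round-trip (not part of the original source) : the function and buffer
   names are hypothetical, `dstCapacity` is assumed >= HUF_compressBound(srcSize), and
   `regen` is assumed to hold at least srcSize bytes. Kept under #if 0 so it is never compiled. */
#if 0
static size_t example_huf_roundtrip(void* dst, size_t dstCapacity,
                                    void* regen, const void* src, size_t srcSize)
{
    size_t const cSize = HUF_compress(dst, dstCapacity, src, srcSize);
    if (HUF_isError(cSize)) return cSize;   /* compression failed */
    if (cSize <= 1) return cSize;           /* 0 : not compressible ; 1 : RLE ; dst holds nothing useful */
    /* HUF_decompress() needs the exact original size to regenerate the data */
    return HUF_decompress(regen, srcSize, dst, cSize);
}
#endif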
|
|
||
|
|
||
| /* *** Tool functions *** */ | ||
| #define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */ | ||
| size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */ | ||
|
|
||
| /* Error Management */ | ||
| unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */ | ||
| const char* HUF_getErrorName(size_t code); /**< provides error code string (useful for debugging) */ | ||
|
|
||
|
|
||
| /* *** Advanced function *** */ | ||
|
|
||
| /** HUF_compress2() : | ||
| * Same as HUF_compress(), but offers direct control over `maxSymbolValue` and `tableLog` . | ||
| * `tableLog` must be `<= HUF_TABLELOG_MAX` . */ | ||
| size_t HUF_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog); | ||
|
|
||
| /** HUF_compress4X_wksp() : | ||
| * Same as HUF_compress2(), but uses externally allocated `workSpace`, which must be a table of >= 1024 unsigned */ | ||
| size_t HUF_compress4X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */ | ||
|
|
||
|
|
||
|
|
||
| #ifdef HUF_STATIC_LINKING_ONLY | ||
|
|
||
| /* *** Dependencies *** */ | ||
| #include "mem.h" /* U32 */ | ||
|
|
||
|
|
||
| /* *** Constants *** */ | ||
| #define HUF_TABLELOG_MAX 12 /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */ | ||
| #define HUF_TABLELOG_DEFAULT 11 /* tableLog by default, when not specified */ | ||
| #define HUF_SYMBOLVALUE_MAX 255 | ||
|
|
||
#define HUF_TABLELOG_ABSOLUTEMAX 15 /* absolute limit of HUF_TABLELOG_MAX. Beyond that value, code does not work */
| #if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX) | ||
| # error "HUF_TABLELOG_MAX is too large !" | ||
| #endif | ||
|
|
||
|
|
||
| /* **************************************** | ||
| * Static allocation | ||
| ******************************************/ | ||
| /* HUF buffer bounds */ | ||
| #define HUF_CTABLEBOUND 129 | ||
| #define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true if incompressible pre-filtered with fast heuristic */ | ||
| #define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ | ||
|
|
||
| /* static allocation of HUF's Compression Table */ | ||
| #define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \ | ||
| U32 name##hb[maxSymbolValue+1]; \ | ||
| void* name##hv = &(name##hb); \ | ||
| HUF_CElt* name = (HUF_CElt*)(name##hv) /* no final ; */ | ||
|
|
||
| /* static allocation of HUF's DTable */ | ||
| typedef U32 HUF_DTable; | ||
| #define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<(maxTableLog))) | ||
| #define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \ | ||
| HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) } | ||
| #define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \ | ||
| HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) } | ||
|
|
||
| /* The workspace must have alignment at least 4 and be at least this large */ | ||
| #define HUF_WORKSPACE_SIZE (6 << 10) | ||
| #define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32)) | ||
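/* Illustrative use of the static-allocation helpers above (not part of the original
   source) ; all names are hypothetical and the block is never compiled. */
#if 0
static void example_static_allocation(void)
{
    HUF_CREATE_STATIC_CTABLE(myCTable, HUF_SYMBOLVALUE_MAX);   /* compression table, on the stack */
    HUF_CREATE_STATIC_DTABLEX2(myDTable, HUF_TABLELOG_MAX);    /* single-symbol decoding table */
    U32 myWorkspace[HUF_WORKSPACE_SIZE_U32];                   /* scratch area for the *_wksp() variants */
    (void)myCTable; (void)myDTable; (void)myWorkspace;
}
#endif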
|
|
||
|
|
||
| /* **************************************** | ||
| * Advanced decompression functions | ||
| ******************************************/ | ||
| size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ | ||
| size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ | ||
|
|
||
| size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< decodes RLE and uncompressed */ | ||
| size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */ | ||
| size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ | ||
| size_t HUF_decompress4X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ | ||
|
|
||
|
|
||
| /* **************************************** | ||
| * HUF detailed API | ||
| ******************************************/ | ||
/*!
HUF_compress() does the following:
1. count symbol occurrences from source[] into table count[] using FSE_count()
2. (optional) refine tableLog using HUF_optimalTableLog()
3. build Huffman table from count using HUF_buildCTable()
4. save Huffman table to memory buffer using HUF_writeCTable()
5. encode the data stream using HUF_compress4X_usingCTable()
The following API allows targeting specific sub-functions for advanced tasks.
For example, it's possible to compress several blocks using the same 'CTable',
or to save and regenerate 'CTable' using external methods.
A sketch of this pipeline follows the declarations below.
*/
| /* FSE_count() : find it within "fse.h" */ | ||
| unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); | ||
| typedef struct HUF_CElt_s HUF_CElt; /* incomplete type */ | ||
| size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits); | ||
| size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog); | ||
| size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); | ||
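/* Illustrative sketch of the pipeline described above (not part of the original source).
   Buffer names are hypothetical, error handling is minimal, the FSE_count() signature is
   assumed to be the one declared in "fse.h", and HUF_buildCTable() is assumed to return
   the tree depth actually used. Kept under #if 0 so it is never compiled. */
#if 0
static size_t example_huf_detailed_compress(void* dst, size_t dstCapacity,
                                            const void* src, size_t srcSize)
{
    unsigned count[HUF_SYMBOLVALUE_MAX+1];
    unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
    unsigned huffLog = HUF_TABLELOG_DEFAULT;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* op = ostart;
    HUF_CREATE_STATIC_CTABLE(CTable, HUF_SYMBOLVALUE_MAX);

    /* 1. histogram the source (FSE_count() : see "fse.h") */
    {   size_t const largest = FSE_count(count, &maxSymbolValue, src, srcSize);
        if (HUF_isError(largest)) return largest;
    }
    /* 2. refine the table depth for this input */
    huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
    /* 3. build the Huffman tree */
    {   size_t const maxBits = HUF_buildCTable(CTable, count, maxSymbolValue, huffLog);
        if (HUF_isError(maxBits)) return maxBits;
        huffLog = (unsigned)maxBits;
    }
    /* 4. serialize the table in front of the compressed streams */
    {   size_t const hSize = HUF_writeCTable(op, dstCapacity, CTable, maxSymbolValue, huffLog);
        if (HUF_isError(hSize)) return hSize;
        op += hSize;
    }
    /* 5. encode the data with the table built above */
    {   size_t const cSize = HUF_compress4X_usingCTable(op, dstCapacity - (size_t)(op-ostart),
                                                        src, srcSize, CTable);
        if (HUF_isError(cSize)) return cSize;
        op += cSize;
    }
    return (size_t)(op - ostart);
}
#endif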
|
|
||
| typedef enum { | ||
| HUF_repeat_none, /**< Cannot use the previous table */ | ||
| HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */ | ||
HUF_repeat_valid /**< Can use the previous table and it is assumed to be valid */
| } HUF_repeat; | ||
| /** HUF_compress4X_repeat() : | ||
| * Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. | ||
| * If it uses hufTable it does not modify hufTable or repeat. | ||
| * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. | ||
| * If preferRepeat then the old table will always be used if valid. */ | ||
| size_t HUF_compress4X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */ | ||
|
|
||
| /** HUF_buildCTable_wksp() : | ||
| * Same as HUF_buildCTable(), but using externally allocated scratch buffer. | ||
| * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned. | ||
| */ | ||
| size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize); | ||
|
|
||
| /*! HUF_readStats() : | ||
| Read compact Huffman tree, saved by HUF_writeCTable(). | ||
| `huffWeight` is destination buffer. | ||
| @return : size read from `src` , or an error Code . | ||
| Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */ | ||
| size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, | ||
| U32* nbSymbolsPtr, U32* tableLogPtr, | ||
| const void* src, size_t srcSize); | ||
|
|
||
| /** HUF_readCTable() : | ||
| * Loading a CTable saved with HUF_writeCTable() */ | ||
| size_t HUF_readCTable (HUF_CElt* CTable, unsigned maxSymbolValue, const void* src, size_t srcSize); | ||
|
|
||
|
|
||
/*
HUF_decompress() does the following:
1. select the decompression algorithm (X2, X4) based on pre-computed heuristics
2. build the Huffman table from its saved representation, using HUF_readDTableXn()
3. decode 1 or 4 segments in parallel using HUF_decompressSXn_usingDTable()
A sketch combining these steps follows the declarations below.
*/
|
|
||
| /** HUF_selectDecoder() : | ||
| * Tells which decoder is likely to decode faster, | ||
| * based on a set of pre-determined metrics. | ||
| * @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 . | ||
| * Assumption : 0 < cSrcSize < dstSize <= 128 KB */ | ||
| U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize); | ||
|
|
||
| size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize); | ||
| size_t HUF_readDTableX4 (HUF_DTable* DTable, const void* src, size_t srcSize); | ||
|
|
||
| size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); | ||
| size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); | ||
| size_t HUF_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); | ||
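/* Illustrative decoding pipeline (not part of the original source), combining the steps
   listed above. It assumes a DTable created with HUF_CREATE_STATIC_DTABLEX4 at
   HUF_TABLELOG_MAX is large enough for either decoder ; names are hypothetical and
   error handling is minimal. Kept under #if 0 so it is never compiled. */
#if 0
static size_t example_huf_detailed_decompress(void* dst, size_t dstSize,
                                              const void* cSrc, size_t cSrcSize)
{
    HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_TABLELOG_MAX);
    U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);   /* 0 : single-symbol (X2) ; 1 : double-symbols (X4) */
    size_t const hSize = algoNb ? HUF_readDTableX4(DTable, cSrc, cSrcSize)
                                : HUF_readDTableX2(DTable, cSrc, cSrcSize);
    if (HUF_isError(hSize)) return hSize;
    /* the table header occupies the first hSize bytes ; the compressed streams follow */
    return HUF_decompress4X_usingDTable(dst, dstSize,
                                        (const char*)cSrc + hSize, cSrcSize - hSize,
                                        DTable);
}
#endif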
|
|
||
|
|
||
| /* single stream variants */ | ||
|
|
||
| size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog); | ||
| size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */ | ||
| size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); | ||
| /** HUF_compress1X_repeat() : | ||
| * Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. | ||
| * If it uses hufTable it does not modify hufTable or repeat. | ||
| * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. | ||
| * If preferRepeat then the old table will always be used if valid. */ | ||
| size_t HUF_compress1X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */ | ||
|
|
||
| size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */ | ||
| size_t HUF_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */ | ||
|
|
||
| size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); | ||
| size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ | ||
| size_t HUF_decompress1X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ | ||
|
|
||
size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); /**< automatic selection of single or double symbol decoder, based on DTable */
| size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); | ||
| size_t HUF_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); | ||
|
|
||
| #endif /* HUF_STATIC_LINKING_ONLY */ | ||
|
|
||
|
|
||
| #if defined (__cplusplus) | ||
| } | ||
| #endif | ||
|
|
||
| #endif /* HUF_H_298734234 */ |
| @@ -0,0 +1,374 @@ | ||
| /** | ||
| * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. | ||
| * All rights reserved. | ||
| * | ||
| * This source code is licensed under the BSD-style license found in the | ||
| * LICENSE file in the root directory of this source tree. An additional grant | ||
| * of patent rights can be found in the PATENTS file in the same directory. | ||
| */ | ||
|
|
||
| #ifndef MEM_H_MODULE | ||
| #define MEM_H_MODULE | ||
|
|
||
| #if defined (__cplusplus) | ||
| extern "C" { | ||
| #endif | ||
|
|
||
| /*-**************************************** | ||
| * Dependencies | ||
| ******************************************/ | ||
| #include <stddef.h> /* size_t, ptrdiff_t */ | ||
| #include <string.h> /* memcpy */ | ||
|
|
||
|
|
||
| /*-**************************************** | ||
| * Compiler specifics | ||
| ******************************************/ | ||
| #if defined(_MSC_VER) /* Visual Studio */ | ||
| # include <stdlib.h> /* _byteswap_ulong */ | ||
| # include <intrin.h> /* _byteswap_* */ | ||
| #endif | ||
| #if defined(__GNUC__) | ||
| # define MEM_STATIC static __inline __attribute__((unused)) | ||
| #elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) | ||
| # define MEM_STATIC static inline | ||
| #elif defined(_MSC_VER) | ||
| # define MEM_STATIC static __inline | ||
| #else | ||
| # define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ | ||
| #endif | ||
|
|
||
| /* code only tested on 32 and 64 bits systems */ | ||
| #define MEM_STATIC_ASSERT(c) { enum { MEM_static_assert = 1/(int)(!!(c)) }; } | ||
| MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); } | ||
|
|
||
|
|
||
| /*-************************************************************** | ||
| * Basic Types | ||
| *****************************************************************/ | ||
| #if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) | ||
| # include <stdint.h> | ||
| typedef uint8_t BYTE; | ||
| typedef uint16_t U16; | ||
| typedef int16_t S16; | ||
| typedef uint32_t U32; | ||
| typedef int32_t S32; | ||
| typedef uint64_t U64; | ||
| typedef int64_t S64; | ||
| typedef intptr_t iPtrDiff; | ||
| typedef uintptr_t uPtrDiff; | ||
| #else | ||
| typedef unsigned char BYTE; | ||
| typedef unsigned short U16; | ||
| typedef signed short S16; | ||
| typedef unsigned int U32; | ||
| typedef signed int S32; | ||
| typedef unsigned long long U64; | ||
| typedef signed long long S64; | ||
| typedef ptrdiff_t iPtrDiff; | ||
| typedef size_t uPtrDiff; | ||
| #endif | ||
|
|
||
|
|
||
| /*-************************************************************** | ||
| * Memory I/O | ||
| *****************************************************************/ | ||
/* MEM_FORCE_MEMORY_ACCESS :
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It relies on a compiler extension (i.e., not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method is portable, but it violates the C standard.
 *            It can generate buggy code on targets that depend on alignment.
 *            In some circumstances, it's the only known way to get the best performance (e.g. GCC + ARMv6)
 * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
 * Prefer these methods in priority order (0 > 1 > 2)
 */
| #ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ | ||
| # if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) | ||
| # define MEM_FORCE_MEMORY_ACCESS 2 | ||
| # elif defined(__INTEL_COMPILER) /*|| defined(_MSC_VER)*/ || \ | ||
| (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) | ||
| # define MEM_FORCE_MEMORY_ACCESS 1 | ||
| # endif | ||
| #endif | ||
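/* Illustrative override (not part of the original source) : since the guard above
   accepts an external definition, the access method can be forced from the build
   command line, e.g.
       cc -DMEM_FORCE_MEMORY_ACCESS=1 -c example.c
   where `example.c` is a hypothetical translation unit that includes this header. */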
|
|
||
| MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; } | ||
| MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; } | ||
|
|
||
| MEM_STATIC unsigned MEM_isLittleEndian(void) | ||
| { | ||
| const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ | ||
| return one.c[0]; | ||
| } | ||
|
|
||
| #if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) | ||
|
|
||
/* violates the C standard, by lying about structure alignment.
Only use if there is no other choice to achieve best performance on the target platform */
| MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } | ||
| MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } | ||
| MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } | ||
| MEM_STATIC U64 MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; } | ||
|
|
||
| MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } | ||
| MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; } | ||
| MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; } | ||
|
|
||
| #elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) | ||
|
|
||
| /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ | ||
| /* currently only defined for gcc and icc */ | ||
| #if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32)) | ||
| __pragma( pack(push, 1) ) | ||
| typedef union { U16 u16; U32 u32; U64 u64; size_t st; } unalign; | ||
| __pragma( pack(pop) ) | ||
| #else | ||
| typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign; | ||
| #endif | ||
|
|
||
| MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } | ||
| MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } | ||
| MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } | ||
| MEM_STATIC U64 MEM_readST(const void* ptr) { return ((const unalign*)ptr)->st; } | ||
|
|
||
| MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } | ||
| MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; } | ||
| MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign*)memPtr)->u64 = value; } | ||
|
|
||
| #else | ||
|
|
||
| /* default method, safe and standard. | ||
| can sometimes prove slower */ | ||
|
|
||
| MEM_STATIC U16 MEM_read16(const void* memPtr) | ||
| { | ||
| U16 val; memcpy(&val, memPtr, sizeof(val)); return val; | ||
| } | ||
|
|
||
| MEM_STATIC U32 MEM_read32(const void* memPtr) | ||
| { | ||
| U32 val; memcpy(&val, memPtr, sizeof(val)); return val; | ||
| } | ||
|
|
||
| MEM_STATIC U64 MEM_read64(const void* memPtr) | ||
| { | ||
| U64 val; memcpy(&val, memPtr, sizeof(val)); return val; | ||
| } | ||
|
|
||
| MEM_STATIC size_t MEM_readST(const void* memPtr) | ||
| { | ||
| size_t val; memcpy(&val, memPtr, sizeof(val)); return val; | ||
| } | ||
|
|
||
| MEM_STATIC void MEM_write16(void* memPtr, U16 value) | ||
| { | ||
| memcpy(memPtr, &value, sizeof(value)); | ||
| } | ||
|
|
||
| MEM_STATIC void MEM_write32(void* memPtr, U32 value) | ||
| { | ||
| memcpy(memPtr, &value, sizeof(value)); | ||
| } | ||
|
|
||
| MEM_STATIC void MEM_write64(void* memPtr, U64 value) | ||
| { | ||
| memcpy(memPtr, &value, sizeof(value)); | ||
| } | ||
|
|
||
| #endif /* MEM_FORCE_MEMORY_ACCESS */ | ||
|
|
||
| MEM_STATIC U32 MEM_swap32(U32 in) | ||
| { | ||
| #if defined(_MSC_VER) /* Visual Studio */ | ||
| return _byteswap_ulong(in); | ||
| #elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403) | ||
| return __builtin_bswap32(in); | ||
| #else | ||
| return ((in << 24) & 0xff000000 ) | | ||
| ((in << 8) & 0x00ff0000 ) | | ||
| ((in >> 8) & 0x0000ff00 ) | | ||
| ((in >> 24) & 0x000000ff ); | ||
| #endif | ||
| } | ||
|
|
||
| MEM_STATIC U64 MEM_swap64(U64 in) | ||
| { | ||
| #if defined(_MSC_VER) /* Visual Studio */ | ||
| return _byteswap_uint64(in); | ||
| #elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403) | ||
| return __builtin_bswap64(in); | ||
| #else | ||
| return ((in << 56) & 0xff00000000000000ULL) | | ||
| ((in << 40) & 0x00ff000000000000ULL) | | ||
| ((in << 24) & 0x0000ff0000000000ULL) | | ||
| ((in << 8) & 0x000000ff00000000ULL) | | ||
| ((in >> 8) & 0x00000000ff000000ULL) | | ||
| ((in >> 24) & 0x0000000000ff0000ULL) | | ||
| ((in >> 40) & 0x000000000000ff00ULL) | | ||
| ((in >> 56) & 0x00000000000000ffULL); | ||
| #endif | ||
| } | ||
|
|
||
| MEM_STATIC size_t MEM_swapST(size_t in) | ||
| { | ||
| if (MEM_32bits()) | ||
| return (size_t)MEM_swap32((U32)in); | ||
| else | ||
| return (size_t)MEM_swap64((U64)in); | ||
| } | ||
|
|
||
| /*=== Little endian r/w ===*/ | ||
|
|
||
| MEM_STATIC U16 MEM_readLE16(const void* memPtr) | ||
| { | ||
| if (MEM_isLittleEndian()) | ||
| return MEM_read16(memPtr); | ||
| else { | ||
| const BYTE* p = (const BYTE*)memPtr; | ||
| return (U16)(p[0] + (p[1]<<8)); | ||
| } | ||
| } | ||
|
|
||
| MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val) | ||
| { | ||
| if (MEM_isLittleEndian()) { | ||
| MEM_write16(memPtr, val); | ||
| } else { | ||
| BYTE* p = (BYTE*)memPtr; | ||
| p[0] = (BYTE)val; | ||
| p[1] = (BYTE)(val>>8); | ||
| } | ||
| } | ||
|
|
||
| MEM_STATIC U32 MEM_readLE24(const void* memPtr) | ||
| { | ||
| return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16); | ||
| } | ||
|
|
||
| MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val) | ||
| { | ||
| MEM_writeLE16(memPtr, (U16)val); | ||
| ((BYTE*)memPtr)[2] = (BYTE)(val>>16); | ||
| } | ||
|
|
||
| MEM_STATIC U32 MEM_readLE32(const void* memPtr) | ||
| { | ||
| if (MEM_isLittleEndian()) | ||
| return MEM_read32(memPtr); | ||
| else | ||
| return MEM_swap32(MEM_read32(memPtr)); | ||
| } | ||
|
|
||
| MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32) | ||
| { | ||
| if (MEM_isLittleEndian()) | ||
| MEM_write32(memPtr, val32); | ||
| else | ||
| MEM_write32(memPtr, MEM_swap32(val32)); | ||
| } | ||
|
|
||
| MEM_STATIC U64 MEM_readLE64(const void* memPtr) | ||
| { | ||
| if (MEM_isLittleEndian()) | ||
| return MEM_read64(memPtr); | ||
| else | ||
| return MEM_swap64(MEM_read64(memPtr)); | ||
| } | ||
|
|
||
| MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64) | ||
| { | ||
| if (MEM_isLittleEndian()) | ||
| MEM_write64(memPtr, val64); | ||
| else | ||
| MEM_write64(memPtr, MEM_swap64(val64)); | ||
| } | ||
|
|
||
| MEM_STATIC size_t MEM_readLEST(const void* memPtr) | ||
| { | ||
| if (MEM_32bits()) | ||
| return (size_t)MEM_readLE32(memPtr); | ||
| else | ||
| return (size_t)MEM_readLE64(memPtr); | ||
| } | ||
|
|
||
| MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val) | ||
| { | ||
| if (MEM_32bits()) | ||
| MEM_writeLE32(memPtr, (U32)val); | ||
| else | ||
| MEM_writeLE64(memPtr, (U64)val); | ||
| } | ||
|
|
||
| /*=== Big endian r/w ===*/ | ||
|
|
||
| MEM_STATIC U32 MEM_readBE32(const void* memPtr) | ||
| { | ||
| if (MEM_isLittleEndian()) | ||
| return MEM_swap32(MEM_read32(memPtr)); | ||
| else | ||
| return MEM_read32(memPtr); | ||
| } | ||
|
|
||
| MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32) | ||
| { | ||
| if (MEM_isLittleEndian()) | ||
| MEM_write32(memPtr, MEM_swap32(val32)); | ||
| else | ||
| MEM_write32(memPtr, val32); | ||
| } | ||
|
|
||
| MEM_STATIC U64 MEM_readBE64(const void* memPtr) | ||
| { | ||
| if (MEM_isLittleEndian()) | ||
| return MEM_swap64(MEM_read64(memPtr)); | ||
| else | ||
| return MEM_read64(memPtr); | ||
| } | ||
|
|
||
| MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64) | ||
| { | ||
| if (MEM_isLittleEndian()) | ||
| MEM_write64(memPtr, MEM_swap64(val64)); | ||
| else | ||
| MEM_write64(memPtr, val64); | ||
| } | ||
|
|
||
| MEM_STATIC size_t MEM_readBEST(const void* memPtr) | ||
| { | ||
| if (MEM_32bits()) | ||
| return (size_t)MEM_readBE32(memPtr); | ||
| else | ||
| return (size_t)MEM_readBE64(memPtr); | ||
| } | ||
|
|
||
| MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val) | ||
| { | ||
| if (MEM_32bits()) | ||
| MEM_writeBE32(memPtr, (U32)val); | ||
| else | ||
| MEM_writeBE64(memPtr, (U64)val); | ||
| } | ||
|
|
||
|
|
||
| /* function safe only for comparisons */ | ||
| MEM_STATIC U32 MEM_readMINMATCH(const void* memPtr, U32 length) | ||
| { | ||
| switch (length) | ||
| { | ||
| default : | ||
| case 4 : return MEM_read32(memPtr); | ||
| case 3 : if (MEM_isLittleEndian()) | ||
| return MEM_read32(memPtr)<<8; | ||
| else | ||
| return MEM_read32(memPtr)>>8; | ||
| } | ||
| } | ||
|
|
||
| #if defined (__cplusplus) | ||
| } | ||
| #endif | ||
|
|
||
| #endif /* MEM_H_MODULE */ |
| @@ -0,0 +1,305 @@ | ||
| /* | ||
| xxHash - Extremely Fast Hash algorithm | ||
| Header File | ||
| Copyright (C) 2012-2016, Yann Collet. | ||
| BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) | ||
| Redistribution and use in source and binary forms, with or without | ||
| modification, are permitted provided that the following conditions are | ||
| met: | ||
| * Redistributions of source code must retain the above copyright | ||
| notice, this list of conditions and the following disclaimer. | ||
| * Redistributions in binary form must reproduce the above | ||
| copyright notice, this list of conditions and the following disclaimer | ||
| in the documentation and/or other materials provided with the | ||
| distribution. | ||
| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
| A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
| SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
| LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
| DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
| THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
| OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| You can contact the author at : | ||
| - xxHash source repository : https://github.com/Cyan4973/xxHash | ||
| */ | ||
|
|
||
/* Notice extracted from xxHash homepage :
xxHash is an extremely fast Hash algorithm, running at RAM speed limits.
It also successfully passes all tests from the SMHasher suite.
Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)

Name            Speed       Q.Score   Author
xxHash          5.4 GB/s     10
CrapWow         3.2 GB/s      2       Andrew
MurmurHash 3a   2.7 GB/s     10       Austin Appleby
SpookyHash      2.0 GB/s     10       Bob Jenkins
SBox            1.4 GB/s      9       Bret Mulvey
Lookup3         1.2 GB/s      9       Bob Jenkins
SuperFastHash   1.2 GB/s      1       Paul Hsieh
CityHash64      1.05 GB/s    10       Pike & Alakuijala
FNV             0.55 GB/s     5       Fowler, Noll, Vo
CRC32           0.43 GB/s     9
MD5-32          0.33 GB/s    10       Ronald L. Rivest
SHA1-32         0.28 GB/s    10

Q.Score is a measure of quality of the hash function.
It depends on successfully passing SMHasher test set.
10 is a perfect score.

A 64-bit version, named XXH64, is available since r35.
It offers much better speed, but for 64-bit applications only.
Name     Speed on 64 bits    Speed on 32 bits
XXH64       13.8 GB/s            1.9 GB/s
XXH32        6.8 GB/s            6.0 GB/s
*/
|
|
||
| #if defined (__cplusplus) | ||
| extern "C" { | ||
| #endif | ||
|
|
||
| #ifndef XXHASH_H_5627135585666179 | ||
| #define XXHASH_H_5627135585666179 1 | ||
|
|
||
|
|
||
| /* **************************** | ||
| * Definitions | ||
| ******************************/ | ||
| #include <stddef.h> /* size_t */ | ||
| typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; | ||
|
|
||
|
|
||
| /* **************************** | ||
| * API modifier | ||
| ******************************/ | ||
/** XXH_PRIVATE_API
*  This is useful if you want to include xxhash functions in `static` mode
*  in order to inline them, and remove their symbols from the public list.
*  Methodology :
*     #define XXH_PRIVATE_API
*     #include "xxhash.h"
*  `xxhash.c` is automatically included, so there is no longer any need
*  to compile and link it as a separate module.
*/
| #ifdef XXH_PRIVATE_API | ||
| # ifndef XXH_STATIC_LINKING_ONLY | ||
| # define XXH_STATIC_LINKING_ONLY | ||
| # endif | ||
| # if defined(__GNUC__) | ||
| # define XXH_PUBLIC_API static __inline __attribute__((unused)) | ||
| # elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) | ||
| # define XXH_PUBLIC_API static inline | ||
| # elif defined(_MSC_VER) | ||
| # define XXH_PUBLIC_API static __inline | ||
| # else | ||
| # define XXH_PUBLIC_API static /* this version may generate warnings for unused static functions; disable the relevant warning */ | ||
| # endif | ||
| #else | ||
| # define XXH_PUBLIC_API /* do nothing */ | ||
| #endif /* XXH_PRIVATE_API */ | ||
|
|
||
/*!XXH_NAMESPACE, aka Namespace Emulation :
If you want to include _and expose_ xxHash functions from within your own library,
but also want to avoid symbol collisions with another library which also includes xxHash,
you can use XXH_NAMESPACE to automatically prefix every public symbol from the xxhash library
with the value of XXH_NAMESPACE (so avoid leaving it empty, and avoid numeric values).
Note that no change is required within the calling program as long as it includes `xxhash.h` :
regular symbol names will be automatically translated by this header.
A minimal example follows the #define list below.
*/
| #ifdef XXH_NAMESPACE | ||
| # define XXH_CAT(A,B) A##B | ||
| # define XXH_NAME2(A,B) XXH_CAT(A,B) | ||
| # define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) | ||
| # define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) | ||
| # define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) | ||
| # define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) | ||
| # define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) | ||
| # define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) | ||
| # define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) | ||
| # define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) | ||
| # define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) | ||
| # define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) | ||
| # define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) | ||
| # define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) | ||
| # define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) | ||
| # define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) | ||
| # define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) | ||
| # define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) | ||
| # define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) | ||
| # define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) | ||
| # define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) | ||
| #endif | ||
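/* Illustrative namespace setup (not part of the original source) ; the prefix
   `mylib_` is hypothetical :

       #define XXH_NAMESPACE mylib_
       #include "xxhash.h"

   After this, a call written as XXH32(buf, len, 0) in the including code is
   translated by the macros above into mylib_XXH32(buf, len, 0), so the exported
   symbol no longer collides with another copy of xxHash. */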
|
|
||
|
|
||
| /* ************************************* | ||
| * Version | ||
| ***************************************/ | ||
| #define XXH_VERSION_MAJOR 0 | ||
| #define XXH_VERSION_MINOR 6 | ||
| #define XXH_VERSION_RELEASE 2 | ||
| #define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE) | ||
| XXH_PUBLIC_API unsigned XXH_versionNumber (void); | ||
|
|
||
|
|
||
| /* **************************** | ||
| * Simple Hash Functions | ||
| ******************************/ | ||
| typedef unsigned int XXH32_hash_t; | ||
| typedef unsigned long long XXH64_hash_t; | ||
|
|
||
| XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed); | ||
| XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed); | ||
|
|
||
/*!
XXH32() :
    Calculate the 32-bit hash of a sequence of "length" bytes stored at memory address "input".
    The memory between input & input+length must be valid (allocated and read-accessible).
    "seed" can be used to alter the result predictably.
    Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s
XXH64() :
    Calculate the 64-bit hash of a sequence of "length" bytes stored at memory address "input".
    "seed" can be used to alter the result predictably.
    This function runs 2x faster on 64-bit systems, but slower on 32-bit systems (see benchmark).
A one-shot usage sketch follows this comment.
*/
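/* Illustrative one-shot usage (not part of the original source) ; the message and
   seed values are hypothetical. Kept under #if 0 so it is never compiled. */
#if 0
#include <string.h>   /* strlen */
static XXH64_hash_t example_hash_message(const char* msg)
{
    unsigned int const seed32 = 0;   /* any fixed seed gives reproducible results */
    XXH32_hash_t const h32 = XXH32(msg, strlen(msg), seed32);
    XXH64_hash_t const h64 = XXH64(msg, strlen(msg), 0);
    (void)h32;
    return h64;
}
#endif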
|
|
||
|
|
||
| /* **************************** | ||
| * Streaming Hash Functions | ||
| ******************************/ | ||
| typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */ | ||
| typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ | ||
|
|
||
| /*! State allocation, compatible with dynamic libraries */ | ||
|
|
||
| XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void); | ||
| XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); | ||
|
|
||
| XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void); | ||
| XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); | ||
|
|
||
|
|
||
| /* hash streaming */ | ||
|
|
||
| XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, unsigned int seed); | ||
| XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length); | ||
| XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr); | ||
|
|
||
| XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed); | ||
| XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length); | ||
| XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr); | ||
|
|
||
/*
These functions generate the xxHash of an input provided in multiple segments.
Note that, for small input, they are slower than the single-call functions, due to state management.
For small input, prefer `XXH32()` and `XXH64()` .
An XXH state must first be allocated, using XXH*_createState() .
Start a new hash by initializing the state with a seed, using XXH*_reset().
Then, feed the hash state by calling XXH*_update() as many times as necessary.
Obviously, input must be allocated and read-accessible.
The function returns an error code, with 0 meaning OK, and any other value meaning there is an error.
Finally, a hash value can be produced at any time, by using XXH*_digest().
This function returns the nn-bit hash as an int or long long.
It's still possible to continue inserting input into the hash state after a digest,
and generate new hashes later on, by calling XXH*_digest() again.
When done, free the XXH state space if it was allocated dynamically.
A streaming usage sketch follows this comment.
*/
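/* Illustrative streaming usage (not part of the original source) ; `readChunk()` is a
   hypothetical callback that fills `buf` and returns the number of bytes read.
   Kept under #if 0 so it is never compiled. */
#if 0
static XXH64_hash_t example_stream_hash(size_t (*readChunk)(void* buf, size_t cap))
{
    char buf[4096];
    XXH64_hash_t result = 0;
    XXH64_state_t* const state = XXH64_createState();
    if (state == NULL) return 0;
    if (XXH64_reset(state, 0) == XXH_OK) {               /* start a new hash, seed = 0 */
        size_t n;
        while ((n = readChunk(buf, sizeof(buf))) > 0) {  /* feed the state segment by segment */
            if (XXH64_update(state, buf, n) != XXH_OK) break;
        }
        result = XXH64_digest(state);                    /* can be called at any point */
    }
    XXH64_freeState(state);                              /* state was allocated dynamically */
    return result;
}
#endif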
|
|
||
|
|
||
| /* ************************** | ||
| * Utils | ||
| ****************************/ | ||
| #if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* ! C99 */ | ||
| # define restrict /* disable restrict */ | ||
| #endif | ||
|
|
||
| XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dst_state, const XXH32_state_t* restrict src_state); | ||
| XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dst_state, const XXH64_state_t* restrict src_state); | ||
|
|
||
|
|
||
| /* ************************** | ||
| * Canonical representation | ||
| ****************************/ | ||
/* The default return types of the XXH functions are primitive unsigned 32-bit and 64-bit integers.
*  The canonical representation uses the human-readable write convention, aka big-endian (large digits first).
*  These functions allow transformation of a hash result into and from its canonical format.
*  This way, hash values can be written into a file / memory, and remain comparable across different systems and programs.
*/
| typedef struct { unsigned char digest[4]; } XXH32_canonical_t; | ||
| typedef struct { unsigned char digest[8]; } XXH64_canonical_t; | ||
|
|
||
| XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash); | ||
| XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash); | ||
|
|
||
| XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src); | ||
| XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src); | ||
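/* Illustrative canonical round-trip (not part of the original source) ; the 8-byte
   destination `out` is hypothetical, e.g. a field in a file header.
   Kept under #if 0 so it is never compiled. */
#if 0
#include <string.h>   /* memcpy */
static void example_store_hash(unsigned char out[8], XXH64_hash_t hash)
{
    XXH64_canonical_t canonical;
    XXH64_canonicalFromHash(&canonical, hash);      /* big-endian, portable layout */
    memcpy(out, canonical.digest, sizeof(canonical.digest));
}
static XXH64_hash_t example_load_hash(const unsigned char in[8])
{
    XXH64_canonical_t canonical;
    memcpy(canonical.digest, in, sizeof(canonical.digest));
    return XXH64_hashFromCanonical(&canonical);     /* back to a native integer */
}
#endif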
|
|
||
| #endif /* XXHASH_H_5627135585666179 */ | ||
|
|
||
|
|
||
|
|
||
| /* ================================================================================================ | ||
| This section contains definitions which are not guaranteed to remain stable. | ||
| They may change in future versions, becoming incompatible with a different version of the library. | ||
| They shall only be used with static linking. | ||
| Never use these definitions in association with dynamic linking ! | ||
| =================================================================================================== */ | ||
| #if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXH_STATIC_H_3543687687345) | ||
| #define XXH_STATIC_H_3543687687345 | ||
|
|
||
| /* These definitions are only meant to allow allocation of XXH state | ||
| statically, on stack, or in a struct for example. | ||
| Do not use members directly. */ | ||
|
|
||
| struct XXH32_state_s { | ||
| unsigned total_len_32; | ||
| unsigned large_len; | ||
| unsigned v1; | ||
| unsigned v2; | ||
| unsigned v3; | ||
| unsigned v4; | ||
| unsigned mem32[4]; /* buffer defined as U32 for alignment */ | ||
| unsigned memsize; | ||
| unsigned reserved; /* never read nor write, will be removed in a future version */ | ||
| }; /* typedef'd to XXH32_state_t */ | ||
|
|
||
| struct XXH64_state_s { | ||
| unsigned long long total_len; | ||
| unsigned long long v1; | ||
| unsigned long long v2; | ||
| unsigned long long v3; | ||
| unsigned long long v4; | ||
| unsigned long long mem64[4]; /* buffer defined as U64 for alignment */ | ||
| unsigned memsize; | ||
| unsigned reserved[2]; /* never read nor write, will be removed in a future version */ | ||
| }; /* typedef'd to XXH64_state_t */ | ||
|
|
||
|
|
||
| # ifdef XXH_PRIVATE_API | ||
| # include "xxhash.c" /* include xxhash functions as `static`, for inlining */ | ||
| # endif | ||
|
|
||
| #endif /* XXH_STATIC_LINKING_ONLY && XXH_STATIC_H_3543687687345 */ | ||
|
|
||
|
|
||
| #if defined (__cplusplus) | ||
| } | ||
| #endif |
| @@ -0,0 +1,73 @@ | ||
| /** | ||
| * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. | ||
| * All rights reserved. | ||
| * | ||
| * This source code is licensed under the BSD-style license found in the | ||
| * LICENSE file in the root directory of this source tree. An additional grant | ||
| * of patent rights can be found in the PATENTS file in the same directory. | ||
| */ | ||
|
|
||
|
|
||
|
|
||
| /*-************************************* | ||
| * Dependencies | ||
| ***************************************/ | ||
| #include <stdlib.h> /* malloc */ | ||
| #include "error_private.h" | ||
| #define ZSTD_STATIC_LINKING_ONLY | ||
| #include "zstd.h" /* declaration of ZSTD_isError, ZSTD_getErrorName, ZSTD_getErrorCode, ZSTD_getErrorString, ZSTD_versionNumber */ | ||
|
|
||
|
|
||
| /*-**************************************** | ||
| * Version | ||
| ******************************************/ | ||
| unsigned ZSTD_versionNumber (void) { return ZSTD_VERSION_NUMBER; } | ||
|
|
||
|
|
||
| /*-**************************************** | ||
| * ZSTD Error Management | ||
| ******************************************/ | ||
| /*! ZSTD_isError() : | ||
| * tells whether a `size_t` return value is an error code */ | ||
| unsigned ZSTD_isError(size_t code) { return ERR_isError(code); } | ||
|
|
||
| /*! ZSTD_getErrorName() : | ||
| * provides a readable error string from a function result (useful for debugging) */ | ||
| const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); } | ||
|
|
||
| /*! ZSTD_getErrorCode() : | ||
| * converts a `size_t` function result into a proper ZSTD_ErrorCode enum */ | ||
| ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); } | ||
|
|
||
| /*! ZSTD_getErrorString() : | ||
| * provides a readable error string from a ZSTD_ErrorCode enum value */ | ||
| const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); } | ||
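/* ------------------------------------------------------------------
   Illustrative sketch (not part of the library) : the usual caller-side
   pattern combining these helpers.  `dst`, `src` and their sizes are
   hypothetical ; compression level 1 is arbitrary.

     #include <stdio.h>
     #include "zstd.h"

     static size_t example_compressChecked(void* dst, size_t dstCapacity,
                                           const void* src, size_t srcSize)
     {
         size_t const r = ZSTD_compress(dst, dstCapacity, src, srcSize, 1);
         if (ZSTD_isError(r))                         // errors are encoded in the size_t result
             printf("zstd error : %s\n", ZSTD_getErrorName(r));
         return r;
     }
   ------------------------------------------------------------------ */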
|
|
||
|
|
||
| /*=************************************************************** | ||
| * Custom allocator | ||
| ****************************************************************/ | ||
| /* default allocator : relies on stdlib's malloc() and free() */ | ||
| void* ZSTD_defaultAllocFunction(void* opaque, size_t size) | ||
| { | ||
| void* address = malloc(size); | ||
| (void)opaque; | ||
| return address; | ||
| } | ||
|
|
||
| void ZSTD_defaultFreeFunction(void* opaque, void* address) | ||
| { | ||
| (void)opaque; | ||
| free(address); | ||
| } | ||
|
|
||
| void* ZSTD_malloc(size_t size, ZSTD_customMem customMem) | ||
| { | ||
| return customMem.customAlloc(customMem.opaque, size); | ||
| } | ||
|
|
||
| void ZSTD_free(void* ptr, ZSTD_customMem customMem) | ||
| { | ||
| if (ptr!=NULL) | ||
| customMem.customFree(customMem.opaque, ptr); | ||
| } |
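/* ------------------------------------------------------------------
   Illustrative sketch (not part of the library) : routing all internal
   allocations through a user-supplied allocator.  The counting allocator
   and its names are hypothetical ; ZSTD_createCCtx_advanced() belongs to
   the static-linking-only API.

     #include <stdlib.h>
     #define ZSTD_STATIC_LINKING_ONLY
     #include "zstd.h"

     typedef struct { size_t totalAllocated; } ExampleTracker;

     static void* example_alloc(void* opaque, size_t size)
     {
         ((ExampleTracker*)opaque)->totalAllocated += size;   // crude accounting, for illustration
         return malloc(size);
     }

     static void example_free(void* opaque, void* address)
     {
         (void)opaque;
         free(address);
     }

     static ZSTD_CCtx* example_createTrackedCCtx(ExampleTracker* tracker)
     {
         ZSTD_customMem const mem = { example_alloc, example_free, tracker };
         return ZSTD_createCCtx_advanced(mem);   // every internal allocation goes through example_alloc
     }
   ------------------------------------------------------------------ */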
| @@ -0,0 +1,75 @@ | ||
| /** | ||
| * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. | ||
| * All rights reserved. | ||
| * | ||
| * This source code is licensed under the BSD-style license found in the | ||
| * LICENSE file in the root directory of this source tree. An additional grant | ||
| * of patent rights can be found in the PATENTS file in the same directory. | ||
| */ | ||
|
|
||
| #ifndef ZSTD_ERRORS_H_398273423 | ||
| #define ZSTD_ERRORS_H_398273423 | ||
|
|
||
| #if defined (__cplusplus) | ||
| extern "C" { | ||
| #endif | ||
|
|
||
| /*===== dependency =====*/ | ||
| #include <stddef.h> /* size_t */ | ||
|
|
||
|
|
||
| /* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */ | ||
| #if defined(__GNUC__) && (__GNUC__ >= 4) | ||
| # define ZSTDERRORLIB_VISIBILITY __attribute__ ((visibility ("default"))) | ||
| #else | ||
| # define ZSTDERRORLIB_VISIBILITY | ||
| #endif | ||
| #if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) | ||
| # define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBILITY | ||
| #elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) | ||
| # define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBILITY /* Not strictly required, but allows generating better code, saving a function pointer load from the IAT and an indirect jump. */ | ||
| #else | ||
| # define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY | ||
| #endif | ||
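/* ------------------------------------------------------------------
   Illustrative sketch (not part of the library) : the build-time defines a
   project would typically pass ; the MSVC command lines are assumptions.

     cl /DZSTD_DLL_EXPORT=1 ...    when building the zstd DLL itself   -> dllexport
     cl /DZSTD_DLL_IMPORT=1 ...    when compiling clients of that DLL  -> dllimport
     (neither define)              for static linking ; only the GCC/Clang
                                   visibility attribute applies
   ------------------------------------------------------------------ */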
|
|
||
| /*-**************************************** | ||
| * error codes list | ||
| ******************************************/ | ||
| typedef enum { | ||
| ZSTD_error_no_error, | ||
| ZSTD_error_GENERIC, | ||
| ZSTD_error_prefix_unknown, | ||
| ZSTD_error_version_unsupported, | ||
| ZSTD_error_parameter_unknown, | ||
| ZSTD_error_frameParameter_unsupported, | ||
| ZSTD_error_frameParameter_unsupportedBy32bits, | ||
| ZSTD_error_frameParameter_windowTooLarge, | ||
| ZSTD_error_compressionParameter_unsupported, | ||
| ZSTD_error_init_missing, | ||
| ZSTD_error_memory_allocation, | ||
| ZSTD_error_stage_wrong, | ||
| ZSTD_error_dstSize_tooSmall, | ||
| ZSTD_error_srcSize_wrong, | ||
| ZSTD_error_corruption_detected, | ||
| ZSTD_error_checksum_wrong, | ||
| ZSTD_error_tableLog_tooLarge, | ||
| ZSTD_error_maxSymbolValue_tooLarge, | ||
| ZSTD_error_maxSymbolValue_tooSmall, | ||
| ZSTD_error_dictionary_corrupted, | ||
| ZSTD_error_dictionary_wrong, | ||
| ZSTD_error_dictionaryCreation_failed, | ||
| ZSTD_error_maxCode | ||
| } ZSTD_ErrorCode; | ||
|
|
||
| /*! ZSTD_getErrorCode() : | ||
| converts a `size_t` function result into a `ZSTD_ErrorCode` enum type, | ||
| which can be compared directly against the enum list published in "error_public.h" */ | ||
| ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult); | ||
| ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code); | ||
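/* ------------------------------------------------------------------
   Illustrative sketch (not part of the library) : reacting to specific
   error codes.  `r` stands for the size_t result of any zstd entry point ;
   the recovery advice is hypothetical.

     static const char* example_describe(size_t r)
     {
         switch (ZSTD_getErrorCode(r)) {
         case ZSTD_error_no_error:          return "success";
         case ZSTD_error_dstSize_tooSmall:  return "grow the destination buffer and retry";
         case ZSTD_error_srcSize_wrong:     return "truncated or oversized input";
         default:                           return ZSTD_getErrorString(ZSTD_getErrorCode(r));
         }
     }
   ------------------------------------------------------------------ */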
|
|
||
|
|
||
| #if defined (__cplusplus) | ||
| } | ||
| #endif | ||
|
|
||
| #endif /* ZSTD_ERRORS_H_398273423 */ |
| @@ -0,0 +1,283 @@ | ||
| /** | ||
| * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. | ||
| * All rights reserved. | ||
| * | ||
| * This source code is licensed under the BSD-style license found in the | ||
| * LICENSE file in the root directory of this source tree. An additional grant | ||
| * of patent rights can be found in the PATENTS file in the same directory. | ||
| */ | ||
|
|
||
| #ifndef ZSTD_CCOMMON_H_MODULE | ||
| #define ZSTD_CCOMMON_H_MODULE | ||
|
|
||
| /*-******************************************************* | ||
| * Compiler specifics | ||
| *********************************************************/ | ||
| #ifdef _MSC_VER /* Visual Studio */ | ||
| # define FORCE_INLINE static __forceinline | ||
| # include <intrin.h> /* For Visual 2005 */ | ||
| # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ | ||
| # pragma warning(disable : 4324) /* disable: C4324: padded structure */ | ||
| # pragma warning(disable : 4100) /* disable: C4100: unreferenced formal parameter */ | ||
| #else | ||
| # if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ | ||
| # ifdef __GNUC__ | ||
| # define FORCE_INLINE static inline __attribute__((always_inline)) | ||
| # else | ||
| # define FORCE_INLINE static inline | ||
| # endif | ||
| # else | ||
| # define FORCE_INLINE static | ||
| # endif /* __STDC_VERSION__ */ | ||
| #endif | ||
|
|
||
| #ifdef _MSC_VER | ||
| # define FORCE_NOINLINE static __declspec(noinline) | ||
| #else | ||
| # ifdef __GNUC__ | ||
| # define FORCE_NOINLINE static __attribute__((__noinline__)) | ||
| # else | ||
| # define FORCE_NOINLINE static | ||
| # endif | ||
| #endif | ||
|
|
||
|
|
||
| /*-************************************* | ||
| * Dependencies | ||
| ***************************************/ | ||
| #include "mem.h" | ||
| #include "error_private.h" | ||
| #define ZSTD_STATIC_LINKING_ONLY | ||
| #include "zstd.h" | ||
| #ifndef XXH_STATIC_LINKING_ONLY | ||
| # define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */ | ||
| #endif | ||
| #include "xxhash.h" /* XXH_reset, update, digest */ | ||
|
|
||
|
|
||
| /*-************************************* | ||
| * shared macros | ||
| ***************************************/ | ||
| #define MIN(a,b) ((a)<(b) ? (a) : (b)) | ||
| #define MAX(a,b) ((a)>(b) ? (a) : (b)) | ||
| #define CHECK_F(f) { size_t const errcod = f; if (ERR_isError(errcod)) return errcod; } /* check and Forward error code */ | ||
| #define CHECK_E(f, e) { size_t const errcod = f; if (ERR_isError(errcod)) return ERROR(e); } /* check and send Error code */ | ||
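/* ------------------------------------------------------------------
   Illustrative sketch (not part of the library) : how CHECK_F / CHECK_E
   are meant to be used inside functions that report errors through their
   size_t result.  example_step1 / example_step2 are hypothetical helpers.

     static size_t example_step1(void);
     static size_t example_step2(void);

     static size_t example_pipeline(void)
     {
         CHECK_F( example_step1() );                     // forwards the raw error code on failure
         CHECK_E( example_step2(), memory_allocation );  // rewrites any failure as ERROR(memory_allocation)
         return 0;                                       // 0 == success, not an error code
     }
   ------------------------------------------------------------------ */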
|
|
||
|
|
||
| /*-************************************* | ||
| * Common constants | ||
| ***************************************/ | ||
| #define ZSTD_OPT_NUM (1<<12) | ||
| #define ZSTD_DICT_MAGIC 0xEC30A437 /* v0.7+ */ | ||
|
|
||
| #define ZSTD_REP_NUM 3 /* number of repcodes */ | ||
| #define ZSTD_REP_CHECK (ZSTD_REP_NUM) /* number of repcodes to check by the optimal parser */ | ||
| #define ZSTD_REP_MOVE (ZSTD_REP_NUM-1) | ||
| #define ZSTD_REP_MOVE_OPT (ZSTD_REP_NUM) | ||
| static const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 }; | ||
|
|
||
| #define KB *(1 <<10) | ||
| #define MB *(1 <<20) | ||
| #define GB *(1U<<30) | ||
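/* Illustrative sketch (not part of the library) : these macros are written
   after a number, e.g.
     size_t const exampleWindowSize = 8 MB;    // expands to 8 *(1 <<20)
     size_t const exampleChunkSize  = 512 KB;  // expands to 512 *(1 <<10)
*/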
|
|
||
| #define BIT7 128 | ||
| #define BIT6 64 | ||
| #define BIT5 32 | ||
| #define BIT4 16 | ||
| #define BIT1 2 | ||
| #define BIT0 1 | ||
|
|
||
| #define ZSTD_WINDOWLOG_ABSOLUTEMIN 10 | ||
| static const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 }; | ||
| static const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 }; | ||
|
|
||
| #define ZSTD_BLOCKHEADERSIZE 3 /* the C standard doesn't allow a `static const` variable to be initialized from another `static const` variable */ | ||
| static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE; | ||
| typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e; | ||
|
|
||
| #define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ | ||
| #define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */ | ||
|
|
||
| #define HufLog 12 | ||
| typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e; | ||
|
|
||
| #define LONGNBSEQ 0x7F00 | ||
|
|
||
| #define MINMATCH 3 | ||
| #define EQUAL_READ32 4 | ||
|
|
||
| #define Litbits 8 | ||
| #define MaxLit ((1<<Litbits) - 1) | ||
| #define MaxML 52 | ||
| #define MaxLL 35 | ||
| #define MaxOff 28 | ||
| #define MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */ | ||
| #define MLFSELog 9 | ||
| #define LLFSELog 9 | ||
| #define OffFSELog 8 | ||
|
|
||
| static const U32 LL_bits[MaxLL+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | ||
| 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9,10,11,12, | ||
| 13,14,15,16 }; | ||
| static const S16 LL_defaultNorm[MaxLL+1] = { 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, | ||
| 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, | ||
| -1,-1,-1,-1 }; | ||
| #define LL_DEFAULTNORMLOG 6 /* for static allocation */ | ||
| static const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG; | ||
|
|
||
| static const U32 ML_bits[MaxML+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | ||
| 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | ||
| 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9,10,11, | ||
| 12,13,14,15,16 }; | ||
| static const S16 ML_defaultNorm[MaxML+1] = { 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, | ||
| 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, | ||
| 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1, | ||
| -1,-1,-1,-1,-1 }; | ||
| #define ML_DEFAULTNORMLOG 6 /* for static allocation */ | ||
| static const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG; | ||
|
|
||
| static const S16 OF_defaultNorm[MaxOff+1] = { 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, | ||
| 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1 }; | ||
| #define OF_DEFAULTNORMLOG 5 /* for static allocation */ | ||
| static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG; | ||
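/* ------------------------------------------------------------------
   Illustrative sketch (not part of the library) : how a default distribution
   above can be turned into an FSE decoding table, as done when a block uses
   the predefined (set_basic) statistics.  FSE_DTABLE_SIZE_U32 and
   FSE_buildDTable come from fse.h ; error handling is omitted.

     #include "fse.h"

     static void example_buildDefaultLLTable(void)
     {
         FSE_DTable dtable[ FSE_DTABLE_SIZE_U32(LL_DEFAULTNORMLOG) ];   // static allocation
         FSE_buildDTable(dtable, LL_defaultNorm, MaxLL, LL_DEFAULTNORMLOG);
         (void)dtable;
     }
   ------------------------------------------------------------------ */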
|
|
||
|
|
||
| /*-******************************************* | ||
| * Shared functions to include for inlining | ||
| *********************************************/ | ||
| static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); } | ||
| #define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; } | ||
|
|
||
| /*! ZSTD_wildcopy() : | ||
| * custom version of memcpy() ; may copy up to 7 bytes more than `length` (8 bytes if length==0) */ | ||
| #define WILDCOPY_OVERLENGTH 8 | ||
| MEM_STATIC void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length) | ||
| { | ||
| const BYTE* ip = (const BYTE*)src; | ||
| BYTE* op = (BYTE*)dst; | ||
| BYTE* const oend = op + length; | ||
| do | ||
| COPY8(op, ip) | ||
| while (op < oend); | ||
| } | ||
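/* ------------------------------------------------------------------
   Illustrative sketch (not part of the library) : because the copy advances
   in 8-byte steps, the destination must keep WILDCOPY_OVERLENGTH spare bytes
   after the requested length, and the source must stay readable over the same
   margin.  The sizes below are hypothetical.

     static void example_wildcopy(const BYTE* src)   // src must expose at least 48 readable bytes
     {
         BYTE dst[40 + WILDCOPY_OVERLENGTH];         // slack for the possible over-copy
         ZSTD_wildcopy(dst, src, 40);                // may write up to 48 bytes into dst
         (void)dst;
     }
   ------------------------------------------------------------------ */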
|
|
||
| MEM_STATIC void ZSTD_wildcopy_e(void* dst, const void* src, void* dstEnd) /* should be faster for decoding, but strangely, not verified on all platforms */ | ||
| { | ||
| const BYTE* ip = (const BYTE*)src; | ||
| BYTE* op = (BYTE*)dst; | ||
| BYTE* const oend = (BYTE*)dstEnd; | ||
| do | ||
| COPY8(op, ip) | ||
| while (op < oend); | ||
| } | ||
|
|
||
|
|
||
| /*-******************************************* | ||
| * Private interfaces | ||
| *********************************************/ | ||
| typedef struct ZSTD_stats_s ZSTD_stats_t; | ||
|
|
||
| typedef struct { | ||
| U32 off; | ||
| U32 len; | ||
| } ZSTD_match_t; | ||
|
|
||
| typedef struct { | ||
| U32 price; | ||
| U32 off; | ||
| U32 mlen; | ||
| U32 litlen; | ||
| U32 rep[ZSTD_REP_NUM]; | ||
| } ZSTD_optimal_t; | ||
|
|
||
|
|
||
| typedef struct seqDef_s { | ||
| U32 offset; | ||
| U16 litLength; | ||
| U16 matchLength; | ||
| } seqDef; | ||
|
|
||
|
|
||
| typedef struct { | ||
| seqDef* sequencesStart; | ||
| seqDef* sequences; | ||
| BYTE* litStart; | ||
| BYTE* lit; | ||
| BYTE* llCode; | ||
| BYTE* mlCode; | ||
| BYTE* ofCode; | ||
| U32 longLengthID; /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */ | ||
| U32 longLengthPos; | ||
| /* opt */ | ||
| ZSTD_optimal_t* priceTable; | ||
| ZSTD_match_t* matchTable; | ||
| U32* matchLengthFreq; | ||
| U32* litLengthFreq; | ||
| U32* litFreq; | ||
| U32* offCodeFreq; | ||
| U32 matchLengthSum; | ||
| U32 matchSum; | ||
| U32 litLengthSum; | ||
| U32 litSum; | ||
| U32 offCodeSum; | ||
| U32 log2matchLengthSum; | ||
| U32 log2matchSum; | ||
| U32 log2litLengthSum; | ||
| U32 log2litSum; | ||
| U32 log2offCodeSum; | ||
| U32 factor; | ||
| U32 staticPrices; | ||
| U32 cachedPrice; | ||
| U32 cachedLitLength; | ||
| const BYTE* cachedLiterals; | ||
| } seqStore_t; | ||
|
|
||
| const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); | ||
| void ZSTD_seqToCodes(const seqStore_t* seqStorePtr); | ||
| int ZSTD_isSkipFrame(ZSTD_DCtx* dctx); | ||
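/* ------------------------------------------------------------------
   Illustrative sketch (not part of the library) : ZSTD_getSeqStore() exposes
   the sequences produced by the last compressed block, which can be useful
   for debugging or statistics.  `cctx` is assumed to have just finished
   compressing.

     static size_t example_countSequences(const ZSTD_CCtx* cctx)
     {
         const seqStore_t* const ss = ZSTD_getSeqStore(cctx);
         return (size_t)(ss->sequences - ss->sequencesStart);   // number of seqDef entries emitted
     }
   ------------------------------------------------------------------ */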
|
|
||
| /* custom memory allocation functions */ | ||
| void* ZSTD_defaultAllocFunction(void* opaque, size_t size); | ||
| void ZSTD_defaultFreeFunction(void* opaque, void* address); | ||
| #ifndef ZSTD_DLL_IMPORT | ||
| static const ZSTD_customMem defaultCustomMem = { ZSTD_defaultAllocFunction, ZSTD_defaultFreeFunction, NULL }; | ||
| #endif | ||
| void* ZSTD_malloc(size_t size, ZSTD_customMem customMem); | ||
| void ZSTD_free(void* ptr, ZSTD_customMem customMem); | ||
|
|
||
|
|
||
| /*====== common function ======*/ | ||
|
|
||
| MEM_STATIC U32 ZSTD_highbit32(U32 val) | ||
| { | ||
| # if defined(_MSC_VER) /* Visual */ | ||
| unsigned long r=0; | ||
| _BitScanReverse(&r, val); | ||
| return (unsigned)r; | ||
| # elif defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */ | ||
| return 31 - __builtin_clz(val); | ||
| # else /* Software version */ | ||
| static const int DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; | ||
| U32 v = val; | ||
| int r; | ||
| v |= v >> 1; | ||
| v |= v >> 2; | ||
| v |= v >> 4; | ||
| v |= v >> 8; | ||
| v |= v >> 16; | ||
| r = DeBruijnClz[(U32)(v * 0x07C4ACDDU) >> 27]; | ||
| return r; | ||
| # endif | ||
| } | ||
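/* Illustrative sketch (not part of the library) : ZSTD_highbit32() returns the
   position of the highest set bit, i.e. floor(log2(val)), and requires val > 0.
     ZSTD_highbit32(1)    == 0
     ZSTD_highbit32(32)   == 5
     ZSTD_highbit32(1000) == 9    // since 2^9 = 512 <= 1000 < 1024 = 2^10
*/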
|
|
||
|
|
||
| /* hidden functions */ | ||
|
|
||
| /* ZSTD_invalidateRepCodes() : | ||
| * ensures the next compression will not use repcodes from the previous block. | ||
| * Note : only works with the regular variant; | ||
| * do not use with the extDict variant ! */ | ||
| void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx); | ||
|
|
||
|
|
||
| #endif /* ZSTD_CCOMMON_H_MODULE */ |