LCOV - code coverage report

Current view: top level - pression/compressor/zstd/lib/compress - zstd_compress.c (source / functions)
Test:         Pression
Date:         2016-12-06 05:44:58

                  Hit     Total    Coverage
Lines:            575      1658      34.7 %
Functions:         37       114      32.5 %

          Line data    Source code
       1             : /**
       2             :  * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
       3             :  * All rights reserved.
       4             :  *
       5             :  * This source code is licensed under the BSD-style license found in the
       6             :  * LICENSE file in the root directory of this source tree. An additional grant
       7             :  * of patent rights can be found in the PATENTS file in the same directory.
       8             :  */
       9             : 
      10             : 
      11             : /*-*************************************
      12             : *  Dependencies
      13             : ***************************************/
      14             : #include <string.h>         /* memset */
      15             : #include "mem.h"
      16             : #define XXH_STATIC_LINKING_ONLY   /* XXH64_state_t */
      17             : #include "xxhash.h"               /* XXH_reset, update, digest */
      18             : #define FSE_STATIC_LINKING_ONLY   /* FSE_encodeSymbol */
      19             : #include "fse.h"
      20             : #define HUF_STATIC_LINKING_ONLY
      21             : #include "huf.h"
      22             : #include "zstd_internal.h"  /* includes zstd.h */
      23             : 
      24             : 
      25             : /*-*************************************
      26             : *  Constants
      27             : ***************************************/
      28             : static const U32 g_searchStrength = 8;   /* control skip over incompressible data */
      29             : #define HASH_READ_SIZE 8
      30             : typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
      31             : 
      32             : 
      33             : /*-*************************************
      34             : *  Helper functions
      35             : ***************************************/
      36          74 : size_t ZSTD_compressBound(size_t srcSize) { return FSE_compressBound(srcSize) + 12; }
      37             : 
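ZSTD_compressBound() reports the worst-case compressed size, which is how callers size a destination buffer so that single-shot compression cannot fail for lack of space. A minimal usage sketch (not part of the instrumented source), assuming only the public single-shot API from zstd.h; the wrapper name compress_to_heap is made up for illustration:

#include <stdlib.h>
#include <zstd.h>

/* Compress src into a freshly allocated buffer sized with ZSTD_compressBound().
 * Returns the compressed size (0 on error); *dstPtr receives ownership of the buffer. */
static size_t compress_to_heap(void** dstPtr, const void* src, size_t srcSize)
{
    size_t const dstCapacity = ZSTD_compressBound(srcSize);
    void* const dst = malloc(dstCapacity);
    if (dst == NULL) return 0;
    {   size_t const cSize = ZSTD_compress(dst, dstCapacity, src, srcSize, 1 /*level*/);
        if (ZSTD_isError(cSize)) { free(dst); return 0; }
        *dstPtr = dst;
        return cSize;
    }
}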
      38             : 
      39             : /*-*************************************
      40             : *  Sequence storage
      41             : ***************************************/
      42        3326 : static void ZSTD_resetSeqStore(seqStore_t* ssPtr)
      43             : {
      44        3326 :     ssPtr->lit = ssPtr->litStart;
      45        3326 :     ssPtr->sequences = ssPtr->sequencesStart;
      46        3326 :     ssPtr->longLengthID = 0;
      47        3326 : }
      48             : 
      49             : 
      50             : /*-*************************************
      51             : *  Context memory management
      52             : ***************************************/
      53             : struct ZSTD_CCtx_s
      54             : {
      55             :     const BYTE* nextSrc;    /* next block here to continue on current prefix */
      56             :     const BYTE* base;       /* All regular indexes relative to this position */
      57             :     const BYTE* dictBase;   /* extDict indexes relative to this position */
      58             :     U32   dictLimit;        /* below that point, need extDict */
      59             :     U32   lowLimit;         /* below that point, no more data */
      60             :     U32   nextToUpdate;     /* index from which to continue dictionary update */
      61             :     U32   nextToUpdate3;    /* index from which to continue dictionary update */
      62             :     U32   hashLog3;         /* dispatch table : larger == faster, more memory */
      63             :     U32   loadedDictEnd;
      64             :     ZSTD_compressionStage_e stage;
      65             :     U32   rep[ZSTD_REP_NUM];
      66             :     U32   savedRep[ZSTD_REP_NUM];
      67             :     U32   dictID;
      68             :     ZSTD_parameters params;
      69             :     void* workSpace;
      70             :     size_t workSpaceSize;
      71             :     size_t blockSize;
      72             :     U64 frameContentSize;
      73             :     XXH64_state_t xxhState;
      74             :     ZSTD_customMem customMem;
      75             : 
      76             :     seqStore_t seqStore;    /* sequences storage ptrs */
      77             :     U32* hashTable;
      78             :     U32* hashTable3;
      79             :     U32* chainTable;
      80             :     HUF_CElt* hufTable;
      81             :     U32 flagStaticTables;
      82             :     FSE_CTable offcodeCTable  [FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
      83             :     FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
      84             :     FSE_CTable litlengthCTable  [FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
      85             : };
      86             : 
      87           0 : ZSTD_CCtx* ZSTD_createCCtx(void)
      88             : {
      89           0 :     return ZSTD_createCCtx_advanced(defaultCustomMem);
      90             : }
      91             : 
      92           0 : ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
      93             : {
      94             :     ZSTD_CCtx* cctx;
      95             : 
      96           0 :     if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem;
      97           0 :     if (!customMem.customAlloc || !customMem.customFree) return NULL;
      98             : 
      99           0 :     cctx = (ZSTD_CCtx*) ZSTD_malloc(sizeof(ZSTD_CCtx), customMem);
     100           0 :     if (!cctx) return NULL;
     101           0 :     memset(cctx, 0, sizeof(ZSTD_CCtx));
     102           0 :     memcpy(&(cctx->customMem), &customMem, sizeof(customMem));
     103           0 :     return cctx;
     104             : }
     105             : 
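ZSTD_createCCtx_advanced() exists so that a host application can route every context allocation through its own callbacks (pools, tracking, arena allocators). A hedged sketch, assuming the ZSTD_customMem layout of this era ({customAlloc, customFree, opaque}) exposed behind ZSTD_STATIC_LINKING_ONLY; the callback and wrapper names are illustrative:

#define ZSTD_STATIC_LINKING_ONLY   /* assumed necessary to see the _advanced declarations */
#include <stdlib.h>
#include <zstd.h>

static void* my_alloc(void* opaque, size_t size) { (void)opaque; return malloc(size); }
static void  my_free (void* opaque, void* addr)  { (void)opaque; free(addr); }

static ZSTD_CCtx* create_custom_cctx(void)
{
    ZSTD_customMem const mem = { my_alloc, my_free, NULL /*opaque*/ };
    return ZSTD_createCCtx_advanced(mem);   /* NULL if allocation fails or only one callback is provided */
}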
     106           0 : size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
     107             : {
     108           0 :     if (cctx==NULL) return 0;   /* support free on NULL */
     109           0 :     ZSTD_free(cctx->workSpace, cctx->customMem);
     110           0 :     ZSTD_free(cctx, cctx->customMem);
     111           0 :     return 0;   /* reserved as a potential error code in the future */
     112             : }
     113             : 
     114           0 : size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
     115             : {
     116           0 :     if (cctx==NULL) return 0;   /* support sizeof on NULL */
     117           0 :     return sizeof(*cctx) + cctx->workSpaceSize;
     118             : }
     119             : 
     120           0 : const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx)   /* hidden interface */
     121             : {
     122           0 :     return &(ctx->seqStore);
     123             : }
     124             : 
     125             : 
      126             : /** ZSTD_checkCParams() :
     127             :     ensure param values remain within authorized range.
     128             :     @return : 0, or an error code if one value is beyond authorized range */
     129           0 : size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
     130             : {
     131             : #   define CLAMPCHECK(val,min,max) { if ((val<min) | (val>max)) return ERROR(compressionParameter_unsupported); }
     132           0 :     CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
     133           0 :     CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
     134           0 :     CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
     135           0 :     CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
     136           0 :     { U32 const searchLengthMin = ((cParams.strategy == ZSTD_fast) | (cParams.strategy == ZSTD_greedy)) ? ZSTD_SEARCHLENGTH_MIN+1 : ZSTD_SEARCHLENGTH_MIN;
     137           0 :       U32 const searchLengthMax = (cParams.strategy == ZSTD_fast) ? ZSTD_SEARCHLENGTH_MAX : ZSTD_SEARCHLENGTH_MAX-1;
     138           0 :       CLAMPCHECK(cParams.searchLength, searchLengthMin, searchLengthMax); }
     139           0 :     CLAMPCHECK(cParams.targetLength, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX);
     140           0 :     if ((U32)(cParams.strategy) > (U32)ZSTD_btopt) return ERROR(compressionParameter_unsupported);
     141           0 :     return 0;
     142             : }
     143             : 
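In the advanced API, hand-tuned parameters are expected to go through ZSTD_checkCParams() before they reach functions such as ZSTD_resetCCtx_advanced(), which assume validated input. A small sketch, assuming ZSTD_getCParams() from the static-linking section of zstd.h; cparams_are_valid is an illustrative wrapper:

static int cparams_are_valid(void)
{
    ZSTD_compressionParameters cParams = ZSTD_getCParams(3 /*level*/, 0 /*srcSize unknown*/, 0 /*no dict*/);
    cParams.windowLog = 22;   /* hand-tuned field, must stay within [ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX] */
    return !ZSTD_isError(ZSTD_checkCParams(cParams));
}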
     144             : 
     145             : /** ZSTD_adjustCParams() :
     146             :     optimize `cPar` for a given input (`srcSize` and `dictSize`).
      147             :     mostly downsizing, to reduce memory consumption and initialization latency.
     148             :     Both `srcSize` and `dictSize` are optional (use 0 if unknown),
     149             :     but if both are 0, no optimization can be done.
      150             :     Note : cPar is considered validated at this stage. Use ZSTD_checkCParams() to ensure that. */
     151          74 : ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize)
     152             : {
     153          74 :     if (srcSize+dictSize == 0) return cPar;   /* no size information available : no adjustment */
     154             : 
     155             :     /* resize params, to use less memory when necessary */
     156          74 :     {   U32 const minSrcSize = (srcSize==0) ? 500 : 0;
     157          74 :         U64 const rSize = srcSize + dictSize + minSrcSize;
     158          74 :         if (rSize < ((U64)1<<ZSTD_WINDOWLOG_MAX)) {
     159          74 :             U32 const srcLog = MAX(ZSTD_HASHLOG_MIN, ZSTD_highbit32((U32)(rSize)-1) + 1);
     160          74 :             if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
     161             :     }   }
     162          74 :     if (cPar.hashLog > cPar.windowLog) cPar.hashLog = cPar.windowLog;
     163          74 :     {   U32 const btPlus = (cPar.strategy == ZSTD_btlazy2) | (cPar.strategy == ZSTD_btopt);
     164          74 :         U32 const maxChainLog = cPar.windowLog+btPlus;
     165          74 :         if (cPar.chainLog > maxChainLog) cPar.chainLog = maxChainLog; }   /* <= ZSTD_CHAINLOG_MAX */
     166             : 
     167          74 :     if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN) cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN;  /* required for frame header */
     168             : 
     169          74 :     return cPar;
     170             : }
     171             : 
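The clamping is easiest to see on a small input: with srcSize = 100 KB and no dictionary, rSize is about 100 K, so srcLog = ZSTD_highbit32(rSize-1) + 1 = 17 and any larger windowLog is brought down to 17, with hashLog and chainLog reduced accordingly. A hedged sketch of that call (ZSTD_getCParams assumed from zstd.h; the wrapper name is illustrative):

static ZSTD_compressionParameters cparams_for_100KB(void)
{
    ZSTD_compressionParameters const big = ZSTD_getCParams(19, 0, 0);            /* high level => large windowLog */
    ZSTD_compressionParameters const fit = ZSTD_adjustCParams(big, 100*1024, 0);
    /* fit.windowLog <= 17 here; hashLog and chainLog have been reduced to stay consistent */
    return fit;
}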
     172             : 
     173           0 : size_t ZSTD_estimateCCtxSize(ZSTD_compressionParameters cParams)
     174             : {
     175           0 :     size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << cParams.windowLog);
     176           0 :     U32    const divider = (cParams.searchLength==3) ? 3 : 4;
     177           0 :     size_t const maxNbSeq = blockSize / divider;
     178           0 :     size_t const tokenSpace = blockSize + 11*maxNbSeq;
     179             : 
     180           0 :     size_t const chainSize = (cParams.strategy == ZSTD_fast) ? 0 : (1 << cParams.chainLog);
     181           0 :     size_t const hSize = ((size_t)1) << cParams.hashLog;
     182           0 :     U32    const hashLog3 = (cParams.searchLength>3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, cParams.windowLog);
     183           0 :     size_t const h3Size = ((size_t)1) << hashLog3;
     184           0 :     size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
     185             : 
     186           0 :     size_t const optSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits))*sizeof(U32)
     187             :                           + (ZSTD_OPT_NUM+1)*(sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
     188           0 :     size_t const neededSpace = tableSpace + (256*sizeof(U32)) /* huffTable */ + tokenSpace
     189           0 :                              + ((cParams.strategy == ZSTD_btopt) ? optSpace : 0);
     190             : 
     191           0 :     return sizeof(ZSTD_CCtx) + neededSpace;
     192             : }
     193             : 
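The estimate adds the match-finder tables, the Huffman table and the token space (plus the optimal-parser workspace for btopt) on top of sizeof(ZSTD_CCtx), so an application can budget memory before creating any context. A hedged sketch that prints the figure for one level (ZSTD_getCParams assumed from zstd.h; report_cctx_budget is illustrative):

#include <stdio.h>

static void report_cctx_budget(int level, unsigned long long srcSize)
{
    ZSTD_compressionParameters const cParams = ZSTD_getCParams(level, srcSize, 0);
    size_t const budget = ZSTD_estimateCCtxSize(cParams);
    printf("level %d : about %u KB per compression context\n", level, (unsigned)(budget >> 10));
}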
     194             : 
     195          74 : static U32 ZSTD_equivalentParams(ZSTD_parameters param1, ZSTD_parameters param2)
     196             : {
     197          74 :     return (param1.cParams.hashLog  == param2.cParams.hashLog)
     198          74 :          & (param1.cParams.chainLog == param2.cParams.chainLog)
     199          74 :          & (param1.cParams.strategy == param2.cParams.strategy)
     200          74 :          & ((param1.cParams.searchLength==3) == (param2.cParams.searchLength==3));
     201             : }
     202             : 
     203             : /*! ZSTD_continueCCtx() :
     204             :     reuse CCtx without reset (note : requires no dictionary) */
     205           0 : static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_parameters params, U64 frameContentSize)
     206             : {
     207           0 :     U32 const end = (U32)(cctx->nextSrc - cctx->base);
     208           0 :     cctx->params = params;
     209           0 :     cctx->frameContentSize = frameContentSize;
     210           0 :     cctx->lowLimit = end;
     211           0 :     cctx->dictLimit = end;
     212           0 :     cctx->nextToUpdate = end+1;
     213           0 :     cctx->stage = ZSTDcs_init;
     214           0 :     cctx->dictID = 0;
     215           0 :     cctx->loadedDictEnd = 0;
     216           0 :     { int i; for (i=0; i<ZSTD_REP_NUM; i++) cctx->rep[i] = repStartValue[i]; }
     217           0 :     cctx->seqStore.litLengthSum = 0;  /* force reset of btopt stats */
     218           0 :     XXH64_reset(&cctx->xxhState, 0);
     219           0 :     return 0;
     220             : }
     221             : 
     222             : typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset, ZSTDcrp_fullReset } ZSTD_compResetPolicy_e;
     223             : 
     224             : /*! ZSTD_resetCCtx_advanced() :
     225             :     note : 'params' must be validated */
     226          74 : static size_t ZSTD_resetCCtx_advanced (ZSTD_CCtx* zc,
     227             :                                        ZSTD_parameters params, U64 frameContentSize,
     228             :                                        ZSTD_compResetPolicy_e const crp)
     229             : {
     230          74 :     if (crp == ZSTDcrp_continue)
     231          74 :         if (ZSTD_equivalentParams(params, zc->params))
     232           0 :             return ZSTD_continueCCtx(zc, params, frameContentSize);
     233             : 
     234          74 :     {   size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << params.cParams.windowLog);
     235          74 :         U32    const divider = (params.cParams.searchLength==3) ? 3 : 4;
     236          74 :         size_t const maxNbSeq = blockSize / divider;
     237          74 :         size_t const tokenSpace = blockSize + 11*maxNbSeq;
     238          74 :         size_t const chainSize = (params.cParams.strategy == ZSTD_fast) ? 0 : (1 << params.cParams.chainLog);
     239          74 :         size_t const hSize = ((size_t)1) << params.cParams.hashLog;
     240          74 :         U32    const hashLog3 = (params.cParams.searchLength>3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, params.cParams.windowLog);
     241          74 :         size_t const h3Size = ((size_t)1) << hashLog3;
     242          74 :         size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
     243             :         void* ptr;
     244             : 
     245             :         /* Check if workSpace is large enough, alloc a new one if needed */
     246          74 :         {   size_t const optSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits))*sizeof(U32)
     247             :                                   + (ZSTD_OPT_NUM+1)*(sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
     248         148 :             size_t const neededSpace = tableSpace + (256*sizeof(U32)) /* huffTable */ + tokenSpace
     249          74 :                                   + ((params.cParams.strategy == ZSTD_btopt) ? optSpace : 0);
     250          74 :             if (zc->workSpaceSize < neededSpace) {
     251          74 :                 ZSTD_free(zc->workSpace, zc->customMem);
     252          74 :                 zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
     253          74 :                 if (zc->workSpace == NULL) return ERROR(memory_allocation);
     254          74 :                 zc->workSpaceSize = neededSpace;
     255             :         }   }
     256             : 
     257          74 :         if (crp!=ZSTDcrp_noMemset) memset(zc->workSpace, 0, tableSpace);   /* reset tables only */
     258          74 :         XXH64_reset(&zc->xxhState, 0);
     259          74 :         zc->hashLog3 = hashLog3;
     260          74 :         zc->hashTable = (U32*)(zc->workSpace);
     261          74 :         zc->chainTable = zc->hashTable + hSize;
     262          74 :         zc->hashTable3 = zc->chainTable + chainSize;
     263          74 :         ptr = zc->hashTable3 + h3Size;
     264          74 :         zc->hufTable = (HUF_CElt*)ptr;
     265          74 :         zc->flagStaticTables = 0;
     266          74 :         ptr = ((U32*)ptr) + 256;  /* note : HUF_CElt* is incomplete type, size is simulated using U32 */
     267             : 
     268          74 :         zc->nextToUpdate = 1;
     269          74 :         zc->nextSrc = NULL;
     270          74 :         zc->base = NULL;
     271          74 :         zc->dictBase = NULL;
     272          74 :         zc->dictLimit = 0;
     273          74 :         zc->lowLimit = 0;
     274          74 :         zc->params = params;
     275          74 :         zc->blockSize = blockSize;
     276          74 :         zc->frameContentSize = frameContentSize;
     277          74 :         { int i; for (i=0; i<ZSTD_REP_NUM; i++) zc->rep[i] = repStartValue[i]; }
     278             : 
     279          74 :         if (params.cParams.strategy == ZSTD_btopt) {
     280           0 :             zc->seqStore.litFreq = (U32*)ptr;
     281           0 :             zc->seqStore.litLengthFreq = zc->seqStore.litFreq + (1<<Litbits);
     282           0 :             zc->seqStore.matchLengthFreq = zc->seqStore.litLengthFreq + (MaxLL+1);
     283           0 :             zc->seqStore.offCodeFreq = zc->seqStore.matchLengthFreq + (MaxML+1);
     284           0 :             ptr = zc->seqStore.offCodeFreq + (MaxOff+1);
     285           0 :             zc->seqStore.matchTable = (ZSTD_match_t*)ptr;
     286           0 :             ptr = zc->seqStore.matchTable + ZSTD_OPT_NUM+1;
     287           0 :             zc->seqStore.priceTable = (ZSTD_optimal_t*)ptr;
     288           0 :             ptr = zc->seqStore.priceTable + ZSTD_OPT_NUM+1;
     289           0 :             zc->seqStore.litLengthSum = 0;
     290             :         }
     291          74 :         zc->seqStore.sequencesStart = (seqDef*)ptr;
     292          74 :         ptr = zc->seqStore.sequencesStart + maxNbSeq;
     293          74 :         zc->seqStore.llCode = (BYTE*) ptr;
     294          74 :         zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq;
     295          74 :         zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
     296          74 :         zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;
     297             : 
     298          74 :         zc->stage = ZSTDcs_init;
     299          74 :         zc->dictID = 0;
     300          74 :         zc->loadedDictEnd = 0;
     301             : 
     302          74 :         return 0;
     303             :     }
     304             : }
     305             : 
     306             : 
     307             : /*! ZSTD_copyCCtx() :
     308             : *   Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
     309             : *   Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
     310             : *   @return : 0, or an error code */
     311           0 : size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
     312             : {
     313           0 :     if (srcCCtx->stage!=ZSTDcs_init) return ERROR(stage_wrong);
     314             : 
     315           0 :     memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
     316           0 :     ZSTD_resetCCtx_advanced(dstCCtx, srcCCtx->params, pledgedSrcSize, ZSTDcrp_noMemset);
     317             : 
     318             :     /* copy tables */
     319           0 :     {   size_t const chainSize = (srcCCtx->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << srcCCtx->params.cParams.chainLog);
     320           0 :         size_t const hSize = ((size_t)1) << srcCCtx->params.cParams.hashLog;
     321           0 :         size_t const h3Size = (size_t)1 << srcCCtx->hashLog3;
     322           0 :         size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
     323           0 :         memcpy(dstCCtx->workSpace, srcCCtx->workSpace, tableSpace);
     324             :     }
     325             : 
     326             :     /* copy dictionary offsets */
     327           0 :     dstCCtx->nextToUpdate = srcCCtx->nextToUpdate;
     328           0 :     dstCCtx->nextToUpdate3= srcCCtx->nextToUpdate3;
     329           0 :     dstCCtx->nextSrc      = srcCCtx->nextSrc;
     330           0 :     dstCCtx->base         = srcCCtx->base;
     331           0 :     dstCCtx->dictBase     = srcCCtx->dictBase;
     332           0 :     dstCCtx->dictLimit    = srcCCtx->dictLimit;
     333           0 :     dstCCtx->lowLimit     = srcCCtx->lowLimit;
     334           0 :     dstCCtx->loadedDictEnd= srcCCtx->loadedDictEnd;
     335           0 :     dstCCtx->dictID       = srcCCtx->dictID;
     336             : 
     337             :     /* copy entropy tables */
     338           0 :     dstCCtx->flagStaticTables = srcCCtx->flagStaticTables;
     339           0 :     if (srcCCtx->flagStaticTables) {
     340           0 :         memcpy(dstCCtx->hufTable, srcCCtx->hufTable, 256*4);
     341           0 :         memcpy(dstCCtx->litlengthCTable, srcCCtx->litlengthCTable, sizeof(dstCCtx->litlengthCTable));
     342           0 :         memcpy(dstCCtx->matchlengthCTable, srcCCtx->matchlengthCTable, sizeof(dstCCtx->matchlengthCTable));
     343           0 :         memcpy(dstCCtx->offcodeCTable, srcCCtx->offcodeCTable, sizeof(dstCCtx->offcodeCTable));
     344             :     }
     345             : 
     346           0 :     return 0;
     347             : }
     348             : 
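A typical reason to duplicate a context is dictionary reuse: load the dictionary once into a reference CCtx, then copy that prepared state into a working context for each frame instead of re-inserting the dictionary every time. A hedged sketch, assuming ZSTD_compressBegin_usingDict(), ZSTD_compressContinue() and ZSTD_compressEnd() from this era's advanced block-level API; prepare_worker is an illustrative name:

/* Pay the dictionary-loading cost once on `ref`, then clone it into `work`. */
static size_t prepare_worker(ZSTD_CCtx* ref, ZSTD_CCtx* work,
                             const void* dict, size_t dictSize,
                             unsigned long long pledgedSrcSize)
{
    size_t const err = ZSTD_compressBegin_usingDict(ref, dict, dictSize, 3 /*level*/);
    if (ZSTD_isError(err)) return err;
    /* ref is still at stage ZSTDcs_init, so its tables may be duplicated */
    return ZSTD_copyCCtx(work, ref, pledgedSrcSize);
    /* afterwards, drive `work` with ZSTD_compressContinue() / ZSTD_compressEnd() as usual */
}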
     349             : 
     350             : /*! ZSTD_reduceTable() :
     351             : *   reduce table indexes by `reducerValue` */
     352           0 : static void ZSTD_reduceTable (U32* const table, U32 const size, U32 const reducerValue)
     353             : {
     354             :     U32 u;
     355           0 :     for (u=0 ; u < size ; u++) {
     356           0 :         if (table[u] < reducerValue) table[u] = 0;
     357           0 :         else table[u] -= reducerValue;
     358             :     }
     359           0 : }
     360             : 
     361             : /*! ZSTD_reduceIndex() :
     362             : *   rescale all indexes to avoid future overflow (indexes are U32) */
     363           0 : static void ZSTD_reduceIndex (ZSTD_CCtx* zc, const U32 reducerValue)
     364             : {
     365           0 :     { U32 const hSize = 1 << zc->params.cParams.hashLog;
     366           0 :       ZSTD_reduceTable(zc->hashTable, hSize, reducerValue); }
     367             : 
     368           0 :     { U32 const chainSize = (zc->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << zc->params.cParams.chainLog);
     369           0 :       ZSTD_reduceTable(zc->chainTable, chainSize, reducerValue); }
     370             : 
     371           0 :     { U32 const h3Size = (zc->hashLog3) ? 1 << zc->hashLog3 : 0;
     372           0 :       ZSTD_reduceTable(zc->hashTable3, h3Size, reducerValue); }
     373           0 : }
     374             : 
     375             : 
     376             : /*-*******************************************************
     377             : *  Block entropic compression
     378             : *********************************************************/
     379             : 
     380             : /* See zstd_compression_format.md for detailed format description */
     381             : 
     382           0 : size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
     383             : {
     384           0 :     if (srcSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall);
     385           0 :     memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
     386           0 :     MEM_writeLE24(dst, (U32)(srcSize << 2) + (U32)bt_raw);
     387           0 :     return ZSTD_blockHeaderSize+srcSize;
     388             : }
     389             : 
     390             : 
     391        1322 : static size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
     392             : {
     393        1322 :     BYTE* const ostart = (BYTE* const)dst;
     394        1322 :     U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);
     395             : 
     396        1322 :     if (srcSize + flSize > dstCapacity) return ERROR(dstSize_tooSmall);
     397             : 
     398        1322 :     switch(flSize)
     399             :     {
     400             :         case 1: /* 2 - 1 - 5 */
     401           0 :             ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
     402           0 :             break;
     403             :         case 2: /* 2 - 2 - 12 */
     404           6 :             MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
     405           6 :             break;
     406             :         default:   /*note : should not be necessary : flSize is within {1,2,3} */
     407             :         case 3: /* 2 - 2 - 20 */
     408        1316 :             MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
     409        1316 :             break;
     410             :     }
     411             : 
     412        1322 :     memcpy(ostart + flSize, src, srcSize);
     413        1322 :     return srcSize + flSize;
     414             : }
     415             : 
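Worked example of the raw-literals header above: for srcSize = 1000, flSize = 1 + (1000>31) + (1000>4095) = 2, so the 2-byte branch writes set_basic + (1<<2) + (1000<<4) = 16004 (set_basic being 0 in this version's enum) as a little-endian U16, then copies the 1000 literal bytes immediately after it.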
     416           0 : static size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
     417             : {
     418           0 :     BYTE* const ostart = (BYTE* const)dst;
     419           0 :     U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);
     420             : 
     421             :     (void)dstCapacity;  /* dstCapacity already guaranteed to be >=4, hence large enough */
     422             : 
     423           0 :     switch(flSize)
     424             :     {
     425             :         case 1: /* 2 - 1 - 5 */
     426           0 :             ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));
     427           0 :             break;
     428             :         case 2: /* 2 - 2 - 12 */
     429           0 :             MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
     430           0 :             break;
     431             :         default:   /*note : should not be necessary : flSize is necessarily within {1,2,3} */
     432             :         case 3: /* 2 - 2 - 20 */
     433           0 :             MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
     434           0 :             break;
     435             :     }
     436             : 
     437           0 :     ostart[flSize] = *(const BYTE*)src;
     438           0 :     return flSize+1;
     439             : }
     440             : 
     441             : 
     442        6652 : static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; }
     443             : 
     444        3326 : static size_t ZSTD_compressLiterals (ZSTD_CCtx* zc,
     445             :                                      void* dst, size_t dstCapacity,
     446             :                                const void* src, size_t srcSize)
     447             : {
     448        3326 :     size_t const minGain = ZSTD_minGain(srcSize);
     449        3326 :     size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
     450        3326 :     BYTE*  const ostart = (BYTE*)dst;
     451        3326 :     U32 singleStream = srcSize < 256;
     452        3326 :     symbolEncodingType_e hType = set_compressed;
     453             :     size_t cLitSize;
     454             : 
     455             : 
     456             :     /* small ? don't even attempt compression (speed opt) */
     457             : #   define LITERAL_NOENTROPY 63
     458        3326 :     {   size_t const minLitSize = zc->flagStaticTables ? 6 : LITERAL_NOENTROPY;
     459        3326 :         if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
     460             :     }
     461             : 
     462        3326 :     if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall);   /* not enough space for compression */
     463        3326 :     if (zc->flagStaticTables && (lhSize==3)) {
     464           0 :         hType = set_repeat;
     465           0 :         singleStream = 1;
     466           0 :         cLitSize = HUF_compress1X_usingCTable(ostart+lhSize, dstCapacity-lhSize, src, srcSize, zc->hufTable);
     467             :     } else {
     468        3330 :         cLitSize = singleStream ? HUF_compress1X(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11)
     469        3330 :                                 : HUF_compress2 (ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11);
     470             :     }
     471             : 
     472        3326 :     if ((cLitSize==0) | (cLitSize >= srcSize - minGain))
     473        1322 :         return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
     474        2004 :     if (cLitSize==1)
     475           0 :         return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
     476             : 
     477             :     /* Build header */
     478        2004 :     switch(lhSize)
     479             :     {
     480             :     case 3: /* 2 - 2 - 10 - 10 */
     481          18 :         {   U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
     482          18 :             MEM_writeLE24(ostart, lhc);
     483          18 :             break;
     484             :         }
     485             :     case 4: /* 2 - 2 - 14 - 14 */
     486         354 :         {   U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
     487         354 :             MEM_writeLE32(ostart, lhc);
     488         354 :             break;
     489             :         }
     490             :     default:   /* should not be necessary, lhSize is only {3,4,5} */
     491             :     case 5: /* 2 - 2 - 18 - 18 */
     492        1632 :         {   U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
     493        1632 :             MEM_writeLE32(ostart, lhc);
     494        1632 :             ostart[4] = (BYTE)(cLitSize >> 10);
     495        1632 :             break;
     496             :         }
     497             :     }
     498        2004 :     return lhSize+cLitSize;
     499             : }
     500             : 
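Worked example for the lhSize == 3 case above: a 200-byte literal run that compresses to 120 bytes stays single-stream (srcSize < 256) and keeps the 3-byte header, so lhc = hType + (0<<2) + (200<<4) + (120<<14), each size fitting its 10-bit field; inputs of 1 KB and 16 KB and above switch to the 4- and 5-byte headers, whose fields widen to 14 and 18 bits.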
     501             : static const BYTE LL_Code[64] = {  0,  1,  2,  3,  4,  5,  6,  7,
     502             :                                    8,  9, 10, 11, 12, 13, 14, 15,
     503             :                                   16, 16, 17, 17, 18, 18, 19, 19,
     504             :                                   20, 20, 20, 20, 21, 21, 21, 21,
     505             :                                   22, 22, 22, 22, 22, 22, 22, 22,
     506             :                                   23, 23, 23, 23, 23, 23, 23, 23,
     507             :                                   24, 24, 24, 24, 24, 24, 24, 24,
     508             :                                   24, 24, 24, 24, 24, 24, 24, 24 };
     509             : 
     510             : static const BYTE ML_Code[128] = { 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
     511             :                                   16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
     512             :                                   32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
     513             :                                   38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
     514             :                                   40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
     515             :                                   41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
     516             :                                   42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
     517             :                                   42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 };
     518             : 
     519             : 
     520        1408 : void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
     521             : {
     522        1408 :     BYTE const LL_deltaCode = 19;
     523        1408 :     BYTE const ML_deltaCode = 36;
     524        1408 :     const seqDef* const sequences = seqStorePtr->sequencesStart;
     525        1408 :     BYTE* const llCodeTable = seqStorePtr->llCode;
     526        1408 :     BYTE* const ofCodeTable = seqStorePtr->ofCode;
     527        1408 :     BYTE* const mlCodeTable = seqStorePtr->mlCode;
     528        1408 :     U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
     529             :     U32 u;
     530    11117090 :     for (u=0; u<nbSeq; u++) {
     531    11115682 :         U32 const llv = sequences[u].litLength;
     532    11115682 :         U32 const mlv = sequences[u].matchLength;
     533    11115682 :         llCodeTable[u] = (llv> 63) ? (BYTE)ZSTD_highbit32(llv) + LL_deltaCode : LL_Code[llv];
     534    11115682 :         ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
     535    11115682 :         mlCodeTable[u] = (mlv>127) ? (BYTE)ZSTD_highbit32(mlv) + ML_deltaCode : ML_Code[mlv];
     536             :     }
     537        1408 :     if (seqStorePtr->longLengthID==1)
     538          20 :         llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
     539        1408 :     if (seqStorePtr->longLengthID==2)
     540           4 :         mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
     541        1408 : }
     542             : 
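Worked example of the mapping above: a literal length of 100 is above 63, so its code is ZSTD_highbit32(100) + LL_deltaCode = 6 + 19 = 25, while a match-length value of 50 stays below 128 and is read straight from the table as ML_Code[50] = 38; the offset code is simply ZSTD_highbit32(offset), which is also the number of extra bits the offset will need in the bitstream.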
     543             : 
     544        3326 : size_t ZSTD_compressSequences(ZSTD_CCtx* zc,
     545             :                               void* dst, size_t dstCapacity,
     546             :                               size_t srcSize)
     547             : {
     548        3326 :     const seqStore_t* seqStorePtr = &(zc->seqStore);
     549             :     U32 count[MaxSeq+1];
     550             :     S16 norm[MaxSeq+1];
     551        3326 :     FSE_CTable* CTable_LitLength = zc->litlengthCTable;
     552        3326 :     FSE_CTable* CTable_OffsetBits = zc->offcodeCTable;
     553        3326 :     FSE_CTable* CTable_MatchLength = zc->matchlengthCTable;
     554             :     U32 LLtype, Offtype, MLtype;   /* compressed, raw or rle */
     555        3326 :     const seqDef* const sequences = seqStorePtr->sequencesStart;
     556        3326 :     const BYTE* const ofCodeTable = seqStorePtr->ofCode;
     557        3326 :     const BYTE* const llCodeTable = seqStorePtr->llCode;
     558        3326 :     const BYTE* const mlCodeTable = seqStorePtr->mlCode;
     559        3326 :     BYTE* const ostart = (BYTE*)dst;
     560        3326 :     BYTE* const oend = ostart + dstCapacity;
     561        3326 :     BYTE* op = ostart;
     562        3326 :     size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
     563             :     BYTE* seqHead;
     564             : 
     565             :     /* Compress literals */
     566        3326 :     {   const BYTE* const literals = seqStorePtr->litStart;
     567        3326 :         size_t const litSize = seqStorePtr->lit - literals;
     568        3326 :         size_t const cSize = ZSTD_compressLiterals(zc, op, dstCapacity, literals, litSize);
     569        3326 :         if (ZSTD_isError(cSize)) return cSize;
     570        3326 :         op += cSize;
     571             :     }
     572             : 
     573             :     /* Sequences Header */
     574        3326 :     if ((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead */) return ERROR(dstSize_tooSmall);
     575        3326 :     if (nbSeq < 0x7F) *op++ = (BYTE)nbSeq;
     576        1282 :     else if (nbSeq < LONGNBSEQ) op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
     577           0 :     else op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
     578        3326 :     if (nbSeq==0) goto _check_compressibility;
     579             : 
     580             :     /* seqHead : flags for FSE encoding type */
     581        1408 :     seqHead = op++;
     582             : 
     583             : #define MIN_SEQ_FOR_DYNAMIC_FSE   64
     584             : #define MAX_SEQ_FOR_STATIC_FSE  1000
     585             : 
     586             :     /* convert length/distances into codes */
     587        1408 :     ZSTD_seqToCodes(seqStorePtr);
     588             : 
     589             :     /* CTable for Literal Lengths */
     590        1408 :     {   U32 max = MaxLL;
     591        1408 :         size_t const mostFrequent = FSE_countFast(count, &max, llCodeTable, nbSeq);
     592        1408 :         if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
     593           0 :             *op++ = llCodeTable[0];
     594           0 :             FSE_buildCTable_rle(CTable_LitLength, (BYTE)max);
     595           0 :             LLtype = set_rle;
     596        1408 :         } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
     597           0 :             LLtype = set_repeat;
     598        1408 :         } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (LL_defaultNormLog-1)))) {
     599         106 :             FSE_buildCTable(CTable_LitLength, LL_defaultNorm, MaxLL, LL_defaultNormLog);
     600         106 :             LLtype = set_basic;
     601             :         } else {
     602        1302 :             size_t nbSeq_1 = nbSeq;
     603        1302 :             const U32 tableLog = FSE_optimalTableLog(LLFSELog, nbSeq, max);
     604        1302 :             if (count[llCodeTable[nbSeq-1]]>1) { count[llCodeTable[nbSeq-1]]--; nbSeq_1--; }
     605        1302 :             FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
     606        1302 :             { size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog);   /* overflow protected */
     607        1302 :               if (FSE_isError(NCountSize)) return ERROR(GENERIC);
     608        1302 :               op += NCountSize; }
     609        1302 :             FSE_buildCTable(CTable_LitLength, norm, max, tableLog);
     610        1302 :             LLtype = set_compressed;
     611             :     }   }
     612             : 
     613             :     /* CTable for Offsets */
     614        1408 :     {   U32 max = MaxOff;
     615        1408 :         size_t const mostFrequent = FSE_countFast(count, &max, ofCodeTable, nbSeq);
     616        1408 :         if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
     617          12 :             *op++ = ofCodeTable[0];
     618          12 :             FSE_buildCTable_rle(CTable_OffsetBits, (BYTE)max);
     619          12 :             Offtype = set_rle;
     620        1396 :         } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
     621           0 :             Offtype = set_repeat;
     622        1396 :         } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (OF_defaultNormLog-1)))) {
     623          96 :             FSE_buildCTable(CTable_OffsetBits, OF_defaultNorm, MaxOff, OF_defaultNormLog);
     624          96 :             Offtype = set_basic;
     625             :         } else {
     626        1300 :             size_t nbSeq_1 = nbSeq;
     627        1300 :             const U32 tableLog = FSE_optimalTableLog(OffFSELog, nbSeq, max);
     628        1300 :             if (count[ofCodeTable[nbSeq-1]]>1) { count[ofCodeTable[nbSeq-1]]--; nbSeq_1--; }
     629        1300 :             FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
     630        1300 :             { size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog);   /* overflow protected */
     631        1300 :               if (FSE_isError(NCountSize)) return ERROR(GENERIC);
     632        1300 :               op += NCountSize; }
     633        1300 :             FSE_buildCTable(CTable_OffsetBits, norm, max, tableLog);
     634        1300 :             Offtype = set_compressed;
     635             :     }   }
     636             : 
     637             :     /* CTable for MatchLengths */
     638        1408 :     {   U32 max = MaxML;
     639        1408 :         size_t const mostFrequent = FSE_countFast(count, &max, mlCodeTable, nbSeq);
     640        1408 :         if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
     641           4 :             *op++ = *mlCodeTable;
     642           4 :             FSE_buildCTable_rle(CTable_MatchLength, (BYTE)max);
     643           4 :             MLtype = set_rle;
     644        1404 :         } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
     645           0 :             MLtype = set_repeat;
     646        1404 :         } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (ML_defaultNormLog-1)))) {
     647         102 :             FSE_buildCTable(CTable_MatchLength, ML_defaultNorm, MaxML, ML_defaultNormLog);
     648         102 :             MLtype = set_basic;
     649             :         } else {
     650        1302 :             size_t nbSeq_1 = nbSeq;
     651        1302 :             const U32 tableLog = FSE_optimalTableLog(MLFSELog, nbSeq, max);
     652        1302 :             if (count[mlCodeTable[nbSeq-1]]>1) { count[mlCodeTable[nbSeq-1]]--; nbSeq_1--; }
     653        1302 :             FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
     654        1302 :             { size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog);   /* overflow protected */
     655        1302 :               if (FSE_isError(NCountSize)) return ERROR(GENERIC);
     656        1302 :               op += NCountSize; }
     657        1302 :             FSE_buildCTable(CTable_MatchLength, norm, max, tableLog);
     658        1302 :             MLtype = set_compressed;
     659             :     }   }
     660             : 
     661        1408 :     *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
     662        1408 :     zc->flagStaticTables = 0;
     663             : 
     664             :     /* Encoding Sequences */
     665             :     {   BIT_CStream_t blockStream;
     666             :         FSE_CState_t  stateMatchLength;
     667             :         FSE_CState_t  stateOffsetBits;
     668             :         FSE_CState_t  stateLitLength;
     669             : 
     670        1408 :         CHECK_E(BIT_initCStream(&blockStream, op, oend-op), dstSize_tooSmall); /* not enough space remaining */
     671             : 
     672             :         /* first symbols */
     673        1408 :         FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
     674        1408 :         FSE_initCState2(&stateOffsetBits,  CTable_OffsetBits,  ofCodeTable[nbSeq-1]);
     675        1408 :         FSE_initCState2(&stateLitLength,   CTable_LitLength,   llCodeTable[nbSeq-1]);
     676        1408 :         BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
     677        1408 :         if (MEM_32bits()) BIT_flushBits(&blockStream);
     678        1408 :         BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
     679        1408 :         if (MEM_32bits()) BIT_flushBits(&blockStream);
     680        1408 :         BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
     681        1408 :         BIT_flushBits(&blockStream);
     682             : 
     683             :         {   size_t n;
     684    11115682 :             for (n=nbSeq-2 ; n<nbSeq ; n--) {      /* intentional underflow */
     685    11114274 :                 BYTE const llCode = llCodeTable[n];
     686    11114274 :                 BYTE const ofCode = ofCodeTable[n];
     687    11114274 :                 BYTE const mlCode = mlCodeTable[n];
     688    11114274 :                 U32  const llBits = LL_bits[llCode];
     689    11114274 :                 U32  const ofBits = ofCode;                                     /* 32b*/  /* 64b*/
     690    11114274 :                 U32  const mlBits = ML_bits[mlCode];
     691             :                                                                                 /* (7)*/  /* (7)*/
     692    11114274 :                 FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode);       /* 15 */  /* 15 */
     693    11114274 :                 FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode);      /* 24 */  /* 24 */
     694    11114274 :                 if (MEM_32bits()) BIT_flushBits(&blockStream);                  /* (7)*/
     695    11114274 :                 FSE_encodeSymbol(&blockStream, &stateLitLength, llCode);        /* 16 */  /* 33 */
     696    11114274 :                 if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
     697         128 :                     BIT_flushBits(&blockStream);                                /* (7)*/
     698    11114274 :                 BIT_addBits(&blockStream, sequences[n].litLength, llBits);
     699    11114274 :                 if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
     700    11114274 :                 BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
     701    11114274 :                 if (MEM_32bits()) BIT_flushBits(&blockStream);                  /* (7)*/
     702    11114274 :                 BIT_addBits(&blockStream, sequences[n].offset, ofBits);         /* 31 */
     703    11114274 :                 BIT_flushBits(&blockStream);                                    /* (7)*/
     704             :         }   }
     705             : 
     706        1408 :         FSE_flushCState(&blockStream, &stateMatchLength);
     707        1408 :         FSE_flushCState(&blockStream, &stateOffsetBits);
     708        1408 :         FSE_flushCState(&blockStream, &stateLitLength);
     709             : 
     710        1408 :         {   size_t const streamSize = BIT_closeCStream(&blockStream);
     711        1408 :             if (streamSize==0) return ERROR(dstSize_tooSmall);   /* not enough space */
     712        1408 :             op += streamSize;
     713             :     }   }
     714             : 
     715             :     /* check compressibility */
     716             : _check_compressibility:
     717        3326 :     { size_t const minGain = ZSTD_minGain(srcSize);
     718        3326 :       size_t const maxCSize = srcSize - minGain;
     719        3326 :       if ((size_t)(op-ostart) >= maxCSize) return 0; }
     720             : 
     721             :     /* confirm repcodes */
     722        2046 :     { int i; for (i=0; i<ZSTD_REP_NUM; i++) zc->rep[i] = zc->savedRep[i]; }
     723             : 
     724        2046 :     return op - ostart;
     725             : }
     726             : 
     727             : 
     728             : /*! ZSTD_storeSeq() :
     729             :     Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
     730             :     `offsetCode` : distance to match, or 0 == repCode.
     731             :     `matchCode` : matchLength - MINMATCH
     732             : */
     733    11115682 : MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t matchCode)
     734             : {
     735             : #if 0  /* for debug */
     736             :     static const BYTE* g_start = NULL;
     737             :     const U32 pos = (U32)(literals - g_start);
     738             :     if (g_start==NULL) g_start = literals;
     739             :     //if ((pos > 1) && (pos < 50000))
     740             :         printf("Cpos %6u :%5u literals & match %3u bytes at distance %6u \n",
     741             :                pos, (U32)litLength, (U32)matchCode+MINMATCH, (U32)offsetCode);
     742             : #endif
     743             :     /* copy Literals */
     744    11115682 :     ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
     745    11115682 :     seqStorePtr->lit += litLength;
     746             : 
     747             :     /* literal Length */
     748    11115682 :     if (litLength>0xFFFF) { seqStorePtr->longLengthID = 1; seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); }
     749    11115682 :     seqStorePtr->sequences[0].litLength = (U16)litLength;
     750             : 
     751             :     /* match offset */
     752    11115682 :     seqStorePtr->sequences[0].offset = offsetCode + 1;
     753             : 
     754             :     /* match Length */
     755    11115682 :     if (matchCode>0xFFFF) { seqStorePtr->longLengthID = 2; seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); }
     756    11115682 :     seqStorePtr->sequences[0].matchLength = (U16)matchCode;
     757             : 
     758    11115682 :     seqStorePtr->sequences++;
     759    11115682 : }
     760             : 
     761             : 
     762             : /*-*************************************
     763             : *  Match length counter
     764             : ***************************************/
     765    11115000 : static unsigned ZSTD_NbCommonBytes (register size_t val)
     766             : {
     767    11115000 :     if (MEM_isLittleEndian()) {
     768    11115000 :         if (MEM_64bits()) {
     769             : #       if defined(_MSC_VER) && defined(_WIN64)
     770             :             unsigned long r = 0;
     771             :             _BitScanForward64( &r, (U64)val );
     772             :             return (unsigned)(r>>3);
     773             : #       elif defined(__GNUC__) && (__GNUC__ >= 3)
     774    11115000 :             return (__builtin_ctzll((U64)val) >> 3);
     775             : #       else
     776             :             static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
     777             :             return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
     778             : #       endif
     779             :         } else { /* 32 bits */
     780             : #       if defined(_MSC_VER)
     781             :             unsigned long r=0;
     782             :             _BitScanForward( &r, (U32)val );
     783             :             return (unsigned)(r>>3);
     784             : #       elif defined(__GNUC__) && (__GNUC__ >= 3)
     785           0 :             return (__builtin_ctz((U32)val) >> 3);
     786             : #       else
     787             :             static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
     788             :             return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
     789             : #       endif
     790             :         }
     791             :     } else {  /* Big Endian CPU */
     792           0 :         if (MEM_64bits()) {
     793             : #       if defined(_MSC_VER) && defined(_WIN64)
     794             :             unsigned long r = 0;
     795             :             _BitScanReverse64( &r, val );
     796             :             return (unsigned)(r>>3);
     797             : #       elif defined(__GNUC__) && (__GNUC__ >= 3)
     798           0 :             return (__builtin_clzll(val) >> 3);
     799             : #       else
     800             :             unsigned r;
     801             :             const unsigned n32 = sizeof(size_t)*4;   /* calculate this way due to compiler complaining in 32-bits mode */
     802             :             if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
     803             :             if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
     804             :             r += (!val);
     805             :             return r;
     806             : #       endif
     807             :         } else { /* 32 bits */
     808             : #       if defined(_MSC_VER)
     809             :             unsigned long r = 0;
     810             :             _BitScanReverse( &r, (unsigned long)val );
     811             :             return (unsigned)(r>>3);
     812             : #       elif defined(__GNUC__) && (__GNUC__ >= 3)
     813           0 :             return (__builtin_clz((U32)val) >> 3);
     814             : #       else
     815             :             unsigned r;
     816             :             if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
     817             :             r += (!val);
     818             :             return r;
     819             : #       endif
     820             :     }   }
     821             : }
     822             : 
     823             : 
     824    11115682 : static size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
     825             : {
     826    11115682 :     const BYTE* const pStart = pIn;
     827    11115682 :     const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1);
     828             : 
     829    28013566 :     while (pIn < pInLoopLimit) {
     830    16897202 :         size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
     831    16897202 :         if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; }
     832    11115000 :         pIn += ZSTD_NbCommonBytes(diff);
     833    11115000 :         return (size_t)(pIn - pStart);
     834             :     }
     835         682 :     if (MEM_64bits()) if ((pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; }
     836         682 :     if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; }
     837         682 :     if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
     838         682 :     return (size_t)(pIn - pStart);
     839             : }
     840             : 
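Worked example of the two helpers above on a little-endian 64-bit target: comparing "abcdefgh" against "abcdXfgh" XORs to a word whose low 32 bits are zero and whose fifth byte differs, so __builtin_ctzll(diff) lands in [32,40) and ZSTD_NbCommonBytes() returns (ctz >> 3) = 4; ZSTD_count() adds those 4 bytes to the running total and stops, since a difference was found.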
     841             : /** ZSTD_count_2segments() :
     842             : *   can count match length with `ip` & `match` in 2 different segments.
      843             : *   convention : on reaching mEnd, match count continues starting from iStart
     844             : */
     845           0 : static size_t ZSTD_count_2segments(const BYTE* ip, const BYTE* match, const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart)
     846             : {
     847           0 :     const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
     848           0 :     size_t const matchLength = ZSTD_count(ip, match, vEnd);
     849           0 :     if (match + matchLength != mEnd) return matchLength;
     850           0 :     return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
     851             : }
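/* Segment convention, illustrated : `match` may live in the external-dictionary
 * segment ending at mEnd while `ip` lives in the current segment [iStart..iEnd).
 * The count first runs against [match..mEnd) ; only if that whole tail matched
 * does it resume against the current prefix from iStart, so one logical match
 * can straddle the two segments. */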
     852             : 
     853             : 
     854             : /*-*************************************
     855             : *  Hashes
     856             : ***************************************/
     857             : static const U32 prime3bytes = 506832829U;
     858           0 : static U32    ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes)  >> (32-h) ; }
     859           0 : MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); }   /* only in zstd_opt.h */
     860             : 
     861             : static const U32 prime4bytes = 2654435761U;
     862           0 : static U32    ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
     863           0 : static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }
     864             : 
     865             : static const U64 prime5bytes = 889523592379ULL;
     866      827570 : static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u  << (64-40)) * prime5bytes) >> (64-h)) ; }
     867      827570 : static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }
     868             : 
     869             : static const U64 prime6bytes = 227718039650203ULL;
     870    65846230 : static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u  << (64-48)) * prime6bytes) >> (64-h)) ; }
     871    65846230 : static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }
     872             : 
     873             : static const U64 prime7bytes = 58295818150454627ULL;
     874           0 : static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u  << (64-56)) * prime7bytes) >> (64-h)) ; }
     875           0 : static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }
     876             : 
     877             : static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
     878      642572 : static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
     879      642572 : static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
     880             : 
     881    67316372 : static size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
     882             : {
     883    67316372 :     switch(mls)
     884             :     {
     885             :     default:
     886           0 :     case 4: return ZSTD_hash4Ptr(p, hBits);
     887      827570 :     case 5: return ZSTD_hash5Ptr(p, hBits);
     888    65846230 :     case 6: return ZSTD_hash6Ptr(p, hBits);
     889           0 :     case 7: return ZSTD_hash7Ptr(p, hBits);
     890      642572 :     case 8: return ZSTD_hash8Ptr(p, hBits);
     891             :     }
     892             : }
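/* Hash scheme, in short : each ZSTD_hashN() keeps only the N lowest-addressed
 * bytes of the little-endian read (that is what the left shift discards),
 * multiplies by a large odd constant, and keeps the top `h` bits, i.e. a
 * multiplicative hash into a table of (1<<h) entries.
 * Tiny self-check sketch below ; the value of hBits and the helper itself are
 * illustrative assumptions, not part of the library. */
#if 0   /* illustration only, excluded from the build */
static int ZSTD_hashPtr_rangeCheck(const void* p)   /* p must have at least 8 readable bytes */
{
    U32 const hBits = 17;                           /* assumed table size : 1<<17 entries */
    size_t const idx = ZSTD_hashPtr(p, hBits, 6);   /* same path as ZSTD_hash6Ptr */
    return idx < ((size_t)1 << hBits);              /* always expected to hold */
}
#endif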
     893             : 
     894             : 
     895             : /*-*************************************
     896             : *  Fast Scan
     897             : ***************************************/
     898           0 : static void ZSTD_fillHashTable (ZSTD_CCtx* zc, const void* end, const U32 mls)
     899             : {
     900           0 :     U32* const hashTable = zc->hashTable;
     901           0 :     U32  const hBits = zc->params.cParams.hashLog;
     902           0 :     const BYTE* const base = zc->base;
     903           0 :     const BYTE* ip = base + zc->nextToUpdate;
     904           0 :     const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
     905           0 :     const size_t fastHashFillStep = 3;
     906             : 
     907           0 :     while(ip <= iend) {
     908           0 :         hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base);
     909           0 :         ip += fastHashFillStep;
     910             :     }
     911           0 : }
     912             : 
     913             : 
     914             : FORCE_INLINE
     915             : void ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx,
     916             :                                const void* src, size_t srcSize,
     917             :                                const U32 mls)
     918             : {
     919        3310 :     U32* const hashTable = cctx->hashTable;
     920        3310 :     U32  const hBits = cctx->params.cParams.hashLog;
     921        3310 :     seqStore_t* seqStorePtr = &(cctx->seqStore);
     922        3310 :     const BYTE* const base = cctx->base;
     923        3310 :     const BYTE* const istart = (const BYTE*)src;
     924        3310 :     const BYTE* ip = istart;
     925        3310 :     const BYTE* anchor = istart;
     926        3310 :     const U32   lowestIndex = cctx->dictLimit;
     927        3310 :     const BYTE* const lowest = base + lowestIndex;
     928        3310 :     const BYTE* const iend = istart + srcSize;
     929        3310 :     const BYTE* const ilimit = iend - HASH_READ_SIZE;
     930        3310 :     U32 offset_1=cctx->rep[0], offset_2=cctx->rep[1];
     931        3310 :     U32 offsetSaved = 0;
     932             : 
     933             :     /* init */
     934        3310 :     ip += (ip==lowest);
     935        3310 :     {   U32 const maxRep = (U32)(ip-lowest);
     936        3310 :         if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
     937        3310 :         if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
     938             :     }
     939             : 
     940             :     /* Main Search Loop */
     941    44572850 :     while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
     942             :         size_t mLength;
     943    44569540 :         size_t const h = ZSTD_hashPtr(ip, hBits, mls);
     944    44569540 :         U32 const current = (U32)(ip-base);
     945    44569540 :         U32 const matchIndex = hashTable[h];
     946    44569540 :         const BYTE* match = base + matchIndex;
     947    44569540 :         hashTable[h] = current;   /* update hash table */
     948             : 
     949    44569540 :         if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
     950     3489498 :             mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
     951     3489498 :             ip++;
     952     3489498 :             ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
     953             :         } else {
     954             :             U32 offset;
     955    41080042 :             if ( (matchIndex <= lowestIndex) || (MEM_read32(match) != MEM_read32(ip)) ) {
     956    34067702 :                 ip += ((ip-anchor) >> g_searchStrength) + 1;
     957    34067702 :                 continue;
     958             :             }
     959     7012340 :             mLength = ZSTD_count(ip+4, match+4, iend) + 4;
     960     7012340 :             offset = (U32)(ip-match);
     961     1241188 :             while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
     962     7012340 :             offset_2 = offset_1;
     963     7012340 :             offset_1 = offset;
     964             : 
     965     7012340 :             ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
     966             :         }
     967             : 
     968             :         /* match found */
     969    10501838 :         ip += mLength;
     970    10501838 :         anchor = ip;
     971             : 
     972    10501838 :         if (ip <= ilimit) {
     973             :             /* Fill Table */
     974    10501064 :             hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2;  /* here because current+2 could be > iend-8 */
     975    10501064 :             hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base);
     976             :             /* check immediate repcode */
     977    10993532 :             while ( (ip <= ilimit)
     978    10993480 :                  && ( (offset_2>0)
     979    10993480 :                  & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
     980             :                 /* store sequence */
     981      492468 :                 size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
     982      492468 :                 { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; }  /* swap offset_2 <=> offset_1 */
     983      492468 :                 hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip-base);
     984      492468 :                 ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH);
     985      492468 :                 ip += rLength;
     986      492468 :                 anchor = ip;
      987      492468 :                 continue;   /* redundant, but faster when present (?) */
     988             :     }   }   }
     989             : 
     990             :     /* save reps for next block */
     991        3310 :     cctx->savedRep[0] = offset_1 ? offset_1 : offsetSaved;
     992        3310 :     cctx->savedRep[1] = offset_2 ? offset_2 : offsetSaved;
     993             : 
     994             :     /* Last Literals */
     995        3310 :     {   size_t const lastLLSize = iend - anchor;
     996        3310 :         memcpy(seqStorePtr->lit, anchor, lastLLSize);
     997        3310 :         seqStorePtr->lit += lastLLSize;
     998             :     }
     999             : }
    1000             : 
    1001             : 
    1002        3310 : static void ZSTD_compressBlock_fast(ZSTD_CCtx* ctx,
    1003             :                        const void* src, size_t srcSize)
    1004             : {
    1005        3310 :     const U32 mls = ctx->params.cParams.searchLength;
    1006        3310 :     switch(mls)
    1007             :     {
    1008             :     default:
    1009             :     case 4 :
    1010           0 :         ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 4); return;
    1011             :     case 5 :
    1012          10 :         ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 5); return;
    1013             :     case 6 :
    1014        3300 :         ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 6); return;
    1015             :     case 7 :
    1016           0 :         ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 7); return;
    1017             :     }
    1018             : }
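/* Dispatch pattern, for reference : the switch above hard-wires `mls` as a
 * compile-time constant into the FORCE_INLINE'd generic body, so the compiler
 * emits one specialized search loop per match-length setting (the "template
 * emulation" also noted for the hash-chain search further below). With
 * searchLength == 6, for instance, every ZSTD_hashPtr(p, hBits, 6) inside the
 * specialized copy folds down to ZSTD_hash6Ptr(p, hBits). */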
    1019             : 
    1020             : 
    1021           0 : static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx,
    1022             :                                  const void* src, size_t srcSize,
    1023             :                                  const U32 mls)
    1024             : {
    1025           0 :     U32* hashTable = ctx->hashTable;
    1026           0 :     const U32 hBits = ctx->params.cParams.hashLog;
    1027           0 :     seqStore_t* seqStorePtr = &(ctx->seqStore);
    1028           0 :     const BYTE* const base = ctx->base;
    1029           0 :     const BYTE* const dictBase = ctx->dictBase;
    1030           0 :     const BYTE* const istart = (const BYTE*)src;
    1031           0 :     const BYTE* ip = istart;
    1032           0 :     const BYTE* anchor = istart;
    1033           0 :     const U32   lowestIndex = ctx->lowLimit;
    1034           0 :     const BYTE* const dictStart = dictBase + lowestIndex;
    1035           0 :     const U32   dictLimit = ctx->dictLimit;
    1036           0 :     const BYTE* const lowPrefixPtr = base + dictLimit;
    1037           0 :     const BYTE* const dictEnd = dictBase + dictLimit;
    1038           0 :     const BYTE* const iend = istart + srcSize;
    1039           0 :     const BYTE* const ilimit = iend - 8;
    1040           0 :     U32 offset_1=ctx->rep[0], offset_2=ctx->rep[1];
    1041             : 
    1042             :     /* Search Loop */
     1043           0 :     while (ip < ilimit) {  /* < instead of <=, because repcode check at (ip+1) */
    1044           0 :         const size_t h = ZSTD_hashPtr(ip, hBits, mls);
    1045           0 :         const U32 matchIndex = hashTable[h];
    1046           0 :         const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base;
    1047           0 :         const BYTE* match = matchBase + matchIndex;
    1048           0 :         const U32 current = (U32)(ip-base);
    1049           0 :         const U32 repIndex = current + 1 - offset_1;   /* offset_1 expected <= current +1 */
    1050           0 :         const BYTE* repBase = repIndex < dictLimit ? dictBase : base;
    1051           0 :         const BYTE* repMatch = repBase + repIndex;
    1052             :         size_t mLength;
    1053           0 :         hashTable[h] = current;   /* update hash table */
    1054             : 
    1055           0 :         if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex))
    1056           0 :            && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
    1057           0 :             const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
    1058           0 :             mLength = ZSTD_count_2segments(ip+1+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repMatchEnd, lowPrefixPtr) + EQUAL_READ32;
    1059           0 :             ip++;
    1060           0 :             ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
    1061             :         } else {
    1062           0 :             if ( (matchIndex < lowestIndex) ||
    1063           0 :                  (MEM_read32(match) != MEM_read32(ip)) ) {
    1064           0 :                 ip += ((ip-anchor) >> g_searchStrength) + 1;
    1065           0 :                 continue;
    1066             :             }
    1067           0 :             {   const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
    1068           0 :                 const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
    1069             :                 U32 offset;
    1070           0 :                 mLength = ZSTD_count_2segments(ip+EQUAL_READ32, match+EQUAL_READ32, iend, matchEnd, lowPrefixPtr) + EQUAL_READ32;
    1071           0 :                 while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
    1072           0 :                 offset = current - matchIndex;
    1073           0 :                 offset_2 = offset_1;
    1074           0 :                 offset_1 = offset;
    1075           0 :                 ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
    1076             :         }   }
    1077             : 
    1078             :         /* found a match : store it */
    1079           0 :         ip += mLength;
    1080           0 :         anchor = ip;
    1081             : 
    1082           0 :         if (ip <= ilimit) {
    1083             :             /* Fill Table */
    1084           0 :             hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2;
    1085           0 :             hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base);
    1086             :             /* check immediate repcode */
    1087           0 :             while (ip <= ilimit) {
    1088           0 :                 U32 const current2 = (U32)(ip-base);
    1089           0 :                 U32 const repIndex2 = current2 - offset_2;
    1090           0 :                 const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
     1091           0 :                 if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex))  /* intentional underflow */
    1092           0 :                    && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
    1093           0 :                     const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
    1094           0 :                     size_t repLength2 = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch2+EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
    1095           0 :                     U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
    1096           0 :                     ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH);
    1097           0 :                     hashTable[ZSTD_hashPtr(ip, hBits, mls)] = current2;
    1098           0 :                     ip += repLength2;
    1099           0 :                     anchor = ip;
    1100           0 :                     continue;
    1101             :                 }
    1102           0 :                 break;
    1103             :     }   }   }
    1104             : 
    1105             :     /* save reps for next block */
    1106           0 :     ctx->savedRep[0] = offset_1; ctx->savedRep[1] = offset_2;
    1107             : 
    1108             :     /* Last Literals */
    1109           0 :     {   size_t const lastLLSize = iend - anchor;
    1110           0 :         memcpy(seqStorePtr->lit, anchor, lastLLSize);
    1111           0 :         seqStorePtr->lit += lastLLSize;
    1112             :     }
    1113           0 : }
    1114             : 
    1115             : 
    1116           0 : static void ZSTD_compressBlock_fast_extDict(ZSTD_CCtx* ctx,
    1117             :                          const void* src, size_t srcSize)
    1118             : {
    1119           0 :     U32 const mls = ctx->params.cParams.searchLength;
    1120           0 :     switch(mls)
    1121             :     {
    1122             :     default:
    1123             :     case 4 :
    1124           0 :         ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 4); return;
    1125             :     case 5 :
    1126           0 :         ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 5); return;
    1127             :     case 6 :
    1128           0 :         ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 6); return;
    1129             :     case 7 :
    1130           0 :         ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 7); return;
    1131             :     }
    1132             : }
    1133             : 
    1134             : 
    1135             : /*-*************************************
    1136             : *  Double Fast
    1137             : ***************************************/
    1138           0 : static void ZSTD_fillDoubleHashTable (ZSTD_CCtx* cctx, const void* end, const U32 mls)
    1139             : {
    1140           0 :     U32* const hashLarge = cctx->hashTable;
    1141           0 :     U32  const hBitsL = cctx->params.cParams.hashLog;
    1142           0 :     U32* const hashSmall = cctx->chainTable;
    1143           0 :     U32  const hBitsS = cctx->params.cParams.chainLog;
    1144           0 :     const BYTE* const base = cctx->base;
    1145           0 :     const BYTE* ip = base + cctx->nextToUpdate;
    1146           0 :     const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
    1147           0 :     const size_t fastHashFillStep = 3;
    1148             : 
    1149           0 :     while(ip <= iend) {
    1150           0 :         hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base);
    1151           0 :         hashLarge[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base);
    1152           0 :         ip += fastHashFillStep;
    1153             :     }
    1154           0 : }
    1155             : 
    1156             : 
    1157             : FORCE_INLINE
    1158             : void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx* cctx,
    1159             :                                  const void* src, size_t srcSize,
    1160             :                                  const U32 mls)
    1161             : {
    1162          16 :     U32* const hashLong = cctx->hashTable;
    1163          16 :     const U32 hBitsL = cctx->params.cParams.hashLog;
    1164          16 :     U32* const hashSmall = cctx->chainTable;
    1165          16 :     const U32 hBitsS = cctx->params.cParams.chainLog;
    1166          16 :     seqStore_t* seqStorePtr = &(cctx->seqStore);
    1167          16 :     const BYTE* const base = cctx->base;
    1168          16 :     const BYTE* const istart = (const BYTE*)src;
    1169          16 :     const BYTE* ip = istart;
    1170          16 :     const BYTE* anchor = istart;
    1171          16 :     const U32 lowestIndex = cctx->dictLimit;
    1172          16 :     const BYTE* const lowest = base + lowestIndex;
    1173          16 :     const BYTE* const iend = istart + srcSize;
    1174          16 :     const BYTE* const ilimit = iend - HASH_READ_SIZE;
    1175          16 :     U32 offset_1=cctx->rep[0], offset_2=cctx->rep[1];
    1176          16 :     U32 offsetSaved = 0;
    1177             : 
    1178             :     /* init */
    1179          16 :     ip += (ip==lowest);
    1180          16 :     {   U32 const maxRep = (U32)(ip-lowest);
    1181          16 :         if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
    1182          16 :         if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
    1183             :     }
    1184             : 
    1185             :     /* Main Search Loop */
    1186      371822 :     while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
    1187             :         size_t mLength;
    1188      371806 :         size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
    1189      371806 :         size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
    1190      371806 :         U32 const current = (U32)(ip-base);
    1191      371806 :         U32 const matchIndexL = hashLong[h2];
    1192      371806 :         U32 const matchIndexS = hashSmall[h];
    1193      371806 :         const BYTE* matchLong = base + matchIndexL;
    1194      371806 :         const BYTE* match = base + matchIndexS;
    1195      371806 :         hashLong[h2] = hashSmall[h] = current;   /* update hash tables */
    1196             : 
    1197      371806 :         if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) { /* note : by construction, offset_1 <= current */
    1198       56024 :             mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
    1199       56024 :             ip++;
    1200       56024 :             ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
    1201             :         } else {
    1202             :             U32 offset;
    1203      315782 :             if ( (matchIndexL > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip)) ) {
    1204       27566 :                 mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
    1205       27566 :                 offset = (U32)(ip-matchLong);
    1206        1528 :                 while (((ip>anchor) & (matchLong>lowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
    1207      288216 :             } else if ( (matchIndexS > lowestIndex) && (MEM_read32(match) == MEM_read32(ip)) ) {
    1208       32908 :                 size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
    1209       32908 :                 U32 const matchIndex3 = hashLong[h3];
    1210       32908 :                 const BYTE* match3 = base + matchIndex3;
    1211       32908 :                 hashLong[h3] = current + 1;
    1212       32908 :                 if ( (matchIndex3 > lowestIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
    1213        4394 :                     mLength = ZSTD_count(ip+9, match3+8, iend) + 8;
    1214        4394 :                     ip++;
    1215        4394 :                     offset = (U32)(ip-match3);
    1216        1878 :                     while (((ip>anchor) & (match3>lowest)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
    1217             :                 } else {
    1218       28514 :                     mLength = ZSTD_count(ip+4, match+4, iend) + 4;
    1219       28514 :                     offset = (U32)(ip-match);
    1220        3860 :                     while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
    1221             :                 }
    1222             :             } else {
    1223      255308 :                 ip += ((ip-anchor) >> g_searchStrength) + 1;
    1224      255308 :                 continue;
    1225             :             }
    1226             : 
    1227       60474 :             offset_2 = offset_1;
    1228       60474 :             offset_1 = offset;
    1229             : 
    1230       60474 :             ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
    1231             :         }
    1232             : 
    1233             :         /* match found */
    1234      116498 :         ip += mLength;
    1235      116498 :         anchor = ip;
    1236             : 
    1237      116498 :         if (ip <= ilimit) {
    1238             :             /* Fill Table */
    1239      232980 :             hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] =
    1240      116490 :                 hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2;  /* here because current+2 could be > iend-8 */
    1241      232980 :             hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] =
    1242      116490 :                 hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base);
    1243             : 
    1244             :             /* check immediate repcode */
    1245      121368 :             while ( (ip <= ilimit)
    1246      121366 :                  && ( (offset_2>0)
    1247      121366 :                  & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
    1248             :                 /* store sequence */
    1249        4878 :                 size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
    1250        4878 :                 { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
    1251        4878 :                 hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
    1252        4878 :                 hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
    1253        4878 :                 ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH);
    1254        4878 :                 ip += rLength;
    1255        4878 :                 anchor = ip;
     1256        4878 :                 continue;   /* redundant, but faster when present (?) */
    1257             :     }   }   }
    1258             : 
    1259             :     /* save reps for next block */
    1260          16 :     cctx->savedRep[0] = offset_1 ? offset_1 : offsetSaved;
    1261          16 :     cctx->savedRep[1] = offset_2 ? offset_2 : offsetSaved;
    1262             : 
    1263             :     /* Last Literals */
    1264          16 :     {   size_t const lastLLSize = iend - anchor;
    1265          16 :         memcpy(seqStorePtr->lit, anchor, lastLLSize);
    1266          16 :         seqStorePtr->lit += lastLLSize;
    1267             :     }
    1268             : }
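/* Search order used above, summarized : the long table (8-byte hash) is probed
 * first, since a hit already guarantees 8 matching bytes ; otherwise the small
 * table (`mls`-byte hash) is tried, with one extra long-table probe at ip+1
 * before settling for the short match. The repcode check at ip+1 runs before
 * either table is consulted, and an immediate-repcode loop follows each stored
 * match. */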
    1269             : 
    1270             : 
    1271          16 : static void ZSTD_compressBlock_doubleFast(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
    1272             : {
    1273          16 :     const U32 mls = ctx->params.cParams.searchLength;
    1274          16 :     switch(mls)
    1275             :     {
    1276             :     default:
    1277             :     case 4 :
    1278           0 :         ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 4); return;
    1279             :     case 5 :
    1280          16 :         ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 5); return;
    1281             :     case 6 :
    1282           0 :         ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 6); return;
    1283             :     case 7 :
    1284           0 :         ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 7); return;
    1285             :     }
    1286             : }
    1287             : 
    1288             : 
    1289           0 : static void ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx* ctx,
    1290             :                                  const void* src, size_t srcSize,
    1291             :                                  const U32 mls)
    1292             : {
    1293           0 :     U32* const hashLong = ctx->hashTable;
    1294           0 :     U32  const hBitsL = ctx->params.cParams.hashLog;
    1295           0 :     U32* const hashSmall = ctx->chainTable;
    1296           0 :     U32  const hBitsS = ctx->params.cParams.chainLog;
    1297           0 :     seqStore_t* seqStorePtr = &(ctx->seqStore);
    1298           0 :     const BYTE* const base = ctx->base;
    1299           0 :     const BYTE* const dictBase = ctx->dictBase;
    1300           0 :     const BYTE* const istart = (const BYTE*)src;
    1301           0 :     const BYTE* ip = istart;
    1302           0 :     const BYTE* anchor = istart;
    1303           0 :     const U32   lowestIndex = ctx->lowLimit;
    1304           0 :     const BYTE* const dictStart = dictBase + lowestIndex;
    1305           0 :     const U32   dictLimit = ctx->dictLimit;
    1306           0 :     const BYTE* const lowPrefixPtr = base + dictLimit;
    1307           0 :     const BYTE* const dictEnd = dictBase + dictLimit;
    1308           0 :     const BYTE* const iend = istart + srcSize;
    1309           0 :     const BYTE* const ilimit = iend - 8;
    1310           0 :     U32 offset_1=ctx->rep[0], offset_2=ctx->rep[1];
    1311             : 
    1312             :     /* Search Loop */
     1313           0 :     while (ip < ilimit) {  /* < instead of <=, because repcode check at (ip+1) */
    1314           0 :         const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
    1315           0 :         const U32 matchIndex = hashSmall[hSmall];
    1316           0 :         const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base;
    1317           0 :         const BYTE* match = matchBase + matchIndex;
    1318             : 
    1319           0 :         const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
    1320           0 :         const U32 matchLongIndex = hashLong[hLong];
    1321           0 :         const BYTE* matchLongBase = matchLongIndex < dictLimit ? dictBase : base;
    1322           0 :         const BYTE* matchLong = matchLongBase + matchLongIndex;
    1323             : 
    1324           0 :         const U32 current = (U32)(ip-base);
    1325           0 :         const U32 repIndex = current + 1 - offset_1;   /* offset_1 expected <= current +1 */
    1326           0 :         const BYTE* repBase = repIndex < dictLimit ? dictBase : base;
    1327           0 :         const BYTE* repMatch = repBase + repIndex;
    1328             :         size_t mLength;
    1329           0 :         hashSmall[hSmall] = hashLong[hLong] = current;   /* update hash table */
    1330             : 
    1331           0 :         if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex))
    1332           0 :            && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
    1333           0 :             const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
    1334           0 :             mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4;
    1335           0 :             ip++;
    1336           0 :             ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
    1337             :         } else {
    1338           0 :             if ((matchLongIndex > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
    1339           0 :                 const BYTE* matchEnd = matchLongIndex < dictLimit ? dictEnd : iend;
    1340           0 :                 const BYTE* lowMatchPtr = matchLongIndex < dictLimit ? dictStart : lowPrefixPtr;
    1341             :                 U32 offset;
    1342           0 :                 mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, lowPrefixPtr) + 8;
    1343           0 :                 offset = current - matchLongIndex;
    1344           0 :                 while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; }   /* catch up */
    1345           0 :                 offset_2 = offset_1;
    1346           0 :                 offset_1 = offset;
    1347           0 :                 ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
    1348             : 
    1349           0 :             } else if ((matchIndex > lowestIndex) && (MEM_read32(match) == MEM_read32(ip))) {
    1350           0 :                 size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
    1351           0 :                 U32 const matchIndex3 = hashLong[h3];
    1352           0 :                 const BYTE* const match3Base = matchIndex3 < dictLimit ? dictBase : base;
    1353           0 :                 const BYTE* match3 = match3Base + matchIndex3;
    1354             :                 U32 offset;
    1355           0 :                 hashLong[h3] = current + 1;
    1356           0 :                 if ( (matchIndex3 > lowestIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
    1357           0 :                     const BYTE* matchEnd = matchIndex3 < dictLimit ? dictEnd : iend;
    1358           0 :                     const BYTE* lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr;
    1359           0 :                     mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, lowPrefixPtr) + 8;
    1360           0 :                     ip++;
    1361           0 :                     offset = current+1 - matchIndex3;
    1362           0 :                     while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
    1363             :                 } else {
    1364           0 :                     const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
    1365           0 :                     const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
    1366           0 :                     mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4;
    1367           0 :                     offset = current - matchIndex;
    1368           0 :                     while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
    1369             :                 }
    1370           0 :                 offset_2 = offset_1;
    1371           0 :                 offset_1 = offset;
    1372           0 :                 ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
    1373             : 
    1374             :             } else {
    1375           0 :                 ip += ((ip-anchor) >> g_searchStrength) + 1;
    1376           0 :                 continue;
    1377             :         }   }
    1378             : 
    1379             :         /* found a match : store it */
    1380           0 :         ip += mLength;
    1381           0 :         anchor = ip;
    1382             : 
    1383           0 :         if (ip <= ilimit) {
    1384             :             /* Fill Table */
     1385           0 :             hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2;
     1386           0 :             hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] = current+2;
    1387           0 :             hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base);
    1388           0 :             hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
    1389             :             /* check immediate repcode */
    1390           0 :             while (ip <= ilimit) {
    1391           0 :                 U32 const current2 = (U32)(ip-base);
    1392           0 :                 U32 const repIndex2 = current2 - offset_2;
    1393           0 :                 const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
     1394           0 :                 if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex))  /* intentional underflow */
    1395           0 :                    && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
    1396           0 :                     const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
    1397           0 :                     size_t const repLength2 = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch2+EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
    1398           0 :                     U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
    1399           0 :                     ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH);
    1400           0 :                     hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
    1401           0 :                     hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
    1402           0 :                     ip += repLength2;
    1403           0 :                     anchor = ip;
    1404           0 :                     continue;
    1405             :                 }
    1406           0 :                 break;
    1407             :     }   }   }
    1408             : 
    1409             :     /* save reps for next block */
    1410           0 :     ctx->savedRep[0] = offset_1; ctx->savedRep[1] = offset_2;
    1411             : 
    1412             :     /* Last Literals */
    1413           0 :     {   size_t const lastLLSize = iend - anchor;
    1414           0 :         memcpy(seqStorePtr->lit, anchor, lastLLSize);
    1415           0 :         seqStorePtr->lit += lastLLSize;
    1416             :     }
    1417           0 : }
    1418             : 
    1419             : 
    1420           0 : static void ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx* ctx,
    1421             :                          const void* src, size_t srcSize)
    1422             : {
    1423           0 :     U32 const mls = ctx->params.cParams.searchLength;
    1424           0 :     switch(mls)
    1425             :     {
    1426             :     default:
    1427             :     case 4 :
    1428           0 :         ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 4); return;
    1429             :     case 5 :
    1430           0 :         ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 5); return;
    1431             :     case 6 :
    1432           0 :         ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 6); return;
    1433             :     case 7 :
    1434           0 :         ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 7); return;
    1435             :     }
    1436             : }
    1437             : 
    1438             : 
    1439             : /*-*************************************
    1440             : *  Binary Tree search
    1441             : ***************************************/
     1442             : /** ZSTD_insertBt1() : add one or multiple positions to the tree.
     1443             : *   ip : assumed <= iend-8.
     1444             : *   @return : number of positions added */
    1445           0 : static U32 ZSTD_insertBt1(ZSTD_CCtx* zc, const BYTE* const ip, const U32 mls, const BYTE* const iend, U32 nbCompares,
    1446             :                           U32 extDict)
    1447             : {
    1448           0 :     U32*   const hashTable = zc->hashTable;
    1449           0 :     U32    const hashLog = zc->params.cParams.hashLog;
    1450           0 :     size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
    1451           0 :     U32*   const bt = zc->chainTable;
    1452           0 :     U32    const btLog  = zc->params.cParams.chainLog - 1;
    1453           0 :     U32    const btMask = (1 << btLog) - 1;
    1454           0 :     U32 matchIndex = hashTable[h];
    1455           0 :     size_t commonLengthSmaller=0, commonLengthLarger=0;
    1456           0 :     const BYTE* const base = zc->base;
    1457           0 :     const BYTE* const dictBase = zc->dictBase;
    1458           0 :     const U32 dictLimit = zc->dictLimit;
    1459           0 :     const BYTE* const dictEnd = dictBase + dictLimit;
    1460           0 :     const BYTE* const prefixStart = base + dictLimit;
    1461           0 :     const BYTE* match = base + matchIndex;
    1462           0 :     const U32 current = (U32)(ip-base);
    1463           0 :     const U32 btLow = btMask >= current ? 0 : current - btMask;
    1464           0 :     U32* smallerPtr = bt + 2*(current&btMask);
    1465           0 :     U32* largerPtr  = smallerPtr + 1;
    1466             :     U32 dummy32;   /* to be nullified at the end */
    1467           0 :     U32 const windowLow = zc->lowLimit;
    1468           0 :     U32 matchEndIdx = current+8;
    1469           0 :     size_t bestLength = 8;
    1470             : #ifdef ZSTD_C_PREDICT
    1471             :     U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0);
    1472             :     U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1);
    1473             :     predictedSmall += (predictedSmall>0);
    1474             :     predictedLarge += (predictedLarge>0);
    1475             : #endif /* ZSTD_C_PREDICT */
    1476             : 
    1477           0 :     hashTable[h] = current;   /* Update Hash Table */
    1478             : 
    1479           0 :     while (nbCompares-- && (matchIndex > windowLow)) {
    1480           0 :         U32* nextPtr = bt + 2*(matchIndex & btMask);
    1481           0 :         size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
     1482             : #ifdef ZSTD_C_PREDICT   /* note : can create issues when hashLog is small (<= 11) */
    1483             :         const U32* predictPtr = bt + 2*((matchIndex-1) & btMask);   /* written this way, as bt is a roll buffer */
    1484             :         if (matchIndex == predictedSmall) {
    1485             :             /* no need to check length, result known */
    1486             :             *smallerPtr = matchIndex;
    1487             :             if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
    1488             :             smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
    1489             :             matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
    1490             :             predictedSmall = predictPtr[1] + (predictPtr[1]>0);
    1491             :             continue;
    1492             :         }
    1493             :         if (matchIndex == predictedLarge) {
    1494             :             *largerPtr = matchIndex;
    1495             :             if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
    1496             :             largerPtr = nextPtr;
    1497             :             matchIndex = nextPtr[0];
    1498             :             predictedLarge = predictPtr[0] + (predictPtr[0]>0);
    1499             :             continue;
    1500             :         }
    1501             : #endif
    1502           0 :         if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
    1503           0 :             match = base + matchIndex;
    1504           0 :             if (match[matchLength] == ip[matchLength])
    1505           0 :                 matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iend) +1;
    1506             :         } else {
    1507           0 :             match = dictBase + matchIndex;
    1508           0 :             matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
    1509           0 :             if (matchIndex+matchLength >= dictLimit)
     1510           0 :                 match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
    1511             :         }
    1512             : 
    1513           0 :         if (matchLength > bestLength) {
    1514           0 :             bestLength = matchLength;
    1515           0 :             if (matchLength > matchEndIdx - matchIndex)
    1516           0 :                 matchEndIdx = matchIndex + (U32)matchLength;
    1517             :         }
    1518             : 
     1519           0 :         if (ip+matchLength == iend)   /* equal : no way to know whether the match sorts below or above */
     1520           0 :             break;   /* drop this match, to guarantee consistency ; misses a bit of compression, but other solutions could corrupt the tree */
    1521             : 
    1522           0 :         if (match[matchLength] < ip[matchLength]) {  /* necessarily within correct buffer */
    1523             :             /* match is smaller than current */
    1524           0 :             *smallerPtr = matchIndex;             /* update smaller idx */
    1525           0 :             commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
    1526           0 :             if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
    1527           0 :             smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
    1528           0 :             matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
    1529             :         } else {
    1530             :             /* match is larger than current */
    1531           0 :             *largerPtr = matchIndex;
    1532           0 :             commonLengthLarger = matchLength;
    1533           0 :             if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
    1534           0 :             largerPtr = nextPtr;
    1535           0 :             matchIndex = nextPtr[0];
    1536             :     }   }
    1537             : 
    1538           0 :     *smallerPtr = *largerPtr = 0;
    1539           0 :     if (bestLength > 384) return MIN(192, (U32)(bestLength - 384));   /* speed optimization */
    1540           0 :     if (matchEndIdx > current + 8) return matchEndIdx - current - 8;
    1541           0 :     return 1;
    1542             : }
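/* Tree layout, for reference : `bt` is a rolling buffer of (1<<btLog) node
 * pairs ; the pair at bt[2*(pos & btMask)] holds the roots of the sub-trees of
 * earlier positions whose suffixes sort respectively below and above the
 * suffix at `pos`, so the structure is a binary search tree over suffixes.
 * While descending, commonLengthSmaller / commonLengthLarger track the prefix
 * length already guaranteed on each side, which lets each comparison start at
 * that offset instead of at byte 0. */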
    1543             : 
    1544             : 
    1545           0 : static size_t ZSTD_insertBtAndFindBestMatch (
    1546             :                         ZSTD_CCtx* zc,
    1547             :                         const BYTE* const ip, const BYTE* const iend,
    1548             :                         size_t* offsetPtr,
    1549             :                         U32 nbCompares, const U32 mls,
    1550             :                         U32 extDict)
    1551             : {
    1552           0 :     U32*   const hashTable = zc->hashTable;
    1553           0 :     U32    const hashLog = zc->params.cParams.hashLog;
    1554           0 :     size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
    1555           0 :     U32*   const bt = zc->chainTable;
    1556           0 :     U32    const btLog  = zc->params.cParams.chainLog - 1;
    1557           0 :     U32    const btMask = (1 << btLog) - 1;
    1558           0 :     U32 matchIndex  = hashTable[h];
    1559           0 :     size_t commonLengthSmaller=0, commonLengthLarger=0;
    1560           0 :     const BYTE* const base = zc->base;
    1561           0 :     const BYTE* const dictBase = zc->dictBase;
    1562           0 :     const U32 dictLimit = zc->dictLimit;
    1563           0 :     const BYTE* const dictEnd = dictBase + dictLimit;
    1564           0 :     const BYTE* const prefixStart = base + dictLimit;
    1565           0 :     const U32 current = (U32)(ip-base);
    1566           0 :     const U32 btLow = btMask >= current ? 0 : current - btMask;
    1567           0 :     const U32 windowLow = zc->lowLimit;
    1568           0 :     U32* smallerPtr = bt + 2*(current&btMask);
    1569           0 :     U32* largerPtr  = bt + 2*(current&btMask) + 1;
    1570           0 :     U32 matchEndIdx = current+8;
    1571             :     U32 dummy32;   /* to be nullified at the end */
    1572           0 :     size_t bestLength = 0;
    1573             : 
    1574           0 :     hashTable[h] = current;   /* Update Hash Table */
    1575             : 
    1576           0 :     while (nbCompares-- && (matchIndex > windowLow)) {
    1577           0 :         U32* nextPtr = bt + 2*(matchIndex & btMask);
    1578           0 :         size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
    1579             :         const BYTE* match;
    1580             : 
    1581           0 :         if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
    1582           0 :             match = base + matchIndex;
    1583           0 :             if (match[matchLength] == ip[matchLength])
    1584           0 :                 matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iend) +1;
    1585             :         } else {
    1586           0 :             match = dictBase + matchIndex;
    1587           0 :             matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
    1588           0 :             if (matchIndex+matchLength >= dictLimit)
     1589           0 :                 match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
    1590             :         }
    1591             : 
    1592           0 :         if (matchLength > bestLength) {
    1593           0 :             if (matchLength > matchEndIdx - matchIndex)
    1594           0 :                 matchEndIdx = matchIndex + (U32)matchLength;
    1595           0 :             if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
    1596           0 :                 bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
     1597           0 :             if (ip+matchLength == iend)   /* equal : no way to know whether the match sorts below or above */
     1598           0 :                 break;   /* drop, to guarantee consistency (misses a little compression) */
    1599             :         }
    1600             : 
    1601           0 :         if (match[matchLength] < ip[matchLength]) {
    1602             :             /* match is smaller than current */
    1603           0 :             *smallerPtr = matchIndex;             /* update smaller idx */
    1604           0 :             commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
    1605           0 :             if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
    1606           0 :             smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
    1607           0 :             matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
    1608             :         } else {
    1609             :             /* match is larger than current */
    1610           0 :             *largerPtr = matchIndex;
    1611           0 :             commonLengthLarger = matchLength;
    1612           0 :             if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
    1613           0 :             largerPtr = nextPtr;
    1614           0 :             matchIndex = nextPtr[0];
    1615             :     }   }
    1616             : 
    1617           0 :     *smallerPtr = *largerPtr = 0;
    1618             : 
    1619           0 :     zc->nextToUpdate = (matchEndIdx > current + 8) ? matchEndIdx - 8 : current+1;
    1620           0 :     return bestLength;
    1621             : }
    1622             : 
    1623             : 
    1624           0 : static void ZSTD_updateTree(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls)
    1625             : {
    1626           0 :     const BYTE* const base = zc->base;
    1627           0 :     const U32 target = (U32)(ip - base);
    1628           0 :     U32 idx = zc->nextToUpdate;
    1629             : 
    1630           0 :     while(idx < target)
    1631           0 :         idx += ZSTD_insertBt1(zc, base+idx, mls, iend, nbCompares, 0);
    1632           0 : }
    1633             : 
    1634             : /** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
    1635           0 : static size_t ZSTD_BtFindBestMatch (
    1636             :                         ZSTD_CCtx* zc,
    1637             :                         const BYTE* const ip, const BYTE* const iLimit,
    1638             :                         size_t* offsetPtr,
    1639             :                         const U32 maxNbAttempts, const U32 mls)
    1640             : {
    1641           0 :     if (ip < zc->base + zc->nextToUpdate) return 0;   /* skipped area */
    1642           0 :     ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
    1643           0 :     return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 0);
    1644             : }
    1645             : 
    1646             : 
    1647           0 : static size_t ZSTD_BtFindBestMatch_selectMLS (
    1648             :                         ZSTD_CCtx* zc,   /* Index table will be updated */
    1649             :                         const BYTE* ip, const BYTE* const iLimit,
    1650             :                         size_t* offsetPtr,
    1651             :                         const U32 maxNbAttempts, const U32 matchLengthSearch)
    1652             : {
    1653           0 :     switch(matchLengthSearch)
    1654             :     {
    1655             :     default :
    1656           0 :     case 4 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
    1657           0 :     case 5 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
    1658           0 :     case 6 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
    1659             :     }
    1660             : }
    1661             : 
    1662             : 
    1663           0 : static void ZSTD_updateTree_extDict(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls)
    1664             : {
    1665           0 :     const BYTE* const base = zc->base;
    1666           0 :     const U32 target = (U32)(ip - base);
    1667           0 :     U32 idx = zc->nextToUpdate;
    1668             : 
    1669           0 :     while (idx < target) idx += ZSTD_insertBt1(zc, base+idx, mls, iend, nbCompares, 1);
    1670           0 : }
    1671             : 
    1672             : 
    1673             : /** Tree updater, providing best match */
    1674           0 : static size_t ZSTD_BtFindBestMatch_extDict (
    1675             :                         ZSTD_CCtx* zc,
    1676             :                         const BYTE* const ip, const BYTE* const iLimit,
    1677             :                         size_t* offsetPtr,
    1678             :                         const U32 maxNbAttempts, const U32 mls)
    1679             : {
    1680           0 :     if (ip < zc->base + zc->nextToUpdate) return 0;   /* skipped area */
    1681           0 :     ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
    1682           0 :     return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 1);
    1683             : }
    1684             : 
    1685             : 
    1686           0 : static size_t ZSTD_BtFindBestMatch_selectMLS_extDict (
    1687             :                         ZSTD_CCtx* zc,   /* Index table will be updated */
    1688             :                         const BYTE* ip, const BYTE* const iLimit,
    1689             :                         size_t* offsetPtr,
    1690             :                         const U32 maxNbAttempts, const U32 matchLengthSearch)
    1691             : {
    1692           0 :     switch(matchLengthSearch)
    1693             :     {
    1694             :     default :
    1695           0 :     case 4 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
    1696           0 :     case 5 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
    1697           0 :     case 6 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
    1698             :     }
    1699             : }
    1700             : 
    1701             : 
    1702             : 
    1703             : /* *********************************
    1704             : *  Hash Chain
    1705             : ***********************************/
    1706             : #define NEXT_IN_CHAIN(d, mask)   chainTable[(d) & mask]
    1707             : 
    1708             : /* Update chains up to ip (excluded)
    1709             :    Assumption : always within prefix (i.e. not within extDict) */
    1710             : FORCE_INLINE
    1711             : U32 ZSTD_insertAndFindFirstIndex (ZSTD_CCtx* zc, const BYTE* ip, U32 mls)
    1712             : {
    1713           0 :     U32* const hashTable  = zc->hashTable;
    1714           0 :     const U32 hashLog = zc->params.cParams.hashLog;
    1715           0 :     U32* const chainTable = zc->chainTable;
    1716           0 :     const U32 chainMask = (1 << zc->params.cParams.chainLog) - 1;
    1717           0 :     const BYTE* const base = zc->base;
    1718           0 :     const U32 target = (U32)(ip - base);
    1719           0 :     U32 idx = zc->nextToUpdate;
    1720             : 
    1721           0 :     while(idx < target) { /* catch up */
    1722           0 :         size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
    1723           0 :         NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
    1724           0 :         hashTable[h] = idx;
    1725           0 :         idx++;
    1726             :     }
    1727             : 
    1728           0 :     zc->nextToUpdate = target;
    1729           0 :     return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
    1730             : }
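
ZSTD_insertAndFindFirstIndex() above threads every position into a chained hash: hashTable[h] holds the newest position whose first mls bytes hash to h, and chainTable[pos & chainMask] points back to the previous position in the same bucket, so a search simply walks the chain from newest to oldest. A minimal standalone sketch of that structure, using toy sizes and a toy hash rather than ZSTD_hashPtr():

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define HLOG   10                            /* toy hash table : 1<<10 buckets */
    #define CLOG   12                            /* toy chain table : 1<<12 entries */
    #define CMASK  ((1u << CLOG) - 1)

    static uint32_t hashTable[1 << HLOG];        /* newest position per bucket */
    static uint32_t chainTable[1 << CLOG];       /* previous position with the same hash */

    static uint32_t toyHash(const uint8_t* p)    /* hash of 4 bytes; zstd uses ZSTD_hashPtr() */
    {
        uint32_t v; memcpy(&v, p, sizeof(v));
        return (v * 2654435761u) >> (32 - HLOG);
    }

    /* insert positions [from, to) into the chains, return the newest candidate for position `to` */
    static uint32_t insertAndFindFirstIndex(const uint8_t* base, uint32_t from, uint32_t to)
    {
        uint32_t idx;
        for (idx = from; idx < to; idx++) {
            uint32_t const h = toyHash(base + idx);
            chainTable[idx & CMASK] = hashTable[h];   /* link to the previous head */
            hashTable[h] = idx;                       /* this position becomes the new head */
        }
        return hashTable[toyHash(base + to)];
    }

    int main(void)
    {
        const uint8_t data[] = "abcdabcdabcdXXXXabcd";
        /* candidates for the 4 bytes at position 16 ("abcd") are reached by walking chainTable
           from this head; a real matcher verifies each candidate and stops at a lowLimit */
        printf("first candidate for pos 16 : %u\n", insertAndFindFirstIndex(data, 0, 16));
        return 0;
    }
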
    1731             : 
    1732             : 
    1733             : 
    1734             : FORCE_INLINE /* inlining is important to hardwire a hot branch (template emulation) */
    1735             : size_t ZSTD_HcFindBestMatch_generic (
    1736             :                         ZSTD_CCtx* zc,   /* Index table will be updated */
    1737             :                         const BYTE* const ip, const BYTE* const iLimit,
    1738             :                         size_t* offsetPtr,
    1739             :                         const U32 maxNbAttempts, const U32 mls, const U32 extDict)
    1740             : {
    1741           0 :     U32* const chainTable = zc->chainTable;
    1742           0 :     const U32 chainSize = (1 << zc->params.cParams.chainLog);
    1743           0 :     const U32 chainMask = chainSize-1;
    1744           0 :     const BYTE* const base = zc->base;
    1745           0 :     const BYTE* const dictBase = zc->dictBase;
    1746           0 :     const U32 dictLimit = zc->dictLimit;
    1747           0 :     const BYTE* const prefixStart = base + dictLimit;
    1748           0 :     const BYTE* const dictEnd = dictBase + dictLimit;
    1749           0 :     const U32 lowLimit = zc->lowLimit;
    1750           0 :     const U32 current = (U32)(ip-base);
    1751           0 :     const U32 minChain = current > chainSize ? current - chainSize : 0;
    1752           0 :     int nbAttempts=maxNbAttempts;
    1753           0 :     size_t ml=EQUAL_READ32-1;
    1754             : 
    1755             :     /* HC4 match finder */
    1756           0 :     U32 matchIndex = ZSTD_insertAndFindFirstIndex (zc, ip, mls);
    1757             : 
    1758           0 :     for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) {
    1759             :         const BYTE* match;
    1760           0 :         size_t currentMl=0;
    1761           0 :         if ((!extDict) || matchIndex >= dictLimit) {
    1762           0 :             match = base + matchIndex;
    1763           0 :             if (match[ml] == ip[ml])   /* potentially better */
    1764           0 :                 currentMl = ZSTD_count(ip, match, iLimit);
    1765             :         } else {
    1766           0 :             match = dictBase + matchIndex;
    1767           0 :             if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
    1768           0 :                 currentMl = ZSTD_count_2segments(ip+EQUAL_READ32, match+EQUAL_READ32, iLimit, dictEnd, prefixStart) + EQUAL_READ32;
    1769             :         }
    1770             : 
    1771             :         /* save best solution */
    1772           0 :         if (currentMl > ml) { ml = currentMl; *offsetPtr = current - matchIndex + ZSTD_REP_MOVE; if (ip+currentMl == iLimit) break; /* best possible, and avoid read overflow*/ }
    1773             : 
    1774           0 :         if (matchIndex <= minChain) break;
    1775           0 :         matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
    1776             :     }
    1777             : 
    1778           0 :     return ml;
    1779             : }
    1780             : 
    1781             : 
    1782           0 : FORCE_INLINE size_t ZSTD_HcFindBestMatch_selectMLS (
    1783             :                         ZSTD_CCtx* zc,
    1784             :                         const BYTE* ip, const BYTE* const iLimit,
    1785             :                         size_t* offsetPtr,
    1786             :                         const U32 maxNbAttempts, const U32 matchLengthSearch)
    1787             : {
    1788           0 :     switch(matchLengthSearch)
    1789             :     {
    1790             :     default :
    1791           0 :     case 4 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 0);
    1792           0 :     case 5 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 0);
    1793           0 :     case 6 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 0);
    1794             :     }
    1795             : }
    1796             : 
    1797             : 
    1798           0 : FORCE_INLINE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
    1799             :                         ZSTD_CCtx* zc,
    1800             :                         const BYTE* ip, const BYTE* const iLimit,
    1801             :                         size_t* offsetPtr,
    1802             :                         const U32 maxNbAttempts, const U32 matchLengthSearch)
    1803             : {
    1804           0 :     switch(matchLengthSearch)
    1805             :     {
    1806             :     default :
    1807           0 :     case 4 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 1);
    1808           0 :     case 5 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 1);
    1809           0 :     case 6 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 1);
    1810             :     }
    1811             : }
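
The FORCE_INLINE generic searcher plus the *_selectMLS dispatchers above emulate C++ templates in C: each `case` passes literal constants for `mls` and `extDict`, and because the generic body is force-inlined the compiler folds those constants and emits one specialized copy of the hot loop per case. The same pattern in miniature (hypothetical names, illustration only):

    #include <stddef.h>
    #include <stdio.h>

    #if defined(__GNUC__)
    #  define TOY_FORCE_INLINE static inline __attribute__((always_inline))
    #else
    #  define TOY_FORCE_INLINE static inline
    #endif

    /* generic worker : `step` is a compile-time constant at every call site below */
    TOY_FORCE_INLINE size_t countEvery_generic(size_t len, const size_t step)
    {
        size_t n = 0, i;
        for (i = 0; i < len; i += step) n++;   /* `step` folds into each specialization */
        return n;
    }

    /* dispatcher : the switch selects which specialized copy gets run */
    static size_t countEvery_selectStep(size_t len, size_t step)
    {
        switch (step) {
        default :
        case 1 : return countEvery_generic(len, 1);
        case 2 : return countEvery_generic(len, 2);
        case 4 : return countEvery_generic(len, 4);
        }
    }

    int main(void)
    {
        printf("%zu %zu %zu\n",
               countEvery_selectStep(16, 1),
               countEvery_selectStep(16, 2),
               countEvery_selectStep(16, 4));   /* prints : 16 8 4 */
        return 0;
    }
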
    1812             : 
    1813             : 
    1814             : /* *******************************
    1815             : *  Common parser - lazy strategy
    1816             : *********************************/
    1817             : FORCE_INLINE
    1818             : void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx,
    1819             :                                      const void* src, size_t srcSize,
    1820             :                                      const U32 searchMethod, const U32 depth)
    1821             : {
    1822           0 :     seqStore_t* seqStorePtr = &(ctx->seqStore);
    1823           0 :     const BYTE* const istart = (const BYTE*)src;
    1824           0 :     const BYTE* ip = istart;
    1825           0 :     const BYTE* anchor = istart;
    1826           0 :     const BYTE* const iend = istart + srcSize;
    1827           0 :     const BYTE* const ilimit = iend - 8;
    1828           0 :     const BYTE* const base = ctx->base + ctx->dictLimit;
    1829             : 
    1830           0 :     U32 const maxSearches = 1 << ctx->params.cParams.searchLog;
    1831           0 :     U32 const mls = ctx->params.cParams.searchLength;
    1832             : 
    1833             :     typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit,
    1834             :                         size_t* offsetPtr,
    1835             :                         U32 maxNbAttempts, U32 matchLengthSearch);
    1836           0 :     searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
    1837           0 :     U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1], savedOffset=0;
    1838             : 
    1839             :     /* init */
    1840           0 :     ip += (ip==base);
    1841           0 :     ctx->nextToUpdate3 = ctx->nextToUpdate;
    1842           0 :     {   U32 const maxRep = (U32)(ip-base);
    1843           0 :         if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
    1844           0 :         if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
    1845             :     }
    1846             : 
    1847             :     /* Match Loop */
    1848           0 :     while (ip < ilimit) {
    1849           0 :         size_t matchLength=0;
    1850           0 :         size_t offset=0;
    1851           0 :         const BYTE* start=ip+1;
    1852             : 
    1853             :         /* check repCode */
    1854           0 :         if ((offset_1>0) & (MEM_read32(ip+1) == MEM_read32(ip+1 - offset_1))) {
    1855             :             /* repcode : we take it */
    1856           0 :             matchLength = ZSTD_count(ip+1+EQUAL_READ32, ip+1+EQUAL_READ32-offset_1, iend) + EQUAL_READ32;
    1857             :             if (depth==0) goto _storeSequence;
    1858             :         }
    1859             : 
    1860             :         /* first search (depth 0) */
    1861           0 :         {   size_t offsetFound = 99999999;
    1862           0 :             size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
    1863           0 :             if (ml2 > matchLength)
    1864           0 :                 matchLength = ml2, start = ip, offset=offsetFound;
    1865             :         }
    1866             : 
    1867           0 :         if (matchLength < EQUAL_READ32) {
    1868           0 :             ip += ((ip-anchor) >> g_searchStrength) + 1;   /* jump faster over incompressible sections */
    1869           0 :             continue;
    1870             :         }
    1871             : 
    1872             :         /* let's try to find a better solution */
    1873             :         if (depth>=1)
    1874           0 :         while (ip<ilimit) {
    1875           0 :             ip ++;
    1876           0 :             if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
    1877           0 :                 size_t const mlRep = ZSTD_count(ip+EQUAL_READ32, ip+EQUAL_READ32-offset_1, iend) + EQUAL_READ32;
    1878           0 :                 int const gain2 = (int)(mlRep * 3);
    1879           0 :                 int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
    1880           0 :                 if ((mlRep >= EQUAL_READ32) && (gain2 > gain1))
    1881           0 :                     matchLength = mlRep, offset = 0, start = ip;
    1882             :             }
    1883           0 :             {   size_t offset2=99999999;
    1884           0 :                 size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
    1885           0 :                 int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
    1886           0 :                 int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
    1887           0 :                 if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
    1888           0 :                     matchLength = ml2, offset = offset2, start = ip;
    1889           0 :                     continue;   /* search a better one */
    1890             :             }   }
    1891             : 
    1892             :             /* let's find an even better one */
    1893           0 :             if ((depth==2) && (ip<ilimit)) {
    1894           0 :                 ip ++;
    1895           0 :                 if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
    1896           0 :                     size_t const ml2 = ZSTD_count(ip+EQUAL_READ32, ip+EQUAL_READ32-offset_1, iend) + EQUAL_READ32;
    1897           0 :                     int const gain2 = (int)(ml2 * 4);
    1898           0 :                     int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
    1899           0 :                     if ((ml2 >= EQUAL_READ32) && (gain2 > gain1))
    1900           0 :                         matchLength = ml2, offset = 0, start = ip;
    1901             :                 }
    1902           0 :                 {   size_t offset2=99999999;
    1903           0 :                     size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
    1904           0 :                     int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
    1905           0 :                     int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
    1906           0 :                     if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
    1907           0 :                         matchLength = ml2, offset = offset2, start = ip;
    1908           0 :                         continue;
    1909             :             }   }   }
    1910             :             break;  /* nothing found : store previous solution */
    1911             :         }
    1912             : 
    1913             :         /* catch up */
    1914           0 :         if (offset) {
    1915           0 :             while ((start>anchor) && (start>base+offset-ZSTD_REP_MOVE) && (start[-1] == start[-1-offset+ZSTD_REP_MOVE]))   /* only search for offset within prefix */
    1916           0 :                 { start--; matchLength++; }
    1917           0 :             offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
    1918             :         }
    1919             : 
    1920             :         /* store sequence */
    1921             : _storeSequence:
    1922           0 :         {   size_t const litLength = start - anchor;
    1923           0 :             ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH);
    1924           0 :             anchor = ip = start + matchLength;
    1925             :         }
    1926             : 
    1927             :         /* check immediate repcode */
    1928           0 :         while ( (ip <= ilimit)
    1929           0 :              && ((offset_2>0)
    1930           0 :              & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
    1931             :             /* store sequence */
    1932           0 :             matchLength = ZSTD_count(ip+EQUAL_READ32, ip+EQUAL_READ32-offset_2, iend) + EQUAL_READ32;
    1933           0 :             offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
    1934           0 :             ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength-MINMATCH);
    1935           0 :             ip += matchLength;
    1936           0 :             anchor = ip;
    1937           0 :             continue;   /* faster when present ... (?) */
    1938             :     }   }
    1939             : 
    1940             :     /* Save reps for next block */
    1941           0 :     ctx->savedRep[0] = offset_1 ? offset_1 : savedOffset;
    1942           0 :     ctx->savedRep[1] = offset_2 ? offset_2 : savedOffset;
    1943             : 
    1944             :     /* Last Literals */
    1945           0 :     {   size_t const lastLLSize = iend - anchor;
    1946           0 :         memcpy(seqStorePtr->lit, anchor, lastLLSize);
    1947           0 :         seqStorePtr->lit += lastLLSize;
    1948             :     }
    1949             : }
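
In the depth-1 and depth-2 loops above, a candidate replaces the stored match only when its raw gain, length*4 minus the bit-length of its offset code (ZSTD_highbit32), exceeds the stored gain plus a switching bonus of 4 (depth 1) or 7 (depth 2), so a much closer offset can win even without a longer match. Illustrative numbers only, not taken from a real trace:

    #include <stdio.h>

    int main(void)
    {
        /* stored match : length 6, offset code 1028 ; candidate : length 6, offset code 4 */
        int const gain1 = 6*4 - 10 /* ZSTD_highbit32(1028+1) == 10 */ + 4;   /* = 18 */
        int const gain2 = 6*4 -  2 /* ZSTD_highbit32(4+1)    ==  2 */;       /* = 22 */
        printf("keep stored ? %s\n", (gain2 > gain1) ? "no, switch to candidate" : "yes");
        return 0;
    }
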
    1950             : 
    1951             : 
    1952           0 : static void ZSTD_compressBlock_btlazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
    1953             : {
    1954             :     ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 1, 2);
    1955           0 : }
    1956             : 
    1957           0 : static void ZSTD_compressBlock_lazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
    1958             : {
    1959             :     ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 2);
    1960           0 : }
    1961             : 
    1962           0 : static void ZSTD_compressBlock_lazy(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
    1963             : {
    1964             :     ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 1);
    1965           0 : }
    1966             : 
    1967           0 : static void ZSTD_compressBlock_greedy(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
    1968             : {
    1969             :     ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 0);
    1970           0 : }
    1971             : 
    1972             : 
    1973             : FORCE_INLINE
    1974             : void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx,
    1975             :                                      const void* src, size_t srcSize,
    1976             :                                      const U32 searchMethod, const U32 depth)
    1977             : {
    1978           0 :     seqStore_t* seqStorePtr = &(ctx->seqStore);
    1979           0 :     const BYTE* const istart = (const BYTE*)src;
    1980           0 :     const BYTE* ip = istart;
    1981           0 :     const BYTE* anchor = istart;
    1982           0 :     const BYTE* const iend = istart + srcSize;
    1983           0 :     const BYTE* const ilimit = iend - 8;
    1984           0 :     const BYTE* const base = ctx->base;
    1985           0 :     const U32 dictLimit = ctx->dictLimit;
    1986           0 :     const U32 lowestIndex = ctx->lowLimit;
    1987           0 :     const BYTE* const prefixStart = base + dictLimit;
    1988           0 :     const BYTE* const dictBase = ctx->dictBase;
    1989           0 :     const BYTE* const dictEnd  = dictBase + dictLimit;
    1990           0 :     const BYTE* const dictStart  = dictBase + ctx->lowLimit;
    1991             : 
    1992           0 :     const U32 maxSearches = 1 << ctx->params.cParams.searchLog;
    1993           0 :     const U32 mls = ctx->params.cParams.searchLength;
    1994             : 
    1995             :     typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit,
    1996             :                         size_t* offsetPtr,
    1997             :                         U32 maxNbAttempts, U32 matchLengthSearch);
    1998           0 :     searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS;
    1999             : 
    2000           0 :     U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
    2001             : 
    2002             :     /* init */
    2003           0 :     ctx->nextToUpdate3 = ctx->nextToUpdate;
    2004           0 :     ip += (ip == prefixStart);
    2005             : 
    2006             :     /* Match Loop */
    2007           0 :     while (ip < ilimit) {
    2008           0 :         size_t matchLength=0;
    2009           0 :         size_t offset=0;
    2010           0 :         const BYTE* start=ip+1;
    2011           0 :         U32 current = (U32)(ip-base);
    2012             : 
    2013             :         /* check repCode */
    2014           0 :         {   const U32 repIndex = (U32)(current+1 - offset_1);
    2015           0 :             const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
    2016           0 :             const BYTE* const repMatch = repBase + repIndex;
    2017           0 :             if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))   /* intentional overflow */
    2018           0 :             if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
    2019             :                 /* repcode detected : we should take it */
    2020           0 :                 const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
    2021           0 :                 matchLength = ZSTD_count_2segments(ip+1+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
    2022             :                 if (depth==0) goto _storeSequence;
    2023             :         }   }
    2024             : 
    2025             :         /* first search (depth 0) */
    2026           0 :         {   size_t offsetFound = 99999999;
    2027           0 :             size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
    2028           0 :             if (ml2 > matchLength)
    2029           0 :                 matchLength = ml2, start = ip, offset=offsetFound;
    2030             :         }
    2031             : 
    2032           0 :         if (matchLength < EQUAL_READ32) {
    2033           0 :             ip += ((ip-anchor) >> g_searchStrength) + 1;   /* jump faster over incompressible sections */
    2034           0 :             continue;
    2035             :         }
    2036             : 
    2037             :         /* let's try to find a better solution */
    2038             :         if (depth>=1)
    2039           0 :         while (ip<ilimit) {
    2040           0 :             ip ++;
    2041           0 :             current++;
    2042             :             /* check repCode */
    2043           0 :             if (offset) {
    2044           0 :                 const U32 repIndex = (U32)(current - offset_1);
    2045           0 :                 const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
    2046           0 :                 const BYTE* const repMatch = repBase + repIndex;
    2047           0 :                 if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */
    2048           0 :                 if (MEM_read32(ip) == MEM_read32(repMatch)) {
    2049             :                     /* repcode detected */
    2050           0 :                     const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
    2051           0 :                     size_t const repLength = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
    2052           0 :                     int const gain2 = (int)(repLength * 3);
    2053           0 :                     int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
    2054           0 :                     if ((repLength >= EQUAL_READ32) && (gain2 > gain1))
    2055           0 :                         matchLength = repLength, offset = 0, start = ip;
    2056             :             }   }
    2057             : 
    2058             :             /* search match, depth 1 */
    2059           0 :             {   size_t offset2=99999999;
    2060           0 :                 size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
    2061           0 :                 int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
    2062           0 :                 int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
    2063           0 :                 if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
    2064           0 :                     matchLength = ml2, offset = offset2, start = ip;
    2065           0 :                     continue;   /* search a better one */
    2066             :             }   }
    2067             : 
    2068             :             /* let's find an even better one */
    2069           0 :             if ((depth==2) && (ip<ilimit)) {
    2070           0 :                 ip ++;
    2071           0 :                 current++;
    2072             :                 /* check repCode */
    2073           0 :                 if (offset) {
    2074           0 :                     const U32 repIndex = (U32)(current - offset_1);
    2075           0 :                     const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
    2076           0 :                     const BYTE* const repMatch = repBase + repIndex;
    2077           0 :                     if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */
    2078           0 :                     if (MEM_read32(ip) == MEM_read32(repMatch)) {
    2079             :                         /* repcode detected */
    2080           0 :                         const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
    2081           0 :                         size_t repLength = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
    2082           0 :                         int gain2 = (int)(repLength * 4);
    2083           0 :                         int gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
    2084           0 :                         if ((repLength >= EQUAL_READ32) && (gain2 > gain1))
    2085           0 :                             matchLength = repLength, offset = 0, start = ip;
    2086             :                 }   }
    2087             : 
    2088             :                 /* search match, depth 2 */
    2089           0 :                 {   size_t offset2=99999999;
    2090           0 :                     size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
    2091           0 :                     int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
    2092           0 :                     int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
    2093           0 :                     if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
    2094           0 :                         matchLength = ml2, offset = offset2, start = ip;
    2095           0 :                         continue;
    2096             :             }   }   }
    2097             :             break;  /* nothing found : store previous solution */
    2098             :         }
    2099             : 
    2100             :         /* catch up */
    2101           0 :         if (offset) {
    2102           0 :             U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
    2103           0 :             const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
    2104           0 :             const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
    2105           0 :             while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }  /* catch up */
    2106           0 :             offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
    2107             :         }
    2108             : 
    2109             :         /* store sequence */
    2110             : _storeSequence:
    2111           0 :         {   size_t const litLength = start - anchor;
    2112           0 :             ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH);
    2113           0 :             anchor = ip = start + matchLength;
    2114             :         }
    2115             : 
    2116             :         /* check immediate repcode */
    2117           0 :         while (ip <= ilimit) {
    2118           0 :             const U32 repIndex = (U32)((ip-base) - offset_2);
    2119           0 :             const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
    2120           0 :             const BYTE* const repMatch = repBase + repIndex;
    2121           0 :             if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */
    2122           0 :             if (MEM_read32(ip) == MEM_read32(repMatch)) {
    2123             :                 /* repcode detected : we should take it */
    2124           0 :                 const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
    2125           0 :                 matchLength = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
    2126           0 :                 offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset history */
    2127           0 :                 ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength-MINMATCH);
    2128           0 :                 ip += matchLength;
    2129           0 :                 anchor = ip;
    2130           0 :                 continue;   /* faster when present ... (?) */
    2131             :             }
    2132             :             break;
    2133             :     }   }
    2134             : 
    2135             :     /* Save reps for next block */
    2136           0 :     ctx->savedRep[0] = offset_1; ctx->savedRep[1] = offset_2;
    2137             : 
    2138             :     /* Last Literals */
    2139           0 :     {   size_t const lastLLSize = iend - anchor;
    2140           0 :         memcpy(seqStorePtr->lit, anchor, lastLLSize);
    2141           0 :         seqStorePtr->lit += lastLLSize;
    2142             :     }
    2143             : }
    2144             : 
    2145             : 
    2146           0 : void ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
    2147             : {
    2148             :     ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 0);
    2149           0 : }
    2150             : 
    2151           0 : static void ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
    2152             : {
    2153             :     ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 1);
    2154           0 : }
    2155             : 
    2156           0 : static void ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
    2157             : {
    2158             :     ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 2);
    2159           0 : }
    2160             : 
    2161           0 : static void ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
    2162             : {
    2163             :     ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 1, 2);
    2164           0 : }
    2165             : 
    2166             : 
    2167             : /* The optimal parser */
    2168             : #include "zstd_opt.h"
    2169             : 
    2170           0 : static void ZSTD_compressBlock_btopt(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
    2171             : {
    2172             : #ifdef ZSTD_OPT_H_91842398743
    2173             :     ZSTD_compressBlock_opt_generic(ctx, src, srcSize);
    2174             : #else
    2175             :     (void)ctx; (void)src; (void)srcSize;
    2176             :     return;
    2177             : #endif
    2178           0 : }
    2179             : 
    2180           0 : static void ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
    2181             : {
    2182             : #ifdef ZSTD_OPT_H_91842398743
    2183             :     ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize);
    2184             : #else
    2185             :     (void)ctx; (void)src; (void)srcSize;
    2186             :     return;
    2187             : #endif
    2188           0 : }
    2189             : 
    2190             : 
    2191             : typedef void (*ZSTD_blockCompressor) (ZSTD_CCtx* ctx, const void* src, size_t srcSize);
    2192             : 
    2193        3326 : static ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict)
    2194             : {
    2195             :     static const ZSTD_blockCompressor blockCompressor[2][7] = {
    2196             :         { ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy, ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2, ZSTD_compressBlock_btlazy2, ZSTD_compressBlock_btopt },
    2197             :         { ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict, ZSTD_compressBlock_lazy_extDict,ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict, ZSTD_compressBlock_btopt_extDict }
    2198             :     };
    2199             : 
    2200        3326 :     return blockCompressor[extDict][(U32)strat];
    2201             : }
    2202             : 
    2203             : 
    2204        3326 : static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
    2205             : {
    2206        3326 :     ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->params.cParams.strategy, zc->lowLimit < zc->dictLimit);
    2207        3326 :     const BYTE* const base = zc->base;
    2208        3326 :     const BYTE* const istart = (const BYTE*)src;
    2209        3326 :     const U32 current = (U32)(istart-base);
    2210        3326 :     if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) return 0;   /* don't even attempt compression below a certain srcSize */
    2211        3326 :     ZSTD_resetSeqStore(&(zc->seqStore));
    2212        3326 :     if (current > zc->nextToUpdate + 384)
    2213        3252 :         zc->nextToUpdate = current - MIN(192, (U32)(current - zc->nextToUpdate - 384));   /* update tree not updated after finding very long rep matches */
    2214        3326 :     blockCompressor(zc, src, srcSize);
    2215        3326 :     return ZSTD_compressSequences(zc, dst, dstCapacity, srcSize);
    2216             : }
    2217             : 
    2218             : 
    2219             : /*! ZSTD_compress_generic() :
    2220             : *   Compress a chunk of data into one or multiple blocks.
    2221             : *   All blocks will be terminated, all input will be consumed.
    2222             : *   Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
    2223             : *   Frame is supposed already started (header already produced)
    2224             : *   Frame is assumed to be already started (header already produced)
    2225             : */
    2226          74 : static size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
    2227             :                                      void* dst, size_t dstCapacity,
    2228             :                                const void* src, size_t srcSize,
    2229             :                                      U32 lastFrameChunk)
    2230             : {
    2231          74 :     size_t blockSize = cctx->blockSize;
    2232          74 :     size_t remaining = srcSize;
    2233          74 :     const BYTE* ip = (const BYTE*)src;
    2234          74 :     BYTE* const ostart = (BYTE*)dst;
    2235          74 :     BYTE* op = ostart;
    2236          74 :     U32 const maxDist = 1 << cctx->params.cParams.windowLog;
    2237             : 
    2238          74 :     if (cctx->params.fParams.checksumFlag)
    2239           0 :         XXH64_update(&cctx->xxhState, src, srcSize);
    2240             : 
    2241        3474 :     while (remaining) {
    2242        3326 :         U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
    2243             :         size_t cSize;
    2244             : 
    2245        3326 :         if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE) return ERROR(dstSize_tooSmall);   /* not enough space to store compressed block */
    2246        3326 :         if (remaining < blockSize) blockSize = remaining;
    2247             : 
    2248             :         /* preemptive overflow correction */
    2249        3326 :         if (cctx->lowLimit > (1<<30)) {
    2250           0 :             U32 const btplus = (cctx->params.cParams.strategy == ZSTD_btlazy2) | (cctx->params.cParams.strategy == ZSTD_btopt);
    2251           0 :             U32 const chainMask = (1 << (cctx->params.cParams.chainLog - btplus)) - 1;
    2252           0 :             U32 const supLog = MAX(cctx->params.cParams.chainLog, 17 /* blockSize */);
    2253           0 :             U32 const newLowLimit = (cctx->lowLimit & chainMask) + (1 << supLog);   /* preserve position % chainSize, ensure current-repcode doesn't underflow */
    2254           0 :             U32 const correction = cctx->lowLimit - newLowLimit;
    2255           0 :             ZSTD_reduceIndex(cctx, correction);
    2256           0 :             cctx->base += correction;
    2257           0 :             cctx->dictBase += correction;
    2258           0 :             cctx->lowLimit = newLowLimit;
    2259           0 :             cctx->dictLimit -= correction;
    2260           0 :             if (cctx->nextToUpdate < correction) cctx->nextToUpdate = 0;
    2261           0 :             else cctx->nextToUpdate -= correction;
    2262             :         }
    2263             : 
    2264        3326 :         if ((U32)(ip+blockSize - cctx->base) > cctx->loadedDictEnd + maxDist) {
    2265             :             /* enforce maxDist */
    2266        3076 :             U32 const newLowLimit = (U32)(ip+blockSize - cctx->base) - maxDist;
    2267        3076 :             if (cctx->lowLimit < newLowLimit) cctx->lowLimit = newLowLimit;
    2268        3076 :             if (cctx->dictLimit < cctx->lowLimit) cctx->dictLimit = cctx->lowLimit;
    2269             :         }
    2270             : 
    2271        3326 :         cSize = ZSTD_compressBlock_internal(cctx, op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize, ip, blockSize);
    2272        3326 :         if (ZSTD_isError(cSize)) return cSize;
    2273             : 
    2274        3326 :         if (cSize == 0) {  /* block is not compressible */
    2275        1280 :             U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(blockSize << 3);
    2276        1280 :             if (blockSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall);
    2277        1280 :             MEM_writeLE32(op, cBlockHeader24);   /* no problem : the 4th byte will be overwritten */
    2278        1280 :             memcpy(op + ZSTD_blockHeaderSize, ip, blockSize);
    2279        1280 :             cSize = ZSTD_blockHeaderSize+blockSize;
    2280             :         } else {
    2281        2046 :             U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
    2282        2046 :             MEM_writeLE24(op, cBlockHeader24);
    2283        2046 :             cSize += ZSTD_blockHeaderSize;
    2284             :         }
    2285             : 
    2286        3326 :         remaining -= blockSize;
    2287        3326 :         dstCapacity -= cSize;
    2288        3326 :         ip += blockSize;
    2289        3326 :         op += cSize;
    2290             :     }
    2291             : 
    2292          74 :     if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
    2293          74 :     return op-ostart;
    2294             : }
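
Every block written by ZSTD_compress_generic() is preceded by the 3-byte little-endian header assembled in cBlockHeader24 above: bit 0 is the last-block flag, bits 1-2 the block type (bt_raw, bt_rle, bt_compressed) and bits 3-23 the block size. A small sketch of that packing and its inverse:

    #include <stdint.h>
    #include <stdio.h>

    /* Block header (3 bytes, little-endian) :
       bit 0     : last-block flag
       bits 1-2  : block type (0 = raw, 1 = RLE, 2 = compressed)
       bits 3-23 : block size */
    static uint32_t packBlockHeader(uint32_t lastBlock, uint32_t blockType, uint32_t blockSize)
    {
        return lastBlock + (blockType << 1) + (blockSize << 3);
    }

    static void writeLE24(uint8_t* p, uint32_t v)
    {
        p[0] = (uint8_t)v; p[1] = (uint8_t)(v >> 8); p[2] = (uint8_t)(v >> 16);
    }

    int main(void)
    {
        uint8_t hdr[3];
        writeLE24(hdr, packBlockHeader(1 /*last*/, 2 /*compressed*/, 12345 /*cSize*/));

        {   uint32_t const v = (uint32_t)hdr[0] + ((uint32_t)hdr[1] << 8) + ((uint32_t)hdr[2] << 16);
            printf("last=%u type=%u size=%u\n", v & 1, (v >> 1) & 3, v >> 3);   /* 1 2 12345 */
        }
        return 0;
    }
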
    2295             : 
    2296             : 
    2297          74 : static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
    2298             :                                     ZSTD_parameters params, U64 pledgedSrcSize, U32 dictID)
    2299          74 : {   BYTE* const op = (BYTE*)dst;
    2300          74 :     U32   const dictIDSizeCode = (dictID>0) + (dictID>=256) + (dictID>=65536);   /* 0-3 */
    2301          74 :     U32   const checksumFlag = params.fParams.checksumFlag>0;
    2302          74 :     U32   const windowSize = 1U << params.cParams.windowLog;
    2303          74 :     U32   const singleSegment = params.fParams.contentSizeFlag && (windowSize > (pledgedSrcSize-1));
    2304          74 :     BYTE  const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
    2305         148 :     U32   const fcsCode = params.fParams.contentSizeFlag ?
    2306          74 :                      (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) :   /* 0-3 */
    2307             :                       0;
    2308          74 :     BYTE  const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
    2309             :     size_t pos;
    2310             : 
    2311          74 :     if (dstCapacity < ZSTD_frameHeaderSize_max) return ERROR(dstSize_tooSmall);
    2312             : 
    2313          74 :     MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
    2314          74 :     op[4] = frameHeaderDescriptionByte; pos=5;
    2315          74 :     if (!singleSegment) op[pos++] = windowLogByte;
    2316          74 :     switch(dictIDSizeCode)
    2317             :     {
    2318             :         default:   /* impossible */
    2319          74 :         case 0 : break;
    2320           0 :         case 1 : op[pos] = (BYTE)(dictID); pos++; break;
    2321           0 :         case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
    2322           0 :         case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
    2323             :     }
    2324          74 :     switch(fcsCode)
    2325             :     {
    2326             :         default:   /* impossible */
    2327           0 :         case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
    2328           8 :         case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
    2329          66 :         case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
    2330           0 :         case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
    2331             :     }
    2332          74 :     return pos;
    2333             : }
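
ZSTD_writeFrameHeader() packs the frame options into the single descriptor byte built above: bits 0-1 carry the dictionary-ID size code, bit 2 the checksum flag, bit 5 the single-segment flag and bits 6-7 the frame-content-size code; the optional window byte, dictionary ID and content-size fields follow. A compact decoder for that byte:

    #include <stdint.h>
    #include <stdio.h>

    /* frame header descriptor byte, as assembled by ZSTD_writeFrameHeader() above */
    static void describeFHD(uint8_t fhd)
    {
        unsigned const dictIDSizeCode = fhd & 3;          /* 0, 1, 2 or 4 bytes of dictionary ID */
        unsigned const checksumFlag   = (fhd >> 2) & 1;   /* 4-byte checksum appended after the frame */
        unsigned const singleSegment  = (fhd >> 5) & 1;   /* window byte omitted, content in one segment */
        unsigned const fcsCode        = (fhd >> 6) & 3;   /* 0/1/2/3 -> 0-or-1, 2, 4, 8 content-size bytes */
        printf("dictID code %u, checksum %u, singleSegment %u, fcs code %u\n",
               dictIDSizeCode, checksumFlag, singleSegment, fcsCode);
    }

    int main(void)
    {
        describeFHD((uint8_t)((2u << 6) | (0u << 5) | (0u << 2) | 0u));   /* 4-byte content size, no dict, no checksum */
        return 0;
    }
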
    2334             : 
    2335             : 
    2336          74 : static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
    2337             :                               void* dst, size_t dstCapacity,
    2338             :                         const void* src, size_t srcSize,
    2339             :                                U32 frame, U32 lastFrameChunk)
    2340             : {
    2341          74 :     const BYTE* const ip = (const BYTE*) src;
    2342          74 :     size_t fhSize = 0;
    2343             : 
    2344          74 :     if (cctx->stage==ZSTDcs_created) return ERROR(stage_wrong);   /* missing init (ZSTD_compressBegin) */
    2345             : 
    2346          74 :     if (frame && (cctx->stage==ZSTDcs_init)) {
    2347          74 :         fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, cctx->frameContentSize, cctx->dictID);
    2348          74 :         if (ZSTD_isError(fhSize)) return fhSize;
    2349          74 :         dstCapacity -= fhSize;
    2350          74 :         dst = (char*)dst + fhSize;
    2351          74 :         cctx->stage = ZSTDcs_ongoing;
    2352             :     }
    2353             : 
    2354             :     /* Check if blocks follow each other */
    2355          74 :     if (src != cctx->nextSrc) {
    2356             :         /* not contiguous */
    2357          74 :         ptrdiff_t const delta = cctx->nextSrc - ip;
    2358          74 :         cctx->lowLimit = cctx->dictLimit;
    2359          74 :         cctx->dictLimit = (U32)(cctx->nextSrc - cctx->base);
    2360          74 :         cctx->dictBase = cctx->base;
    2361          74 :         cctx->base -= delta;
    2362          74 :         cctx->nextToUpdate = cctx->dictLimit;
    2363          74 :         if (cctx->dictLimit - cctx->lowLimit < HASH_READ_SIZE) cctx->lowLimit = cctx->dictLimit;   /* too small extDict */
    2364             :     }
    2365             : 
    2366             :     /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
    2367          74 :     if ((ip+srcSize > cctx->dictBase + cctx->lowLimit) & (ip < cctx->dictBase + cctx->dictLimit)) {
    2368           0 :         ptrdiff_t const highInputIdx = (ip + srcSize) - cctx->dictBase;
    2369           0 :         U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)cctx->dictLimit) ? cctx->dictLimit : (U32)highInputIdx;
    2370           0 :         cctx->lowLimit = lowLimitMax;
    2371             :     }
    2372             : 
    2373          74 :     cctx->nextSrc = ip + srcSize;
    2374             : 
    2375          74 :     {   size_t const cSize = frame ?
    2376          74 :                              ZSTD_compress_generic (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
    2377             :                              ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize);
    2378          74 :         if (ZSTD_isError(cSize)) return cSize;
    2379          74 :         return cSize + fhSize;
    2380             :     }
    2381             : }
    2382             : 
    2383             : 
    2384           0 : size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
    2385             :                               void* dst, size_t dstCapacity,
    2386             :                         const void* src, size_t srcSize)
    2387             : {
    2388           0 :     return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 0);
    2389             : }
    2390             : 
    2391             : 
    2392           0 : size_t ZSTD_getBlockSizeMax(ZSTD_CCtx* cctx)
    2393             : {
    2394           0 :     return MIN (ZSTD_BLOCKSIZE_ABSOLUTEMAX, 1 << cctx->params.cParams.windowLog);
    2395             : }
    2396             : 
    2397           0 : size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
    2398             : {
    2399           0 :     size_t const blockSizeMax = ZSTD_getBlockSizeMax(cctx);
    2400           0 :     if (srcSize > blockSizeMax) return ERROR(srcSize_wrong);
    2401           0 :     return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0, 0);
    2402             : }
    2403             : 
    2404             : 
    2405           0 : static size_t ZSTD_loadDictionaryContent(ZSTD_CCtx* zc, const void* src, size_t srcSize)
    2406             : {
    2407           0 :     const BYTE* const ip = (const BYTE*) src;
    2408           0 :     const BYTE* const iend = ip + srcSize;
    2409             : 
    2410             :     /* input becomes current prefix */
    2411           0 :     zc->lowLimit = zc->dictLimit;
    2412           0 :     zc->dictLimit = (U32)(zc->nextSrc - zc->base);
    2413           0 :     zc->dictBase = zc->base;
    2414           0 :     zc->base += ip - zc->nextSrc;
    2415           0 :     zc->nextToUpdate = zc->dictLimit;
    2416           0 :     zc->loadedDictEnd = (U32)(iend - zc->base);
    2417             : 
    2418           0 :     zc->nextSrc = iend;
    2419           0 :     if (srcSize <= HASH_READ_SIZE) return 0;
    2420             : 
    2421           0 :     switch(zc->params.cParams.strategy)
    2422             :     {
    2423             :     case ZSTD_fast:
    2424           0 :         ZSTD_fillHashTable (zc, iend, zc->params.cParams.searchLength);
    2425           0 :         break;
    2426             : 
    2427             :     case ZSTD_dfast:
    2428           0 :         ZSTD_fillDoubleHashTable (zc, iend, zc->params.cParams.searchLength);
    2429           0 :         break;
    2430             : 
    2431             :     case ZSTD_greedy:
    2432             :     case ZSTD_lazy:
    2433             :     case ZSTD_lazy2:
    2434           0 :         ZSTD_insertAndFindFirstIndex (zc, iend-HASH_READ_SIZE, zc->params.cParams.searchLength);
    2435           0 :         break;
    2436             : 
    2437             :     case ZSTD_btlazy2:
    2438             :     case ZSTD_btopt:
    2439           0 :         ZSTD_updateTree(zc, iend-HASH_READ_SIZE, iend, 1 << zc->params.cParams.searchLog, zc->params.cParams.searchLength);
    2440           0 :         break;
    2441             : 
    2442             :     default:
    2443           0 :         return ERROR(GENERIC);   /* strategy doesn't exist; impossible */
    2444             :     }
    2445             : 
    2446           0 :     zc->nextToUpdate = zc->loadedDictEnd;
    2447           0 :     return 0;
    2448             : }
    2449             : 
    2450             : 
    2451             : /* Dictionary format :
    2452             :      Magic == ZSTD_DICT_MAGIC (4 bytes)
    2453             :      HUF_writeCTable(256)
    2454             :      FSE_writeNCount(off)
    2455             :      FSE_writeNCount(ml)
    2456             :      FSE_writeNCount(ll)
    2457             :      RepOffsets
    2458             :      Dictionary content
    2459             : */
    2460             : /*! ZSTD_loadDictEntropyStats() :
    2461             :     @return : size read from dictionary
    2462             :     note : magic number is assumed to be already checked */
    2463           0 : static size_t ZSTD_loadDictEntropyStats(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
    2464             : {
    2465           0 :     const BYTE* dictPtr = (const BYTE*)dict;
    2466           0 :     const BYTE* const dictEnd = dictPtr + dictSize;
    2467             : 
    2468           0 :     {   size_t const hufHeaderSize = HUF_readCTable(cctx->hufTable, 255, dict, dictSize);
    2469           0 :         if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted);
    2470           0 :         dictPtr += hufHeaderSize;
    2471             :     }
    2472             : 
    2473             :     {   short offcodeNCount[MaxOff+1];
    2474           0 :         unsigned offcodeMaxValue = MaxOff, offcodeLog = OffFSELog;
    2475           0 :         size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
    2476           0 :         if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
    2477           0 :         CHECK_E (FSE_buildCTable(cctx->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog), dictionary_corrupted);
    2478           0 :         dictPtr += offcodeHeaderSize;
    2479             :     }
    2480             : 
    2481             :     {   short matchlengthNCount[MaxML+1];
    2482           0 :         unsigned matchlengthMaxValue = MaxML, matchlengthLog = MLFSELog;
    2483           0 :         size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
    2484           0 :         if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
    2485           0 :         CHECK_E (FSE_buildCTable(cctx->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog), dictionary_corrupted);
    2486           0 :         dictPtr += matchlengthHeaderSize;
    2487             :     }
    2488             : 
    2489             :     {   short litlengthNCount[MaxLL+1];
    2490           0 :         unsigned litlengthMaxValue = MaxLL, litlengthLog = LLFSELog;
    2491           0 :         size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
    2492           0 :         if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
    2493           0 :         CHECK_E(FSE_buildCTable(cctx->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog), dictionary_corrupted);
    2494           0 :         dictPtr += litlengthHeaderSize;
    2495             :     }
    2496             : 
    2497           0 :     if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
    2498           0 :     cctx->rep[0] = MEM_readLE32(dictPtr+0); if (cctx->rep[0] >= dictSize) return ERROR(dictionary_corrupted);
    2499           0 :     cctx->rep[1] = MEM_readLE32(dictPtr+4); if (cctx->rep[1] >= dictSize) return ERROR(dictionary_corrupted);
    2500           0 :     cctx->rep[2] = MEM_readLE32(dictPtr+8); if (cctx->rep[2] >= dictSize) return ERROR(dictionary_corrupted);
    2501           0 :     dictPtr += 12;
    2502             : 
    2503           0 :     cctx->flagStaticTables = 1;
    2504           0 :     return dictPtr - (const BYTE*)dict;
    2505             : }
    2506             : 
    2507             : /** ZSTD_compress_insertDictionary() :
    2508             : *   @return : 0, or an error code */
    2509          74 : static size_t ZSTD_compress_insertDictionary(ZSTD_CCtx* zc, const void* dict, size_t dictSize)
    2510             : {
    2511          74 :     if ((dict==NULL) || (dictSize<=8)) return 0;
    2512             : 
    2513             :     /* default : dict is pure content */
    2514           0 :     if (MEM_readLE32(dict) != ZSTD_DICT_MAGIC) return ZSTD_loadDictionaryContent(zc, dict, dictSize);
    2515           0 :     zc->dictID = zc->params.fParams.noDictIDFlag ? 0 :  MEM_readLE32((const char*)dict+4);
    2516             : 
    2517             :     /* known magic number : dict is parsed for entropy stats and content */
    2518           0 :     {   size_t const loadError = ZSTD_loadDictEntropyStats(zc, (const char*)dict+8 /* skip dictHeader */, dictSize-8);
    2519           0 :         size_t const eSize = loadError + 8;
    2520           0 :         if (ZSTD_isError(loadError)) return loadError;
    2521           0 :         return ZSTD_loadDictionaryContent(zc, (const char*)dict+eSize, dictSize-eSize);
    2522             :     }
    2523             : }
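
ZSTD_compress_insertDictionary() above keys on the leading magic number: a blob of 8 bytes or less is ignored, anything that does not start with ZSTD_DICT_MAGIC is loaded as raw content, and otherwise the 4-byte dictionary ID sits right after the magic with the entropy tables following at offset 8. A small classification sketch; the magic value 0xEC30A437 is an assumption about what ZSTD_DICT_MAGIC expands to elsewhere in this codebase:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    #define TOY_DICT_MAGIC 0xEC30A437u   /* assumed value of ZSTD_DICT_MAGIC */

    static uint32_t readLE32(const uint8_t* p)
    {
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    /* returns the dictionary ID of a structured dictionary, 0 for raw content or tiny inputs */
    static uint32_t classifyDict(const uint8_t* dict, size_t dictSize)
    {
        if (dict == NULL || dictSize <= 8) return 0;     /* treated as "no dictionary" */
        if (readLE32(dict) != TOY_DICT_MAGIC) return 0;  /* raw-content dictionary */
        return readLE32(dict + 4);                       /* structured : ID here, entropy tables at +8 */
    }

    int main(void)
    {
        const uint8_t raw[16]  = "just some bytes";
        const uint8_t head[12] = { 0x37, 0xA4, 0x30, 0xEC,  0x2A, 0x00, 0x00, 0x00,  0, 0, 0, 0 };
        printf("raw -> %u, structured -> %u\n",
               classifyDict(raw, sizeof raw), classifyDict(head, sizeof head));   /* raw -> 0, structured -> 42 */
        return 0;
    }
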
    2524             : 
    2525             : 
    2526             : /*! ZSTD_compressBegin_internal() :
    2527             : *   @return : 0, or an error code */
    2528          74 : static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
    2529             :                              const void* dict, size_t dictSize,
    2530             :                                    ZSTD_parameters params, U64 pledgedSrcSize)
    2531             : {
    2532          74 :     ZSTD_compResetPolicy_e const crp = dictSize ? ZSTDcrp_fullReset : ZSTDcrp_continue;
    2533          74 :     CHECK_F(ZSTD_resetCCtx_advanced(cctx, params, pledgedSrcSize, crp));
    2534          74 :     return ZSTD_compress_insertDictionary(cctx, dict, dictSize);
    2535             : }
    2536             : 
    2537             : 
    2538             : /*! ZSTD_compressBegin_advanced() :
    2539             : *   @return : 0, or an error code */
    2540           0 : size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
    2541             :                              const void* dict, size_t dictSize,
    2542             :                                    ZSTD_parameters params, unsigned long long pledgedSrcSize)
    2543             : {
    2544             :     /* compression parameters verification and optimization */
    2545           0 :     CHECK_F(ZSTD_checkCParams(params.cParams));
    2546           0 :     return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, pledgedSrcSize);
    2547             : }
    2548             : 
    2549             : 
    2550           0 : size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
    2551             : {
    2552           0 :     ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize);
    2553           0 :     return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, 0);
    2554             : }
    2555             : 
    2556             : 
    2557           0 : size_t ZSTD_compressBegin(ZSTD_CCtx* zc, int compressionLevel)
    2558             : {
    2559           0 :     return ZSTD_compressBegin_usingDict(zc, NULL, 0, compressionLevel);
    2560             : }
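
/* A minimal sketch of driving the buffer-less API declared above
 * (ZSTD_compressBegin / ZSTD_compressContinue / ZSTD_compressEnd), which in
 * zstd.h of this vintage sits behind ZSTD_STATIC_LINKING_ONLY.  The chunks
 * are consecutive slices of one contiguous source buffer, so the already
 * compressed prefix stays addressable as the functions above require.  The
 * helper name and the 128 KB chunk size are arbitrary choices. */
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

static size_t compress_in_chunks(void* dst, size_t dstCapacity,
                                 const void* src, size_t srcSize, int level)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    const char* ip = (const char*)src;
    char* op = (char*)dst;
    size_t const chunkSize = 128 * 1024;   /* arbitrary demo value */
    size_t remaining = srcSize;
    size_t result;

    if (cctx == NULL) return 0;            /* sketch: report "nothing written" */
    result = ZSTD_compressBegin(cctx, level);
    while (!ZSTD_isError(result) && remaining > chunkSize) {
        /* each call emits one or more complete blocks into dst */
        result = ZSTD_compressContinue(cctx, op,
                                       dstCapacity - (size_t)(op - (char*)dst),
                                       ip, chunkSize);
        if (!ZSTD_isError(result)) { op += result; ip += chunkSize; remaining -= chunkSize; }
    }
    if (!ZSTD_isError(result)) {
        /* last chunk plus epilogue (ZSTD_writeEpilogue) in one call */
        result = ZSTD_compressEnd(cctx, op,
                                  dstCapacity - (size_t)(op - (char*)dst),
                                  ip, remaining);
        if (!ZSTD_isError(result)) op += result;
    }
    ZSTD_freeCCtx(cctx);
    return ZSTD_isError(result) ? result : (size_t)(op - (char*)dst);
}
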
    2561             : 
    2562             : 
    2563             : /*! ZSTD_writeEpilogue() :
    2564             : *   Ends a frame.
    2565             : *   @return : nb of bytes written into dst (or an error code) */
    2566          74 : static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
    2567             : {
    2568          74 :     BYTE* const ostart = (BYTE*)dst;
    2569          74 :     BYTE* op = ostart;
    2570          74 :     size_t fhSize = 0;
    2571             : 
    2572          74 :     if (cctx->stage == ZSTDcs_created) return ERROR(stage_wrong);  /* init missing */
    2573             : 
    2574             :     /* special case : empty frame */
    2575          74 :     if (cctx->stage == ZSTDcs_init) {
    2576           0 :         fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, 0, 0);
    2577           0 :         if (ZSTD_isError(fhSize)) return fhSize;
    2578           0 :         dstCapacity -= fhSize;
    2579           0 :         op += fhSize;
    2580           0 :         cctx->stage = ZSTDcs_ongoing;
    2581             :     }
    2582             : 
    2583          74 :     if (cctx->stage != ZSTDcs_ending) {
    2584             :         /* write one last empty block, make it the "last" block */
    2585           0 :         U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
    2586           0 :         if (dstCapacity<4) return ERROR(dstSize_tooSmall);
    2587           0 :         MEM_writeLE32(op, cBlockHeader24);
    2588           0 :         op += ZSTD_blockHeaderSize;
    2589           0 :         dstCapacity -= ZSTD_blockHeaderSize;
    2590             :     }
    2591             : 
    2592          74 :     if (cctx->params.fParams.checksumFlag) {
    2593           0 :         U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
    2594           0 :         if (dstCapacity<4) return ERROR(dstSize_tooSmall);
    2595           0 :         MEM_writeLE32(op, checksum);
    2596           0 :         op += 4;
    2597             :     }
    2598             : 
    2599          74 :     cctx->stage = ZSTDcs_created;  /* return to "created but no init" status */
    2600          74 :     return op-ostart;
    2601             : }
    2602             : 
    2603             : 
    2604          74 : size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
    2605             :                          void* dst, size_t dstCapacity,
    2606             :                    const void* src, size_t srcSize)
    2607             : {
    2608             :     size_t endResult;
    2609          74 :     size_t const cSize = ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 1);
    2610          74 :     if (ZSTD_isError(cSize)) return cSize;
    2611          74 :     endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
    2612          74 :     if (ZSTD_isError(endResult)) return endResult;
    2613          74 :     return cSize + endResult;
    2614             : }
    2615             : 
    2616             : 
    2617          74 : static size_t ZSTD_compress_internal (ZSTD_CCtx* cctx,
    2618             :                                void* dst, size_t dstCapacity,
    2619             :                          const void* src, size_t srcSize,
    2620             :                          const void* dict,size_t dictSize,
    2621             :                                ZSTD_parameters params)
    2622             : {
    2623          74 :     CHECK_F(ZSTD_compressBegin_internal(cctx, dict, dictSize, params, srcSize));
    2624          74 :     return ZSTD_compressEnd(cctx, dst,  dstCapacity, src, srcSize);
    2625             : }
    2626             : 
    2627           0 : size_t ZSTD_compress_advanced (ZSTD_CCtx* ctx,
    2628             :                                void* dst, size_t dstCapacity,
    2629             :                          const void* src, size_t srcSize,
    2630             :                          const void* dict,size_t dictSize,
    2631             :                                ZSTD_parameters params)
    2632             : {
    2633           0 :     CHECK_F(ZSTD_checkCParams(params.cParams));
    2634           0 :     return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
    2635             : }
    2636             : 
    2637          74 : size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict, size_t dictSize, int compressionLevel)
    2638             : {
    2639          74 :     ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, dictSize);
    2640          74 :     params.fParams.contentSizeFlag = 1;
    2641          74 :     return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
    2642             : }
    2643             : 
    2644          74 : size_t ZSTD_compressCCtx (ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel)
    2645             : {
    2646          74 :     return ZSTD_compress_usingDict(ctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
    2647             : }
    2648             : 
    2649          74 : size_t ZSTD_compress(void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel)
    2650             : {
    2651             :     size_t result;
    2652             :     ZSTD_CCtx ctxBody;
    2653          74 :     memset(&ctxBody, 0, sizeof(ctxBody));
    2654          74 :     memcpy(&ctxBody.customMem, &defaultCustomMem, sizeof(ZSTD_customMem));
    2655          74 :     result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel);
    2656          74 :     ZSTD_free(ctxBody.workSpace, defaultCustomMem);  /* can't free ctxBody itself, as it's on stack; free only heap content */
    2657          74 :     return result;
    2658             : }
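
/* A minimal one-shot usage sketch for ZSTD_compress() above, pairing it with
 * ZSTD_compressBound() and the error helpers from the public API.  The input
 * string and level 3 are arbitrary. */
#include <stdio.h>
#include <stdlib.h>
#include <zstd.h>

int main(void)
{
    const char src[] = "one-shot compression of a small in-memory buffer";
    size_t const srcSize = sizeof(src);
    size_t const dstCapacity = ZSTD_compressBound(srcSize);  /* worst-case output size */
    void* const dst = malloc(dstCapacity);
    if (dst == NULL) return 1;

    {   size_t const cSize = ZSTD_compress(dst, dstCapacity, src, srcSize, 3);
        if (ZSTD_isError(cSize)) {
            fprintf(stderr, "compression failed: %s\n", ZSTD_getErrorName(cSize));
            free(dst);
            return 1;
        }
        printf("%zu -> %zu bytes\n", srcSize, cSize);
    }
    free(dst);
    return 0;
}
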
    2659             : 
    2660             : 
    2661             : /* =====  Dictionary API  ===== */
    2662             : 
    2663             : struct ZSTD_CDict_s {
    2664             :     void* dictContent;
    2665             :     size_t dictContentSize;
    2666             :     ZSTD_CCtx* refContext;
    2667             : };  /* typedef'd to ZSTD_CDict within "zstd.h" */
    2668             : 
    2669           0 : size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
    2670             : {
    2671           0 :     if (cdict==NULL) return 0;   /* support sizeof on NULL */
    2672           0 :     return ZSTD_sizeof_CCtx(cdict->refContext) + cdict->dictContentSize;
    2673             : }
    2674             : 
    2675           0 : ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize, ZSTD_parameters params, ZSTD_customMem customMem)
    2676             : {
    2677           0 :     if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem;
    2678           0 :     if (!customMem.customAlloc || !customMem.customFree) return NULL;
    2679             : 
    2680           0 :     {   ZSTD_CDict* const cdict = (ZSTD_CDict*) ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
    2681           0 :         void* const dictContent = ZSTD_malloc(dictSize, customMem);
    2682           0 :         ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(customMem);
    2683             : 
    2684           0 :         if (!dictContent || !cdict || !cctx) {
    2685           0 :             ZSTD_free(dictContent, customMem);
    2686           0 :             ZSTD_free(cdict, customMem);
    2687           0 :             ZSTD_free(cctx, customMem);
    2688           0 :             return NULL;
    2689             :         }
    2690             : 
    2691           0 :         memcpy(dictContent, dict, dictSize);
    2692           0 :         {   size_t const errorCode = ZSTD_compressBegin_advanced(cctx, dictContent, dictSize, params, 0);
    2693           0 :             if (ZSTD_isError(errorCode)) {
    2694           0 :                 ZSTD_free(dictContent, customMem);
    2695           0 :                 ZSTD_free(cdict, customMem);
    2696           0 :                 ZSTD_free(cctx, customMem);
    2697           0 :                 return NULL;
    2698             :         }   }
    2699             : 
    2700           0 :         cdict->dictContent = dictContent;
    2701           0 :         cdict->dictContentSize = dictSize;
    2702           0 :         cdict->refContext = cctx;
    2703           0 :         return cdict;
    2704             :     }
    2705             : }
    2706             : 
    2707           0 : ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
    2708             : {
    2709           0 :     ZSTD_customMem const allocator = { NULL, NULL, NULL };
    2710           0 :     ZSTD_parameters params = ZSTD_getParams(compressionLevel, 0, dictSize);
    2711           0 :     params.fParams.contentSizeFlag = 1;
    2712           0 :     return ZSTD_createCDict_advanced(dict, dictSize, params, allocator);
    2713             : }
    2714             : 
    2715           0 : size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
    2716             : {
    2717           0 :     if (cdict==NULL) return 0;   /* support free on NULL */
    2718           0 :     {   ZSTD_customMem const cMem = cdict->refContext->customMem;
    2719           0 :         ZSTD_freeCCtx(cdict->refContext);
    2720           0 :         ZSTD_free(cdict->dictContent, cMem);
    2721           0 :         ZSTD_free(cdict, cMem);
    2722           0 :         return 0;
    2723             :     }
    2724             : }
    2725             : 
    2726           0 : size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, U64 pledgedSrcSize)
    2727             : {
    2728           0 :     if (cdict->dictContentSize) CHECK_F(ZSTD_copyCCtx(cctx, cdict->refContext, pledgedSrcSize))
    2729           0 :     else CHECK_F(ZSTD_compressBegin_advanced(cctx, NULL, 0, cdict->refContext->params, pledgedSrcSize));
    2730           0 :     return 0;
    2731             : }
    2732             : 
    2733             : /*! ZSTD_compress_usingCDict() :
    2734             : *   Compression using a digested Dictionary.
    2735             : *   Provides faster startup than ZSTD_compress_usingDict(); recommended when the same dictionary is used multiple times.
    2736             : *   Note that compression level is decided during dictionary creation */
    2737           0 : size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
    2738             :                                 void* dst, size_t dstCapacity,
    2739             :                                 const void* src, size_t srcSize,
    2740             :                                 const ZSTD_CDict* cdict)
    2741             : {
    2742           0 :     CHECK_F(ZSTD_compressBegin_usingCDict(cctx, cdict, srcSize));
    2743             : 
    2744           0 :     if (cdict->refContext->params.fParams.contentSizeFlag==1) {
    2745           0 :         cctx->params.fParams.contentSizeFlag = 1;
    2746           0 :         cctx->frameContentSize = srcSize;
    2747             :     }
    2748             : 
    2749           0 :     return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
    2750             : }
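
/* A minimal sketch of the reuse pattern recommended above: the dictionary is
 * digested once into a ZSTD_CDict (the compression level is fixed at creation
 * time), then applied to many inputs through one reusable CCtx.  The helper
 * name, the array-based interface and level 3 are arbitrary. */
#include <zstd.h>

static int compress_many(void* const dsts[], size_t const dstCaps[],
                         const void* const srcs[], size_t const srcSizes[],
                         size_t nbBuffers,
                         const void* dictBuffer, size_t dictSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuffer, dictSize, 3 /* level */);
    size_t n;
    int ok = (cctx != NULL) && (cdict != NULL);

    for (n = 0; ok && n < nbBuffers; n++) {
        size_t const cSize = ZSTD_compress_usingCDict(cctx, dsts[n], dstCaps[n],
                                                      srcs[n], srcSizes[n], cdict);
        if (ZSTD_isError(cSize)) ok = 0;
        /* a real caller would record cSize for each buffer */
    }
    ZSTD_freeCDict(cdict);   /* both free functions accept NULL */
    ZSTD_freeCCtx(cctx);
    return ok;
}
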
    2751             : 
    2752             : 
    2753             : 
    2754             : /* ******************************************************************
    2755             : *  Streaming
    2756             : ********************************************************************/
    2757             : 
    2758             : typedef enum { zcss_init, zcss_load, zcss_flush, zcss_final } ZSTD_cStreamStage;
    2759             : 
    2760             : struct ZSTD_CStream_s {
    2761             :     ZSTD_CCtx* cctx;
    2762             :     ZSTD_CDict* cdict;
    2763             :     char*  inBuff;
    2764             :     size_t inBuffSize;
    2765             :     size_t inToCompress;
    2766             :     size_t inBuffPos;
    2767             :     size_t inBuffTarget;
    2768             :     size_t blockSize;
    2769             :     char*  outBuff;
    2770             :     size_t outBuffSize;
    2771             :     size_t outBuffContentSize;
    2772             :     size_t outBuffFlushedSize;
    2773             :     ZSTD_cStreamStage stage;
    2774             :     U32    checksum;
    2775             :     U32    frameEnded;
    2776             :     ZSTD_customMem customMem;
    2777             : };   /* typedef'd to ZSTD_CStream within "zstd.h" */
    2778             : 
    2779           0 : ZSTD_CStream* ZSTD_createCStream(void)
    2780             : {
    2781           0 :     return ZSTD_createCStream_advanced(defaultCustomMem);
    2782             : }
    2783             : 
    2784           0 : ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
    2785             : {
    2786             :     ZSTD_CStream* zcs;
    2787             : 
    2788           0 :     if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem;
    2789           0 :     if (!customMem.customAlloc || !customMem.customFree) return NULL;
    2790             : 
    2791           0 :     zcs = (ZSTD_CStream*)ZSTD_malloc(sizeof(ZSTD_CStream), customMem);
    2792           0 :     if (zcs==NULL) return NULL;
    2793           0 :     memset(zcs, 0, sizeof(ZSTD_CStream));
    2794           0 :     memcpy(&zcs->customMem, &customMem, sizeof(ZSTD_customMem));
    2795           0 :     zcs->cctx = ZSTD_createCCtx_advanced(customMem);
    2796           0 :     if (zcs->cctx == NULL) { ZSTD_freeCStream(zcs); return NULL; }
    2797           0 :     return zcs;
    2798             : }
    2799             : 
    2800           0 : size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
    2801             : {
    2802           0 :     if (zcs==NULL) return 0;   /* support free on NULL */
    2803           0 :     {   ZSTD_customMem const cMem = zcs->customMem;
    2804           0 :         ZSTD_freeCCtx(zcs->cctx);
    2805           0 :         ZSTD_freeCDict(zcs->cdict);
    2806           0 :         ZSTD_free(zcs->inBuff, cMem);
    2807           0 :         ZSTD_free(zcs->outBuff, cMem);
    2808           0 :         ZSTD_free(zcs, cMem);
    2809           0 :         return 0;
    2810             :     }
    2811             : }
    2812             : 
    2813             : 
    2814             : /*======   Initialization   ======*/
    2815             : 
    2816           0 : size_t ZSTD_CStreamInSize(void)  { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; }
    2817           0 : size_t ZSTD_CStreamOutSize(void) { return ZSTD_compressBound(ZSTD_BLOCKSIZE_ABSOLUTEMAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ; }
    2818             : 
    2819           0 : size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
    2820             : {
    2821           0 :     CHECK_F(ZSTD_compressBegin_usingCDict(zcs->cctx, zcs->cdict, pledgedSrcSize));
    2822             : 
    2823           0 :     zcs->inToCompress = 0;
    2824           0 :     zcs->inBuffPos = 0;
    2825           0 :     zcs->inBuffTarget = zcs->blockSize;
    2826           0 :     zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
    2827           0 :     zcs->stage = zcss_load;
    2828           0 :     zcs->frameEnded = 0;
    2829           0 :     return 0;   /* ready to go */
    2830             : }
    2831             : 
    2832           0 : size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
    2833             :                                  const void* dict, size_t dictSize,
    2834             :                                  ZSTD_parameters params, unsigned long long pledgedSrcSize)
    2835             : {
    2836             :     /* allocate buffers */
    2837           0 :     {   size_t const neededInBuffSize = (size_t)1 << params.cParams.windowLog;
    2838           0 :         if (zcs->inBuffSize < neededInBuffSize) {
    2839           0 :             zcs->inBuffSize = neededInBuffSize;
    2840           0 :             ZSTD_free(zcs->inBuff, zcs->customMem);
    2841           0 :             zcs->inBuff = (char*) ZSTD_malloc(neededInBuffSize, zcs->customMem);
    2842           0 :             if (zcs->inBuff == NULL) return ERROR(memory_allocation);
    2843             :         }
    2844           0 :         zcs->blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, neededInBuffSize);
    2845             :     }
    2846           0 :     if (zcs->outBuffSize < ZSTD_compressBound(zcs->blockSize)+1) {
    2847           0 :         zcs->outBuffSize = ZSTD_compressBound(zcs->blockSize)+1;
    2848           0 :         ZSTD_free(zcs->outBuff, zcs->customMem);
    2849           0 :         zcs->outBuff = (char*) ZSTD_malloc(zcs->outBuffSize, zcs->customMem);
    2850           0 :         if (zcs->outBuff == NULL) return ERROR(memory_allocation);
    2851             :     }
    2852             : 
    2853           0 :     ZSTD_freeCDict(zcs->cdict);
    2854           0 :     zcs->cdict = ZSTD_createCDict_advanced(dict, dictSize, params, zcs->customMem);
    2855           0 :     if (zcs->cdict == NULL) return ERROR(memory_allocation);
    2856             : 
    2857           0 :     zcs->checksum = params.fParams.checksumFlag > 0;
    2858             : 
    2859           0 :     return ZSTD_resetCStream(zcs, pledgedSrcSize);
    2860             : }
    2861             : 
    2862           0 : size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
    2863             : {
    2864           0 :     ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize);
    2865           0 :     return ZSTD_initCStream_advanced(zcs, dict, dictSize, params, 0);
    2866             : }
    2867             : 
    2868           0 : size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
    2869             : {
    2870           0 :     return ZSTD_initCStream_usingDict(zcs, NULL, 0, compressionLevel);
    2871             : }
    2872             : 
    2873           0 : size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
    2874             : {
    2875           0 :     if (zcs==NULL) return 0;   /* support sizeof on NULL */
    2876           0 :     return sizeof(*zcs) /* struct size, not pointer size */ + ZSTD_sizeof_CCtx(zcs->cctx) + ZSTD_sizeof_CDict(zcs->cdict) + zcs->outBuffSize + zcs->inBuffSize;
    2877             : }
    2878             : 
    2879             : /*======   Compression   ======*/
    2880             : 
    2881             : typedef enum { zsf_gather, zsf_flush, zsf_end } ZSTD_flush_e;
    2882             : 
    2883           0 : MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
    2884             : {
    2885           0 :     size_t const length = MIN(dstCapacity, srcSize);
    2886           0 :     memcpy(dst, src, length);
    2887           0 :     return length;
    2888             : }
    2889             : 
    2890           0 : static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
    2891             :                               void* dst, size_t* dstCapacityPtr,
    2892             :                         const void* src, size_t* srcSizePtr,
    2893             :                               ZSTD_flush_e const flush)
    2894             : {
    2895           0 :     U32 someMoreWork = 1;
    2896           0 :     const char* const istart = (const char*)src;
    2897           0 :     const char* const iend = istart + *srcSizePtr;
    2898           0 :     const char* ip = istart;
    2899           0 :     char* const ostart = (char*)dst;
    2900           0 :     char* const oend = ostart + *dstCapacityPtr;
    2901           0 :     char* op = ostart;
    2902             : 
    2903           0 :     while (someMoreWork) {
    2904           0 :         switch(zcs->stage)
    2905             :         {
    2906           0 :         case zcss_init: return ERROR(init_missing);   /* call ZSTD_initCStream() first ! */
    2907             : 
    2908             :         case zcss_load:
    2909             :             /* complete inBuffer */
    2910           0 :             {   size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
    2911           0 :                 size_t const loaded = ZSTD_limitCopy(zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend-ip);
    2912           0 :                 zcs->inBuffPos += loaded;
    2913           0 :                 ip += loaded;
    2914           0 :                 if ( (zcs->inBuffPos==zcs->inToCompress) || (!flush && (toLoad != loaded)) ) {
    2915           0 :                     someMoreWork = 0; break;  /* not enough input to get a full block : stop there, wait for more */
    2916             :             }   }
    2917             :             /* compress current block (note : this stage cannot be stopped in the middle) */
    2918             :             {   void* cDst;
    2919             :                 size_t cSize;
    2920           0 :                 size_t const iSize = zcs->inBuffPos - zcs->inToCompress;
    2921           0 :                 size_t oSize = oend-op;
    2922           0 :                 if (oSize >= ZSTD_compressBound(iSize))
    2923           0 :                     cDst = op;   /* compress directly into output buffer (avoid flush stage) */
    2924             :                 else
    2925           0 :                     cDst = zcs->outBuff, oSize = zcs->outBuffSize;
    2926           0 :                 cSize = (flush == zsf_end) ?
    2927           0 :                         ZSTD_compressEnd(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize) :
    2928           0 :                         ZSTD_compressContinue(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize);
    2929           0 :                 if (ZSTD_isError(cSize)) return cSize;
    2930           0 :                 if (flush == zsf_end) zcs->frameEnded = 1;
    2931             :                 /* prepare next block */
    2932           0 :                 zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
    2933           0 :                 if (zcs->inBuffTarget > zcs->inBuffSize)
    2934           0 :                     zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;   /* note : inBuffSize >= blockSize */
    2935           0 :                 zcs->inToCompress = zcs->inBuffPos;
    2936           0 :                 if (cDst == op) { op += cSize; break; }   /* no need to flush */
    2937           0 :                 zcs->outBuffContentSize = cSize;
    2938           0 :                 zcs->outBuffFlushedSize = 0;
    2939           0 :                 zcs->stage = zcss_flush;   /* pass-through to flush stage */
    2940             :             }
    2941             : 
    2942             :         case zcss_flush:
    2943           0 :             {   size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
    2944           0 :                 size_t const flushed = ZSTD_limitCopy(op, oend-op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
    2945           0 :                 op += flushed;
    2946           0 :                 zcs->outBuffFlushedSize += flushed;
    2947           0 :                 if (toFlush!=flushed) { someMoreWork = 0; break; }  /* dst too small to store flushed data : stop there */
    2948           0 :                 zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
    2949           0 :                 zcs->stage = zcss_load;
    2950           0 :                 break;
    2951             :             }
    2952             : 
    2953             :         case zcss_final:
    2954           0 :             someMoreWork = 0;   /* do nothing */
    2955           0 :             break;
    2956             : 
    2957             :         default:
    2958           0 :             return ERROR(GENERIC);   /* impossible */
    2959             :         }
    2960             :     }
    2961             : 
    2962           0 :     *srcSizePtr = ip - istart;
    2963           0 :     *dstCapacityPtr = op - ostart;
    2964           0 :     if (zcs->frameEnded) return 0;
    2965           0 :     {   size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos;
    2966           0 :         if (hintInSize==0) hintInSize = zcs->blockSize;
    2967           0 :         return hintInSize;
    2968             :     }
    2969             : }
    2970             : 
    2971           0 : size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
    2972             : {
    2973           0 :     size_t sizeRead = input->size - input->pos;
    2974           0 :     size_t sizeWritten = output->size - output->pos;
    2975           0 :     size_t const result = ZSTD_compressStream_generic(zcs,
    2976           0 :                                                       (char*)(output->dst) + output->pos, &sizeWritten,
    2977           0 :                                                       (const char*)(input->src) + input->pos, &sizeRead, zsf_gather);
    2978           0 :     input->pos += sizeRead;
    2979           0 :     output->pos += sizeWritten;
    2980           0 :     return result;
    2981             : }
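
/* A minimal file-to-file driver sketch for ZSTD_compressStream() above, using
 * the recommended buffer sizes from ZSTD_CStreamInSize() / ZSTD_CStreamOutSize().
 * Error handling is abbreviated, the helper name is arbitrary, and the frame is
 * finished by the ZSTD_endStream() loop sketched after that function below. */
#include <stdio.h>
#include <stdlib.h>
#include <zstd.h>

static int stream_compress_file(FILE* fin, FILE* fout, int level)
{
    size_t const inSize  = ZSTD_CStreamInSize();
    size_t const outSize = ZSTD_CStreamOutSize();
    void* const inBuff   = malloc(inSize);
    void* const outBuff  = malloc(outSize);
    ZSTD_CStream* const zcs = ZSTD_createCStream();
    int ok = (inBuff && outBuff && zcs)
          && !ZSTD_isError(ZSTD_initCStream(zcs, level));

    while (ok) {
        size_t const readSize = fread(inBuff, 1, inSize, fin);
        ZSTD_inBuffer input = { inBuff, readSize, 0 };
        if (readSize == 0) break;                  /* end of input; the frame still needs ending */
        while (ok && input.pos < input.size) {     /* drain the whole chunk */
            ZSTD_outBuffer output = { outBuff, outSize, 0 };
            size_t const hint = ZSTD_compressStream(zcs, &output, &input);
            if (ZSTD_isError(hint)) ok = 0;
            else if (fwrite(outBuff, 1, output.pos, fout) != output.pos) ok = 0;
        }
    }
    /* ... finish with the ZSTD_endStream() loop shown after that function ... */
    ZSTD_freeCStream(zcs);
    free(inBuff); free(outBuff);
    return ok;
}
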
    2982             : 
    2983             : 
    2984             : /*======   Finalize   ======*/
    2985             : 
    2986             : /*! ZSTD_flushStream() :
    2987             : *   @return : amount of data remaining to flush */
    2988           0 : size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
    2989             : {
    2990           0 :     size_t srcSize = 0;
    2991           0 :     size_t sizeWritten = output->size - output->pos;
    2992           0 :     size_t const result = ZSTD_compressStream_generic(zcs,
    2993           0 :                                                      (char*)(output->dst) + output->pos, &sizeWritten,
    2994             :                                                      &srcSize, &srcSize, /* use a valid src address instead of NULL */
    2995             :                                                       zsf_flush);
    2996           0 :     output->pos += sizeWritten;
    2997           0 :     if (ZSTD_isError(result)) return result;
    2998           0 :     return zcs->outBuffContentSize - zcs->outBuffFlushedSize;   /* remaining to flush */
    2999             : }
    3000             : 
    3001             : 
    3002           0 : size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
    3003             : {
    3004           0 :     BYTE* const ostart = (BYTE*)(output->dst) + output->pos;
    3005           0 :     BYTE* const oend = (BYTE*)(output->dst) + output->size;
    3006           0 :     BYTE* op = ostart;
    3007             : 
    3008           0 :     if (zcs->stage != zcss_final) {
    3009             :         /* flush whatever remains */
    3010           0 :         size_t srcSize = 0;
    3011           0 :         size_t sizeWritten = output->size - output->pos;
    3012           0 :         size_t const notEnded = ZSTD_compressStream_generic(zcs, ostart, &sizeWritten, &srcSize, &srcSize, zsf_end);  /* use a valid src address instead of NULL */
    3013           0 :         size_t const remainingToFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
    3014           0 :         op += sizeWritten;
    3015           0 :         if (remainingToFlush) {
    3016           0 :             output->pos += sizeWritten;
    3017           0 :             return remainingToFlush + ZSTD_BLOCKHEADERSIZE /* final empty block */ + (zcs->checksum * 4);
    3018             :         }
    3019             :         /* create epilogue */
    3020           0 :         zcs->stage = zcss_final;
    3021           0 :         zcs->outBuffContentSize = !notEnded ? 0 :
    3022           0 :             ZSTD_compressEnd(zcs->cctx, zcs->outBuff, zcs->outBuffSize, NULL, 0);  /* write epilogue, including final empty block, into outBuff */
    3023             :     }
    3024             : 
    3025             :     /* flush epilogue */
    3026           0 :     {   size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
    3027           0 :         size_t const flushed = ZSTD_limitCopy(op, oend-op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
    3028           0 :         op += flushed;
    3029           0 :         zcs->outBuffFlushedSize += flushed;
    3030           0 :         output->pos += op-ostart;
    3031           0 :         if (toFlush==flushed) zcs->stage = zcss_init;  /* end reached */
    3032           0 :         return toFlush - flushed;
    3033             :     }
    3034             : }
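
/* A minimal sketch of finishing the frame started in the driver above.
 * ZSTD_endStream() returns the number of bytes still held inside the CStream,
 * so it is retried until it reports 0, flushing the output buffer between
 * calls.  zcs, fout, outBuff and outSize follow the previous sketch; the
 * helper name is arbitrary. */
#include <stdio.h>
#include <zstd.h>

static int stream_finish_frame(ZSTD_CStream* zcs, FILE* fout,
                               void* outBuff, size_t outSize)
{
    for (;;) {
        ZSTD_outBuffer output = { outBuff, outSize, 0 };
        size_t const remaining = ZSTD_endStream(zcs, &output);
        if (ZSTD_isError(remaining)) return 0;
        if (fwrite(outBuff, 1, output.pos, fout) != output.pos) return 0;
        if (remaining == 0) return 1;   /* epilogue fully written */
    }
}
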
    3035             : 
    3036             : 
    3037             : 
    3038             : /*-=====  Pre-defined compression levels  =====-*/
    3039             : 
    3040             : #define ZSTD_DEFAULT_CLEVEL 1
    3041             : #define ZSTD_MAX_CLEVEL     22
    3042           0 : int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
    3043             : 
    3044             : static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
    3045             : {   /* "default" */
    3046             :     /* W,  C,  H,  S,  L, TL, strat */
    3047             :     { 18, 12, 12,  1,  7, 16, ZSTD_fast    },  /* level  0 - never used */
    3048             :     { 19, 13, 14,  1,  7, 16, ZSTD_fast    },  /* level  1 */
    3049             :     { 19, 15, 16,  1,  6, 16, ZSTD_fast    },  /* level  2 */
    3050             :     { 20, 16, 17,  1,  5, 16, ZSTD_dfast   },  /* level  3.*/
    3051             :     { 20, 18, 18,  1,  5, 16, ZSTD_dfast   },  /* level  4.*/
    3052             :     { 20, 15, 18,  3,  5, 16, ZSTD_greedy  },  /* level  5 */
    3053             :     { 21, 16, 19,  2,  5, 16, ZSTD_lazy    },  /* level  6 */
    3054             :     { 21, 17, 20,  3,  5, 16, ZSTD_lazy    },  /* level  7 */
    3055             :     { 21, 18, 20,  3,  5, 16, ZSTD_lazy2   },  /* level  8 */
    3056             :     { 21, 20, 20,  3,  5, 16, ZSTD_lazy2   },  /* level  9 */
    3057             :     { 21, 19, 21,  4,  5, 16, ZSTD_lazy2   },  /* level 10 */
    3058             :     { 22, 20, 22,  4,  5, 16, ZSTD_lazy2   },  /* level 11 */
    3059             :     { 22, 20, 22,  5,  5, 16, ZSTD_lazy2   },  /* level 12 */
    3060             :     { 22, 21, 22,  5,  5, 16, ZSTD_lazy2   },  /* level 13 */
    3061             :     { 22, 21, 22,  6,  5, 16, ZSTD_lazy2   },  /* level 14 */
    3062             :     { 22, 21, 21,  5,  5, 16, ZSTD_btlazy2 },  /* level 15 */
    3063             :     { 23, 22, 22,  5,  5, 16, ZSTD_btlazy2 },  /* level 16 */
    3064             :     { 23, 21, 22,  4,  5, 24, ZSTD_btopt   },  /* level 17 */
    3065             :     { 23, 23, 22,  6,  5, 32, ZSTD_btopt   },  /* level 18 */
    3066             :     { 23, 23, 22,  6,  3, 48, ZSTD_btopt   },  /* level 19 */
    3067             :     { 25, 25, 23,  7,  3, 64, ZSTD_btopt   },  /* level 20 */
    3068             :     { 26, 26, 23,  7,  3,256, ZSTD_btopt   },  /* level 21 */
    3069             :     { 27, 27, 25,  9,  3,512, ZSTD_btopt   },  /* level 22 */
    3070             : },
    3071             : {   /* for srcSize <= 256 KB */
    3072             :     /* W,  C,  H,  S,  L,  T, strat */
    3073             :     {  0,  0,  0,  0,  0,  0, ZSTD_fast    },  /* level  0 - not used */
    3074             :     { 18, 13, 14,  1,  6,  8, ZSTD_fast    },  /* level  1 */
    3075             :     { 18, 14, 13,  1,  5,  8, ZSTD_dfast   },  /* level  2 */
    3076             :     { 18, 16, 15,  1,  5,  8, ZSTD_dfast   },  /* level  3 */
    3077             :     { 18, 15, 17,  1,  5,  8, ZSTD_greedy  },  /* level  4.*/
    3078             :     { 18, 16, 17,  4,  5,  8, ZSTD_greedy  },  /* level  5.*/
    3079             :     { 18, 16, 17,  3,  5,  8, ZSTD_lazy    },  /* level  6.*/
    3080             :     { 18, 17, 17,  4,  4,  8, ZSTD_lazy    },  /* level  7 */
    3081             :     { 18, 17, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
    3082             :     { 18, 17, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
    3083             :     { 18, 17, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
    3084             :     { 18, 18, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 11.*/
    3085             :     { 18, 18, 17,  7,  4,  8, ZSTD_lazy2   },  /* level 12.*/
    3086             :     { 18, 19, 17,  6,  4,  8, ZSTD_btlazy2 },  /* level 13 */
    3087             :     { 18, 18, 18,  4,  4, 16, ZSTD_btopt   },  /* level 14.*/
    3088             :     { 18, 18, 18,  4,  3, 16, ZSTD_btopt   },  /* level 15.*/
    3089             :     { 18, 19, 18,  6,  3, 32, ZSTD_btopt   },  /* level 16.*/
    3090             :     { 18, 19, 18,  8,  3, 64, ZSTD_btopt   },  /* level 17.*/
    3091             :     { 18, 19, 18,  9,  3,128, ZSTD_btopt   },  /* level 18.*/
    3092             :     { 18, 19, 18, 10,  3,256, ZSTD_btopt   },  /* level 19.*/
    3093             :     { 18, 19, 18, 11,  3,512, ZSTD_btopt   },  /* level 20.*/
    3094             :     { 18, 19, 18, 12,  3,512, ZSTD_btopt   },  /* level 21.*/
    3095             :     { 18, 19, 18, 13,  3,512, ZSTD_btopt   },  /* level 22.*/
    3096             : },
    3097             : {   /* for srcSize <= 128 KB */
    3098             :     /* W,  C,  H,  S,  L,  T, strat */
    3099             :     { 17, 12, 12,  1,  7,  8, ZSTD_fast    },  /* level  0 - not used */
    3100             :     { 17, 12, 13,  1,  6,  8, ZSTD_fast    },  /* level  1 */
    3101             :     { 17, 13, 16,  1,  5,  8, ZSTD_fast    },  /* level  2 */
    3102             :     { 17, 16, 16,  2,  5,  8, ZSTD_dfast   },  /* level  3 */
    3103             :     { 17, 13, 15,  3,  4,  8, ZSTD_greedy  },  /* level  4 */
    3104             :     { 17, 15, 17,  4,  4,  8, ZSTD_greedy  },  /* level  5 */
    3105             :     { 17, 16, 17,  3,  4,  8, ZSTD_lazy    },  /* level  6 */
    3106             :     { 17, 15, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  7 */
    3107             :     { 17, 17, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
    3108             :     { 17, 17, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
    3109             :     { 17, 17, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
    3110             :     { 17, 17, 17,  7,  4,  8, ZSTD_lazy2   },  /* level 11 */
    3111             :     { 17, 17, 17,  8,  4,  8, ZSTD_lazy2   },  /* level 12 */
    3112             :     { 17, 18, 17,  6,  4,  8, ZSTD_btlazy2 },  /* level 13.*/
    3113             :     { 17, 17, 17,  7,  3,  8, ZSTD_btopt   },  /* level 14.*/
    3114             :     { 17, 17, 17,  7,  3, 16, ZSTD_btopt   },  /* level 15.*/
    3115             :     { 17, 18, 17,  7,  3, 32, ZSTD_btopt   },  /* level 16.*/
    3116             :     { 17, 18, 17,  7,  3, 64, ZSTD_btopt   },  /* level 17.*/
    3117             :     { 17, 18, 17,  7,  3,256, ZSTD_btopt   },  /* level 18.*/
    3118             :     { 17, 18, 17,  8,  3,256, ZSTD_btopt   },  /* level 19.*/
    3119             :     { 17, 18, 17,  9,  3,256, ZSTD_btopt   },  /* level 20.*/
    3120             :     { 17, 18, 17, 10,  3,256, ZSTD_btopt   },  /* level 21.*/
    3121             :     { 17, 18, 17, 11,  3,512, ZSTD_btopt   },  /* level 22.*/
    3122             : },
    3123             : {   /* for srcSize <= 16 KB */
    3124             :     /* W,  C,  H,  S,  L,  T, strat */
    3125             :     { 14, 12, 12,  1,  7,  6, ZSTD_fast    },  /* level  0 - not used */
    3126             :     { 14, 14, 14,  1,  6,  6, ZSTD_fast    },  /* level  1 */
    3127             :     { 14, 14, 14,  1,  4,  6, ZSTD_fast    },  /* level  2 */
    3128             :     { 14, 14, 14,  1,  4,  6, ZSTD_dfast   },  /* level  3.*/
    3129             :     { 14, 14, 14,  4,  4,  6, ZSTD_greedy  },  /* level  4.*/
    3130             :     { 14, 14, 14,  3,  4,  6, ZSTD_lazy    },  /* level  5.*/
    3131             :     { 14, 14, 14,  4,  4,  6, ZSTD_lazy2   },  /* level  6 */
    3132             :     { 14, 14, 14,  5,  4,  6, ZSTD_lazy2   },  /* level  7 */
    3133             :     { 14, 14, 14,  6,  4,  6, ZSTD_lazy2   },  /* level  8.*/
    3134             :     { 14, 15, 14,  6,  4,  6, ZSTD_btlazy2 },  /* level  9.*/
    3135             :     { 14, 15, 14,  3,  3,  6, ZSTD_btopt   },  /* level 10.*/
    3136             :     { 14, 15, 14,  6,  3,  8, ZSTD_btopt   },  /* level 11.*/
    3137             :     { 14, 15, 14,  6,  3, 16, ZSTD_btopt   },  /* level 12.*/
    3138             :     { 14, 15, 14,  6,  3, 24, ZSTD_btopt   },  /* level 13.*/
    3139             :     { 14, 15, 15,  6,  3, 48, ZSTD_btopt   },  /* level 14.*/
    3140             :     { 14, 15, 15,  6,  3, 64, ZSTD_btopt   },  /* level 15.*/
    3141             :     { 14, 15, 15,  6,  3, 96, ZSTD_btopt   },  /* level 16.*/
    3142             :     { 14, 15, 15,  6,  3,128, ZSTD_btopt   },  /* level 17.*/
    3143             :     { 14, 15, 15,  6,  3,256, ZSTD_btopt   },  /* level 18.*/
    3144             :     { 14, 15, 15,  7,  3,256, ZSTD_btopt   },  /* level 19.*/
    3145             :     { 14, 15, 15,  8,  3,256, ZSTD_btopt   },  /* level 20.*/
    3146             :     { 14, 15, 15,  9,  3,256, ZSTD_btopt   },  /* level 21.*/
    3147             :     { 14, 15, 15, 10,  3,256, ZSTD_btopt   },  /* level 22.*/
    3148             : },
    3149             : };
    3150             : 
    3151             : /*! ZSTD_getCParams() :
    3152             : *   @return ZSTD_compressionParameters structure for a selected compression level, `srcSize` and `dictSize`.
    3153             : *   Size values are optional, provide 0 if not known or unused */
    3154          74 : ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSize, size_t dictSize)
    3155             : {
    3156             :     ZSTD_compressionParameters cp;
    3157          74 :     size_t const addedSize = srcSize ? 0 : 500;
    3158          74 :     U64 const rSize = srcSize+dictSize ? srcSize+dictSize+addedSize : (U64)-1;
    3159          74 :     U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);   /* intentional underflow for srcSizeHint == 0 */
    3160          74 :     if (compressionLevel <= 0) compressionLevel = ZSTD_DEFAULT_CLEVEL;   /* 0 == default; no negative compressionLevel yet */
    3161          74 :     if (compressionLevel > ZSTD_MAX_CLEVEL) compressionLevel = ZSTD_MAX_CLEVEL;
    3162          74 :     cp = ZSTD_defaultCParameters[tableID][compressionLevel];
    3163          74 :     if (MEM_32bits()) {   /* auto-correction, for 32-bits mode */
    3164           0 :         if (cp.windowLog > ZSTD_WINDOWLOG_MAX) cp.windowLog = ZSTD_WINDOWLOG_MAX;
    3165           0 :         if (cp.chainLog > ZSTD_CHAINLOG_MAX) cp.chainLog = ZSTD_CHAINLOG_MAX;
    3166           0 :         if (cp.hashLog > ZSTD_HASHLOG_MAX) cp.hashLog = ZSTD_HASHLOG_MAX;
    3167             :     }
    3168          74 :     cp = ZSTD_adjustCParams(cp, srcSize, dictSize);
    3169          74 :     return cp;
    3170             : }
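
/* A minimal sketch of how the size hint steers table selection in
 * ZSTD_getCParams() above: rSize falls into one of the four tables
 * ("default", <= 256 KB, <= 128 KB, <= 16 KB), and with no hint at all the
 * (U64)-1 sentinel keeps the "default" table.  ZSTD_getCParams() and
 * ZSTD_compressionParameters are behind ZSTD_STATIC_LINKING_ONLY; the sample
 * sizes are arbitrary. */
#define ZSTD_STATIC_LINKING_ONLY
#include <stdio.h>
#include <zstd.h>

int main(void)
{
    unsigned long long const hints[] = { 0, 10*1024, 100*1024, 1024*1024 };
    size_t i;
    for (i = 0; i < sizeof(hints)/sizeof(hints[0]); i++) {
        ZSTD_compressionParameters const cp = ZSTD_getCParams(3, hints[i], 0);
        /* smaller hints select smaller tables, hence a smaller windowLog */
        printf("srcSize hint %llu -> windowLog %u\n", hints[i], cp.windowLog);
    }
    return 0;
}
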
    3171             : 
    3172             : /*! ZSTD_getParams() :
    3173             : *   same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`).
    3174             : *   All fields of `ZSTD_frameParameters` are set to default (0) */
    3175          74 : ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSize, size_t dictSize) {
    3176             :     ZSTD_parameters params;
    3177          74 :     ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSize, dictSize);
    3178          74 :     memset(&params, 0, sizeof(params));
    3179          74 :     params.cParams = cParams;
    3180          74 :     return params;
    3181             : }
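
/* A minimal sketch combining ZSTD_getParams() above with
 * ZSTD_compress_advanced() to enable a frame checksum, something the simple
 * one-shot entry points do not expose.  Both symbols sit behind
 * ZSTD_STATIC_LINKING_ONLY in zstd.h of this vintage; the helper name and
 * defaults are otherwise arbitrary. */
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

static size_t compress_with_checksum(ZSTD_CCtx* cctx,
                                     void* dst, size_t dstCapacity,
                                     const void* src, size_t srcSize, int level)
{
    ZSTD_parameters params = ZSTD_getParams(level, srcSize, 0);
    params.fParams.checksumFlag = 1;   /* append the XXH64-based 32-bit checksum in the epilogue */
    return ZSTD_compress_advanced(cctx, dst, dstCapacity, src, srcSize,
                                  NULL, 0, params);
}
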

Generated by: LCOV version 1.11