path: root/thirdparty/zstd/compress
Diffstat (limited to 'thirdparty/zstd/compress')
-rw-r--r--  thirdparty/zstd/compress/fse_compress.c             296
-rw-r--r--  thirdparty/zstd/compress/hist.c                      203
-rw-r--r--  thirdparty/zstd/compress/hist.h                       95
-rw-r--r--  thirdparty/zstd/compress/huf_compress.c               84
-rw-r--r--  thirdparty/zstd/compress/zstd_compress.c            2141
-rw-r--r--  thirdparty/zstd/compress/zstd_compress_internal.h    243
-rw-r--r--  thirdparty/zstd/compress/zstd_double_fast.c          362
-rw-r--r--  thirdparty/zstd/compress/zstd_double_fast.h           10
-rw-r--r--  thirdparty/zstd/compress/zstd_fast.c                 292
-rw-r--r--  thirdparty/zstd/compress/zstd_fast.h                  10
-rw-r--r--  thirdparty/zstd/compress/zstd_lazy.c                 554
-rw-r--r--  thirdparty/zstd/compress/zstd_lazy.h                  33
-rw-r--r--  thirdparty/zstd/compress/zstd_ldm.c                  128
-rw-r--r--  thirdparty/zstd/compress/zstd_ldm.h                   12
-rw-r--r--  thirdparty/zstd/compress/zstd_opt.c                  910
-rw-r--r--  thirdparty/zstd/compress/zstd_opt.h                   28
-rw-r--r--  thirdparty/zstd/compress/zstdmt_compress.c           584
-rw-r--r--  thirdparty/zstd/compress/zstdmt_compress.h            60
18 files changed, 4179 insertions, 1866 deletions
diff --git a/thirdparty/zstd/compress/fse_compress.c b/thirdparty/zstd/compress/fse_compress.c
index cb8f1fa323..60f357bbd2 100644
--- a/thirdparty/zstd/compress/fse_compress.c
+++ b/thirdparty/zstd/compress/fse_compress.c
@@ -1,6 +1,6 @@
/* ******************************************************************
FSE : Finite State Entropy encoder
- Copyright (C) 2013-2015, Yann Collet.
+ Copyright (C) 2013-present, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
@@ -37,9 +37,11 @@
****************************************************************/
#include <stdlib.h> /* malloc, free, qsort */
#include <string.h> /* memcpy, memset */
-#include <stdio.h> /* printf (debug) */
-#include "bitstream.h"
#include "compiler.h"
+#include "mem.h" /* U32, U16, etc. */
+#include "debug.h" /* assert, DEBUGLOG */
+#include "hist.h" /* HIST_count_wksp */
+#include "bitstream.h"
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
#include "error_private.h"
@@ -49,7 +51,6 @@
* Error Management
****************************************************************/
#define FSE_isError ERR_isError
-#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
/* **************************************************************
@@ -82,7 +83,9 @@
* wkspSize should be sized to handle worst case situation, which is `1<<max_tableLog * sizeof(FSE_FUNCTION_TYPE)`
* workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
*/
-size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
+size_t FSE_buildCTable_wksp(FSE_CTable* ct,
+ const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
+ void* workSpace, size_t wkspSize)
{
U32 const tableSize = 1 << tableLog;
U32 const tableMask = tableSize - 1;
@@ -100,14 +103,19 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsi
if (((size_t)1 << tableLog) * sizeof(FSE_FUNCTION_TYPE) > wkspSize) return ERROR(tableLog_tooLarge);
tableU16[-2] = (U16) tableLog;
tableU16[-1] = (U16) maxSymbolValue;
+ assert(tableLog < 16); /* required for threshold strategy to work */
/* For explanations on how to distribute symbol values over the table :
- * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
+ * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
+
+ #ifdef __clang_analyzer__
+ memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize); /* useless initialization, just to keep scan-build happy */
+ #endif
/* symbol start positions */
{ U32 u;
cumul[0] = 0;
- for (u=1; u<=maxSymbolValue+1; u++) {
+ for (u=1; u <= maxSymbolValue+1; u++) {
if (normalizedCounter[u-1]==-1) { /* Low proba symbol */
cumul[u] = cumul[u-1] + 1;
tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);
@@ -122,13 +130,15 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsi
U32 symbol;
for (symbol=0; symbol<=maxSymbolValue; symbol++) {
int nbOccurences;
- for (nbOccurences=0; nbOccurences<normalizedCounter[symbol]; nbOccurences++) {
+ int const freq = normalizedCounter[symbol];
+ for (nbOccurences=0; nbOccurences<freq; nbOccurences++) {
tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
position = (position + step) & tableMask;
- while (position > highThreshold) position = (position + step) & tableMask; /* Low proba area */
+ while (position > highThreshold)
+ position = (position + step) & tableMask; /* Low proba area */
} }
- if (position!=0) return ERROR(GENERIC); /* Must have gone through all positions */
+ assert(position==0); /* Must have initialized all positions */
}
/* Build table */
@@ -143,7 +153,10 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsi
for (s=0; s<=maxSymbolValue; s++) {
switch (normalizedCounter[s])
{
- case 0: break;
+ case 0:
+ /* filling nonetheless, for compatibility with FSE_getMaxNbBits() */
+ symbolTT[s].deltaNbBits = ((tableLog+1) << 16) - (1<<tableLog);
+ break;
case -1:
case 1:
@@ -160,6 +173,18 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsi
total += normalizedCounter[s];
} } } }
+#if 0 /* debug : symbol costs */
+ DEBUGLOG(5, "\n --- table statistics : ");
+ { U32 symbol;
+ for (symbol=0; symbol<=maxSymbolValue; symbol++) {
+ DEBUGLOG(5, "%3u: w=%3i, maxBits=%u, fracBits=%.2f",
+ symbol, normalizedCounter[symbol],
+ FSE_getMaxNbBits(symbolTT, symbol),
+ (double)FSE_bitCost(symbolTT, tableLog, symbol, 8) / 256);
+ }
+ }
+#endif
+
return 0;
}
@@ -174,8 +199,9 @@ size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned
#ifndef FSE_COMMONDEFS_ONLY
+
/*-**************************************************************
-* FSE NCount encoding-decoding
+* FSE NCount encoding
****************************************************************/
size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
{
@@ -183,9 +209,10 @@ size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? use default */
}
-static size_t FSE_writeNCount_generic (void* header, size_t headerBufferSize,
- const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
- unsigned writeIsSafe)
+static size_t
+FSE_writeNCount_generic (void* header, size_t headerBufferSize,
+ const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
+ unsigned writeIsSafe)
{
BYTE* const ostart = (BYTE*) header;
BYTE* out = ostart;
@@ -194,13 +221,12 @@ static size_t FSE_writeNCount_generic (void* header, size_t headerBufferSize,
const int tableSize = 1 << tableLog;
int remaining;
int threshold;
- U32 bitStream;
- int bitCount;
- unsigned charnum = 0;
- int previous0 = 0;
+ U32 bitStream = 0;
+ int bitCount = 0;
+ unsigned symbol = 0;
+ unsigned const alphabetSize = maxSymbolValue + 1;
+ int previousIs0 = 0;
- bitStream = 0;
- bitCount = 0;
/* Table Size */
bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount;
bitCount += 4;
@@ -210,48 +236,53 @@ static size_t FSE_writeNCount_generic (void* header, size_t headerBufferSize,
threshold = tableSize;
nbBits = tableLog+1;
- while (remaining>1) { /* stops at 1 */
- if (previous0) {
- unsigned start = charnum;
- while (!normalizedCounter[charnum]) charnum++;
- while (charnum >= start+24) {
+ while ((symbol < alphabetSize) && (remaining>1)) { /* stops at 1 */
+ if (previousIs0) {
+ unsigned start = symbol;
+ while ((symbol < alphabetSize) && !normalizedCounter[symbol]) symbol++;
+ if (symbol == alphabetSize) break; /* incorrect distribution */
+ while (symbol >= start+24) {
start+=24;
bitStream += 0xFFFFU << bitCount;
- if ((!writeIsSafe) && (out > oend-2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */
+ if ((!writeIsSafe) && (out > oend-2))
+ return ERROR(dstSize_tooSmall); /* Buffer overflow */
out[0] = (BYTE) bitStream;
out[1] = (BYTE)(bitStream>>8);
out+=2;
bitStream>>=16;
}
- while (charnum >= start+3) {
+ while (symbol >= start+3) {
start+=3;
bitStream += 3 << bitCount;
bitCount += 2;
}
- bitStream += (charnum-start) << bitCount;
+ bitStream += (symbol-start) << bitCount;
bitCount += 2;
if (bitCount>16) {
- if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */
+ if ((!writeIsSafe) && (out > oend - 2))
+ return ERROR(dstSize_tooSmall); /* Buffer overflow */
out[0] = (BYTE)bitStream;
out[1] = (BYTE)(bitStream>>8);
out += 2;
bitStream >>= 16;
bitCount -= 16;
} }
- { int count = normalizedCounter[charnum++];
- int const max = (2*threshold-1)-remaining;
+ { int count = normalizedCounter[symbol++];
+ int const max = (2*threshold-1) - remaining;
remaining -= count < 0 ? -count : count;
count++; /* +1 for extra accuracy */
- if (count>=threshold) count += max; /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
+ if (count>=threshold)
+ count += max; /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
bitStream += count << bitCount;
bitCount += nbBits;
bitCount -= (count<max);
- previous0 = (count==1);
+ previousIs0 = (count==1);
if (remaining<1) return ERROR(GENERIC);
while (remaining<threshold) { nbBits--; threshold>>=1; }
}
if (bitCount>16) {
- if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */
+ if ((!writeIsSafe) && (out > oend - 2))
+ return ERROR(dstSize_tooSmall); /* Buffer overflow */
out[0] = (BYTE)bitStream;
out[1] = (BYTE)(bitStream>>8);
out += 2;
@@ -259,19 +290,23 @@ static size_t FSE_writeNCount_generic (void* header, size_t headerBufferSize,
bitCount -= 16;
} }
+ if (remaining != 1)
+ return ERROR(GENERIC); /* incorrect normalized distribution */
+ assert(symbol <= alphabetSize);
+
/* flush remaining bitStream */
- if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */
+ if ((!writeIsSafe) && (out > oend - 2))
+ return ERROR(dstSize_tooSmall); /* Buffer overflow */
out[0] = (BYTE)bitStream;
out[1] = (BYTE)(bitStream>>8);
out+= (bitCount+7) /8;
- if (charnum > maxSymbolValue + 1) return ERROR(GENERIC);
-
return (out-ostart);
}
-size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+size_t FSE_writeNCount (void* buffer, size_t bufferSize,
+ const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
{
if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported */
if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported */
@@ -279,179 +314,13 @@ size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalized
if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);
- return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1);
-}
-
-
-
-/*-**************************************************************
-* Counting histogram
-****************************************************************/
-/*! FSE_count_simple
- This function counts byte values within `src`, and store the histogram into table `count`.
- It doesn't use any additional memory.
- But this function is unsafe : it doesn't check that all values within `src` can fit into `count`.
- For this reason, prefer using a table `count` with 256 elements.
- @return : count of most numerous element.
-*/
-size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
- const void* src, size_t srcSize)
-{
- const BYTE* ip = (const BYTE*)src;
- const BYTE* const end = ip + srcSize;
- unsigned maxSymbolValue = *maxSymbolValuePtr;
- unsigned max=0;
-
- memset(count, 0, (maxSymbolValue+1)*sizeof(*count));
- if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }
-
- while (ip<end) {
- assert(*ip <= maxSymbolValue);
- count[*ip++]++;
- }
-
- while (!count[maxSymbolValue]) maxSymbolValue--;
- *maxSymbolValuePtr = maxSymbolValue;
-
- { U32 s; for (s=0; s<=maxSymbolValue; s++) if (count[s] > max) max = count[s]; }
-
- return (size_t)max;
-}
-
-
-/* FSE_count_parallel_wksp() :
- * Same as FSE_count_parallel(), but using an externally provided scratch buffer.
- * `workSpace` size must be a minimum of `1024 * sizeof(unsigned)`.
- * @return : largest histogram frequency, or an error code (notably when histogram would be larger than *maxSymbolValuePtr). */
-static size_t FSE_count_parallel_wksp(
- unsigned* count, unsigned* maxSymbolValuePtr,
- const void* source, size_t sourceSize,
- unsigned checkMax, unsigned* const workSpace)
-{
- const BYTE* ip = (const BYTE*)source;
- const BYTE* const iend = ip+sourceSize;
- unsigned maxSymbolValue = *maxSymbolValuePtr;
- unsigned max=0;
- U32* const Counting1 = workSpace;
- U32* const Counting2 = Counting1 + 256;
- U32* const Counting3 = Counting2 + 256;
- U32* const Counting4 = Counting3 + 256;
-
- memset(workSpace, 0, 4*256*sizeof(unsigned));
-
- /* safety checks */
- if (!sourceSize) {
- memset(count, 0, maxSymbolValue + 1);
- *maxSymbolValuePtr = 0;
- return 0;
- }
- if (!maxSymbolValue) maxSymbolValue = 255; /* 0 == default */
-
- /* by stripes of 16 bytes */
- { U32 cached = MEM_read32(ip); ip += 4;
- while (ip < iend-15) {
- U32 c = cached; cached = MEM_read32(ip); ip += 4;
- Counting1[(BYTE) c ]++;
- Counting2[(BYTE)(c>>8) ]++;
- Counting3[(BYTE)(c>>16)]++;
- Counting4[ c>>24 ]++;
- c = cached; cached = MEM_read32(ip); ip += 4;
- Counting1[(BYTE) c ]++;
- Counting2[(BYTE)(c>>8) ]++;
- Counting3[(BYTE)(c>>16)]++;
- Counting4[ c>>24 ]++;
- c = cached; cached = MEM_read32(ip); ip += 4;
- Counting1[(BYTE) c ]++;
- Counting2[(BYTE)(c>>8) ]++;
- Counting3[(BYTE)(c>>16)]++;
- Counting4[ c>>24 ]++;
- c = cached; cached = MEM_read32(ip); ip += 4;
- Counting1[(BYTE) c ]++;
- Counting2[(BYTE)(c>>8) ]++;
- Counting3[(BYTE)(c>>16)]++;
- Counting4[ c>>24 ]++;
- }
- ip-=4;
- }
-
- /* finish last symbols */
- while (ip<iend) Counting1[*ip++]++;
-
- if (checkMax) { /* verify stats will fit into destination table */
- U32 s; for (s=255; s>maxSymbolValue; s--) {
- Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
- if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall);
- } }
-
- { U32 s;
- if (maxSymbolValue > 255) maxSymbolValue = 255;
- for (s=0; s<=maxSymbolValue; s++) {
- count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s];
- if (count[s] > max) max = count[s];
- } }
-
- while (!count[maxSymbolValue]) maxSymbolValue--;
- *maxSymbolValuePtr = maxSymbolValue;
- return (size_t)max;
-}
-
-/* FSE_countFast_wksp() :
- * Same as FSE_countFast(), but using an externally provided scratch buffer.
- * `workSpace` size must be table of >= `1024` unsigned */
-size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
- const void* source, size_t sourceSize,
- unsigned* workSpace)
-{
- if (sourceSize < 1500) /* heuristic threshold */
- return FSE_count_simple(count, maxSymbolValuePtr, source, sourceSize);
- return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 0, workSpace);
-}
-
-/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */
-size_t FSE_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
- const void* source, size_t sourceSize)
-{
- unsigned tmpCounters[1024];
- return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters);
-}
-
-/* FSE_count_wksp() :
- * Same as FSE_count(), but using an externally provided scratch buffer.
- * `workSpace` size must be table of >= `1024` unsigned */
-size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
- const void* source, size_t sourceSize, unsigned* workSpace)
-{
- if (*maxSymbolValuePtr < 255)
- return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 1, workSpace);
- *maxSymbolValuePtr = 255;
- return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace);
-}
-
-size_t FSE_count(unsigned* count, unsigned* maxSymbolValuePtr,
- const void* src, size_t srcSize)
-{
- unsigned tmpCounters[1024];
- return FSE_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters);
+ return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1 /* write in buffer is safe */);
}
-
/*-**************************************************************
* FSE Compression Code
****************************************************************/
-/*! FSE_sizeof_CTable() :
- FSE_CTable is a variable size structure which contains :
- `U16 tableLog;`
- `U16 maxSymbolValue;`
- `U16 nextStateNumber[1 << tableLog];` // This size is variable
- `FSE_symbolCompressionTransform symbolTT[maxSymbolValue+1];` // This size is variable
-Allocation is manual (C standard does not support variable-size structures).
-*/
-size_t FSE_sizeof_CTable (unsigned maxSymbolValue, unsigned tableLog)
-{
- if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
- return FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
-}
FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog)
{
@@ -466,7 +335,7 @@ void FSE_freeCTable (FSE_CTable* ct) { free(ct); }
/* provides the minimum logSize to safely represent a distribution */
static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
{
- U32 minBitsSrc = BIT_highbit32((U32)(srcSize - 1)) + 1;
+ U32 minBitsSrc = BIT_highbit32((U32)(srcSize)) + 1;
U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
assert(srcSize > 1); /* Not supported, RLE should be used instead */
@@ -529,6 +398,9 @@ static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count,
}
ToDistribute = (1 << tableLog) - distributed;
+ if (ToDistribute == 0)
+ return 0;
+
if ((total / ToDistribute) > lowOne) {
/* risk of rounding to zero */
lowOne = (U32)((total * 3) / (ToDistribute * 2));
@@ -629,11 +501,11 @@ size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
U32 s;
U32 nTotal = 0;
for (s=0; s<=maxSymbolValue; s++)
- printf("%3i: %4i \n", s, normalizedCounter[s]);
+ RAWLOG(2, "%3i: %4i \n", s, normalizedCounter[s]);
for (s=0; s<=maxSymbolValue; s++)
nTotal += abs(normalizedCounter[s]);
if (nTotal != (1U<<tableLog))
- printf("Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog);
+ RAWLOG(2, "Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog);
getchar();
}
#endif
@@ -786,7 +658,7 @@ size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t src
BYTE* op = ostart;
BYTE* const oend = ostart + dstSize;
- U32 count[FSE_MAX_SYMBOL_VALUE+1];
+ unsigned count[FSE_MAX_SYMBOL_VALUE+1];
S16 norm[FSE_MAX_SYMBOL_VALUE+1];
FSE_CTable* CTable = (FSE_CTable*)workSpace;
size_t const CTableSize = FSE_CTABLE_SIZE_U32(tableLog, maxSymbolValue);
@@ -800,7 +672,7 @@ size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t src
if (!tableLog) tableLog = FSE_DEFAULT_TABLELOG;
/* Scan input and build symbol stats */
- { CHECK_V_F(maxCount, FSE_count_wksp(count, &maxSymbolValue, src, srcSize, (unsigned*)scratchBuffer) );
+ { CHECK_V_F(maxCount, HIST_count_wksp(count, &maxSymbolValue, src, srcSize, scratchBuffer, scratchBufferSize) );
if (maxCount == srcSize) return 1; /* only a single symbol in src : rle */
if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */
if (maxCount < (srcSize >> 7)) return 0; /* Heuristic : not compressible enough */
@@ -835,7 +707,7 @@ typedef struct {
size_t FSE_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog)
{
fseWkspMax_t scratchBuffer;
- FSE_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)); /* compilation failures here means scratchBuffer is not large enough */
+ DEBUG_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)); /* compilation failures here means scratchBuffer is not large enough */
if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
return FSE_compress_wksp(dst, dstCapacity, src, srcSize, maxSymbolValue, tableLog, &scratchBuffer, sizeof(scratchBuffer));
}
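
Note on the refactored entry points above: FSE_compress2() keeps its return conventions (0 = not compressible, 1 = a single repeated symbol, otherwise the compressed size), while the histogram step now goes through HIST_count_wksp(). A minimal caller sketch, using only the FSE_compress2 / FSE_isError prototypes visible in this diff; buffer names and capacities are illustrative:

    #include "fse.h"   /* FSE_compress2, FSE_isError */

    /* Compress one block; maxSymbolValue=255 accepts any byte value,
     * tableLog=0 lets FSE pick FSE_DEFAULT_TABLELOG. */
    static size_t compress_block(void* dst, size_t dstCapacity,
                                 const void* src, size_t srcSize)
    {
        size_t const cSize = FSE_compress2(dst, dstCapacity, src, srcSize, 255, 0);
        if (FSE_isError(cSize)) return cSize;   /* propagate error code */
        if (cSize <= 1) {
            /* 0 : not compressible ; 1 : src is a single repeated symbol (rle) ;
             * the caller should emit the block raw or as RLE instead */
        }
        return cSize;
    }
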
diff --git a/thirdparty/zstd/compress/hist.c b/thirdparty/zstd/compress/hist.c
new file mode 100644
index 0000000000..45b7babc1e
--- /dev/null
+++ b/thirdparty/zstd/compress/hist.c
@@ -0,0 +1,203 @@
+/* ******************************************************************
+ hist : Histogram functions
+ part of Finite State Entropy project
+ Copyright (C) 2013-present, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+/* --- dependencies --- */
+#include "mem.h" /* U32, BYTE, etc. */
+#include "debug.h" /* assert, DEBUGLOG */
+#include "error_private.h" /* ERROR */
+#include "hist.h"
+
+
+/* --- Error management --- */
+unsigned HIST_isError(size_t code) { return ERR_isError(code); }
+
+/*-**************************************************************
+ * Histogram functions
+ ****************************************************************/
+unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize)
+{
+ const BYTE* ip = (const BYTE*)src;
+ const BYTE* const end = ip + srcSize;
+ unsigned maxSymbolValue = *maxSymbolValuePtr;
+ unsigned largestCount=0;
+
+ memset(count, 0, (maxSymbolValue+1) * sizeof(*count));
+ if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }
+
+ while (ip<end) {
+ assert(*ip <= maxSymbolValue);
+ count[*ip++]++;
+ }
+
+ while (!count[maxSymbolValue]) maxSymbolValue--;
+ *maxSymbolValuePtr = maxSymbolValue;
+
+ { U32 s;
+ for (s=0; s<=maxSymbolValue; s++)
+ if (count[s] > largestCount) largestCount = count[s];
+ }
+
+ return largestCount;
+}
+
+typedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e;
+
+/* HIST_count_parallel_wksp() :
+ * store histogram into 4 intermediate tables, recombined at the end.
+ * this design makes better use of OoO cpus,
+ * and is noticeably faster when some values are heavily repeated.
+ * But it needs some additional workspace for intermediate tables.
+ * `workSpace` size must be a table of size >= HIST_WKSP_SIZE_U32.
+ * @return : largest histogram frequency,
+ * or an error code (notably when histogram would be larger than *maxSymbolValuePtr). */
+static size_t HIST_count_parallel_wksp(
+ unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* source, size_t sourceSize,
+ HIST_checkInput_e check,
+ U32* const workSpace)
+{
+ const BYTE* ip = (const BYTE*)source;
+ const BYTE* const iend = ip+sourceSize;
+ unsigned maxSymbolValue = *maxSymbolValuePtr;
+ unsigned max=0;
+ U32* const Counting1 = workSpace;
+ U32* const Counting2 = Counting1 + 256;
+ U32* const Counting3 = Counting2 + 256;
+ U32* const Counting4 = Counting3 + 256;
+
+ memset(workSpace, 0, 4*256*sizeof(unsigned));
+
+ /* safety checks */
+ if (!sourceSize) {
+ memset(count, 0, maxSymbolValue + 1);
+ *maxSymbolValuePtr = 0;
+ return 0;
+ }
+ if (!maxSymbolValue) maxSymbolValue = 255; /* 0 == default */
+
+ /* by stripes of 16 bytes */
+ { U32 cached = MEM_read32(ip); ip += 4;
+ while (ip < iend-15) {
+ U32 c = cached; cached = MEM_read32(ip); ip += 4;
+ Counting1[(BYTE) c ]++;
+ Counting2[(BYTE)(c>>8) ]++;
+ Counting3[(BYTE)(c>>16)]++;
+ Counting4[ c>>24 ]++;
+ c = cached; cached = MEM_read32(ip); ip += 4;
+ Counting1[(BYTE) c ]++;
+ Counting2[(BYTE)(c>>8) ]++;
+ Counting3[(BYTE)(c>>16)]++;
+ Counting4[ c>>24 ]++;
+ c = cached; cached = MEM_read32(ip); ip += 4;
+ Counting1[(BYTE) c ]++;
+ Counting2[(BYTE)(c>>8) ]++;
+ Counting3[(BYTE)(c>>16)]++;
+ Counting4[ c>>24 ]++;
+ c = cached; cached = MEM_read32(ip); ip += 4;
+ Counting1[(BYTE) c ]++;
+ Counting2[(BYTE)(c>>8) ]++;
+ Counting3[(BYTE)(c>>16)]++;
+ Counting4[ c>>24 ]++;
+ }
+ ip-=4;
+ }
+
+ /* finish last symbols */
+ while (ip<iend) Counting1[*ip++]++;
+
+ if (check) { /* verify stats will fit into destination table */
+ U32 s; for (s=255; s>maxSymbolValue; s--) {
+ Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
+ if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall);
+ } }
+
+ { U32 s;
+ if (maxSymbolValue > 255) maxSymbolValue = 255;
+ for (s=0; s<=maxSymbolValue; s++) {
+ count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s];
+ if (count[s] > max) max = count[s];
+ } }
+
+ while (!count[maxSymbolValue]) maxSymbolValue--;
+ *maxSymbolValuePtr = maxSymbolValue;
+ return (size_t)max;
+}
+
+/* HIST_countFast_wksp() :
+ * Same as HIST_countFast(), but using an externally provided scratch buffer.
+ * `workSpace` is a writable buffer which must be 4-bytes aligned,
+ * `workSpaceSize` must be >= HIST_WKSP_SIZE
+ */
+size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* source, size_t sourceSize,
+ void* workSpace, size_t workSpaceSize)
+{
+ if (sourceSize < 1500) /* heuristic threshold */
+ return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize);
+ if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
+ if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
+ return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace);
+}
+
+/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */
+size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* source, size_t sourceSize)
+{
+ unsigned tmpCounters[HIST_WKSP_SIZE_U32];
+ return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters, sizeof(tmpCounters));
+}
+
+/* HIST_count_wksp() :
+ * Same as HIST_count(), but using an externally provided scratch buffer.
+ * `workSpace` size must be table of >= HIST_WKSP_SIZE_U32 unsigned */
+size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* source, size_t sourceSize,
+ void* workSpace, size_t workSpaceSize)
+{
+ if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
+ if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
+ if (*maxSymbolValuePtr < 255)
+ return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace);
+ *maxSymbolValuePtr = 255;
+ return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize);
+}
+
+size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize)
+{
+ unsigned tmpCounters[HIST_WKSP_SIZE_U32];
+ return HIST_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters, sizeof(tmpCounters));
+}
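
The HIST_count_parallel_wksp() comment above describes the design: four partial histograms updated in an interleaved loop, so an out-of-order CPU can overlap increments to heavily repeated values, then merged at the end. A stripped-down, self-contained sketch of that idea (plain C, byte-at-a-time instead of the 16-byte stripes used by the real code):

    #include <stddef.h>

    /* Count byte values using 4 partial tables to break dependency chains
     * on repeated values, then merge. Returns the largest frequency. */
    static unsigned count4(unsigned count[256], const unsigned char* src, size_t srcSize)
    {
        unsigned c1[256] = {0}, c2[256] = {0}, c3[256] = {0}, c4[256] = {0};
        unsigned max = 0;
        size_t i;
        for (i = 0; i + 4 <= srcSize; i += 4) {   /* 4 independent increment streams */
            c1[src[i+0]]++;
            c2[src[i+1]]++;
            c3[src[i+2]]++;
            c4[src[i+3]]++;
        }
        for (; i < srcSize; i++) c1[src[i]]++;    /* tail */
        for (i = 0; i < 256; i++) {
            count[i] = c1[i] + c2[i] + c3[i] + c4[i];
            if (count[i] > max) max = count[i];
        }
        return max;
    }
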
diff --git a/thirdparty/zstd/compress/hist.h b/thirdparty/zstd/compress/hist.h
new file mode 100644
index 0000000000..8b389358dc
--- /dev/null
+++ b/thirdparty/zstd/compress/hist.h
@@ -0,0 +1,95 @@
+/* ******************************************************************
+ hist : Histogram functions
+ part of Finite State Entropy project
+ Copyright (C) 2013-present, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+/* --- dependencies --- */
+#include <stddef.h> /* size_t */
+
+
+/* --- simple histogram functions --- */
+
+/*! HIST_count():
+ * Provides the precise count of each byte within a table 'count'.
+ * 'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1).
+ * Updates *maxSymbolValuePtr with actual largest symbol value detected.
+ * @return : count of the most frequent symbol (which isn't identified).
+ * or an error code, which can be tested using HIST_isError().
+ * note : if return == srcSize, there is only one symbol.
+ */
+size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize);
+
+unsigned HIST_isError(size_t code); /**< tells if a return value is an error code */
+
+
+/* --- advanced histogram functions --- */
+
+#define HIST_WKSP_SIZE_U32 1024
+#define HIST_WKSP_SIZE (HIST_WKSP_SIZE_U32 * sizeof(unsigned))
+/** HIST_count_wksp() :
+ * Same as HIST_count(), but using an externally provided scratch buffer.
+ * Benefit is this function will use very little stack space.
+ * `workSpace` is a writable buffer which must be 4-bytes aligned,
+ * `workSpaceSize` must be >= HIST_WKSP_SIZE
+ */
+size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t workSpaceSize);
+
+/** HIST_countFast() :
+ * same as HIST_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr.
+ * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr`
+ */
+size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize);
+
+/** HIST_countFast_wksp() :
+ * Same as HIST_countFast(), but using an externally provided scratch buffer.
+ * `workSpace` is a writable buffer which must be 4-bytes aligned,
+ * `workSpaceSize` must be >= HIST_WKSP_SIZE
+ */
+size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t workSpaceSize);
+
+/*! HIST_count_simple() :
+ * Same as HIST_countFast(), this function is unsafe,
+ * and will segfault if any value within `src` is `> *maxSymbolValuePtr`.
+ * It is also a bit slower for large inputs.
+ * However, it does not need any additional memory (not even on stack).
+ * @return : count of the most frequent symbol.
+ * Note this function doesn't produce any error (i.e. it must succeed).
+ */
+unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize);
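
A minimal usage sketch for the workspace variant declared above, relying only on the prototypes and macros from this header (HIST_WKSP_SIZE_U32, HIST_count_wksp, HIST_isError); the buffer names are illustrative:

    #include "hist.h"

    static void histogram_example(const void* src, size_t srcSize)
    {
        unsigned count[256];
        unsigned maxSymbolValue = 255;         /* accept all byte values */
        unsigned wksp[HIST_WKSP_SIZE_U32];     /* scratch buffer, naturally 4-byte aligned */
        size_t const largest = HIST_count_wksp(count, &maxSymbolValue,
                                               src, srcSize,
                                               wksp, sizeof(wksp));
        if (HIST_isError(largest)) return;     /* e.g. workSpace_tooSmall or misaligned workspace */
        /* count[0..maxSymbolValue] now holds the histogram;
         * largest == srcSize means src contains a single repeated symbol. */
        (void)count;
    }
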
diff --git a/thirdparty/zstd/compress/huf_compress.c b/thirdparty/zstd/compress/huf_compress.c
index 83230b415f..f074f1e0a9 100644
--- a/thirdparty/zstd/compress/huf_compress.c
+++ b/thirdparty/zstd/compress/huf_compress.c
@@ -45,8 +45,9 @@
****************************************************************/
#include <string.h> /* memcpy, memset */
#include <stdio.h> /* printf (debug) */
-#include "bitstream.h"
#include "compiler.h"
+#include "bitstream.h"
+#include "hist.h"
#define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */
#include "fse.h" /* header compression */
#define HUF_STATIC_LINKING_ONLY
@@ -58,7 +59,7 @@
* Error Management
****************************************************************/
#define HUF_isError ERR_isError
-#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
+#define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */
#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
#define CHECK_F(f) { CHECK_V_F(_var_err__, f); }
@@ -81,28 +82,28 @@ unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxS
* Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
*/
#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
-size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize)
+static size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize)
{
BYTE* const ostart = (BYTE*) dst;
BYTE* op = ostart;
BYTE* const oend = ostart + dstSize;
- U32 maxSymbolValue = HUF_TABLELOG_MAX;
+ unsigned maxSymbolValue = HUF_TABLELOG_MAX;
U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
BYTE scratchBuffer[1<<MAX_FSE_TABLELOG_FOR_HUFF_HEADER];
- U32 count[HUF_TABLELOG_MAX+1];
+ unsigned count[HUF_TABLELOG_MAX+1];
S16 norm[HUF_TABLELOG_MAX+1];
/* init conditions */
if (wtSize <= 1) return 0; /* Not compressible */
/* Scan input and build symbol stats */
- { CHECK_V_F(maxCount, FSE_count_simple(count, &maxSymbolValue, weightTable, wtSize) );
+ { unsigned const maxCount = HIST_count_simple(count, &maxSymbolValue, weightTable, wtSize); /* never fails */
if (maxCount == wtSize) return 1; /* only a single symbol in src : rle */
- if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */
+ if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */
}
tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
@@ -133,7 +134,7 @@ struct HUF_CElt_s {
`CTable` : Huffman tree to save, using huf representation.
@return : size of saved CTable */
size_t HUF_writeCTable (void* dst, size_t maxDstSize,
- const HUF_CElt* CTable, U32 maxSymbolValue, U32 huffLog)
+ const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog)
{
BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */
BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
@@ -168,7 +169,7 @@ size_t HUF_writeCTable (void* dst, size_t maxDstSize,
}
-size_t HUF_readCTable (HUF_CElt* CTable, U32* maxSymbolValuePtr, const void* src, size_t srcSize)
+size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize)
{
BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; /* init not required, even though some static analyzer may complain */
U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */
@@ -216,6 +217,13 @@ size_t HUF_readCTable (HUF_CElt* CTable, U32* maxSymbolValuePtr, const void* src
return readSize;
}
+U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue)
+{
+ const HUF_CElt* table = (const HUF_CElt*)symbolTable;
+ assert(symbolValue <= HUF_SYMBOLVALUE_MAX);
+ return table[symbolValue].nbBits;
+}
+
typedef struct nodeElt_s {
U32 count;
@@ -307,7 +315,7 @@ typedef struct {
U32 current;
} rankPos;
-static void HUF_sort(nodeElt* huffNode, const U32* count, U32 maxSymbolValue)
+static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue)
{
rankPos rank[32];
U32 n;
@@ -339,7 +347,7 @@ static void HUF_sort(nodeElt* huffNode, const U32* count, U32 maxSymbolValue)
*/
#define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];
-size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
+size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
{
nodeElt* const huffNode0 = (nodeElt*)workSpace;
nodeElt* const huffNode = huffNode0+1;
@@ -413,7 +421,7 @@ size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValu
* @return : maxNbBits
* Note : count is used before tree is written, so they can safely overlap
*/
-size_t HUF_buildCTable (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits)
+size_t HUF_buildCTable (HUF_CElt* tree, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits)
{
huffNodeTable nodeTable;
return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, nodeTable, sizeof(nodeTable));
@@ -602,13 +610,14 @@ size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, si
return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
}
+typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e;
static size_t HUF_compressCTable_internal(
BYTE* const ostart, BYTE* op, BYTE* const oend,
const void* src, size_t srcSize,
- unsigned singleStream, const HUF_CElt* CTable, const int bmi2)
+ HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2)
{
- size_t const cSize = singleStream ?
+ size_t const cSize = (nbStreams==HUF_singleStream) ?
HUF_compress1X_usingCTable_internal(op, oend - op, src, srcSize, CTable, bmi2) :
HUF_compress4X_usingCTable_internal(op, oend - op, src, srcSize, CTable, bmi2);
if (HUF_isError(cSize)) { return cSize; }
@@ -620,21 +629,21 @@ static size_t HUF_compressCTable_internal(
}
typedef struct {
- U32 count[HUF_SYMBOLVALUE_MAX + 1];
+ unsigned count[HUF_SYMBOLVALUE_MAX + 1];
HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1];
huffNodeTable nodeTable;
} HUF_compress_tables_t;
/* HUF_compress_internal() :
* `workSpace` must a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
-static size_t HUF_compress_internal (
- void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- unsigned maxSymbolValue, unsigned huffLog,
- unsigned singleStream,
- void* workSpace, size_t wkspSize,
- HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
- const int bmi2)
+static size_t
+HUF_compress_internal (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned huffLog,
+ HUF_nbStreams_e nbStreams,
+ void* workSpace, size_t wkspSize,
+ HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
+ const int bmi2)
{
HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace;
BYTE* const ostart = (BYTE*)dst;
@@ -643,7 +652,7 @@ static size_t HUF_compress_internal (
/* checks & inits */
if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
- if (wkspSize < sizeof(*table)) return ERROR(workSpace_tooSmall);
+ if (wkspSize < HUF_WORKSPACE_SIZE) return ERROR(workSpace_tooSmall);
if (!srcSize) return 0; /* Uncompressed */
if (!dstSize) return 0; /* cannot fit anything within dst budget */
if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */
@@ -656,13 +665,13 @@ static size_t HUF_compress_internal (
if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
return HUF_compressCTable_internal(ostart, op, oend,
src, srcSize,
- singleStream, oldHufTable, bmi2);
+ nbStreams, oldHufTable, bmi2);
}
/* Scan input and build symbol stats */
- { CHECK_V_F(largest, FSE_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->count) );
+ { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, workSpace, wkspSize) );
if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */
- if (largest <= (srcSize >> 7)+1) return 0; /* heuristic : probably not compressible enough */
+ if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */
}
/* Check validity of previous table */
@@ -675,14 +684,15 @@ static size_t HUF_compress_internal (
if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
return HUF_compressCTable_internal(ostart, op, oend,
src, srcSize,
- singleStream, oldHufTable, bmi2);
+ nbStreams, oldHufTable, bmi2);
}
/* Build Huffman Tree */
huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
- { CHECK_V_F(maxBits, HUF_buildCTable_wksp(table->CTable, table->count,
- maxSymbolValue, huffLog,
- table->nodeTable, sizeof(table->nodeTable)) );
+ { size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
+ maxSymbolValue, huffLog,
+ table->nodeTable, sizeof(table->nodeTable));
+ CHECK_F(maxBits);
huffLog = (U32)maxBits;
/* Zero unused symbols in CTable, so we can check it for validity */
memset(table->CTable + (maxSymbolValue + 1), 0,
@@ -698,7 +708,7 @@ static size_t HUF_compress_internal (
if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
return HUF_compressCTable_internal(ostart, op, oend,
src, srcSize,
- singleStream, oldHufTable, bmi2);
+ nbStreams, oldHufTable, bmi2);
} }
/* Use the new huffman table */
@@ -710,7 +720,7 @@ static size_t HUF_compress_internal (
}
return HUF_compressCTable_internal(ostart, op, oend,
src, srcSize,
- singleStream, table->CTable, bmi2);
+ nbStreams, table->CTable, bmi2);
}
@@ -720,7 +730,7 @@ size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
void* workSpace, size_t wkspSize)
{
return HUF_compress_internal(dst, dstSize, src, srcSize,
- maxSymbolValue, huffLog, 1 /*single stream*/,
+ maxSymbolValue, huffLog, HUF_singleStream,
workSpace, wkspSize,
NULL, NULL, 0, 0 /*bmi2*/);
}
@@ -732,7 +742,7 @@ size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
{
return HUF_compress_internal(dst, dstSize, src, srcSize,
- maxSymbolValue, huffLog, 1 /*single stream*/,
+ maxSymbolValue, huffLog, HUF_singleStream,
workSpace, wkspSize, hufTable,
repeat, preferRepeat, bmi2);
}
@@ -754,7 +764,7 @@ size_t HUF_compress4X_wksp (void* dst, size_t dstSize,
void* workSpace, size_t wkspSize)
{
return HUF_compress_internal(dst, dstSize, src, srcSize,
- maxSymbolValue, huffLog, 0 /*4 streams*/,
+ maxSymbolValue, huffLog, HUF_fourStreams,
workSpace, wkspSize,
NULL, NULL, 0, 0 /*bmi2*/);
}
@@ -769,7 +779,7 @@ size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
{
return HUF_compress_internal(dst, dstSize, src, srcSize,
- maxSymbolValue, huffLog, 0 /* 4 streams */,
+ maxSymbolValue, huffLog, HUF_fourStreams,
workSpace, wkspSize,
hufTable, repeat, preferRepeat, bmi2);
}
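
For reference, a minimal caller sketch for the 4-stream workspace entry point whose internals change above. It assumes the public huf.h prototype of HUF_compress4X_wksp and the HUF_WORKSPACE_SIZE_U32 constant referenced in the HUF_compress_internal() comment; the 255/11 limits (HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT) are the usual defaults, not something this diff defines:

    #include "huf.h"   /* HUF_compress4X_wksp, HUF_isError */

    static size_t huffman_block(void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize)
    {
        unsigned wksp[HUF_WORKSPACE_SIZE_U32];  /* >= HUF_WORKSPACE_SIZE bytes, 4-byte aligned */
        size_t const cSize = HUF_compress4X_wksp(dst, dstCapacity, src, srcSize,
                                                 255 /* maxSymbolValue */,
                                                 11  /* huffLog */,
                                                 wksp, sizeof(wksp));
        if (HUF_isError(cSize)) return cSize;
        /* 0 : not compressible ; 1 : single repeated symbol (rle) ; otherwise compressed size */
        return cSize;
    }
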
diff --git a/thirdparty/zstd/compress/zstd_compress.c b/thirdparty/zstd/compress/zstd_compress.c
index 2aa26da4cd..c2c9d3bc55 100644
--- a/thirdparty/zstd/compress/zstd_compress.c
+++ b/thirdparty/zstd/compress/zstd_compress.c
@@ -8,21 +8,14 @@
* You may select, at your option, one of the above-listed licenses.
*/
-
-/*-*************************************
-* Tuning parameters
-***************************************/
-#ifndef ZSTD_CLEVEL_DEFAULT
-# define ZSTD_CLEVEL_DEFAULT 3
-#endif
-
-
/*-*************************************
* Dependencies
***************************************/
+#include <limits.h> /* INT_MAX */
#include <string.h> /* memset */
#include "cpu.h"
#include "mem.h"
+#include "hist.h" /* HIST_countFast_wksp */
#define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */
#include "fse.h"
#define HUF_STATIC_LINKING_ONLY
@@ -54,7 +47,6 @@ struct ZSTD_CDict_s {
size_t workspaceSize;
ZSTD_matchState_t matchState;
ZSTD_compressedBlockState_t cBlockState;
- ZSTD_compressionParameters cParams;
ZSTD_customMem customMem;
U32 dictID;
}; /* typedef'd to ZSTD_CDict within "zstd.h" */
@@ -64,17 +56,26 @@ ZSTD_CCtx* ZSTD_createCCtx(void)
return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
}
+static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
+{
+ assert(cctx != NULL);
+ memset(cctx, 0, sizeof(*cctx));
+ cctx->customMem = memManager;
+ cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
+ { size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);
+ assert(!ZSTD_isError(err));
+ (void)err;
+ }
+}
+
ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
{
ZSTD_STATIC_ASSERT(zcss_init==0);
ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
- { ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_calloc(sizeof(ZSTD_CCtx), customMem);
+ { ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_malloc(sizeof(ZSTD_CCtx), customMem);
if (!cctx) return NULL;
- cctx->customMem = customMem;
- cctx->requestedParams.compressionLevel = ZSTD_CLEVEL_DEFAULT;
- cctx->requestedParams.fParams.contentSizeFlag = 1;
- cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
+ ZSTD_initCCtx(cctx, customMem);
return cctx;
}
}
@@ -102,17 +103,24 @@ ZSTD_CCtx* ZSTD_initStaticCCtx(void *workspace, size_t workspaceSize)
return cctx;
}
-size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
+static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
{
- if (cctx==NULL) return 0; /* support free on NULL */
- if (cctx->staticSize) return ERROR(memory_allocation); /* not compatible with static CCtx */
+ assert(cctx != NULL);
+ assert(cctx->staticSize == 0);
ZSTD_free(cctx->workSpace, cctx->customMem); cctx->workSpace = NULL;
ZSTD_freeCDict(cctx->cdictLocal); cctx->cdictLocal = NULL;
#ifdef ZSTD_MULTITHREAD
ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL;
#endif
+}
+
+size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
+{
+ if (cctx==NULL) return 0; /* support free on NULL */
+ if (cctx->staticSize) return ERROR(memory_allocation); /* not compatible with static CCtx */
+ ZSTD_freeCCtxContent(cctx);
ZSTD_free(cctx, cctx->customMem);
- return 0; /* reserved as a potential error code in the future */
+ return 0;
}
@@ -121,7 +129,7 @@ static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
#ifdef ZSTD_MULTITHREAD
return ZSTDMT_sizeof_CCtx(cctx->mtctx);
#else
- (void) cctx;
+ (void)cctx;
return 0;
#endif
}
@@ -143,21 +151,6 @@ size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
/* private API call, for dictBuilder only */
const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
-ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
- const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize)
-{
- ZSTD_compressionParameters cParams = ZSTD_getCParams(CCtxParams->compressionLevel, srcSizeHint, dictSize);
- if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
- if (CCtxParams->cParams.windowLog) cParams.windowLog = CCtxParams->cParams.windowLog;
- if (CCtxParams->cParams.hashLog) cParams.hashLog = CCtxParams->cParams.hashLog;
- if (CCtxParams->cParams.chainLog) cParams.chainLog = CCtxParams->cParams.chainLog;
- if (CCtxParams->cParams.searchLog) cParams.searchLog = CCtxParams->cParams.searchLog;
- if (CCtxParams->cParams.searchLength) cParams.searchLength = CCtxParams->cParams.searchLength;
- if (CCtxParams->cParams.targetLength) cParams.targetLength = CCtxParams->cParams.targetLength;
- if (CCtxParams->cParams.strategy) cParams.strategy = CCtxParams->cParams.strategy;
- return cParams;
-}
-
static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
ZSTD_compressionParameters cParams)
{
@@ -234,9 +227,160 @@ static ZSTD_CCtx_params ZSTD_assignParamsToCCtxParams(
return ret;
}
-#define CLAMPCHECK(val,min,max) { \
- if (((val)<(min)) | ((val)>(max))) { \
- return ERROR(parameter_outOfBound); \
+ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
+{
+ ZSTD_bounds bounds = { 0, 0, 0 };
+
+ switch(param)
+ {
+ case ZSTD_c_compressionLevel:
+ bounds.lowerBound = ZSTD_minCLevel();
+ bounds.upperBound = ZSTD_maxCLevel();
+ return bounds;
+
+ case ZSTD_c_windowLog:
+ bounds.lowerBound = ZSTD_WINDOWLOG_MIN;
+ bounds.upperBound = ZSTD_WINDOWLOG_MAX;
+ return bounds;
+
+ case ZSTD_c_hashLog:
+ bounds.lowerBound = ZSTD_HASHLOG_MIN;
+ bounds.upperBound = ZSTD_HASHLOG_MAX;
+ return bounds;
+
+ case ZSTD_c_chainLog:
+ bounds.lowerBound = ZSTD_CHAINLOG_MIN;
+ bounds.upperBound = ZSTD_CHAINLOG_MAX;
+ return bounds;
+
+ case ZSTD_c_searchLog:
+ bounds.lowerBound = ZSTD_SEARCHLOG_MIN;
+ bounds.upperBound = ZSTD_SEARCHLOG_MAX;
+ return bounds;
+
+ case ZSTD_c_minMatch:
+ bounds.lowerBound = ZSTD_MINMATCH_MIN;
+ bounds.upperBound = ZSTD_MINMATCH_MAX;
+ return bounds;
+
+ case ZSTD_c_targetLength:
+ bounds.lowerBound = ZSTD_TARGETLENGTH_MIN;
+ bounds.upperBound = ZSTD_TARGETLENGTH_MAX;
+ return bounds;
+
+ case ZSTD_c_strategy:
+ bounds.lowerBound = ZSTD_STRATEGY_MIN;
+ bounds.upperBound = ZSTD_STRATEGY_MAX;
+ return bounds;
+
+ case ZSTD_c_contentSizeFlag:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_checksumFlag:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_dictIDFlag:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_nbWorkers:
+ bounds.lowerBound = 0;
+#ifdef ZSTD_MULTITHREAD
+ bounds.upperBound = ZSTDMT_NBWORKERS_MAX;
+#else
+ bounds.upperBound = 0;
+#endif
+ return bounds;
+
+ case ZSTD_c_jobSize:
+ bounds.lowerBound = 0;
+#ifdef ZSTD_MULTITHREAD
+ bounds.upperBound = ZSTDMT_JOBSIZE_MAX;
+#else
+ bounds.upperBound = 0;
+#endif
+ return bounds;
+
+ case ZSTD_c_overlapLog:
+ bounds.lowerBound = ZSTD_OVERLAPLOG_MIN;
+ bounds.upperBound = ZSTD_OVERLAPLOG_MAX;
+ return bounds;
+
+ case ZSTD_c_enableLongDistanceMatching:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_ldmHashLog:
+ bounds.lowerBound = ZSTD_LDM_HASHLOG_MIN;
+ bounds.upperBound = ZSTD_LDM_HASHLOG_MAX;
+ return bounds;
+
+ case ZSTD_c_ldmMinMatch:
+ bounds.lowerBound = ZSTD_LDM_MINMATCH_MIN;
+ bounds.upperBound = ZSTD_LDM_MINMATCH_MAX;
+ return bounds;
+
+ case ZSTD_c_ldmBucketSizeLog:
+ bounds.lowerBound = ZSTD_LDM_BUCKETSIZELOG_MIN;
+ bounds.upperBound = ZSTD_LDM_BUCKETSIZELOG_MAX;
+ return bounds;
+
+ case ZSTD_c_ldmHashRateLog:
+ bounds.lowerBound = ZSTD_LDM_HASHRATELOG_MIN;
+ bounds.upperBound = ZSTD_LDM_HASHRATELOG_MAX;
+ return bounds;
+
+ /* experimental parameters */
+ case ZSTD_c_rsyncable:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_forceMaxWindow :
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_format:
+ ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
+ bounds.lowerBound = ZSTD_f_zstd1;
+ bounds.upperBound = ZSTD_f_zstd1_magicless; /* note : how to ensure at compile time that this is the highest value enum ? */
+ return bounds;
+
+ case ZSTD_c_forceAttachDict:
+ ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceCopy);
+ bounds.lowerBound = ZSTD_dictDefaultAttach;
+ bounds.upperBound = ZSTD_dictForceCopy; /* note : how to ensure at compile time that this is the highest value enum ? */
+ return bounds;
+
+ default:
+ { ZSTD_bounds const boundError = { ERROR(parameter_unsupported), 0, 0 };
+ return boundError;
+ }
+ }
+}
+
+/* ZSTD_cParam_withinBounds:
+ * @return 1 if value is within cParam bounds,
+ * 0 otherwise */
+static int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
+{
+ ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
+ if (ZSTD_isError(bounds.error)) return 0;
+ if (value < bounds.lowerBound) return 0;
+ if (value > bounds.upperBound) return 0;
+ return 1;
+}
+
+#define BOUNDCHECK(cParam, val) { \
+ if (!ZSTD_cParam_withinBounds(cParam,val)) { \
+ return ERROR(parameter_outOfBound); \
} }
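
The ZSTD_cParam_getBounds() table above backs the new BOUNDCHECK macro, and the ZSTD_c_* enum values replace the old ZSTD_p_* names. A small caller sketch using the ZSTD_bounds fields shown in this hunk; it assumes ZSTD_cParam_getBounds and ZSTD_CCtx_setParameter are exposed through zstd.h (in this zstd version the advanced API is still staged behind ZSTD_STATIC_LINKING_ONLY):

    #define ZSTD_STATIC_LINKING_ONLY   /* advanced/experimental API in this version */
    #include "zstd.h"

    static size_t set_clamped_level(ZSTD_CCtx* cctx, int wantedLevel)
    {
        /* query the valid range instead of hard-coding it */
        ZSTD_bounds const b = ZSTD_cParam_getBounds(ZSTD_c_compressionLevel);
        if (ZSTD_isError(b.error)) return b.error;
        if (wantedLevel < b.lowerBound) wantedLevel = b.lowerBound;
        if (wantedLevel > b.upperBound) wantedLevel = b.upperBound;
        return ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, wantedLevel);
    }
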
@@ -244,38 +388,39 @@ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
{
switch(param)
{
- case ZSTD_p_compressionLevel:
- case ZSTD_p_hashLog:
- case ZSTD_p_chainLog:
- case ZSTD_p_searchLog:
- case ZSTD_p_minMatch:
- case ZSTD_p_targetLength:
- case ZSTD_p_compressionStrategy:
- case ZSTD_p_compressLiterals:
+ case ZSTD_c_compressionLevel:
+ case ZSTD_c_hashLog:
+ case ZSTD_c_chainLog:
+ case ZSTD_c_searchLog:
+ case ZSTD_c_minMatch:
+ case ZSTD_c_targetLength:
+ case ZSTD_c_strategy:
return 1;
- case ZSTD_p_format:
- case ZSTD_p_windowLog:
- case ZSTD_p_contentSizeFlag:
- case ZSTD_p_checksumFlag:
- case ZSTD_p_dictIDFlag:
- case ZSTD_p_forceMaxWindow :
- case ZSTD_p_nbWorkers:
- case ZSTD_p_jobSize:
- case ZSTD_p_overlapSizeLog:
- case ZSTD_p_enableLongDistanceMatching:
- case ZSTD_p_ldmHashLog:
- case ZSTD_p_ldmMinMatch:
- case ZSTD_p_ldmBucketSizeLog:
- case ZSTD_p_ldmHashEveryLog:
+ case ZSTD_c_format:
+ case ZSTD_c_windowLog:
+ case ZSTD_c_contentSizeFlag:
+ case ZSTD_c_checksumFlag:
+ case ZSTD_c_dictIDFlag:
+ case ZSTD_c_forceMaxWindow :
+ case ZSTD_c_nbWorkers:
+ case ZSTD_c_jobSize:
+ case ZSTD_c_overlapLog:
+ case ZSTD_c_rsyncable:
+ case ZSTD_c_enableLongDistanceMatching:
+ case ZSTD_c_ldmHashLog:
+ case ZSTD_c_ldmMinMatch:
+ case ZSTD_c_ldmBucketSizeLog:
+ case ZSTD_c_ldmHashRateLog:
+ case ZSTD_c_forceAttachDict:
default:
return 0;
}
}
-size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned value)
+size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
{
- DEBUGLOG(4, "ZSTD_CCtx_setParameter (%u, %u)", (U32)param, value);
+ DEBUGLOG(4, "ZSTD_CCtx_setParameter (%i, %i)", (int)param, value);
if (cctx->streamStage != zcss_init) {
if (ZSTD_isUpdateAuthorized(param)) {
cctx->cParamsChanged = 1;
@@ -285,49 +430,52 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned v
switch(param)
{
- case ZSTD_p_format :
+ case ZSTD_c_format :
return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
- case ZSTD_p_compressionLevel:
+ case ZSTD_c_compressionLevel:
if (cctx->cdict) return ERROR(stage_wrong);
return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
- case ZSTD_p_windowLog:
- case ZSTD_p_hashLog:
- case ZSTD_p_chainLog:
- case ZSTD_p_searchLog:
- case ZSTD_p_minMatch:
- case ZSTD_p_targetLength:
- case ZSTD_p_compressionStrategy:
+ case ZSTD_c_windowLog:
+ case ZSTD_c_hashLog:
+ case ZSTD_c_chainLog:
+ case ZSTD_c_searchLog:
+ case ZSTD_c_minMatch:
+ case ZSTD_c_targetLength:
+ case ZSTD_c_strategy:
if (cctx->cdict) return ERROR(stage_wrong);
return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
- case ZSTD_p_compressLiterals:
- case ZSTD_p_contentSizeFlag:
- case ZSTD_p_checksumFlag:
- case ZSTD_p_dictIDFlag:
+ case ZSTD_c_contentSizeFlag:
+ case ZSTD_c_checksumFlag:
+ case ZSTD_c_dictIDFlag:
return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
- case ZSTD_p_forceMaxWindow : /* Force back-references to remain < windowSize,
+ case ZSTD_c_forceMaxWindow : /* Force back-references to remain < windowSize,
* even when referencing into Dictionary content.
* default : 0 when using a CDict, 1 when using a Prefix */
return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
- case ZSTD_p_nbWorkers:
- if ((value>0) && cctx->staticSize) {
+ case ZSTD_c_forceAttachDict:
+ return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
+
+ case ZSTD_c_nbWorkers:
+ if ((value!=0) && cctx->staticSize) {
return ERROR(parameter_unsupported); /* MT not compatible with static alloc */
}
return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
- case ZSTD_p_jobSize:
- case ZSTD_p_overlapSizeLog:
+ case ZSTD_c_jobSize:
+ case ZSTD_c_overlapLog:
+ case ZSTD_c_rsyncable:
return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
- case ZSTD_p_enableLongDistanceMatching:
- case ZSTD_p_ldmHashLog:
- case ZSTD_p_ldmMinMatch:
- case ZSTD_p_ldmBucketSizeLog:
- case ZSTD_p_ldmHashEveryLog:
+ case ZSTD_c_enableLongDistanceMatching:
+ case ZSTD_c_ldmHashLog:
+ case ZSTD_c_ldmMinMatch:
+ case ZSTD_c_ldmBucketSizeLog:
+ case ZSTD_c_ldmHashRateLog:
if (cctx->cdict) return ERROR(stage_wrong);
return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
@@ -335,148 +483,257 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned v
}
}
-size_t ZSTD_CCtxParam_setParameter(
- ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, unsigned value)
+size_t ZSTD_CCtxParam_setParameter(ZSTD_CCtx_params* CCtxParams,
+ ZSTD_cParameter param, int value)
{
- DEBUGLOG(4, "ZSTD_CCtxParam_setParameter (%u, %u)", (U32)param, value);
+ DEBUGLOG(4, "ZSTD_CCtxParam_setParameter (%i, %i)", (int)param, value);
switch(param)
{
- case ZSTD_p_format :
- if (value > (unsigned)ZSTD_f_zstd1_magicless)
- return ERROR(parameter_unsupported);
+ case ZSTD_c_format :
+ BOUNDCHECK(ZSTD_c_format, value);
CCtxParams->format = (ZSTD_format_e)value;
return (size_t)CCtxParams->format;
- case ZSTD_p_compressionLevel : {
- int cLevel = (int)value; /* cast expected to restore negative sign */
+ case ZSTD_c_compressionLevel : {
+ int cLevel = value;
if (cLevel > ZSTD_maxCLevel()) cLevel = ZSTD_maxCLevel();
+ if (cLevel < ZSTD_minCLevel()) cLevel = ZSTD_minCLevel();
if (cLevel) { /* 0 : does not change current level */
- CCtxParams->disableLiteralCompression = (cLevel<0); /* negative levels disable huffman */
CCtxParams->compressionLevel = cLevel;
}
if (CCtxParams->compressionLevel >= 0) return CCtxParams->compressionLevel;
return 0; /* return type (size_t) cannot represent negative values */
}
- case ZSTD_p_windowLog :
- if (value>0) /* 0 => use default */
- CLAMPCHECK(value, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
+ case ZSTD_c_windowLog :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_windowLog, value);
CCtxParams->cParams.windowLog = value;
return CCtxParams->cParams.windowLog;
- case ZSTD_p_hashLog :
- if (value>0) /* 0 => use default */
- CLAMPCHECK(value, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
+ case ZSTD_c_hashLog :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_hashLog, value);
CCtxParams->cParams.hashLog = value;
return CCtxParams->cParams.hashLog;
- case ZSTD_p_chainLog :
- if (value>0) /* 0 => use default */
- CLAMPCHECK(value, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
+ case ZSTD_c_chainLog :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_chainLog, value);
CCtxParams->cParams.chainLog = value;
return CCtxParams->cParams.chainLog;
- case ZSTD_p_searchLog :
- if (value>0) /* 0 => use default */
- CLAMPCHECK(value, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
+ case ZSTD_c_searchLog :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_searchLog, value);
CCtxParams->cParams.searchLog = value;
return value;
- case ZSTD_p_minMatch :
- if (value>0) /* 0 => use default */
- CLAMPCHECK(value, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
- CCtxParams->cParams.searchLength = value;
- return CCtxParams->cParams.searchLength;
+ case ZSTD_c_minMatch :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_minMatch, value);
+ CCtxParams->cParams.minMatch = value;
+ return CCtxParams->cParams.minMatch;
- case ZSTD_p_targetLength :
- /* all values are valid. 0 => use default */
+ case ZSTD_c_targetLength :
+ BOUNDCHECK(ZSTD_c_targetLength, value);
CCtxParams->cParams.targetLength = value;
return CCtxParams->cParams.targetLength;
- case ZSTD_p_compressionStrategy :
- if (value>0) /* 0 => use default */
- CLAMPCHECK(value, (unsigned)ZSTD_fast, (unsigned)ZSTD_btultra);
+ case ZSTD_c_strategy :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_strategy, value);
CCtxParams->cParams.strategy = (ZSTD_strategy)value;
return (size_t)CCtxParams->cParams.strategy;
- case ZSTD_p_compressLiterals:
- CCtxParams->disableLiteralCompression = !value;
- return !CCtxParams->disableLiteralCompression;
-
- case ZSTD_p_contentSizeFlag :
+ case ZSTD_c_contentSizeFlag :
/* Content size written in frame header _when known_ (default:1) */
- DEBUGLOG(4, "set content size flag = %u", (value>0));
- CCtxParams->fParams.contentSizeFlag = value > 0;
+ DEBUGLOG(4, "set content size flag = %u", (value!=0));
+ CCtxParams->fParams.contentSizeFlag = value != 0;
return CCtxParams->fParams.contentSizeFlag;
- case ZSTD_p_checksumFlag :
+ case ZSTD_c_checksumFlag :
/* A 32-bits content checksum will be calculated and written at end of frame (default:0) */
- CCtxParams->fParams.checksumFlag = value > 0;
+ CCtxParams->fParams.checksumFlag = value != 0;
return CCtxParams->fParams.checksumFlag;
- case ZSTD_p_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
- DEBUGLOG(4, "set dictIDFlag = %u", (value>0));
+ case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
+ DEBUGLOG(4, "set dictIDFlag = %u", (value!=0));
CCtxParams->fParams.noDictIDFlag = !value;
return !CCtxParams->fParams.noDictIDFlag;
- case ZSTD_p_forceMaxWindow :
- CCtxParams->forceWindow = (value > 0);
+ case ZSTD_c_forceMaxWindow :
+ CCtxParams->forceWindow = (value != 0);
return CCtxParams->forceWindow;
- case ZSTD_p_nbWorkers :
+ case ZSTD_c_forceAttachDict : {
+ const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value;
+ BOUNDCHECK(ZSTD_c_forceAttachDict, pref);
+ CCtxParams->attachDictPref = pref;
+ return CCtxParams->attachDictPref;
+ }
+
+ case ZSTD_c_nbWorkers :
#ifndef ZSTD_MULTITHREAD
- if (value>0) return ERROR(parameter_unsupported);
+ if (value!=0) return ERROR(parameter_unsupported);
return 0;
#else
return ZSTDMT_CCtxParam_setNbWorkers(CCtxParams, value);
#endif
- case ZSTD_p_jobSize :
+ case ZSTD_c_jobSize :
#ifndef ZSTD_MULTITHREAD
return ERROR(parameter_unsupported);
#else
return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_jobSize, value);
#endif
- case ZSTD_p_overlapSizeLog :
+ case ZSTD_c_overlapLog :
#ifndef ZSTD_MULTITHREAD
return ERROR(parameter_unsupported);
#else
- return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_overlapSectionLog, value);
+ return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_overlapLog, value);
#endif
- case ZSTD_p_enableLongDistanceMatching :
- CCtxParams->ldmParams.enableLdm = (value>0);
+ case ZSTD_c_rsyncable :
+#ifndef ZSTD_MULTITHREAD
+ return ERROR(parameter_unsupported);
+#else
+ return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_rsyncable, value);
+#endif
+
+ case ZSTD_c_enableLongDistanceMatching :
+ CCtxParams->ldmParams.enableLdm = (value!=0);
return CCtxParams->ldmParams.enableLdm;
- case ZSTD_p_ldmHashLog :
- if (value>0) /* 0 ==> auto */
- CLAMPCHECK(value, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
+ case ZSTD_c_ldmHashLog :
+ if (value!=0) /* 0 ==> auto */
+ BOUNDCHECK(ZSTD_c_ldmHashLog, value);
CCtxParams->ldmParams.hashLog = value;
return CCtxParams->ldmParams.hashLog;
- case ZSTD_p_ldmMinMatch :
- if (value>0) /* 0 ==> default */
- CLAMPCHECK(value, ZSTD_LDM_MINMATCH_MIN, ZSTD_LDM_MINMATCH_MAX);
+ case ZSTD_c_ldmMinMatch :
+ if (value!=0) /* 0 ==> default */
+ BOUNDCHECK(ZSTD_c_ldmMinMatch, value);
CCtxParams->ldmParams.minMatchLength = value;
return CCtxParams->ldmParams.minMatchLength;
- case ZSTD_p_ldmBucketSizeLog :
- if (value > ZSTD_LDM_BUCKETSIZELOG_MAX)
- return ERROR(parameter_outOfBound);
+ case ZSTD_c_ldmBucketSizeLog :
+ if (value!=0) /* 0 ==> default */
+ BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value);
CCtxParams->ldmParams.bucketSizeLog = value;
return CCtxParams->ldmParams.bucketSizeLog;
- case ZSTD_p_ldmHashEveryLog :
+ case ZSTD_c_ldmHashRateLog :
if (value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)
return ERROR(parameter_outOfBound);
- CCtxParams->ldmParams.hashEveryLog = value;
- return CCtxParams->ldmParams.hashEveryLog;
+ CCtxParams->ldmParams.hashRateLog = value;
+ return CCtxParams->ldmParams.hashRateLog;
default: return ERROR(parameter_unsupported);
}
}
+size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value)
+{
+ return ZSTD_CCtxParam_getParameter(&cctx->requestedParams, param, value);
+}
+
+size_t ZSTD_CCtxParam_getParameter(
+ ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, int* value)
+{
+ switch(param)
+ {
+ case ZSTD_c_format :
+ *value = CCtxParams->format;
+ break;
+ case ZSTD_c_compressionLevel :
+ *value = CCtxParams->compressionLevel;
+ break;
+ case ZSTD_c_windowLog :
+ *value = CCtxParams->cParams.windowLog;
+ break;
+ case ZSTD_c_hashLog :
+ *value = CCtxParams->cParams.hashLog;
+ break;
+ case ZSTD_c_chainLog :
+ *value = CCtxParams->cParams.chainLog;
+ break;
+ case ZSTD_c_searchLog :
+ *value = CCtxParams->cParams.searchLog;
+ break;
+ case ZSTD_c_minMatch :
+ *value = CCtxParams->cParams.minMatch;
+ break;
+ case ZSTD_c_targetLength :
+ *value = CCtxParams->cParams.targetLength;
+ break;
+ case ZSTD_c_strategy :
+ *value = (unsigned)CCtxParams->cParams.strategy;
+ break;
+ case ZSTD_c_contentSizeFlag :
+ *value = CCtxParams->fParams.contentSizeFlag;
+ break;
+ case ZSTD_c_checksumFlag :
+ *value = CCtxParams->fParams.checksumFlag;
+ break;
+ case ZSTD_c_dictIDFlag :
+ *value = !CCtxParams->fParams.noDictIDFlag;
+ break;
+ case ZSTD_c_forceMaxWindow :
+ *value = CCtxParams->forceWindow;
+ break;
+ case ZSTD_c_forceAttachDict :
+ *value = CCtxParams->attachDictPref;
+ break;
+ case ZSTD_c_nbWorkers :
+#ifndef ZSTD_MULTITHREAD
+ assert(CCtxParams->nbWorkers == 0);
+#endif
+ *value = CCtxParams->nbWorkers;
+ break;
+ case ZSTD_c_jobSize :
+#ifndef ZSTD_MULTITHREAD
+ return ERROR(parameter_unsupported);
+#else
+ assert(CCtxParams->jobSize <= INT_MAX);
+ *value = (int)CCtxParams->jobSize;
+ break;
+#endif
+ case ZSTD_c_overlapLog :
+#ifndef ZSTD_MULTITHREAD
+ return ERROR(parameter_unsupported);
+#else
+ *value = CCtxParams->overlapLog;
+ break;
+#endif
+ case ZSTD_c_rsyncable :
+#ifndef ZSTD_MULTITHREAD
+ return ERROR(parameter_unsupported);
+#else
+ *value = CCtxParams->rsyncable;
+ break;
+#endif
+ case ZSTD_c_enableLongDistanceMatching :
+ *value = CCtxParams->ldmParams.enableLdm;
+ break;
+ case ZSTD_c_ldmHashLog :
+ *value = CCtxParams->ldmParams.hashLog;
+ break;
+ case ZSTD_c_ldmMinMatch :
+ *value = CCtxParams->ldmParams.minMatchLength;
+ break;
+ case ZSTD_c_ldmBucketSizeLog :
+ *value = CCtxParams->ldmParams.bucketSizeLog;
+ break;
+ case ZSTD_c_ldmHashRateLog :
+ *value = CCtxParams->ldmParams.hashRateLog;
+ break;
+ default: return ERROR(parameter_unsupported);
+ }
+ return 0;
+}
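/* Illustrative sketch (not part of this patch) : with the renamed ZSTD_c_* enums and the
 * int-based setter/getter above, typical advanced-API usage looks roughly like the following
 * (error handling elided ; assumes #include <zstd.h> ; all names are public zstd API) : */
{   ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    int checksum = 0;
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
    ZSTD_CCtx_getParameter(cctx, ZSTD_c_checksumFlag, &checksum);   /* checksum == 1 */
    ZSTD_freeCCtx(cctx);
}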
+
/** ZSTD_CCtx_setParametersUsingCCtxParams() :
* just applies `params` into `cctx`
* no action is performed, parameters are merely stored.
@@ -487,6 +744,7 @@ size_t ZSTD_CCtxParam_setParameter(
size_t ZSTD_CCtx_setParametersUsingCCtxParams(
ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
{
+ DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
if (cctx->cdict) return ERROR(stage_wrong);
@@ -565,53 +823,59 @@ size_t ZSTD_CCtx_refPrefix_advanced(
return 0;
}
-static void ZSTD_startNewCompression(ZSTD_CCtx* cctx)
-{
- cctx->streamStage = zcss_init;
- cctx->pledgedSrcSizePlusOne = 0;
-}
-
/*! ZSTD_CCtx_reset() :
* Also dumps dictionary */
-void ZSTD_CCtx_reset(ZSTD_CCtx* cctx)
+size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
{
- ZSTD_startNewCompression(cctx);
- cctx->cdict = NULL;
+ if ( (reset == ZSTD_reset_session_only)
+ || (reset == ZSTD_reset_session_and_parameters) ) {
+ cctx->streamStage = zcss_init;
+ cctx->pledgedSrcSizePlusOne = 0;
+ }
+ if ( (reset == ZSTD_reset_parameters)
+ || (reset == ZSTD_reset_session_and_parameters) ) {
+ if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
+ cctx->cdict = NULL;
+ return ZSTD_CCtxParams_reset(&cctx->requestedParams);
+ }
+ return 0;
}
+
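/* Illustrative sketch (not part of this patch) : the ZSTD_ResetDirective argument selects
 * what gets cleared, as implemented above :
 *     ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);             (keeps parameters and cdict)
 *     ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters);   (also reverts parameters, drops cdict)
 * Resetting parameters alone (ZSTD_reset_parameters) only succeeds while streamStage == zcss_init,
 * i.e. between frames, as enforced by the stage_wrong check above. */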
/** ZSTD_checkCParams() :
control CParam values remain within authorized range.
@return : 0, or an error code if one value is beyond authorized range */
size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
{
- CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
- CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
- CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
- CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
- CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
- if ((U32)(cParams.targetLength) < ZSTD_TARGETLENGTH_MIN)
- return ERROR(parameter_unsupported);
- if ((U32)(cParams.strategy) > (U32)ZSTD_btultra)
- return ERROR(parameter_unsupported);
+ BOUNDCHECK(ZSTD_c_windowLog, cParams.windowLog);
+ BOUNDCHECK(ZSTD_c_chainLog, cParams.chainLog);
+ BOUNDCHECK(ZSTD_c_hashLog, cParams.hashLog);
+ BOUNDCHECK(ZSTD_c_searchLog, cParams.searchLog);
+ BOUNDCHECK(ZSTD_c_minMatch, cParams.minMatch);
+ BOUNDCHECK(ZSTD_c_targetLength,cParams.targetLength);
+ BOUNDCHECK(ZSTD_c_strategy, cParams.strategy);
return 0;
}
/** ZSTD_clampCParams() :
* make CParam values within valid range.
* @return : valid CParams */
-static ZSTD_compressionParameters ZSTD_clampCParams(ZSTD_compressionParameters cParams)
-{
-# define CLAMP(val,min,max) { \
- if (val<min) val=min; \
- else if (val>max) val=max; \
- }
- CLAMP(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
- CLAMP(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
- CLAMP(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
- CLAMP(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
- CLAMP(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
- if ((U32)(cParams.targetLength) < ZSTD_TARGETLENGTH_MIN) cParams.targetLength = ZSTD_TARGETLENGTH_MIN;
- if ((U32)(cParams.strategy) > (U32)ZSTD_btultra) cParams.strategy = ZSTD_btultra;
+static ZSTD_compressionParameters
+ZSTD_clampCParams(ZSTD_compressionParameters cParams)
+{
+# define CLAMP_TYPE(cParam, val, type) { \
+ ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); \
+ if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound; \
+ else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
+ }
+# define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, int)
+ CLAMP(ZSTD_c_windowLog, cParams.windowLog);
+ CLAMP(ZSTD_c_chainLog, cParams.chainLog);
+ CLAMP(ZSTD_c_hashLog, cParams.hashLog);
+ CLAMP(ZSTD_c_searchLog, cParams.searchLog);
+ CLAMP(ZSTD_c_minMatch, cParams.minMatch);
+ CLAMP(ZSTD_c_targetLength,cParams.targetLength);
+ CLAMP_TYPE(ZSTD_c_strategy,cParams.strategy, ZSTD_strategy);
return cParams;
}
@@ -627,8 +891,11 @@ static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
optimize `cPar` for a given input (`srcSize` and `dictSize`).
mostly downsizing to reduce memory consumption and initialization latency.
Both `srcSize` and `dictSize` are optional (use 0 if unknown).
- Note : cPar is considered validated at this stage. Use ZSTD_checkCParams() to ensure that condition. */
-ZSTD_compressionParameters ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize)
+ Note : cPar is assumed validated. Use ZSTD_checkCParams() to ensure this condition. */
+static ZSTD_compressionParameters
+ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
+ unsigned long long srcSize,
+ size_t dictSize)
{
static const U64 minSrcSize = 513; /* (1<<9) + 1 */
static const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
@@ -648,7 +915,7 @@ ZSTD_compressionParameters ZSTD_adjustCParams_internal(ZSTD_compressionParameter
ZSTD_highbit32(tSize-1) + 1;
if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
}
- if (cPar.hashLog > cPar.windowLog) cPar.hashLog = cPar.windowLog;
+ if (cPar.hashLog > cPar.windowLog+1) cPar.hashLog = cPar.windowLog+1;
{ U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
if (cycleLog > cPar.windowLog)
cPar.chainLog -= (cycleLog - cPar.windowLog);
@@ -660,23 +927,43 @@ ZSTD_compressionParameters ZSTD_adjustCParams_internal(ZSTD_compressionParameter
return cPar;
}
-ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize)
+ZSTD_compressionParameters
+ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
+ unsigned long long srcSize,
+ size_t dictSize)
{
cPar = ZSTD_clampCParams(cPar);
return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize);
}
-static size_t ZSTD_sizeof_matchState(ZSTD_compressionParameters const* cParams, const U32 forCCtx)
+ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
+ const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize)
+{
+ ZSTD_compressionParameters cParams = ZSTD_getCParams(CCtxParams->compressionLevel, srcSizeHint, dictSize);
+ if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
+ if (CCtxParams->cParams.windowLog) cParams.windowLog = CCtxParams->cParams.windowLog;
+ if (CCtxParams->cParams.hashLog) cParams.hashLog = CCtxParams->cParams.hashLog;
+ if (CCtxParams->cParams.chainLog) cParams.chainLog = CCtxParams->cParams.chainLog;
+ if (CCtxParams->cParams.searchLog) cParams.searchLog = CCtxParams->cParams.searchLog;
+ if (CCtxParams->cParams.minMatch) cParams.minMatch = CCtxParams->cParams.minMatch;
+ if (CCtxParams->cParams.targetLength) cParams.targetLength = CCtxParams->cParams.targetLength;
+ if (CCtxParams->cParams.strategy) cParams.strategy = CCtxParams->cParams.strategy;
+ assert(!ZSTD_checkCParams(cParams));
+ return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize);
+}
+
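/* Example of the override rule above (illustrative) : with compressionLevel == 3 and an
 * explicit ZSTD_c_windowLog of 25, the level-derived cParams are used except that
 * windowLog becomes 25 ; any field left at 0 keeps the level's default, and the result
 * is then re-adjusted for srcSizeHint / dictSize. */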
+static size_t
+ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
+ const U32 forCCtx)
{
size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
size_t const hSize = ((size_t)1) << cParams->hashLog;
- U32 const hashLog3 = (forCCtx && cParams->searchLength==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
+ U32 const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
size_t const h3Size = ((size_t)1) << hashLog3;
size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
size_t const optPotentialSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits)) * sizeof(U32)
+ (ZSTD_OPT_NUM+1) * (sizeof(ZSTD_match_t)+sizeof(ZSTD_optimal_t));
- size_t const optSpace = (forCCtx && ((cParams->strategy == ZSTD_btopt) ||
- (cParams->strategy == ZSTD_btultra)))
+ size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
? optPotentialSpace
: 0;
DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
@@ -691,9 +978,9 @@ size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{ ZSTD_compressionParameters const cParams =
ZSTD_getCParamsFromCCtxParams(params, 0, 0);
size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
- U32 const divider = (cParams.searchLength==3) ? 3 : 4;
+ U32 const divider = (cParams.minMatch==3) ? 3 : 4;
size_t const maxNbSeq = blockSize / divider;
- size_t const tokenSpace = blockSize + 11*maxNbSeq;
+ size_t const tokenSpace = WILDCOPY_OVERLENGTH + blockSize + 11*maxNbSeq;
size_t const entropySpace = HUF_WORKSPACE_SIZE;
size_t const blockStateSpace = 2 * sizeof(ZSTD_compressedBlockState_t);
size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 1);
@@ -726,7 +1013,7 @@ size_t ZSTD_estimateCCtxSize(int compressionLevel)
{
int level;
size_t memBudget = 0;
- for (level=1; level<=compressionLevel; level++) {
+ for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
if (newMB > memBudget) memBudget = newMB;
}
@@ -752,15 +1039,17 @@ size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
return ZSTD_estimateCStreamSize_usingCCtxParams(&params);
}
-static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel) {
+static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel)
+{
ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0);
return ZSTD_estimateCStreamSize_usingCParams(cParams);
}
-size_t ZSTD_estimateCStreamSize(int compressionLevel) {
+size_t ZSTD_estimateCStreamSize(int compressionLevel)
+{
int level;
size_t memBudget = 0;
- for (level=1; level<=compressionLevel; level++) {
+ for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
size_t const newMB = ZSTD_estimateCStreamSize_internal(level);
if (newMB > memBudget) memBudget = newMB;
}
@@ -786,9 +1075,27 @@ ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx)
fp.ingested = cctx->consumedSrcSize + buffered;
fp.consumed = cctx->consumedSrcSize;
fp.produced = cctx->producedCSize;
+ fp.flushed = cctx->producedCSize; /* simplified; some data might still be left within streaming output buffer */
+ fp.currentJobID = 0;
+ fp.nbActiveWorkers = 0;
return fp;
} }
+/*! ZSTD_toFlushNow()
+ * Only useful for multithreading scenarios currently (nbWorkers >= 1).
+ */
+size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx)
+{
+#ifdef ZSTD_MULTITHREAD
+ if (cctx->appliedParams.nbWorkers > 0) {
+ return ZSTDMT_toFlushNow(cctx->mtctx);
+ }
+#endif
+ (void)cctx;
+    return 0;   /* over-simplification; could also check if context is currently running in streaming mode, in which case it could report how many bytes are left to flush within the output buffer */
+}
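/* Illustrative use (not part of this patch) : a caller driving a progress indicator can
 * combine ZSTD_getFrameProgression(cctx) with ZSTD_toFlushNow(cctx) to learn how much
 * compressed data is already produced and ready to be flushed by the next streaming call,
 * e.g. ZSTD_compressStream2(..., ZSTD_e_flush). */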
+
+
static U32 ZSTD_equivalentCParams(ZSTD_compressionParameters cParams1,
ZSTD_compressionParameters cParams2)
@@ -796,7 +1103,21 @@ static U32 ZSTD_equivalentCParams(ZSTD_compressionParameters cParams1,
return (cParams1.hashLog == cParams2.hashLog)
& (cParams1.chainLog == cParams2.chainLog)
& (cParams1.strategy == cParams2.strategy) /* opt parser space */
- & ((cParams1.searchLength==3) == (cParams2.searchLength==3)); /* hashlog3 space */
+ & ((cParams1.minMatch==3) == (cParams2.minMatch==3)); /* hashlog3 space */
+}
+
+static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1,
+ ZSTD_compressionParameters cParams2)
+{
+ (void)cParams1;
+ (void)cParams2;
+ assert(cParams1.windowLog == cParams2.windowLog);
+ assert(cParams1.chainLog == cParams2.chainLog);
+ assert(cParams1.hashLog == cParams2.hashLog);
+ assert(cParams1.searchLog == cParams2.searchLog);
+ assert(cParams1.minMatch == cParams2.minMatch);
+ assert(cParams1.targetLength == cParams2.targetLength);
+ assert(cParams1.strategy == cParams2.strategy);
}
/** The parameters are equivalent if ldm is not enabled in both sets or
@@ -809,7 +1130,7 @@ static U32 ZSTD_equivalentLdmParams(ldmParams_t ldmParams1,
ldmParams1.hashLog == ldmParams2.hashLog &&
ldmParams1.bucketSizeLog == ldmParams2.bucketSizeLog &&
ldmParams1.minMatchLength == ldmParams2.minMatchLength &&
- ldmParams1.hashEveryLog == ldmParams2.hashEveryLog);
+ ldmParams1.hashRateLog == ldmParams2.hashRateLog);
}
typedef enum { ZSTDb_not_buffered, ZSTDb_buffered } ZSTD_buffered_policy_e;
@@ -817,33 +1138,51 @@ typedef enum { ZSTDb_not_buffered, ZSTDb_buffered } ZSTD_buffered_policy_e;
/* ZSTD_sufficientBuff() :
* check internal buffers exist for streaming if buffPol == ZSTDb_buffered .
* Note : they are assumed to be correctly sized if ZSTD_equivalentCParams()==1 */
-static U32 ZSTD_sufficientBuff(size_t bufferSize1, size_t blockSize1,
+static U32 ZSTD_sufficientBuff(size_t bufferSize1, size_t maxNbSeq1,
+ size_t maxNbLit1,
ZSTD_buffered_policy_e buffPol2,
ZSTD_compressionParameters cParams2,
U64 pledgedSrcSize)
{
size_t const windowSize2 = MAX(1, (size_t)MIN(((U64)1 << cParams2.windowLog), pledgedSrcSize));
size_t const blockSize2 = MIN(ZSTD_BLOCKSIZE_MAX, windowSize2);
+ size_t const maxNbSeq2 = blockSize2 / ((cParams2.minMatch == 3) ? 3 : 4);
+ size_t const maxNbLit2 = blockSize2;
size_t const neededBufferSize2 = (buffPol2==ZSTDb_buffered) ? windowSize2 + blockSize2 : 0;
- DEBUGLOG(4, "ZSTD_sufficientBuff: is windowSize2=%u <= wlog1=%u",
- (U32)windowSize2, cParams2.windowLog);
- DEBUGLOG(4, "ZSTD_sufficientBuff: is blockSize2=%u <= blockSize1=%u",
- (U32)blockSize2, (U32)blockSize1);
- return (blockSize2 <= blockSize1) /* seqStore space depends on blockSize */
+ DEBUGLOG(4, "ZSTD_sufficientBuff: is neededBufferSize2=%u <= bufferSize1=%u",
+ (U32)neededBufferSize2, (U32)bufferSize1);
+ DEBUGLOG(4, "ZSTD_sufficientBuff: is maxNbSeq2=%u <= maxNbSeq1=%u",
+ (U32)maxNbSeq2, (U32)maxNbSeq1);
+ DEBUGLOG(4, "ZSTD_sufficientBuff: is maxNbLit2=%u <= maxNbLit1=%u",
+ (U32)maxNbLit2, (U32)maxNbLit1);
+ return (maxNbLit2 <= maxNbLit1)
+ & (maxNbSeq2 <= maxNbSeq1)
& (neededBufferSize2 <= bufferSize1);
}
/** Equivalence for resetCCtx purposes */
static U32 ZSTD_equivalentParams(ZSTD_CCtx_params params1,
ZSTD_CCtx_params params2,
- size_t buffSize1, size_t blockSize1,
+ size_t buffSize1,
+ size_t maxNbSeq1, size_t maxNbLit1,
ZSTD_buffered_policy_e buffPol2,
U64 pledgedSrcSize)
{
DEBUGLOG(4, "ZSTD_equivalentParams: pledgedSrcSize=%u", (U32)pledgedSrcSize);
- return ZSTD_equivalentCParams(params1.cParams, params2.cParams) &&
- ZSTD_equivalentLdmParams(params1.ldmParams, params2.ldmParams) &&
- ZSTD_sufficientBuff(buffSize1, blockSize1, buffPol2, params2.cParams, pledgedSrcSize);
+ if (!ZSTD_equivalentCParams(params1.cParams, params2.cParams)) {
+ DEBUGLOG(4, "ZSTD_equivalentCParams() == 0");
+ return 0;
+ }
+ if (!ZSTD_equivalentLdmParams(params1.ldmParams, params2.ldmParams)) {
+ DEBUGLOG(4, "ZSTD_equivalentLdmParams() == 0");
+ return 0;
+ }
+ if (!ZSTD_sufficientBuff(buffSize1, maxNbSeq1, maxNbLit1, buffPol2,
+ params2.cParams, pledgedSrcSize)) {
+ DEBUGLOG(4, "ZSTD_sufficientBuff() == 0");
+ return 0;
+ }
+ return 1;
}
static void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
@@ -851,10 +1190,10 @@ static void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
int i;
for (i = 0; i < ZSTD_REP_NUM; ++i)
bs->rep[i] = repStartValue[i];
- bs->entropy.hufCTable_repeatMode = HUF_repeat_none;
- bs->entropy.offcode_repeatMode = FSE_repeat_none;
- bs->entropy.matchlength_repeatMode = FSE_repeat_none;
- bs->entropy.litlength_repeatMode = FSE_repeat_none;
+ bs->entropy.huf.repeatMode = HUF_repeat_none;
+ bs->entropy.fse.offcode_repeatMode = FSE_repeat_none;
+ bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none;
+ bs->entropy.fse.litlength_repeatMode = FSE_repeat_none;
}
/*! ZSTD_invalidateMatchState()
@@ -865,9 +1204,11 @@ static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
{
ZSTD_window_clear(&ms->window);
- ms->nextToUpdate = ms->window.dictLimit + 1;
+ ms->nextToUpdate = ms->window.dictLimit;
+ ms->nextToUpdate3 = ms->window.dictLimit;
ms->loadedDictEnd = 0;
ms->opt.litLengthSum = 0; /* force reset of btopt stats */
+ ms->dictMatchState = NULL;
}
/*! ZSTD_continueCCtx() :
@@ -880,6 +1221,7 @@ static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_CCtx_params params, U64 pl
cctx->blockSize = blockSize; /* previous block size could be different even for same windowLog, due to pledgedSrcSize */
cctx->appliedParams = params;
+ cctx->blockState.matchState.cParams = params.cParams;
cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
cctx->consumedSrcSize = 0;
cctx->producedCSize = 0;
@@ -900,11 +1242,15 @@ static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_CCtx_params params, U64 pl
typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset } ZSTD_compResetPolicy_e;
-static void* ZSTD_reset_matchState(ZSTD_matchState_t* ms, void* ptr, ZSTD_compressionParameters const* cParams, ZSTD_compResetPolicy_e const crp, U32 const forCCtx)
+static void*
+ZSTD_reset_matchState(ZSTD_matchState_t* ms,
+ void* ptr,
+ const ZSTD_compressionParameters* cParams,
+ ZSTD_compResetPolicy_e const crp, U32 const forCCtx)
{
size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
size_t const hSize = ((size_t)1) << cParams->hashLog;
- U32 const hashLog3 = (forCCtx && cParams->searchLength==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
+ U32 const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
size_t const h3Size = ((size_t)1) << hashLog3;
size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
@@ -912,12 +1258,15 @@ static void* ZSTD_reset_matchState(ZSTD_matchState_t* ms, void* ptr, ZSTD_compre
ms->hashLog3 = hashLog3;
memset(&ms->window, 0, sizeof(ms->window));
+ ms->window.dictLimit = 1; /* start from 1, so that 1st position is valid */
+ ms->window.lowLimit = 1; /* it ensures first and later CCtx usages compress the same */
+ ms->window.nextSrc = ms->window.base + 1; /* see issue #1241 */
ZSTD_invalidateMatchState(ms);
/* opt parser space */
- if (forCCtx && ((cParams->strategy == ZSTD_btopt) | (cParams->strategy == ZSTD_btultra))) {
+ if (forCCtx && (cParams->strategy >= ZSTD_btopt)) {
DEBUGLOG(4, "reserving optimal parser space");
- ms->opt.litFreq = (U32*)ptr;
+ ms->opt.litFreq = (unsigned*)ptr;
ms->opt.litLengthFreq = ms->opt.litFreq + (1<<Litbits);
ms->opt.matchLengthFreq = ms->opt.litLengthFreq + (MaxLL+1);
ms->opt.offCodeFreq = ms->opt.matchLengthFreq + (MaxML+1);
@@ -937,14 +1286,24 @@ static void* ZSTD_reset_matchState(ZSTD_matchState_t* ms, void* ptr, ZSTD_compre
ms->hashTable3 = ms->chainTable + chainSize;
ptr = ms->hashTable3 + h3Size;
+ ms->cParams = *cParams;
+
assert(((size_t)ptr & 3) == 0);
return ptr;
}
+#define ZSTD_WORKSPACETOOLARGE_FACTOR 3 /* define "workspace is too large" as this number of times larger than needed */
+#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128 /* when workspace is continuously too large
+                                                 * for at least this many consecutive times,
+ * context's memory usage is considered wasteful,
+ * because it's sized to handle a worst case scenario which rarely happens.
+ * In which case, resize it down to free some memory */
+
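/* Reading of the two constants above (illustrative) : a workspace larger than 3x the size
 * currently needed, observed for more than 128 consecutive compressions in "continue" mode,
 * is considered wasteful ; ZSTD_resetCCtx_internal() below then reallocates it down to the
 * needed size. */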
/*! ZSTD_resetCCtx_internal() :
note : `params` are assumed fully validated at this stage */
static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
- ZSTD_CCtx_params params, U64 pledgedSrcSize,
+ ZSTD_CCtx_params params,
+ U64 pledgedSrcSize,
ZSTD_compResetPolicy_e const crp,
ZSTD_buffered_policy_e const zbuff)
{
@@ -954,34 +1313,35 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
if (crp == ZSTDcrp_continue) {
if (ZSTD_equivalentParams(zc->appliedParams, params,
- zc->inBuffSize, zc->blockSize,
- zbuff, pledgedSrcSize)) {
- DEBUGLOG(4, "ZSTD_equivalentParams()==1 -> continue mode (wLog1=%u, blockSize1=%u)",
- zc->appliedParams.cParams.windowLog, (U32)zc->blockSize);
- return ZSTD_continueCCtx(zc, params, pledgedSrcSize);
+ zc->inBuffSize,
+ zc->seqStore.maxNbSeq, zc->seqStore.maxNbLit,
+ zbuff, pledgedSrcSize)) {
+ DEBUGLOG(4, "ZSTD_equivalentParams()==1 -> continue mode (wLog1=%u, blockSize1=%zu)",
+ zc->appliedParams.cParams.windowLog, zc->blockSize);
+ zc->workSpaceOversizedDuration += (zc->workSpaceOversizedDuration > 0); /* if it was too large, it still is */
+ if (zc->workSpaceOversizedDuration <= ZSTD_WORKSPACETOOLARGE_MAXDURATION)
+ return ZSTD_continueCCtx(zc, params, pledgedSrcSize);
} }
DEBUGLOG(4, "ZSTD_equivalentParams()==0 -> reset CCtx");
if (params.ldmParams.enableLdm) {
/* Adjust long distance matching parameters */
- params.ldmParams.windowLog = params.cParams.windowLog;
ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
- assert(params.ldmParams.hashEveryLog < 32);
- zc->ldmState.hashPower =
- ZSTD_ldm_getHashPower(params.ldmParams.minMatchLength);
+ assert(params.ldmParams.hashRateLog < 32);
+ zc->ldmState.hashPower = ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength);
}
{ size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
- U32 const divider = (params.cParams.searchLength==3) ? 3 : 4;
+ U32 const divider = (params.cParams.minMatch==3) ? 3 : 4;
size_t const maxNbSeq = blockSize / divider;
- size_t const tokenSpace = blockSize + 11*maxNbSeq;
+ size_t const tokenSpace = WILDCOPY_OVERLENGTH + blockSize + 11*maxNbSeq;
size_t const buffOutSize = (zbuff==ZSTDb_buffered) ? ZSTD_compressBound(blockSize)+1 : 0;
size_t const buffInSize = (zbuff==ZSTDb_buffered) ? windowSize + blockSize : 0;
size_t const matchStateSize = ZSTD_sizeof_matchState(&params.cParams, /* forCCtx */ 1);
size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params.ldmParams, blockSize);
- void* ptr;
+ void* ptr; /* used to partition workSpace */
/* Check if workSpace is large enough, alloc a new one if needed */
{ size_t const entropySpace = HUF_WORKSPACE_SIZE;
@@ -993,14 +1353,20 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
size_t const neededSpace = entropySpace + blockStateSpace + ldmSpace +
ldmSeqSpace + matchStateSize + tokenSpace +
bufferSpace;
- DEBUGLOG(4, "Need %uKB workspace, including %uKB for match state, and %uKB for buffers",
- (U32)(neededSpace>>10), (U32)(matchStateSize>>10), (U32)(bufferSpace>>10));
- DEBUGLOG(4, "windowSize: %u - blockSize: %u", (U32)windowSize, (U32)blockSize);
-
- if (zc->workSpaceSize < neededSpace) { /* too small : resize */
- DEBUGLOG(4, "Need to update workSpaceSize from %uK to %uK",
- (unsigned)(zc->workSpaceSize>>10),
- (unsigned)(neededSpace>>10));
+
+ int const workSpaceTooSmall = zc->workSpaceSize < neededSpace;
+ int const workSpaceTooLarge = zc->workSpaceSize > ZSTD_WORKSPACETOOLARGE_FACTOR * neededSpace;
+ int const workSpaceWasteful = workSpaceTooLarge && (zc->workSpaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION);
+ zc->workSpaceOversizedDuration = workSpaceTooLarge ? zc->workSpaceOversizedDuration+1 : 0;
+
+ DEBUGLOG(4, "Need %zuKB workspace, including %zuKB for match state, and %zuKB for buffers",
+ neededSpace>>10, matchStateSize>>10, bufferSpace>>10);
+ DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);
+
+ if (workSpaceTooSmall || workSpaceWasteful) {
+ DEBUGLOG(4, "Need to resize workSpaceSize from %zuKB to %zuKB",
+ zc->workSpaceSize >> 10,
+ neededSpace >> 10);
/* static cctx : no resize, error out */
if (zc->staticSize) return ERROR(memory_allocation);
@@ -1009,9 +1375,11 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
if (zc->workSpace == NULL) return ERROR(memory_allocation);
zc->workSpaceSize = neededSpace;
- ptr = zc->workSpace;
+ zc->workSpaceOversizedDuration = 0;
- /* Statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
+ /* Statically sized space.
+ * entropyWorkspace never moves,
+ * though prev/next block swap places */
assert(((size_t)zc->workSpace & 3) == 0); /* ensure correct alignment */
assert(zc->workSpaceSize >= 2 * sizeof(ZSTD_compressedBlockState_t));
zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)zc->workSpace;
@@ -1022,13 +1390,14 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
/* init params */
zc->appliedParams = params;
+ zc->blockState.matchState.cParams = params.cParams;
zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
zc->consumedSrcSize = 0;
zc->producedCSize = 0;
if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
zc->appliedParams.fParams.contentSizeFlag = 0;
DEBUGLOG(4, "pledged content size : %u ; flag : %u",
- (U32)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
+ (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
zc->blockSize = blockSize;
XXH64_reset(&zc->xxhState, 0);
@@ -1058,13 +1427,18 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
ptr = ZSTD_reset_matchState(&zc->blockState.matchState, ptr, &params.cParams, crp, /* forCCtx */ 1);
/* sequences storage */
+ zc->seqStore.maxNbSeq = maxNbSeq;
zc->seqStore.sequencesStart = (seqDef*)ptr;
ptr = zc->seqStore.sequencesStart + maxNbSeq;
zc->seqStore.llCode = (BYTE*) ptr;
zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq;
zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;
- ptr = zc->seqStore.litStart + blockSize;
+ /* ZSTD_wildcopy() is used to copy into the literals buffer,
+ * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
+ */
+ zc->seqStore.maxNbLit = blockSize;
+ ptr = zc->seqStore.litStart + blockSize + WILDCOPY_OVERLENGTH;
/* ldm bucketOffsets table */
if (params.ldmParams.enableLdm) {
@@ -1098,28 +1472,111 @@ void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
}
-static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
+/* These are the approximate sizes for each strategy past which copying the
+ * dictionary tables into the working context is faster than using them
+ * in-place.
+ */
+static const size_t attachDictSizeCutoffs[ZSTD_STRATEGY_MAX+1] = {
+ 8 KB, /* unused */
+ 8 KB, /* ZSTD_fast */
+ 16 KB, /* ZSTD_dfast */
+ 32 KB, /* ZSTD_greedy */
+ 32 KB, /* ZSTD_lazy */
+ 32 KB, /* ZSTD_lazy2 */
+ 32 KB, /* ZSTD_btlazy2 */
+ 32 KB, /* ZSTD_btopt */
+ 8 KB, /* ZSTD_btultra */
+ 8 KB /* ZSTD_btultra2 */
+};
+
+static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,
+ ZSTD_CCtx_params params,
+ U64 pledgedSrcSize)
+{
+ size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];
+ return ( pledgedSrcSize <= cutoff
+ || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
+ || params.attachDictPref == ZSTD_dictForceAttach )
+ && params.attachDictPref != ZSTD_dictForceCopy
+ && !params.forceWindow; /* dictMatchState isn't correctly
+ * handled in _enforceMaxDist */
+}
+
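/* Illustrative override (not part of this patch) : the heuristic above can be bypassed
 * through the new parameter, e.g. to always duplicate the dictionary tables :
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_forceAttachDict, ZSTD_dictForceCopy);
 * or to force in-place referencing even for large pledged sizes :
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_c_forceAttachDict, ZSTD_dictForceAttach); */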
+static size_t ZSTD_resetCCtx_byAttachingCDict(
+ ZSTD_CCtx* cctx,
+ const ZSTD_CDict* cdict,
+ ZSTD_CCtx_params params,
+ U64 pledgedSrcSize,
+ ZSTD_buffered_policy_e zbuff)
+{
+ {
+ const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;
+ unsigned const windowLog = params.cParams.windowLog;
+ assert(windowLog != 0);
+ /* Resize working context table params for input only, since the dict
+ * has its own tables. */
+ params.cParams = ZSTD_adjustCParams_internal(*cdict_cParams, pledgedSrcSize, 0);
+ params.cParams.windowLog = windowLog;
+ ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
+ ZSTDcrp_continue, zbuff);
+ assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
+ }
+
+ {
+ const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc
+ - cdict->matchState.window.base);
+ const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit;
+ if (cdictLen == 0) {
+ /* don't even attach dictionaries with no contents */
+ DEBUGLOG(4, "skipping attaching empty dictionary");
+ } else {
+ DEBUGLOG(4, "attaching dictionary into context");
+ cctx->blockState.matchState.dictMatchState = &cdict->matchState;
+
+ /* prep working match state so dict matches never have negative indices
+ * when they are translated to the working context's index space. */
+ if (cctx->blockState.matchState.window.dictLimit < cdictEnd) {
+ cctx->blockState.matchState.window.nextSrc =
+ cctx->blockState.matchState.window.base + cdictEnd;
+ ZSTD_window_clear(&cctx->blockState.matchState.window);
+ }
+ cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit;
+ }
+ }
+
+ cctx->dictID = cdict->dictID;
+
+ /* copy block state */
+ memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
+
+ return 0;
+}
+
+static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
const ZSTD_CDict* cdict,
- unsigned windowLog,
- ZSTD_frameParameters fParams,
+ ZSTD_CCtx_params params,
U64 pledgedSrcSize,
ZSTD_buffered_policy_e zbuff)
{
- { ZSTD_CCtx_params params = cctx->requestedParams;
+ const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;
+
+ DEBUGLOG(4, "copying dictionary into context");
+
+ { unsigned const windowLog = params.cParams.windowLog;
+ assert(windowLog != 0);
/* Copy only compression parameters related to tables. */
- params.cParams = cdict->cParams;
- if (windowLog) params.cParams.windowLog = windowLog;
- params.fParams = fParams;
+ params.cParams = *cdict_cParams;
+ params.cParams.windowLog = windowLog;
ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
ZSTDcrp_noMemset, zbuff);
- assert(cctx->appliedParams.cParams.strategy == cdict->cParams.strategy);
- assert(cctx->appliedParams.cParams.hashLog == cdict->cParams.hashLog);
- assert(cctx->appliedParams.cParams.chainLog == cdict->cParams.chainLog);
+ assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
+ assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
+ assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);
}
/* copy tables */
- { size_t const chainSize = (cdict->cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict->cParams.chainLog);
- size_t const hSize = (size_t)1 << cdict->cParams.hashLog;
+ { size_t const chainSize = (cdict_cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict_cParams->chainLog);
+ size_t const hSize = (size_t)1 << cdict_cParams->hashLog;
size_t const tableSpace = (chainSize + hSize) * sizeof(U32);
assert((U32*)cctx->blockState.matchState.chainTable == (U32*)cctx->blockState.matchState.hashTable + hSize); /* chainTable must follow hashTable */
assert((U32*)cctx->blockState.matchState.hashTable3 == (U32*)cctx->blockState.matchState.chainTable + chainSize);
@@ -1127,6 +1584,7 @@ static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
assert((U32*)cdict->matchState.hashTable3 == (U32*)cdict->matchState.chainTable + chainSize);
memcpy(cctx->blockState.matchState.hashTable, cdict->matchState.hashTable, tableSpace); /* presumes all tables follow each other */
}
+
/* Zero the hashTable3, since the cdict never fills it */
{ size_t const h3Size = (size_t)1 << cctx->blockState.matchState.hashLog3;
assert(cdict->matchState.hashLog3 == 0);
@@ -1134,14 +1592,14 @@ static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
}
/* copy dictionary offsets */
- {
- ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
+ { ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
dstMatchState->window = srcMatchState->window;
dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
dstMatchState->nextToUpdate3= srcMatchState->nextToUpdate3;
dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
}
+
cctx->dictID = cdict->dictID;
/* copy block state */
@@ -1150,6 +1608,28 @@ static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
return 0;
}
+/* We have a choice between copying the dictionary context into the working
+ * context, or referencing the dictionary context from the working context
+ * in-place. We decide here which strategy to use. */
+static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
+ const ZSTD_CDict* cdict,
+ ZSTD_CCtx_params params,
+ U64 pledgedSrcSize,
+ ZSTD_buffered_policy_e zbuff)
+{
+
+ DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)",
+ (unsigned)pledgedSrcSize);
+
+ if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {
+ return ZSTD_resetCCtx_byAttachingCDict(
+ cctx, cdict, params, pledgedSrcSize, zbuff);
+ } else {
+ return ZSTD_resetCCtx_byCopyingCDict(
+ cctx, cdict, params, pledgedSrcSize, zbuff);
+ }
+}
+
/*! ZSTD_copyCCtx_internal() :
* Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
* Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
@@ -1192,7 +1672,7 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
/* copy dictionary offsets */
{
- ZSTD_matchState_t const* srcMatchState = &srcCCtx->blockState.matchState;
+ const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
dstMatchState->window = srcMatchState->window;
dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
@@ -1294,15 +1774,15 @@ static void ZSTD_reduceIndex (ZSTD_CCtx* zc, const U32 reducerValue)
/* See doc/zstd_compression_format.md for detailed format description */
-size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+static size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
{
+ U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
if (srcSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall);
+ MEM_writeLE24(dst, cBlockHeader24);
memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
- MEM_writeLE24(dst, (U32)(srcSize << 2) + (U32)bt_raw);
- return ZSTD_blockHeaderSize+srcSize;
+ return ZSTD_blockHeaderSize + srcSize;
}
-
static size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
BYTE* const ostart = (BYTE* const)dst;
@@ -1356,16 +1836,27 @@ static size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, cons
}
-static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; }
+/* ZSTD_minGain() :
+ * minimum compression required
+ * to generate a compress block or a compressed literals section.
+ * note : use same formula for both situations */
+static size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
+{
+ U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6;
+ ZSTD_STATIC_ASSERT(ZSTD_btultra == 8);
+ assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
+ return (srcSize >> minlog) + 2;
+}
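/* Worked example (illustrative) : for a 64 KB block, strategies below ZSTD_btultra require
 * a gain of (65536 >> 6) + 2 = 1026 bytes, while ZSTD_btultra (value 8) uses minlog = 7,
 * i.e. (65536 >> 7) + 2 = 514 bytes, before a compressed block or literals section is kept. */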
-static size_t ZSTD_compressLiterals (ZSTD_entropyCTables_t const* prevEntropy,
- ZSTD_entropyCTables_t* nextEntropy,
+static size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
+ ZSTD_hufCTables_t* nextHuf,
ZSTD_strategy strategy, int disableLiteralCompression,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
- U32* workspace, const int bmi2)
+ void* workspace, size_t wkspSize,
+ const int bmi2)
{
- size_t const minGain = ZSTD_minGain(srcSize);
+ size_t const minGain = ZSTD_minGain(srcSize, strategy);
size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
BYTE* const ostart = (BYTE*)dst;
U32 singleStream = srcSize < 256;
@@ -1376,27 +1867,25 @@ static size_t ZSTD_compressLiterals (ZSTD_entropyCTables_t const* prevEntropy,
disableLiteralCompression);
/* Prepare nextEntropy assuming reusing the existing table */
- nextEntropy->hufCTable_repeatMode = prevEntropy->hufCTable_repeatMode;
- memcpy(nextEntropy->hufCTable, prevEntropy->hufCTable,
- sizeof(prevEntropy->hufCTable));
+ memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
if (disableLiteralCompression)
return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
/* small ? don't even attempt compression (speed opt) */
# define COMPRESS_LITERALS_SIZE_MIN 63
- { size_t const minLitSize = (prevEntropy->hufCTable_repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
+ { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
}
if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall); /* not enough space for compression */
- { HUF_repeat repeat = prevEntropy->hufCTable_repeatMode;
+ { HUF_repeat repeat = prevHuf->repeatMode;
int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
- workspace, HUF_WORKSPACE_SIZE, (HUF_CElt*)nextEntropy->hufCTable, &repeat, preferRepeat, bmi2)
+ workspace, wkspSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2)
: HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
- workspace, HUF_WORKSPACE_SIZE, (HUF_CElt*)nextEntropy->hufCTable, &repeat, preferRepeat, bmi2);
+ workspace, wkspSize, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
if (repeat != HUF_repeat_none) {
/* reused the existing table */
hType = set_repeat;
@@ -1404,17 +1893,17 @@ static size_t ZSTD_compressLiterals (ZSTD_entropyCTables_t const* prevEntropy,
}
if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) {
- memcpy(nextEntropy->hufCTable, prevEntropy->hufCTable, sizeof(prevEntropy->hufCTable));
+ memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
}
if (cLitSize==1) {
- memcpy(nextEntropy->hufCTable, prevEntropy->hufCTable, sizeof(prevEntropy->hufCTable));
+ memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
}
if (hType == set_compressed) {
/* using a newly constructed table */
- nextEntropy->hufCTable_repeatMode = HUF_repeat_check;
+ nextHuf->repeatMode = HUF_repeat_check;
}
/* Build header */
@@ -1451,6 +1940,7 @@ void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
BYTE* const mlCodeTable = seqStorePtr->mlCode;
U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
U32 u;
+ assert(nbSeq <= seqStorePtr->maxNbSeq);
for (u=0; u<nbSeq; u++) {
U32 const llv = sequences[u].litLength;
U32 const mlv = sequences[u].matchLength;
@@ -1464,66 +1954,241 @@ void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
}
+
+/**
+ * -log2(x / 256) lookup table for x in [0, 256).
+ * If x == 0: Return 0
+ * Else: Return floor(-log2(x / 256) * 256)
+ */
+static unsigned const kInverseProbabiltyLog256[256] = {
+ 0, 2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162,
+ 1130, 1100, 1073, 1047, 1024, 1001, 980, 960, 941, 923, 906, 889,
+ 874, 859, 844, 830, 817, 804, 791, 779, 768, 756, 745, 734,
+ 724, 714, 704, 694, 685, 676, 667, 658, 650, 642, 633, 626,
+ 618, 610, 603, 595, 588, 581, 574, 567, 561, 554, 548, 542,
+ 535, 529, 523, 517, 512, 506, 500, 495, 489, 484, 478, 473,
+ 468, 463, 458, 453, 448, 443, 438, 434, 429, 424, 420, 415,
+ 411, 407, 402, 398, 394, 390, 386, 382, 377, 373, 370, 366,
+ 362, 358, 354, 350, 347, 343, 339, 336, 332, 329, 325, 322,
+ 318, 315, 311, 308, 305, 302, 298, 295, 292, 289, 286, 282,
+ 279, 276, 273, 270, 267, 264, 261, 258, 256, 253, 250, 247,
+ 244, 241, 239, 236, 233, 230, 228, 225, 222, 220, 217, 215,
+ 212, 209, 207, 204, 202, 199, 197, 194, 192, 190, 187, 185,
+ 182, 180, 178, 175, 173, 171, 168, 166, 164, 162, 159, 157,
+ 155, 153, 151, 149, 146, 144, 142, 140, 138, 136, 134, 132,
+ 130, 128, 126, 123, 121, 119, 117, 115, 114, 112, 110, 108,
+ 106, 104, 102, 100, 98, 96, 94, 93, 91, 89, 87, 85,
+ 83, 82, 80, 78, 76, 74, 73, 71, 69, 67, 66, 64,
+ 62, 61, 59, 57, 55, 54, 52, 50, 49, 47, 46, 44,
+ 42, 41, 39, 37, 36, 34, 33, 31, 30, 28, 26, 25,
+ 23, 22, 20, 19, 17, 16, 14, 13, 11, 10, 8, 7,
+ 5, 4, 2, 1,
+};
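/* Sanity check (illustrative) : for x = 128, -log2(128/256) = 1, so the entry is
 * floor(1 * 256) = 256 == kInverseProbabiltyLog256[128] ; for x = 64 the entry is 512. */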
+
+
+/**
+ * Returns the cost in bits of encoding the distribution described by count
+ * using the entropy bound.
+ */
+static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total)
+{
+ unsigned cost = 0;
+ unsigned s;
+ for (s = 0; s <= max; ++s) {
+ unsigned norm = (unsigned)((256 * count[s]) / total);
+ if (count[s] != 0 && norm == 0)
+ norm = 1;
+ assert(count[s] < total);
+ cost += count[s] * kInverseProbabiltyLog256[norm];
+ }
+ return cost >> 8;
+}
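/* In effect this approximates the Shannon bound
 *     cost(bits) ~= sum_s count[s] * -log2(count[s] / total)
 * using the 8-bit fixed-point table above, hence the final >> 8. */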
+
+
+/**
+ * Returns the cost in bits of encoding the distribution in count using the
+ * table described by norm. The max symbol supported by norm is assumed >= max.
+ * norm must be valid for every symbol with non-zero probability in count.
+ */
+static size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
+ unsigned const* count, unsigned const max)
+{
+ unsigned const shift = 8 - accuracyLog;
+ size_t cost = 0;
+ unsigned s;
+ assert(accuracyLog <= 8);
+ for (s = 0; s <= max; ++s) {
+ unsigned const normAcc = norm[s] != -1 ? norm[s] : 1;
+ unsigned const norm256 = normAcc << shift;
+ assert(norm256 > 0);
+ assert(norm256 < 256);
+ cost += count[s] * kInverseProbabiltyLog256[norm256];
+ }
+ return cost >> 8;
+}
+
+
+static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) {
+ void const* ptr = ctable;
+ U16 const* u16ptr = (U16 const*)ptr;
+ U32 const maxSymbolValue = MEM_read16(u16ptr + 1);
+ return maxSymbolValue;
+}
+
+
+/**
+ * Returns the cost in bits of encoding the distribution in count using ctable.
+ * Returns an error if ctable cannot represent all the symbols in count.
+ */
+static size_t ZSTD_fseBitCost(
+ FSE_CTable const* ctable,
+ unsigned const* count,
+ unsigned const max)
+{
+ unsigned const kAccuracyLog = 8;
+ size_t cost = 0;
+ unsigned s;
+ FSE_CState_t cstate;
+ FSE_initCState(&cstate, ctable);
+ if (ZSTD_getFSEMaxSymbolValue(ctable) < max) {
+ DEBUGLOG(5, "Repeat FSE_CTable has maxSymbolValue %u < %u",
+ ZSTD_getFSEMaxSymbolValue(ctable), max);
+ return ERROR(GENERIC);
+ }
+ for (s = 0; s <= max; ++s) {
+ unsigned const tableLog = cstate.stateLog;
+ unsigned const badCost = (tableLog + 1) << kAccuracyLog;
+ unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);
+ if (count[s] == 0)
+ continue;
+ if (bitCost >= badCost) {
+ DEBUGLOG(5, "Repeat FSE_CTable has Prob[%u] == 0", s);
+ return ERROR(GENERIC);
+ }
+ cost += count[s] * bitCost;
+ }
+ return cost >> kAccuracyLog;
+}
+
+/**
+ * Returns the cost in bytes of encoding the normalized count header.
+ * Returns an error if any of the helper functions return an error.
+ */
+static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
+ size_t const nbSeq, unsigned const FSELog)
+{
+ BYTE wksp[FSE_NCOUNTBOUND];
+ S16 norm[MaxSeq + 1];
+ const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
+ CHECK_F(FSE_normalizeCount(norm, tableLog, count, nbSeq, max));
+ return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
+}
+
+
typedef enum {
ZSTD_defaultDisallowed = 0,
ZSTD_defaultAllowed = 1
} ZSTD_defaultPolicy_e;
-MEM_STATIC
-symbolEncodingType_e ZSTD_selectEncodingType(
- FSE_repeat* repeatMode, size_t const mostFrequent, size_t nbSeq,
- U32 defaultNormLog, ZSTD_defaultPolicy_e const isDefaultAllowed)
+MEM_STATIC symbolEncodingType_e
+ZSTD_selectEncodingType(
+ FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
+ size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
+ FSE_CTable const* prevCTable,
+ short const* defaultNorm, U32 defaultNormLog,
+ ZSTD_defaultPolicy_e const isDefaultAllowed,
+ ZSTD_strategy const strategy)
{
-#define MIN_SEQ_FOR_DYNAMIC_FSE 64
-#define MAX_SEQ_FOR_STATIC_FSE 1000
ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
- if ((mostFrequent == nbSeq) && (!isDefaultAllowed || nbSeq > 2)) {
+ if (mostFrequent == nbSeq) {
+ *repeatMode = FSE_repeat_none;
+ if (isDefaultAllowed && nbSeq <= 2) {
+            /* Prefer set_basic over set_rle when there are 2 or fewer symbols,
+ * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
+ * If basic encoding isn't possible, always choose RLE.
+ */
+ DEBUGLOG(5, "Selected set_basic");
+ return set_basic;
+ }
DEBUGLOG(5, "Selected set_rle");
- /* Prefer set_basic over set_rle when there are 2 or less symbols,
- * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
- * If basic encoding isn't possible, always choose RLE.
- */
- *repeatMode = FSE_repeat_check;
return set_rle;
}
- if ( isDefaultAllowed
- && (*repeatMode == FSE_repeat_valid) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
- DEBUGLOG(5, "Selected set_repeat");
- return set_repeat;
- }
- if ( isDefaultAllowed
- && ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (defaultNormLog-1)))) ) {
- DEBUGLOG(5, "Selected set_basic");
- /* The format allows default tables to be repeated, but it isn't useful.
- * When using simple heuristics to select encoding type, we don't want
- * to confuse these tables with dictionaries. When running more careful
- * analysis, we don't need to waste time checking both repeating tables
- * and default tables.
- */
- *repeatMode = FSE_repeat_none;
- return set_basic;
+ if (strategy < ZSTD_lazy) {
+ if (isDefaultAllowed) {
+ size_t const staticFse_nbSeq_max = 1000;
+ size_t const mult = 10 - strategy;
+ size_t const baseLog = 3;
+ size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog; /* 28-36 for offset, 56-72 for lengths */
+ assert(defaultNormLog >= 5 && defaultNormLog <= 6); /* xx_DEFAULTNORMLOG */
+ assert(mult <= 9 && mult >= 7);
+ if ( (*repeatMode == FSE_repeat_valid)
+ && (nbSeq < staticFse_nbSeq_max) ) {
+ DEBUGLOG(5, "Selected set_repeat");
+ return set_repeat;
+ }
+ if ( (nbSeq < dynamicFse_nbSeq_min)
+ || (mostFrequent < (nbSeq >> (defaultNormLog-1))) ) {
+ DEBUGLOG(5, "Selected set_basic");
+ /* The format allows default tables to be repeated, but it isn't useful.
+ * When using simple heuristics to select encoding type, we don't want
+ * to confuse these tables with dictionaries. When running more careful
+ * analysis, we don't need to waste time checking both repeating tables
+ * and default tables.
+ */
+ *repeatMode = FSE_repeat_none;
+ return set_basic;
+ }
+ }
+ } else {
+ size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC);
+ size_t const repeatCost = *repeatMode != FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC);
+ size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog);
+ size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq);
+
+ if (isDefaultAllowed) {
+ assert(!ZSTD_isError(basicCost));
+ assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost)));
+ }
+ assert(!ZSTD_isError(NCountCost));
+ assert(compressedCost < ERROR(maxCode));
+ DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u",
+ (unsigned)basicCost, (unsigned)repeatCost, (unsigned)compressedCost);
+ if (basicCost <= repeatCost && basicCost <= compressedCost) {
+ DEBUGLOG(5, "Selected set_basic");
+ assert(isDefaultAllowed);
+ *repeatMode = FSE_repeat_none;
+ return set_basic;
+ }
+ if (repeatCost <= compressedCost) {
+ DEBUGLOG(5, "Selected set_repeat");
+ assert(!ZSTD_isError(repeatCost));
+ return set_repeat;
+ }
+ assert(compressedCost < basicCost && compressedCost < repeatCost);
}
DEBUGLOG(5, "Selected set_compressed");
*repeatMode = FSE_repeat_check;
return set_compressed;
}
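
(Editor's aside) As a quick check of the heuristic branch above: with the default norm logs implied by the 28-36 / 56-72 comment (5 for offsets, 6 for the length codes) and mult = 10 - strategy for the strategies that take this branch (ZSTD_fast through ZSTD_greedy, so 9 down to 7), dynamicFse_nbSeq_min works out exactly to those ranges. A minimal standalone sketch of the arithmetic, for illustration only; names here are not part of the patch:

    #include <stdio.h>

    int main(void)
    {
        unsigned strategy;
        for (strategy = 1; strategy <= 3; strategy++) {            /* ZSTD_fast..ZSTD_greedy */
            size_t const mult = 10 - strategy;                     /* 9, 8, 7 */
            size_t const offMin = (((size_t)1 << 5) * mult) >> 3;  /* offsets : 36, 32, 28 */
            size_t const lenMin = (((size_t)1 << 6) * mult) >> 3;  /* lengths : 72, 64, 56 */
            printf("strategy %u: fewer than %zu offset / %zu length sequences => keep the default table\n",
                   strategy, offMin, lenMin);
        }
        return 0;
    }
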
-MEM_STATIC
-size_t ZSTD_buildCTable(void* dst, size_t dstCapacity,
- FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
- U32* count, U32 max,
- BYTE const* codeTable, size_t nbSeq,
- S16 const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
- FSE_CTable const* prevCTable, size_t prevCTableSize,
- void* workspace, size_t workspaceSize)
+MEM_STATIC size_t
+ZSTD_buildCTable(void* dst, size_t dstCapacity,
+ FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
+ unsigned* count, U32 max,
+ const BYTE* codeTable, size_t nbSeq,
+ const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
+ const FSE_CTable* prevCTable, size_t prevCTableSize,
+ void* workspace, size_t workspaceSize)
{
BYTE* op = (BYTE*)dst;
- BYTE const* const oend = op + dstCapacity;
+ const BYTE* const oend = op + dstCapacity;
+ DEBUGLOG(6, "ZSTD_buildCTable (dstCapacity=%u)", (unsigned)dstCapacity);
switch (type) {
case set_rle:
- *op = codeTable[0];
CHECK_F(FSE_buildCTable_rle(nextCTable, (BYTE)max));
+ if (dstCapacity==0) return ERROR(dstSize_tooSmall);
+ *op = codeTable[0];
return 1;
case set_repeat:
memcpy(nextCTable, prevCTable, prevCTableSize);
@@ -1565,6 +2230,9 @@ ZSTD_encodeSequences_body(
FSE_CState_t stateLitLength;
CHECK_E(BIT_initCStream(&blockStream, dst, dstCapacity), dstSize_tooSmall); /* not enough space remaining */
+ DEBUGLOG(6, "available space for bitstream : %i (dstCapacity=%u)",
+ (int)(blockStream.endPtr - blockStream.startPtr),
+ (unsigned)dstCapacity);
/* first symbols */
FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
@@ -1597,9 +2265,9 @@ ZSTD_encodeSequences_body(
U32 const ofBits = ofCode;
U32 const mlBits = ML_bits[mlCode];
DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
- sequences[n].litLength,
- sequences[n].matchLength + MINMATCH,
- sequences[n].offset);
+ (unsigned)sequences[n].litLength,
+ (unsigned)sequences[n].matchLength + MINMATCH,
+ (unsigned)sequences[n].offset);
/* 32b*/ /* 64b*/
/* (7)*/ /* (7)*/
FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */ /* 15 */
@@ -1624,6 +2292,7 @@ ZSTD_encodeSequences_body(
BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */
}
BIT_flushBits(&blockStream); /* (7)*/
+ DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr));
} }
DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog);
@@ -1674,13 +2343,14 @@ ZSTD_encodeSequences_bmi2(
#endif
-size_t ZSTD_encodeSequences(
+static size_t ZSTD_encodeSequences(
void* dst, size_t dstCapacity,
FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
{
+ DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity);
#if DYNAMIC_BMI2
if (bmi2) {
return ZSTD_encodeSequences_bmi2(dst, dstCapacity,
@@ -1698,18 +2368,23 @@ size_t ZSTD_encodeSequences(
sequences, nbSeq, longOffsets);
}
-MEM_STATIC size_t ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
- ZSTD_entropyCTables_t const* prevEntropy,
- ZSTD_entropyCTables_t* nextEntropy,
- ZSTD_CCtx_params const* cctxParams,
- void* dst, size_t dstCapacity, U32* workspace,
- const int bmi2)
+/* ZSTD_compressSequences_internal():
+ * actually compresses both literals and sequences */
+MEM_STATIC size_t
+ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
+ const ZSTD_entropyCTables_t* prevEntropy,
+ ZSTD_entropyCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ void* dst, size_t dstCapacity,
+ void* workspace, size_t wkspSize,
+ const int bmi2)
{
const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
- U32 count[MaxSeq+1];
- FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable;
- FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable;
- FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable;
+ ZSTD_strategy const strategy = cctxParams->cParams.strategy;
+ unsigned count[MaxSeq+1];
+ FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
+ FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
+ FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */
const seqDef* const sequences = seqStorePtr->sequencesStart;
const BYTE* const ofCodeTable = seqStorePtr->ofCode;
@@ -1720,18 +2395,22 @@ MEM_STATIC size_t ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
BYTE* op = ostart;
size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
BYTE* seqHead;
+ BYTE* lastNCount = NULL;
ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
+ DEBUGLOG(5, "ZSTD_compressSequences_internal");
/* Compress literals */
{ const BYTE* const literals = seqStorePtr->litStart;
size_t const litSize = seqStorePtr->lit - literals;
+ int const disableLiteralCompression = (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
size_t const cSize = ZSTD_compressLiterals(
- prevEntropy, nextEntropy,
- cctxParams->cParams.strategy, cctxParams->disableLiteralCompression,
+ &prevEntropy->huf, &nextEntropy->huf,
+ cctxParams->cParams.strategy, disableLiteralCompression,
op, dstCapacity,
literals, litSize,
- workspace, bmi2);
+ workspace, wkspSize,
+ bmi2);
if (ZSTD_isError(cSize))
return cSize;
assert(cSize <= dstCapacity);
@@ -1747,13 +2426,9 @@ MEM_STATIC size_t ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
else
op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
if (nbSeq==0) {
- memcpy(nextEntropy->litlengthCTable, prevEntropy->litlengthCTable, sizeof(prevEntropy->litlengthCTable));
- nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
- memcpy(nextEntropy->offcodeCTable, prevEntropy->offcodeCTable, sizeof(prevEntropy->offcodeCTable));
- nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
- memcpy(nextEntropy->matchlengthCTable, prevEntropy->matchlengthCTable, sizeof(prevEntropy->matchlengthCTable));
- nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
- return op - ostart;
+ /* Copy the old tables over as if we repeated them */
+ memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
+ return op - ostart;
}
/* seqHead : flags for FSE encoding type */
@@ -1762,44 +2437,66 @@ MEM_STATIC size_t ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
/* convert length/distances into codes */
ZSTD_seqToCodes(seqStorePtr);
/* build CTable for Literal Lengths */
- { U32 max = MaxLL;
- size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace);
+ { unsigned max = MaxLL;
+ size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
DEBUGLOG(5, "Building LL table");
- nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
- LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode, mostFrequent, nbSeq, LL_defaultNormLog, ZSTD_defaultAllowed);
+ nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode;
+ LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode,
+ count, max, mostFrequent, nbSeq,
+ LLFSELog, prevEntropy->fse.litlengthCTable,
+ LL_defaultNorm, LL_defaultNormLog,
+ ZSTD_defaultAllowed, strategy);
+ assert(set_basic < set_compressed && set_rle < set_compressed);
+ assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
{ size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
- count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
- prevEntropy->litlengthCTable, sizeof(prevEntropy->litlengthCTable),
- workspace, HUF_WORKSPACE_SIZE);
+ count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
+ prevEntropy->fse.litlengthCTable, sizeof(prevEntropy->fse.litlengthCTable),
+ workspace, wkspSize);
if (ZSTD_isError(countSize)) return countSize;
+ if (LLtype == set_compressed)
+ lastNCount = op;
op += countSize;
} }
/* build CTable for Offsets */
- { U32 max = MaxOff;
- size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace);
+ { unsigned max = MaxOff;
+ size_t const mostFrequent = HIST_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
/* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
DEBUGLOG(5, "Building OF table");
- nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
- Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode, mostFrequent, nbSeq, OF_defaultNormLog, defaultPolicy);
+ nextEntropy->fse.offcode_repeatMode = prevEntropy->fse.offcode_repeatMode;
+ Offtype = ZSTD_selectEncodingType(&nextEntropy->fse.offcode_repeatMode,
+ count, max, mostFrequent, nbSeq,
+ OffFSELog, prevEntropy->fse.offcodeCTable,
+ OF_defaultNorm, OF_defaultNormLog,
+ defaultPolicy, strategy);
+ assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
{ size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
- count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
- prevEntropy->offcodeCTable, sizeof(prevEntropy->offcodeCTable),
- workspace, HUF_WORKSPACE_SIZE);
+ count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
+ prevEntropy->fse.offcodeCTable, sizeof(prevEntropy->fse.offcodeCTable),
+ workspace, wkspSize);
if (ZSTD_isError(countSize)) return countSize;
+ if (Offtype == set_compressed)
+ lastNCount = op;
op += countSize;
} }
/* build CTable for MatchLengths */
- { U32 max = MaxML;
- size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace);
- DEBUGLOG(5, "Building ML table");
- nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
- MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode, mostFrequent, nbSeq, ML_defaultNormLog, ZSTD_defaultAllowed);
+ { unsigned max = MaxML;
+ size_t const mostFrequent = HIST_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
+ DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
+ nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode;
+ MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode,
+ count, max, mostFrequent, nbSeq,
+ MLFSELog, prevEntropy->fse.matchlengthCTable,
+ ML_defaultNorm, ML_defaultNormLog,
+ ZSTD_defaultAllowed, strategy);
+ assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
{ size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
- count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
- prevEntropy->matchlengthCTable, sizeof(prevEntropy->matchlengthCTable),
- workspace, HUF_WORKSPACE_SIZE);
+ count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
+ prevEntropy->fse.matchlengthCTable, sizeof(prevEntropy->fse.matchlengthCTable),
+ workspace, wkspSize);
if (ZSTD_isError(countSize)) return countSize;
+ if (MLtype == set_compressed)
+ lastNCount = op;
op += countSize;
} }
@@ -1814,21 +2511,42 @@ MEM_STATIC size_t ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
longOffsets, bmi2);
if (ZSTD_isError(bitstreamSize)) return bitstreamSize;
op += bitstreamSize;
+ /* zstd versions <= 1.3.4 mistakenly report corruption when
+ * FSE_readNCount() receives a buffer < 4 bytes.
+ * Fixed by https://github.com/facebook/zstd/pull/1146.
+ * This can happen when the last set_compressed table present is 2
+ * bytes and the bitstream is only one byte.
+ * In this exceedingly rare case, we will simply emit an uncompressed
+ * block, since it isn't worth optimizing.
+ */
+ if (lastNCount && (op - lastNCount) < 4) {
+ /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
+ assert(op - lastNCount == 3);
+ DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
+ "emitting an uncompressed block.");
+ return 0;
+ }
}
+ DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart));
return op - ostart;
}
-MEM_STATIC size_t ZSTD_compressSequences(seqStore_t* seqStorePtr,
- ZSTD_entropyCTables_t const* prevEntropy,
- ZSTD_entropyCTables_t* nextEntropy,
- ZSTD_CCtx_params const* cctxParams,
- void* dst, size_t dstCapacity,
- size_t srcSize, U32* workspace, int bmi2)
+MEM_STATIC size_t
+ZSTD_compressSequences(seqStore_t* seqStorePtr,
+ const ZSTD_entropyCTables_t* prevEntropy,
+ ZSTD_entropyCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ void* dst, size_t dstCapacity,
+ size_t srcSize,
+ void* workspace, size_t wkspSize,
+ int bmi2)
{
size_t const cSize = ZSTD_compressSequences_internal(
- seqStorePtr, prevEntropy, nextEntropy, cctxParams, dst, dstCapacity,
- workspace, bmi2);
+ seqStorePtr, prevEntropy, nextEntropy, cctxParams,
+ dst, dstCapacity,
+ workspace, wkspSize, bmi2);
+ if (cSize == 0) return 0;
/* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
* Since we ran out of space, block must be not compressible, so fall back to raw uncompressed block.
*/
@@ -1837,40 +2555,57 @@ MEM_STATIC size_t ZSTD_compressSequences(seqStore_t* seqStorePtr,
if (ZSTD_isError(cSize)) return cSize;
/* Check compressibility */
- { size_t const maxCSize = srcSize - ZSTD_minGain(srcSize); /* note : fixed formula, maybe should depend on compression level, or strategy */
+ { size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
if (cSize >= maxCSize) return 0; /* block not compressed */
}
- /* We check that dictionaries have offset codes available for the first
- * block. After the first block, the offcode table might not have large
- * enough codes to represent the offsets in the data.
- */
- if (nextEntropy->offcode_repeatMode == FSE_repeat_valid)
- nextEntropy->offcode_repeatMode = FSE_repeat_check;
-
return cSize;
}
/* ZSTD_selectBlockCompressor() :
* Not static, but internal use only (used by long distance matcher)
* assumption : strat is a valid strategy */
-ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict)
+ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode)
{
- static const ZSTD_blockCompressor blockCompressor[2][(unsigned)ZSTD_btultra+1] = {
+ static const ZSTD_blockCompressor blockCompressor[3][ZSTD_STRATEGY_MAX+1] = {
{ ZSTD_compressBlock_fast /* default for 0 */,
- ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy,
- ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2, ZSTD_compressBlock_btlazy2,
- ZSTD_compressBlock_btopt, ZSTD_compressBlock_btultra },
+ ZSTD_compressBlock_fast,
+ ZSTD_compressBlock_doubleFast,
+ ZSTD_compressBlock_greedy,
+ ZSTD_compressBlock_lazy,
+ ZSTD_compressBlock_lazy2,
+ ZSTD_compressBlock_btlazy2,
+ ZSTD_compressBlock_btopt,
+ ZSTD_compressBlock_btultra,
+ ZSTD_compressBlock_btultra2 },
{ ZSTD_compressBlock_fast_extDict /* default for 0 */,
- ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict,
- ZSTD_compressBlock_lazy_extDict,ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict,
- ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btultra_extDict }
+ ZSTD_compressBlock_fast_extDict,
+ ZSTD_compressBlock_doubleFast_extDict,
+ ZSTD_compressBlock_greedy_extDict,
+ ZSTD_compressBlock_lazy_extDict,
+ ZSTD_compressBlock_lazy2_extDict,
+ ZSTD_compressBlock_btlazy2_extDict,
+ ZSTD_compressBlock_btopt_extDict,
+ ZSTD_compressBlock_btultra_extDict,
+ ZSTD_compressBlock_btultra_extDict },
+ { ZSTD_compressBlock_fast_dictMatchState /* default for 0 */,
+ ZSTD_compressBlock_fast_dictMatchState,
+ ZSTD_compressBlock_doubleFast_dictMatchState,
+ ZSTD_compressBlock_greedy_dictMatchState,
+ ZSTD_compressBlock_lazy_dictMatchState,
+ ZSTD_compressBlock_lazy2_dictMatchState,
+ ZSTD_compressBlock_btlazy2_dictMatchState,
+ ZSTD_compressBlock_btopt_dictMatchState,
+ ZSTD_compressBlock_btultra_dictMatchState,
+ ZSTD_compressBlock_btultra_dictMatchState }
};
+ ZSTD_blockCompressor selectedCompressor;
ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);
- assert((U32)strat >= (U32)ZSTD_fast);
- assert((U32)strat <= (U32)ZSTD_btultra);
- return blockCompressor[extDict!=0][(U32)strat];
+ assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
+ selectedCompressor = blockCompressor[(int)dictMode][(int)strat];
+ assert(selectedCompressor != NULL);
+ return selectedCompressor;
}
static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
@@ -1880,7 +2615,7 @@ static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
seqStorePtr->lit += lastLLSize;
}
-static void ZSTD_resetSeqStore(seqStore_t* ssPtr)
+void ZSTD_resetSeqStore(seqStore_t* ssPtr)
{
ssPtr->lit = ssPtr->litStart;
ssPtr->sequences = ssPtr->sequencesStart;
@@ -1892,24 +2627,38 @@ static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
const void* src, size_t srcSize)
{
ZSTD_matchState_t* const ms = &zc->blockState.matchState;
+ size_t cSize;
DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
- (U32)dstCapacity, ms->window.dictLimit, ms->nextToUpdate);
+ (unsigned)dstCapacity, (unsigned)ms->window.dictLimit, (unsigned)ms->nextToUpdate);
+ assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
+
+ /* Assert that we have correctly flushed the ctx params into the ms's copy */
+ ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
+
if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
- ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.searchLength);
- return 0; /* don't even attempt compression below a certain srcSize */
+ ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch);
+ cSize = 0;
+ goto out; /* don't even attempt compression below a certain srcSize */
}
ZSTD_resetSeqStore(&(zc->seqStore));
+ ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy; /* required for optimal parser to read stats from dictionary */
+
+ /* a gap between an attached dict and the current window is not safe,
+ * they must remain adjacent,
+ * and when that stops being the case, the dict must be unset */
+ assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit);
/* limited update after a very long match */
{ const BYTE* const base = ms->window.base;
const BYTE* const istart = (const BYTE*)src;
const U32 current = (U32)(istart-base);
+ if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1)); /* ensure no overflow */
if (current > ms->nextToUpdate + 384)
ms->nextToUpdate = current - MIN(192, (U32)(current - ms->nextToUpdate - 384));
}
/* select and store sequences */
- { U32 const extDict = ZSTD_window_hasExtDict(ms->window);
+ { ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms);
size_t lastLLSize;
{ int i;
for (i = 0; i < ZSTD_REP_NUM; ++i)
@@ -1922,8 +2671,7 @@ static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
ZSTD_ldm_blockCompress(&zc->externSeqStore,
ms, &zc->seqStore,
zc->blockState.nextCBlock->rep,
- &zc->appliedParams.cParams,
- src, srcSize, extDict);
+ src, srcSize);
assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
} else if (zc->appliedParams.ldmParams.enableLdm) {
rawSeqStore_t ldmSeqStore = {NULL, 0, 0, 0};
@@ -1939,31 +2687,40 @@ static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
ZSTD_ldm_blockCompress(&ldmSeqStore,
ms, &zc->seqStore,
zc->blockState.nextCBlock->rep,
- &zc->appliedParams.cParams,
- src, srcSize, extDict);
+ src, srcSize);
assert(ldmSeqStore.pos == ldmSeqStore.size);
} else { /* not long range mode */
- ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, extDict);
- lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, &zc->appliedParams.cParams, src, srcSize);
+ ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, dictMode);
+ lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
}
{ const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
} }
/* encode sequences and literals */
- { size_t const cSize = ZSTD_compressSequences(&zc->seqStore,
- &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
- &zc->appliedParams,
- dst, dstCapacity,
- srcSize, zc->entropyWorkspace, zc->bmi2);
- if (ZSTD_isError(cSize) || cSize == 0) return cSize;
- /* confirm repcodes and entropy tables */
- { ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;
- zc->blockState.prevCBlock = zc->blockState.nextCBlock;
- zc->blockState.nextCBlock = tmp;
- }
- return cSize;
+ cSize = ZSTD_compressSequences(&zc->seqStore,
+ &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
+ &zc->appliedParams,
+ dst, dstCapacity,
+ srcSize,
+ zc->entropyWorkspace, HUF_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
+ zc->bmi2);
+
+out:
+ if (!ZSTD_isError(cSize) && cSize != 0) {
+ /* confirm repcodes and entropy tables when emitting a compressed block */
+ ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;
+ zc->blockState.prevCBlock = zc->blockState.nextCBlock;
+ zc->blockState.nextCBlock = tmp;
}
+ /* We check that dictionaries have offset codes available for the first
+ * block. After the first block, the offcode table might not have large
+ * enough codes to represent the offsets in the data.
+ */
+ if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
+ zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
+
+ return cSize;
}
@@ -1987,7 +2744,7 @@ static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
assert(cctx->appliedParams.cParams.windowLog <= 31);
- DEBUGLOG(5, "ZSTD_compress_frameChunk (blockSize=%u)", (U32)blockSize);
+ DEBUGLOG(5, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
if (cctx->appliedParams.fParams.checksumFlag && srcSize)
XXH64_update(&cctx->xxhState, src, srcSize);
@@ -2005,13 +2762,13 @@ static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
-
ZSTD_reduceIndex(cctx, correction);
if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
else ms->nextToUpdate -= correction;
ms->loadedDictEnd = 0;
+ ms->dictMatchState = NULL;
}
- ZSTD_window_enforceMaxDist(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd);
+ ZSTD_window_enforceMaxDist(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;
{ size_t cSize = ZSTD_compressBlock_internal(cctx,
@@ -2020,11 +2777,8 @@ static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
if (ZSTD_isError(cSize)) return cSize;
if (cSize == 0) { /* block is not compressible */
- U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(blockSize << 3);
- if (blockSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall);
- MEM_writeLE32(op, cBlockHeader24); /* 4th byte will be overwritten */
- memcpy(op + ZSTD_blockHeaderSize, ip, blockSize);
- cSize = ZSTD_blockHeaderSize + blockSize;
+ cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
+ if (ZSTD_isError(cSize)) return cSize;
} else {
U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
MEM_writeLE24(op, cBlockHeader24);
@@ -2038,7 +2792,7 @@ static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
assert(dstCapacity >= cSize);
dstCapacity -= cSize;
DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
- (U32)cSize);
+ (unsigned)cSize);
} }
if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
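
(Editor's aside) The raw-block fallback now goes through ZSTD_noCompressBlock(). Reconstructed from the removed inline code above, the helper essentially has to write the 3-byte little-endian block header (last-block bit, block type bt_raw = 0, size << 3) and copy the payload verbatim. A hedged standalone sketch, not the helper's actual body; all names below are local to the example:

    #include <string.h>
    #include <stdint.h>

    #define BLOCK_HEADER_SIZE 3   /* corresponds to ZSTD_blockHeaderSize */

    /* Sketch only: mirrors the removed inline code; the real helper reports ZSTD error codes. */
    static size_t rawBlock(void* dst, size_t dstCapacity,
                           const void* src, size_t srcSize, uint32_t lastBlock)
    {
        uint32_t const header24 = lastBlock + (0u << 1 /* bt_raw */) + (uint32_t)(srcSize << 3);
        uint8_t* const op = (uint8_t*)dst;
        if (srcSize + BLOCK_HEADER_SIZE > dstCapacity) return (size_t)-1;  /* dstSize_tooSmall */
        op[0] = (uint8_t)(header24);        /* little-endian 24-bit header */
        op[1] = (uint8_t)(header24 >> 8);
        op[2] = (uint8_t)(header24 >> 16);
        memcpy(op + BLOCK_HEADER_SIZE, src, srcSize);
        return BLOCK_HEADER_SIZE + srcSize;
    }
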
@@ -2060,9 +2814,10 @@ static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
BYTE const frameHeaderDecriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
size_t pos=0;
- if (dstCapacity < ZSTD_frameHeaderSize_max) return ERROR(dstSize_tooSmall);
+ assert(!(params.fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
+ if (dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX) return ERROR(dstSize_tooSmall);
DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
- !params.fParams.noDictIDFlag, dictID, dictIDSizeCode);
+ !params.fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
if (params.format == ZSTD_f_zstd1) {
MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
@@ -2122,11 +2877,11 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
const void* src, size_t srcSize,
U32 frame, U32 lastFrameChunk)
{
- ZSTD_matchState_t* ms = &cctx->blockState.matchState;
+ ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
size_t fhSize = 0;
DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
- cctx->stage, (U32)srcSize);
+ cctx->stage, (unsigned)srcSize);
if (cctx->stage==ZSTDcs_created) return ERROR(stage_wrong); /* missing init (ZSTD_compressBegin) */
if (frame && (cctx->stage==ZSTDcs_init)) {
@@ -2143,20 +2898,39 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
if (!ZSTD_window_update(&ms->window, src, srcSize)) {
ms->nextToUpdate = ms->window.dictLimit;
}
- if (cctx->appliedParams.ldmParams.enableLdm)
+ if (cctx->appliedParams.ldmParams.enableLdm) {
ZSTD_window_update(&cctx->ldmState.window, src, srcSize);
+ }
+
+ if (!frame) {
+ /* overflow check and correction for block mode */
+ if (ZSTD_window_needOverflowCorrection(ms->window, (const char*)src + srcSize)) {
+ U32 const cycleLog = ZSTD_cycleLog(cctx->appliedParams.cParams.chainLog, cctx->appliedParams.cParams.strategy);
+ U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, 1 << cctx->appliedParams.cParams.windowLog, src);
+ ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
+ ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
+ ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
+ ZSTD_reduceIndex(cctx, correction);
+ if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
+ else ms->nextToUpdate -= correction;
+ ms->loadedDictEnd = 0;
+ ms->dictMatchState = NULL;
+ }
+ }
- DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (U32)cctx->blockSize);
+ DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
{ size_t const cSize = frame ?
ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize);
if (ZSTD_isError(cSize)) return cSize;
cctx->consumedSrcSize += srcSize;
cctx->producedCSize += (cSize + fhSize);
- if (cctx->appliedParams.fParams.contentSizeFlag) { /* control src size */
+ assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
+ if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */
+ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
if (cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne) {
DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize >= %u",
- (U32)cctx->pledgedSrcSizePlusOne-1, (U32)cctx->consumedSrcSize);
+ (unsigned)cctx->pledgedSrcSizePlusOne-1, (unsigned)cctx->consumedSrcSize);
return ERROR(srcSize_wrong);
}
}
@@ -2168,7 +2942,7 @@ size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize)
{
- DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (U32)srcSize);
+ DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize);
return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
}
@@ -2184,44 +2958,51 @@ size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const
{
size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
if (srcSize > blockSizeMax) return ERROR(srcSize_wrong);
+
return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
}
/*! ZSTD_loadDictionaryContent() :
* @return : 0, or an error code
*/
-static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const void* src, size_t srcSize)
+static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
+ ZSTD_CCtx_params const* params,
+ const void* src, size_t srcSize,
+ ZSTD_dictTableLoadMethod_e dtlm)
{
const BYTE* const ip = (const BYTE*) src;
const BYTE* const iend = ip + srcSize;
- ZSTD_compressionParameters const* cParams = &params->cParams;
ZSTD_window_update(&ms->window, src, srcSize);
ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
+ /* Assert that the ms params match the params we're being given */
+ ZSTD_assertEqualCParams(params->cParams, ms->cParams);
+
if (srcSize <= HASH_READ_SIZE) return 0;
switch(params->cParams.strategy)
{
case ZSTD_fast:
- ZSTD_fillHashTable(ms, cParams, iend);
+ ZSTD_fillHashTable(ms, iend, dtlm);
break;
case ZSTD_dfast:
- ZSTD_fillDoubleHashTable(ms, cParams, iend);
+ ZSTD_fillDoubleHashTable(ms, iend, dtlm);
break;
case ZSTD_greedy:
case ZSTD_lazy:
case ZSTD_lazy2:
if (srcSize >= HASH_READ_SIZE)
- ZSTD_insertAndFindFirstIndex(ms, cParams, iend-HASH_READ_SIZE);
+ ZSTD_insertAndFindFirstIndex(ms, iend-HASH_READ_SIZE);
break;
case ZSTD_btlazy2: /* we want the dictionary table fully sorted */
case ZSTD_btopt:
case ZSTD_btultra:
+ case ZSTD_btultra2:
if (srcSize >= HASH_READ_SIZE)
- ZSTD_updateTree(ms, cParams, iend-HASH_READ_SIZE, iend);
+ ZSTD_updateTree(ms, iend-HASH_READ_SIZE, iend);
break;
default:
@@ -2256,7 +3037,12 @@ static size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSym
* assumptions : magic number supposed already checked
* dictSize supposed > 8
*/
-static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs, ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const void* dict, size_t dictSize, void* workspace)
+static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
+ ZSTD_matchState_t* ms,
+ ZSTD_CCtx_params const* params,
+ const void* dict, size_t dictSize,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ void* workspace)
{
const BYTE* dictPtr = (const BYTE*)dict;
const BYTE* const dictEnd = dictPtr + dictSize;
@@ -2265,13 +3051,15 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs, ZSTD_matc
size_t dictID;
ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
+ assert(dictSize > 8);
+ assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);
dictPtr += 4; /* skip magic number */
dictID = params->fParams.noDictIDFlag ? 0 : MEM_readLE32(dictPtr);
dictPtr += 4;
{ unsigned maxSymbolValue = 255;
- size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.hufCTable, &maxSymbolValue, dictPtr, dictEnd-dictPtr);
+ size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr, dictEnd-dictPtr);
if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted);
if (maxSymbolValue < 255) return ERROR(dictionary_corrupted);
dictPtr += hufHeaderSize;
@@ -2282,7 +3070,10 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs, ZSTD_matc
if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
/* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
- CHECK_E( FSE_buildCTable_wksp(bs->entropy.offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, workspace, HUF_WORKSPACE_SIZE),
+ /* fill all offset symbols to avoid garbage at end of table */
+ CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.offcodeCTable,
+ offcodeNCount, MaxOff, offcodeLog,
+ workspace, HUF_WORKSPACE_SIZE),
dictionary_corrupted);
dictPtr += offcodeHeaderSize;
}
@@ -2294,7 +3085,9 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs, ZSTD_matc
if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
/* Every match length code must have non-zero probability */
CHECK_F( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
- CHECK_E( FSE_buildCTable_wksp(bs->entropy.matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, workspace, HUF_WORKSPACE_SIZE),
+ CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.matchlengthCTable,
+ matchlengthNCount, matchlengthMaxValue, matchlengthLog,
+ workspace, HUF_WORKSPACE_SIZE),
dictionary_corrupted);
dictPtr += matchlengthHeaderSize;
}
@@ -2306,7 +3099,9 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs, ZSTD_matc
if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
/* Every literal length code must have non-zero probability */
CHECK_F( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
- CHECK_E( FSE_buildCTable_wksp(bs->entropy.litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, workspace, HUF_WORKSPACE_SIZE),
+ CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.litlengthCTable,
+ litlengthNCount, litlengthMaxValue, litlengthLog,
+ workspace, HUF_WORKSPACE_SIZE),
dictionary_corrupted);
dictPtr += litlengthHeaderSize;
}
@@ -2332,22 +3127,25 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs, ZSTD_matc
if (bs->rep[u] > dictContentSize) return ERROR(dictionary_corrupted);
} }
- bs->entropy.hufCTable_repeatMode = HUF_repeat_valid;
- bs->entropy.offcode_repeatMode = FSE_repeat_valid;
- bs->entropy.matchlength_repeatMode = FSE_repeat_valid;
- bs->entropy.litlength_repeatMode = FSE_repeat_valid;
- CHECK_F(ZSTD_loadDictionaryContent(ms, params, dictPtr, dictContentSize));
+ bs->entropy.huf.repeatMode = HUF_repeat_valid;
+ bs->entropy.fse.offcode_repeatMode = FSE_repeat_valid;
+ bs->entropy.fse.matchlength_repeatMode = FSE_repeat_valid;
+ bs->entropy.fse.litlength_repeatMode = FSE_repeat_valid;
+ CHECK_F(ZSTD_loadDictionaryContent(ms, params, dictPtr, dictContentSize, dtlm));
return dictID;
}
}
/** ZSTD_compress_insertDictionary() :
* @return : dictID, or an error code */
-static size_t ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs, ZSTD_matchState_t* ms,
- ZSTD_CCtx_params const* params,
- const void* dict, size_t dictSize,
- ZSTD_dictContentType_e dictContentType,
- void* workspace)
+static size_t
+ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
+ ZSTD_matchState_t* ms,
+ const ZSTD_CCtx_params* params,
+ const void* dict, size_t dictSize,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ void* workspace)
{
DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
if ((dict==NULL) || (dictSize<=8)) return 0;
@@ -2356,12 +3154,12 @@ static size_t ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs, ZS
/* dict restricted modes */
if (dictContentType == ZSTD_dct_rawContent)
- return ZSTD_loadDictionaryContent(ms, params, dict, dictSize);
+ return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm);
if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
if (dictContentType == ZSTD_dct_auto) {
DEBUGLOG(4, "raw content dictionary detected");
- return ZSTD_loadDictionaryContent(ms, params, dict, dictSize);
+ return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm);
}
if (dictContentType == ZSTD_dct_fullDict)
return ERROR(dictionary_wrong);
@@ -2369,17 +3167,18 @@ static size_t ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs, ZS
}
/* dict as full zstd dictionary */
- return ZSTD_loadZstdDictionary(bs, ms, params, dict, dictSize, workspace);
+ return ZSTD_loadZstdDictionary(bs, ms, params, dict, dictSize, dtlm, workspace);
}
/*! ZSTD_compressBegin_internal() :
* @return : 0, or an error code */
-size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
- const void* dict, size_t dictSize,
- ZSTD_dictContentType_e dictContentType,
- const ZSTD_CDict* cdict,
- ZSTD_CCtx_params params, U64 pledgedSrcSize,
- ZSTD_buffered_policy_e zbuff)
+static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
+ const void* dict, size_t dictSize,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ const ZSTD_CDict* cdict,
+ ZSTD_CCtx_params params, U64 pledgedSrcSize,
+ ZSTD_buffered_policy_e zbuff)
{
DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params.cParams.windowLog);
/* params are supposed to be fully validated at this point */
@@ -2387,9 +3186,7 @@ size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
assert(!((dict) && (cdict))); /* either dict or cdict, not both */
if (cdict && cdict->dictContentSize>0) {
- cctx->requestedParams = params;
- return ZSTD_resetCCtx_usingCDict(cctx, cdict, params.cParams.windowLog,
- params.fParams, pledgedSrcSize, zbuff);
+ return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
}
CHECK_F( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
@@ -2397,7 +3194,7 @@ size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
{
size_t const dictID = ZSTD_compress_insertDictionary(
cctx->blockState.prevCBlock, &cctx->blockState.matchState,
- &params, dict, dictSize, dictContentType, cctx->entropyWorkspace);
+ &params, dict, dictSize, dictContentType, dtlm, cctx->entropyWorkspace);
if (ZSTD_isError(dictID)) return dictID;
assert(dictID <= (size_t)(U32)-1);
cctx->dictID = (U32)dictID;
@@ -2408,6 +3205,7 @@ size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
const void* dict, size_t dictSize,
ZSTD_dictContentType_e dictContentType,
+ ZSTD_dictTableLoadMethod_e dtlm,
const ZSTD_CDict* cdict,
ZSTD_CCtx_params params,
unsigned long long pledgedSrcSize)
@@ -2416,7 +3214,7 @@ size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
/* compression parameters verification and optimization */
CHECK_F( ZSTD_checkCParams(params.cParams) );
return ZSTD_compressBegin_internal(cctx,
- dict, dictSize, dictContentType,
+ dict, dictSize, dictContentType, dtlm,
cdict,
params, pledgedSrcSize,
ZSTDb_not_buffered);
@@ -2431,7 +3229,7 @@ size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
ZSTD_CCtx_params const cctxParams =
ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
return ZSTD_compressBegin_advanced_internal(cctx,
- dict, dictSize, ZSTD_dct_auto,
+ dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
NULL /*cdict*/,
cctxParams, pledgedSrcSize);
}
@@ -2441,8 +3239,8 @@ size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t di
ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);
ZSTD_CCtx_params const cctxParams =
ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
- DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (U32)dictSize);
- return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, NULL,
+ DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
+ return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
}
@@ -2485,7 +3283,7 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
if (cctx->appliedParams.fParams.checksumFlag) {
U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
if (dstCapacity<4) return ERROR(dstSize_tooSmall);
- DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", checksum);
+ DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
MEM_writeLE32(op, checksum);
op += 4;
}
@@ -2505,11 +3303,13 @@ size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
if (ZSTD_isError(cSize)) return cSize;
endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
if (ZSTD_isError(endResult)) return endResult;
- if (cctx->appliedParams.fParams.contentSizeFlag) { /* control src size */
+ assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
+ if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */
+ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
DEBUGLOG(4, "end of frame : controlling src size");
if (cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1) {
DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize = %u",
- (U32)cctx->pledgedSrcSizePlusOne-1, (U32)cctx->consumedSrcSize);
+ (unsigned)cctx->pledgedSrcSizePlusOne-1, (unsigned)cctx->consumedSrcSize);
return ERROR(srcSize_wrong);
} }
return cSize + endResult;
@@ -2517,22 +3317,22 @@ size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
static size_t ZSTD_compress_internal (ZSTD_CCtx* cctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- const void* dict,size_t dictSize,
- ZSTD_parameters params)
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ ZSTD_parameters params)
{
ZSTD_CCtx_params const cctxParams =
ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
DEBUGLOG(4, "ZSTD_compress_internal");
return ZSTD_compress_advanced_internal(cctx,
- dst, dstCapacity,
- src, srcSize,
- dict, dictSize,
- cctxParams);
+ dst, dstCapacity,
+ src, srcSize,
+ dict, dictSize,
+ cctxParams);
}
-size_t ZSTD_compress_advanced (ZSTD_CCtx* ctx,
+size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict,size_t dictSize,
@@ -2540,7 +3340,11 @@ size_t ZSTD_compress_advanced (ZSTD_CCtx* ctx,
{
DEBUGLOG(4, "ZSTD_compress_advanced");
CHECK_F(ZSTD_checkCParams(params.cParams));
- return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
+ return ZSTD_compress_internal(cctx,
+ dst, dstCapacity,
+ src, srcSize,
+ dict, dictSize,
+ params);
}
/* Internal */
@@ -2551,37 +3355,44 @@ size_t ZSTD_compress_advanced_internal(
const void* dict,size_t dictSize,
ZSTD_CCtx_params params)
{
- DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)",
- (U32)srcSize);
- CHECK_F( ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, NULL,
- params, srcSize, ZSTDb_not_buffered) );
+ DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
+ CHECK_F( ZSTD_compressBegin_internal(cctx,
+ dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
+ params, srcSize, ZSTDb_not_buffered) );
return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
}
-size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize,
- const void* dict, size_t dictSize, int compressionLevel)
+size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict, size_t dictSize,
+ int compressionLevel)
{
- ZSTD_parameters const params = ZSTD_getParams(compressionLevel, srcSize ? srcSize : 1, dict ? dictSize : 0);
+ ZSTD_parameters const params = ZSTD_getParams(compressionLevel, srcSize + (!srcSize), dict ? dictSize : 0);
ZSTD_CCtx_params cctxParams = ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
assert(params.fParams.contentSizeFlag == 1);
- ZSTD_CCtxParam_setParameter(&cctxParams, ZSTD_p_compressLiterals, compressionLevel>=0);
return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, cctxParams);
}
-size_t ZSTD_compressCCtx (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel)
+size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ int compressionLevel)
{
- DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (U32)srcSize);
+ DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize);
+ assert(cctx != NULL);
return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
}
-size_t ZSTD_compress(void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel)
+size_t ZSTD_compress(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ int compressionLevel)
{
size_t result;
ZSTD_CCtx ctxBody;
- memset(&ctxBody, 0, sizeof(ctxBody));
- ctxBody.customMem = ZSTD_defaultCMem;
+ ZSTD_initCCtx(&ctxBody, ZSTD_defaultCMem);
result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel);
- ZSTD_free(ctxBody.workSpace, ZSTD_defaultCMem); /* can't free ctxBody itself, as it's on stack; free only heap content */
+ ZSTD_freeCCtxContent(&ctxBody); /* can't free ctxBody itself, as it's on stack; free only heap content */
return result;
}
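
(Editor's aside) For reference, the one-shot path above is what the simple public entry points boil down to. A minimal caller-side sketch against the public API (assumes libzstd headers and linkage; compress_buffer is an illustrative name):

    #include <zstd.h>
    #include <stdlib.h>

    /* One-shot compression; on success *outBuf is malloc'd and owned by the caller. */
    static size_t compress_buffer(void** outBuf, const void* src, size_t srcSize, int level)
    {
        size_t const bound = ZSTD_compressBound(srcSize);   /* worst-case compressed size */
        void* const dst = malloc(bound);
        size_t cSize;
        if (dst == NULL) return 0;
        cSize = ZSTD_compress(dst, bound, src, srcSize, level);
        if (ZSTD_isError(cSize)) { free(dst); return 0; }
        *outBuf = dst;
        return cSize;
    }
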
@@ -2594,7 +3405,7 @@ size_t ZSTD_estimateCDictSize_advanced(
size_t dictSize, ZSTD_compressionParameters cParams,
ZSTD_dictLoadMethod_e dictLoadMethod)
{
- DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (U32)sizeof(ZSTD_CDict));
+ DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
return sizeof(ZSTD_CDict) + HUF_WORKSPACE_SIZE + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)
+ (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
}
@@ -2608,7 +3419,7 @@ size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
{
if (cdict==NULL) return 0; /* support sizeof on NULL */
- DEBUGLOG(5, "sizeof(*cdict) : %u", (U32)sizeof(*cdict));
+ DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict));
return cdict->workspaceSize + (cdict->dictBuffer ? cdict->dictContentSize : 0) + sizeof(*cdict);
}
@@ -2619,9 +3430,9 @@ static size_t ZSTD_initCDict_internal(
ZSTD_dictContentType_e dictContentType,
ZSTD_compressionParameters cParams)
{
- DEBUGLOG(3, "ZSTD_initCDict_internal, dictContentType %u", (U32)dictContentType);
+ DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType);
assert(!ZSTD_checkCParams(cParams));
- cdict->cParams = cParams;
+ cdict->matchState.cParams = cParams;
if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
cdict->dictBuffer = NULL;
cdict->dictContent = dictBuffer;
@@ -2654,7 +3465,7 @@ static size_t ZSTD_initCDict_internal(
{ size_t const dictID = ZSTD_compress_insertDictionary(
&cdict->cBlockState, &cdict->matchState, &params,
cdict->dictContent, cdict->dictContentSize,
- dictContentType, cdict->workspace);
+ dictContentType, ZSTD_dtlm_full, cdict->workspace);
if (ZSTD_isError(dictID)) return dictID;
assert(dictID <= (size_t)(U32)-1);
cdict->dictID = (U32)dictID;
@@ -2669,7 +3480,7 @@ ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
ZSTD_dictContentType_e dictContentType,
ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
{
- DEBUGLOG(3, "ZSTD_createCDict_advanced, mode %u", (U32)dictContentType);
+ DEBUGLOG(3, "ZSTD_createCDict_advanced, mode %u", (unsigned)dictContentType);
if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
{ ZSTD_CDict* const cdict = (ZSTD_CDict*)ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
@@ -2750,7 +3561,7 @@ const ZSTD_CDict* ZSTD_initStaticCDict(
void* ptr;
if ((size_t)workspace & 7) return NULL; /* 8-aligned */
DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
- (U32)workspaceSize, (U32)neededSize, (U32)(workspaceSize < neededSize));
+ (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
if (workspaceSize < neededSize) return NULL;
if (dictLoadMethod == ZSTD_dlm_byCopy) {
@@ -2775,7 +3586,7 @@ const ZSTD_CDict* ZSTD_initStaticCDict(
ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
{
assert(cdict != NULL);
- return cdict->cParams;
+ return cdict->matchState.cParams;
}
/* ZSTD_compressBegin_usingCDict_advanced() :
@@ -2799,7 +3610,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(
}
params.fParams = fParams;
return ZSTD_compressBegin_internal(cctx,
- NULL, 0, ZSTD_dct_auto,
+ NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
cdict,
params, pledgedSrcSize,
ZSTDb_not_buffered);
@@ -2813,7 +3624,7 @@ size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
{
ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
DEBUGLOG(4, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag);
- return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, 0);
+ return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
}
size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
@@ -2880,16 +3691,17 @@ size_t ZSTD_CStreamOutSize(void)
static size_t ZSTD_resetCStream_internal(ZSTD_CStream* cctx,
const void* const dict, size_t const dictSize, ZSTD_dictContentType_e const dictContentType,
const ZSTD_CDict* const cdict,
- ZSTD_CCtx_params const params, unsigned long long const pledgedSrcSize)
+ ZSTD_CCtx_params params, unsigned long long const pledgedSrcSize)
{
- DEBUGLOG(4, "ZSTD_resetCStream_internal (disableLiteralCompression=%i)",
- params.disableLiteralCompression);
+ DEBUGLOG(4, "ZSTD_resetCStream_internal");
+ /* Finalize the compression parameters */
+ params.cParams = ZSTD_getCParamsFromCCtxParams(&params, pledgedSrcSize, dictSize);
/* params are supposed to be fully validated at this point */
assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
assert(!((dict) && (cdict))); /* either dict or cdict, not both */
CHECK_F( ZSTD_compressBegin_internal(cctx,
- dict, dictSize, dictContentType,
+ dict, dictSize, dictContentType, ZSTD_dtlm_fast,
cdict,
params, pledgedSrcSize,
ZSTDb_buffered) );
@@ -2909,10 +3721,9 @@ static size_t ZSTD_resetCStream_internal(ZSTD_CStream* cctx,
size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
{
ZSTD_CCtx_params params = zcs->requestedParams;
- DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (U32)pledgedSrcSize);
+ DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize);
if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
params.fParams.contentSizeFlag = 1;
- params.cParams = ZSTD_getCParamsFromCCtxParams(&params, pledgedSrcSize, 0);
return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dct_auto, zcs->cdict, params, pledgedSrcSize);
}
@@ -2925,11 +3736,12 @@ size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
ZSTD_CCtx_params params, unsigned long long pledgedSrcSize)
{
DEBUGLOG(4, "ZSTD_initCStream_internal");
+ params.cParams = ZSTD_getCParamsFromCCtxParams(&params, pledgedSrcSize, dictSize);
assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
assert(!((dict) && (cdict))); /* either dict or cdict, not both */
if (dict && dictSize >= 8) {
- DEBUGLOG(4, "loading dictionary of size %u", (U32)dictSize);
+ DEBUGLOG(4, "loading dictionary of size %u", (unsigned)dictSize);
if (zcs->staticSize) { /* static CCtx : never uses malloc */
/* incompatible with internal cdict creation */
return ERROR(memory_allocation);
@@ -2988,28 +3800,24 @@ size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
ZSTD_parameters params, unsigned long long pledgedSrcSize)
{
DEBUGLOG(4, "ZSTD_initCStream_advanced: pledgedSrcSize=%u, flag=%u",
- (U32)pledgedSrcSize, params.fParams.contentSizeFlag);
+ (unsigned)pledgedSrcSize, params.fParams.contentSizeFlag);
CHECK_F( ZSTD_checkCParams(params.cParams) );
if ((pledgedSrcSize==0) && (params.fParams.contentSizeFlag==0)) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN; /* for compatibility with older programs relying on this behavior. Users should now specify ZSTD_CONTENTSIZE_UNKNOWN. This line will be removed in the future. */
- { ZSTD_CCtx_params const cctxParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
- return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL /*cdict*/, cctxParams, pledgedSrcSize);
- }
+ zcs->requestedParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
+ return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL /*cdict*/, zcs->requestedParams, pledgedSrcSize);
}
size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
{
- ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize);
- ZSTD_CCtx_params const cctxParams =
- ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
- return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL, cctxParams, ZSTD_CONTENTSIZE_UNKNOWN);
+ ZSTD_CCtxParams_init(&zcs->requestedParams, compressionLevel);
+ return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL, zcs->requestedParams, ZSTD_CONTENTSIZE_UNKNOWN);
}
size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
{
U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss; /* temporary : 0 interpreted as "unknown" during transition period. Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. `0` will be interpreted as "empty" in the future */
- ZSTD_parameters const params = ZSTD_getParams(compressionLevel, pledgedSrcSize, 0);
- ZSTD_CCtx_params const cctxParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
- return ZSTD_initCStream_internal(zcs, NULL, 0, NULL, cctxParams, pledgedSrcSize);
+ ZSTD_CCtxParams_init(&zcs->requestedParams, compressionLevel);
+ return ZSTD_initCStream_internal(zcs, NULL, 0, NULL, zcs->requestedParams, pledgedSrcSize);
}
size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
@@ -3020,8 +3828,15 @@ size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
/*====== Compression ======*/
-MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity,
- const void* src, size_t srcSize)
+static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx)
+{
+ size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos;
+ if (hintInSize==0) hintInSize = cctx->blockSize;
+ return hintInSize;
+}
+
+static size_t ZSTD_limitCopy(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
{
size_t const length = MIN(dstCapacity, srcSize);
if (length) memcpy(dst, src, length);
@@ -3029,7 +3844,7 @@ MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity,
}
/** ZSTD_compressStream_generic():
- * internal function for all *compressStream*() variants and *compress_generic()
+ * internal function for all *compressStream*() variants
* non-static, because can be called from zstdmt_compress.c
* @return : hint size for next input */
size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
@@ -3046,7 +3861,7 @@ size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
U32 someMoreWork = 1;
/* check expectations */
- DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (U32)flushMode);
+ DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode);
assert(zcs->inBuff != NULL);
assert(zcs->inBuffSize > 0);
assert(zcs->outBuff != NULL);
@@ -3068,12 +3883,12 @@ size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
/* shortcut to compression pass directly into output buffer */
size_t const cSize = ZSTD_compressEnd(zcs,
op, oend-op, ip, iend-ip);
- DEBUGLOG(4, "ZSTD_compressEnd : %u", (U32)cSize);
+ DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
if (ZSTD_isError(cSize)) return cSize;
ip = iend;
op += cSize;
zcs->frameEnded = 1;
- ZSTD_startNewCompression(zcs);
+ ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
someMoreWork = 0; break;
}
/* complete loading into inBuffer */
@@ -3117,7 +3932,7 @@ size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
if (zcs->inBuffTarget > zcs->inBuffSize)
zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
- (U32)zcs->inBuffTarget, (U32)zcs->inBuffSize);
+ (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);
if (!lastBlock)
assert(zcs->inBuffTarget <= zcs->inBuffSize);
zcs->inToCompress = zcs->inBuffPos;
@@ -3126,7 +3941,7 @@ size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
if (zcs->frameEnded) {
DEBUGLOG(5, "Frame completed directly in outBuffer");
someMoreWork = 0;
- ZSTD_startNewCompression(zcs);
+ ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
}
break;
}
@@ -3141,7 +3956,7 @@ size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
size_t const flushed = ZSTD_limitCopy(op, oend-op,
zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
- (U32)toFlush, (U32)(oend-op), (U32)flushed);
+ (unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed);
op += flushed;
zcs->outBuffFlushedSize += flushed;
if (toFlush!=flushed) {
@@ -3154,7 +3969,7 @@ size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
if (zcs->frameEnded) {
DEBUGLOG(5, "Frame completed on flush");
someMoreWork = 0;
- ZSTD_startNewCompression(zcs);
+ ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
break;
}
zcs->streamStage = zcss_load;
@@ -3169,28 +3984,34 @@ size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
input->pos = ip - istart;
output->pos = op - ostart;
if (zcs->frameEnded) return 0;
- { size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos;
- if (hintInSize==0) hintInSize = zcs->blockSize;
- return hintInSize;
+ return ZSTD_nextInputSizeHint(zcs);
+}
+
+static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx)
+{
+#ifdef ZSTD_MULTITHREAD
+ if (cctx->appliedParams.nbWorkers >= 1) {
+ assert(cctx->mtctx != NULL);
+ return ZSTDMT_nextInputSizeHint(cctx->mtctx);
}
+#endif
+ return ZSTD_nextInputSizeHint(cctx);
+
}
size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
{
- /* check conditions */
- if (output->pos > output->size) return ERROR(GENERIC);
- if (input->pos > input->size) return ERROR(GENERIC);
-
- return ZSTD_compressStream_generic(zcs, output, input, ZSTD_e_continue);
+ CHECK_F( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) );
+ return ZSTD_nextInputSizeHint_MTorST(zcs);
}
-size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
- ZSTD_outBuffer* output,
- ZSTD_inBuffer* input,
- ZSTD_EndDirective endOp)
+size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
+ ZSTD_outBuffer* output,
+ ZSTD_inBuffer* input,
+ ZSTD_EndDirective endOp)
{
- DEBUGLOG(5, "ZSTD_compress_generic, endOp=%u ", (U32)endOp);
+ DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
/* check conditions */
if (output->pos > output->size) return ERROR(GENERIC);
if (input->pos > input->size) return ERROR(GENERIC);
@@ -3200,26 +4021,23 @@ size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
if (cctx->streamStage == zcss_init) {
ZSTD_CCtx_params params = cctx->requestedParams;
ZSTD_prefixDict const prefixDict = cctx->prefixDict;
- memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* single usage */
- assert(prefixDict.dict==NULL || cctx->cdict==NULL); /* only one can be set */
- DEBUGLOG(4, "ZSTD_compress_generic : transparent init stage");
+ memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* single usage */
+ assert(prefixDict.dict==NULL || cctx->cdict==NULL); /* only one can be set */
+ DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = input->size + 1; /* auto-fix pledgedSrcSize */
params.cParams = ZSTD_getCParamsFromCCtxParams(
&cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, 0 /*dictSize*/);
+
#ifdef ZSTD_MULTITHREAD
if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) {
params.nbWorkers = 0; /* do not invoke multi-threading when src size is too small */
}
if (params.nbWorkers > 0) {
/* mt context creation */
- if (cctx->mtctx == NULL || (params.nbWorkers != ZSTDMT_getNbWorkers(cctx->mtctx))) {
- DEBUGLOG(4, "ZSTD_compress_generic: creating new mtctx for nbWorkers=%u",
+ if (cctx->mtctx == NULL) {
+ DEBUGLOG(4, "ZSTD_compressStream2: creating new mtctx for nbWorkers=%u",
params.nbWorkers);
- if (cctx->mtctx != NULL)
- DEBUGLOG(4, "ZSTD_compress_generic: previous nbWorkers was %u",
- ZSTDMT_getNbWorkers(cctx->mtctx));
- ZSTDMT_freeCCtx(cctx->mtctx);
cctx->mtctx = ZSTDMT_createCCtx_advanced(params.nbWorkers, cctx->customMem);
if (cctx->mtctx == NULL) return ERROR(memory_allocation);
}
@@ -3240,6 +4058,7 @@ size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
assert(cctx->streamStage == zcss_load);
assert(cctx->appliedParams.nbWorkers == 0);
} }
+ /* end of transparent initialization stage */
/* compression stage */
#ifdef ZSTD_MULTITHREAD
@@ -3251,17 +4070,18 @@ size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
{ size_t const flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
if ( ZSTD_isError(flushMin)
|| (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */
- ZSTD_startNewCompression(cctx);
+ ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
}
+ DEBUGLOG(5, "completed ZSTD_compressStream2 delegating to ZSTDMT_compressStream_generic");
return flushMin;
} }
#endif
CHECK_F( ZSTD_compressStream_generic(cctx, output, input, endOp) );
- DEBUGLOG(5, "completed ZSTD_compress_generic");
+ DEBUGLOG(5, "completed ZSTD_compressStream2");
return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
}
-size_t ZSTD_compress_generic_simpleArgs (
+size_t ZSTD_compressStream2_simpleArgs (
ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity, size_t* dstPos,
const void* src, size_t srcSize, size_t* srcPos,
@@ -3269,13 +4089,33 @@ size_t ZSTD_compress_generic_simpleArgs (
{
ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
ZSTD_inBuffer input = { src, srcSize, *srcPos };
- /* ZSTD_compress_generic() will check validity of dstPos and srcPos */
- size_t const cErr = ZSTD_compress_generic(cctx, &output, &input, endOp);
+ /* ZSTD_compressStream2() will check validity of dstPos and srcPos */
+ size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp);
*dstPos = output.pos;
*srcPos = input.pos;
return cErr;
}
+size_t ZSTD_compress2(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
+ { size_t oPos = 0;
+ size_t iPos = 0;
+ size_t const result = ZSTD_compressStream2_simpleArgs(cctx,
+ dst, dstCapacity, &oPos,
+ src, srcSize, &iPos,
+ ZSTD_e_end);
+ if (ZSTD_isError(result)) return result;
+ if (result != 0) { /* compression not completed, due to lack of output space */
+ assert(oPos == dstCapacity);
+ return ERROR(dstSize_tooSmall);
+ }
+        assert(iPos == srcSize);   /* all input is expected to be consumed */
+ return oPos;
+ }
+}
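Illustrative usage of the new one-shot entry point (a sketch, not part of the patch; the helper name compress_once is hypothetical, and only public API calls are used — depending on the zstd version these entry points may still sit behind ZSTD_STATIC_LINKING_ONLY):

    #define ZSTD_STATIC_LINKING_ONLY   /* may be needed while the new API is still staged */
    #include <zstd.h>

    /* Compress src into dst with a fresh context left at its default parameters.
     * dstCapacity should be at least ZSTD_compressBound(srcSize) to guarantee success.
     * Returns a zstd size code: test it with ZSTD_isError(). */
    static size_t compress_once(void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        size_t cSize;
        if (cctx == NULL) return (size_t)-1;   /* allocation failure, reported as an error code */
        cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
        ZSTD_freeCCtx(cctx);
        return cSize;
    }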
/*====== Finalize ======*/
@@ -3284,21 +4124,21 @@ size_t ZSTD_compress_generic_simpleArgs (
size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
{
ZSTD_inBuffer input = { NULL, 0, 0 };
- if (output->pos > output->size) return ERROR(GENERIC);
- CHECK_F( ZSTD_compressStream_generic(zcs, output, &input, ZSTD_e_flush) );
- return zcs->outBuffContentSize - zcs->outBuffFlushedSize; /* remaining to flush */
+ return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush);
}
size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
{
ZSTD_inBuffer input = { NULL, 0, 0 };
- if (output->pos > output->size) return ERROR(GENERIC);
- CHECK_F( ZSTD_compressStream_generic(zcs, output, &input, ZSTD_e_end) );
+ size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end);
+ CHECK_F( remainingToFlush );
+ if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush; /* minimal estimation */
+ /* single thread mode : attempt to calculate remaining to flush more precisely */
{ size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
size_t const checksumSize = zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4;
- size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize + lastBlockSize + checksumSize;
- DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (U32)toFlush);
+ size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize;
+ DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush);
return toFlush;
}
}
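A sketch of the corresponding streaming pattern (not part of the patch; stream_to_file is a hypothetical helper): ZSTD_compressStream2() is driven with ZSTD_e_end until it reports nothing left to flush, which matches the "remaining to flush" return value computed above.

    #include <stdio.h>
    #define ZSTD_STATIC_LINKING_ONLY   /* may be needed while the new API is still staged */
    #include <zstd.h>

    /* Compress `src` into `fout`; `cctx` is created (and parameterized) by the caller. */
    static int stream_to_file(ZSTD_CCtx* cctx, FILE* fout,
                              const void* src, size_t srcSize)
    {
        char out[1 << 15];                          /* any scratch size works */
        ZSTD_inBuffer input = { src, srcSize, 0 };
        size_t remaining;
        do {
            ZSTD_outBuffer output = { out, sizeof(out), 0 };
            /* a nonzero return means data is still buffered inside the cctx */
            remaining = ZSTD_compressStream2(cctx, &output, &input, ZSTD_e_end);
            if (ZSTD_isError(remaining)) return -1;
            if (fwrite(out, 1, output.pos, fout) != output.pos) return -1;
        } while (remaining != 0);
        return 0;
    }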
@@ -3308,111 +4148,112 @@ size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
#define ZSTD_MAX_CLEVEL 22
int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
+int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }
static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
{ /* "default" - guarantees a monotonically increasing memory budget */
/* W, C, H, S, L, TL, strat */
{ 19, 12, 13, 1, 6, 1, ZSTD_fast }, /* base for negative levels */
- { 19, 13, 14, 1, 7, 1, ZSTD_fast }, /* level 1 */
- { 19, 15, 16, 1, 6, 1, ZSTD_fast }, /* level 2 */
- { 20, 16, 17, 1, 5, 8, ZSTD_dfast }, /* level 3 */
- { 20, 17, 18, 1, 5, 8, ZSTD_dfast }, /* level 4 */
- { 20, 17, 18, 2, 5, 16, ZSTD_greedy }, /* level 5 */
- { 21, 17, 19, 2, 5, 16, ZSTD_lazy }, /* level 6 */
- { 21, 18, 19, 3, 5, 16, ZSTD_lazy }, /* level 7 */
- { 21, 18, 20, 3, 5, 16, ZSTD_lazy2 }, /* level 8 */
- { 21, 19, 20, 3, 5, 16, ZSTD_lazy2 }, /* level 9 */
- { 21, 19, 21, 4, 5, 16, ZSTD_lazy2 }, /* level 10 */
- { 22, 20, 22, 4, 5, 16, ZSTD_lazy2 }, /* level 11 */
- { 22, 20, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 12 */
- { 22, 21, 22, 4, 5, 32, ZSTD_btlazy2 }, /* level 13 */
- { 22, 21, 22, 5, 5, 32, ZSTD_btlazy2 }, /* level 14 */
- { 22, 22, 22, 6, 5, 32, ZSTD_btlazy2 }, /* level 15 */
- { 22, 21, 22, 4, 5, 48, ZSTD_btopt }, /* level 16 */
- { 23, 22, 22, 4, 4, 48, ZSTD_btopt }, /* level 17 */
- { 23, 22, 22, 5, 3, 64, ZSTD_btopt }, /* level 18 */
- { 23, 23, 22, 7, 3,128, ZSTD_btopt }, /* level 19 */
- { 25, 25, 23, 7, 3,128, ZSTD_btultra }, /* level 20 */
- { 26, 26, 24, 7, 3,256, ZSTD_btultra }, /* level 21 */
- { 27, 27, 25, 9, 3,512, ZSTD_btultra }, /* level 22 */
+ { 19, 13, 14, 1, 7, 0, ZSTD_fast }, /* level 1 */
+ { 20, 15, 16, 1, 6, 0, ZSTD_fast }, /* level 2 */
+ { 21, 16, 17, 1, 5, 1, ZSTD_dfast }, /* level 3 */
+ { 21, 18, 18, 1, 5, 1, ZSTD_dfast }, /* level 4 */
+ { 21, 18, 19, 2, 5, 2, ZSTD_greedy }, /* level 5 */
+ { 21, 19, 19, 3, 5, 4, ZSTD_greedy }, /* level 6 */
+ { 21, 19, 19, 3, 5, 8, ZSTD_lazy }, /* level 7 */
+ { 21, 19, 19, 3, 5, 16, ZSTD_lazy2 }, /* level 8 */
+ { 21, 19, 20, 4, 5, 16, ZSTD_lazy2 }, /* level 9 */
+ { 22, 20, 21, 4, 5, 16, ZSTD_lazy2 }, /* level 10 */
+ { 22, 21, 22, 4, 5, 16, ZSTD_lazy2 }, /* level 11 */
+ { 22, 21, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 12 */
+ { 22, 21, 22, 5, 5, 32, ZSTD_btlazy2 }, /* level 13 */
+ { 22, 22, 23, 5, 5, 32, ZSTD_btlazy2 }, /* level 14 */
+ { 22, 23, 23, 6, 5, 32, ZSTD_btlazy2 }, /* level 15 */
+ { 22, 22, 22, 5, 5, 48, ZSTD_btopt }, /* level 16 */
+ { 23, 23, 22, 5, 4, 64, ZSTD_btopt }, /* level 17 */
+ { 23, 23, 22, 6, 3, 64, ZSTD_btultra }, /* level 18 */
+ { 23, 24, 22, 7, 3,256, ZSTD_btultra2}, /* level 19 */
+ { 25, 25, 23, 7, 3,256, ZSTD_btultra2}, /* level 20 */
+ { 26, 26, 24, 7, 3,512, ZSTD_btultra2}, /* level 21 */
+ { 27, 27, 25, 9, 3,999, ZSTD_btultra2}, /* level 22 */
},
{ /* for srcSize <= 256 KB */
/* W, C, H, S, L, T, strat */
{ 18, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */
- { 18, 13, 14, 1, 6, 1, ZSTD_fast }, /* level 1 */
- { 18, 14, 13, 1, 5, 8, ZSTD_dfast }, /* level 2 */
- { 18, 16, 15, 1, 5, 8, ZSTD_dfast }, /* level 3 */
- { 18, 15, 17, 1, 5, 8, ZSTD_greedy }, /* level 4.*/
- { 18, 16, 17, 4, 5, 8, ZSTD_greedy }, /* level 5.*/
- { 18, 16, 17, 3, 5, 8, ZSTD_lazy }, /* level 6.*/
- { 18, 17, 17, 4, 4, 8, ZSTD_lazy }, /* level 7 */
- { 18, 17, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */
- { 18, 17, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */
- { 18, 17, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */
- { 18, 18, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 11.*/
- { 18, 18, 17, 5, 4, 8, ZSTD_btlazy2 }, /* level 12.*/
- { 18, 19, 17, 7, 4, 8, ZSTD_btlazy2 }, /* level 13 */
- { 18, 18, 18, 4, 4, 16, ZSTD_btopt }, /* level 14.*/
- { 18, 18, 18, 4, 3, 16, ZSTD_btopt }, /* level 15.*/
- { 18, 19, 18, 6, 3, 32, ZSTD_btopt }, /* level 16.*/
- { 18, 19, 18, 8, 3, 64, ZSTD_btopt }, /* level 17.*/
- { 18, 19, 18, 9, 3,128, ZSTD_btopt }, /* level 18.*/
- { 18, 19, 18, 10, 3,256, ZSTD_btopt }, /* level 19.*/
- { 18, 19, 18, 11, 3,512, ZSTD_btultra }, /* level 20.*/
- { 18, 19, 18, 12, 3,512, ZSTD_btultra }, /* level 21.*/
- { 18, 19, 18, 13, 3,512, ZSTD_btultra }, /* level 22.*/
+ { 18, 13, 14, 1, 6, 0, ZSTD_fast }, /* level 1 */
+ { 18, 14, 14, 1, 5, 1, ZSTD_dfast }, /* level 2 */
+ { 18, 16, 16, 1, 4, 1, ZSTD_dfast }, /* level 3 */
+ { 18, 16, 17, 2, 5, 2, ZSTD_greedy }, /* level 4.*/
+ { 18, 18, 18, 3, 5, 2, ZSTD_greedy }, /* level 5.*/
+ { 18, 18, 19, 3, 5, 4, ZSTD_lazy }, /* level 6.*/
+ { 18, 18, 19, 4, 4, 4, ZSTD_lazy }, /* level 7 */
+ { 18, 18, 19, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */
+ { 18, 18, 19, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */
+ { 18, 18, 19, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */
+ { 18, 18, 19, 5, 4, 12, ZSTD_btlazy2 }, /* level 11.*/
+ { 18, 19, 19, 7, 4, 12, ZSTD_btlazy2 }, /* level 12.*/
+ { 18, 18, 19, 4, 4, 16, ZSTD_btopt }, /* level 13 */
+ { 18, 18, 19, 4, 3, 32, ZSTD_btopt }, /* level 14.*/
+ { 18, 18, 19, 6, 3,128, ZSTD_btopt }, /* level 15.*/
+ { 18, 19, 19, 6, 3,128, ZSTD_btultra }, /* level 16.*/
+ { 18, 19, 19, 8, 3,256, ZSTD_btultra }, /* level 17.*/
+ { 18, 19, 19, 6, 3,128, ZSTD_btultra2}, /* level 18.*/
+ { 18, 19, 19, 8, 3,256, ZSTD_btultra2}, /* level 19.*/
+ { 18, 19, 19, 10, 3,512, ZSTD_btultra2}, /* level 20.*/
+ { 18, 19, 19, 12, 3,512, ZSTD_btultra2}, /* level 21.*/
+ { 18, 19, 19, 13, 3,999, ZSTD_btultra2}, /* level 22.*/
},
{ /* for srcSize <= 128 KB */
/* W, C, H, S, L, T, strat */
- { 17, 12, 12, 1, 5, 1, ZSTD_fast }, /* level 0 - not used */
- { 17, 12, 13, 1, 6, 1, ZSTD_fast }, /* level 1 */
- { 17, 13, 16, 1, 5, 1, ZSTD_fast }, /* level 2 */
- { 17, 16, 16, 2, 5, 8, ZSTD_dfast }, /* level 3 */
- { 17, 13, 15, 3, 4, 8, ZSTD_greedy }, /* level 4 */
- { 17, 15, 17, 4, 4, 8, ZSTD_greedy }, /* level 5 */
- { 17, 16, 17, 3, 4, 8, ZSTD_lazy }, /* level 6 */
- { 17, 15, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 7 */
+ { 17, 12, 12, 1, 5, 1, ZSTD_fast }, /* base for negative levels */
+ { 17, 12, 13, 1, 6, 0, ZSTD_fast }, /* level 1 */
+ { 17, 13, 15, 1, 5, 0, ZSTD_fast }, /* level 2 */
+ { 17, 15, 16, 2, 5, 1, ZSTD_dfast }, /* level 3 */
+ { 17, 17, 17, 2, 4, 1, ZSTD_dfast }, /* level 4 */
+ { 17, 16, 17, 3, 4, 2, ZSTD_greedy }, /* level 5 */
+ { 17, 17, 17, 3, 4, 4, ZSTD_lazy }, /* level 6 */
+ { 17, 17, 17, 3, 4, 8, ZSTD_lazy2 }, /* level 7 */
{ 17, 17, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */
{ 17, 17, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */
{ 17, 17, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */
- { 17, 17, 17, 7, 4, 8, ZSTD_lazy2 }, /* level 11 */
- { 17, 17, 17, 8, 4, 8, ZSTD_lazy2 }, /* level 12 */
- { 17, 18, 17, 6, 4, 8, ZSTD_btlazy2 }, /* level 13.*/
- { 17, 17, 17, 7, 3, 8, ZSTD_btopt }, /* level 14.*/
- { 17, 17, 17, 7, 3, 16, ZSTD_btopt }, /* level 15.*/
- { 17, 18, 17, 7, 3, 32, ZSTD_btopt }, /* level 16.*/
- { 17, 18, 17, 7, 3, 64, ZSTD_btopt }, /* level 17.*/
- { 17, 18, 17, 7, 3,256, ZSTD_btopt }, /* level 18.*/
- { 17, 18, 17, 8, 3,256, ZSTD_btopt }, /* level 19.*/
- { 17, 18, 17, 9, 3,256, ZSTD_btultra }, /* level 20.*/
- { 17, 18, 17, 10, 3,256, ZSTD_btultra }, /* level 21.*/
- { 17, 18, 17, 11, 3,512, ZSTD_btultra }, /* level 22.*/
+ { 17, 17, 17, 5, 4, 8, ZSTD_btlazy2 }, /* level 11 */
+ { 17, 18, 17, 7, 4, 12, ZSTD_btlazy2 }, /* level 12 */
+ { 17, 18, 17, 3, 4, 12, ZSTD_btopt }, /* level 13.*/
+ { 17, 18, 17, 4, 3, 32, ZSTD_btopt }, /* level 14.*/
+ { 17, 18, 17, 6, 3,256, ZSTD_btopt }, /* level 15.*/
+ { 17, 18, 17, 6, 3,128, ZSTD_btultra }, /* level 16.*/
+ { 17, 18, 17, 8, 3,256, ZSTD_btultra }, /* level 17.*/
+ { 17, 18, 17, 10, 3,512, ZSTD_btultra }, /* level 18.*/
+ { 17, 18, 17, 5, 3,256, ZSTD_btultra2}, /* level 19.*/
+ { 17, 18, 17, 7, 3,512, ZSTD_btultra2}, /* level 20.*/
+ { 17, 18, 17, 9, 3,512, ZSTD_btultra2}, /* level 21.*/
+ { 17, 18, 17, 11, 3,999, ZSTD_btultra2}, /* level 22.*/
},
{ /* for srcSize <= 16 KB */
/* W, C, H, S, L, T, strat */
{ 14, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */
- { 14, 14, 14, 1, 6, 1, ZSTD_fast }, /* level 1 */
- { 14, 14, 14, 1, 4, 1, ZSTD_fast }, /* level 2 */
- { 14, 14, 14, 1, 4, 6, ZSTD_dfast }, /* level 3.*/
- { 14, 14, 14, 4, 4, 6, ZSTD_greedy }, /* level 4.*/
- { 14, 14, 14, 3, 4, 6, ZSTD_lazy }, /* level 5.*/
- { 14, 14, 14, 4, 4, 6, ZSTD_lazy2 }, /* level 6 */
- { 14, 14, 14, 5, 4, 6, ZSTD_lazy2 }, /* level 7 */
- { 14, 14, 14, 6, 4, 6, ZSTD_lazy2 }, /* level 8.*/
- { 14, 15, 14, 6, 4, 6, ZSTD_btlazy2 }, /* level 9.*/
- { 14, 15, 14, 3, 3, 6, ZSTD_btopt }, /* level 10.*/
- { 14, 15, 14, 6, 3, 8, ZSTD_btopt }, /* level 11.*/
- { 14, 15, 14, 6, 3, 16, ZSTD_btopt }, /* level 12.*/
- { 14, 15, 14, 6, 3, 24, ZSTD_btopt }, /* level 13.*/
- { 14, 15, 15, 6, 3, 48, ZSTD_btopt }, /* level 14.*/
- { 14, 15, 15, 6, 3, 64, ZSTD_btopt }, /* level 15.*/
- { 14, 15, 15, 6, 3, 96, ZSTD_btopt }, /* level 16.*/
- { 14, 15, 15, 6, 3,128, ZSTD_btopt }, /* level 17.*/
- { 14, 15, 15, 6, 3,256, ZSTD_btopt }, /* level 18.*/
- { 14, 15, 15, 7, 3,256, ZSTD_btopt }, /* level 19.*/
- { 14, 15, 15, 8, 3,256, ZSTD_btultra }, /* level 20.*/
- { 14, 15, 15, 9, 3,256, ZSTD_btultra }, /* level 21.*/
- { 14, 15, 15, 10, 3,256, ZSTD_btultra }, /* level 22.*/
+ { 14, 14, 15, 1, 5, 0, ZSTD_fast }, /* level 1 */
+ { 14, 14, 15, 1, 4, 0, ZSTD_fast }, /* level 2 */
+ { 14, 14, 15, 2, 4, 1, ZSTD_dfast }, /* level 3 */
+ { 14, 14, 14, 4, 4, 2, ZSTD_greedy }, /* level 4 */
+ { 14, 14, 14, 3, 4, 4, ZSTD_lazy }, /* level 5.*/
+ { 14, 14, 14, 4, 4, 8, ZSTD_lazy2 }, /* level 6 */
+ { 14, 14, 14, 6, 4, 8, ZSTD_lazy2 }, /* level 7 */
+ { 14, 14, 14, 8, 4, 8, ZSTD_lazy2 }, /* level 8.*/
+ { 14, 15, 14, 5, 4, 8, ZSTD_btlazy2 }, /* level 9.*/
+ { 14, 15, 14, 9, 4, 8, ZSTD_btlazy2 }, /* level 10.*/
+ { 14, 15, 14, 3, 4, 12, ZSTD_btopt }, /* level 11.*/
+ { 14, 15, 14, 4, 3, 24, ZSTD_btopt }, /* level 12.*/
+ { 14, 15, 14, 5, 3, 32, ZSTD_btultra }, /* level 13.*/
+ { 14, 15, 15, 6, 3, 64, ZSTD_btultra }, /* level 14.*/
+ { 14, 15, 15, 7, 3,256, ZSTD_btultra }, /* level 15.*/
+ { 14, 15, 15, 5, 3, 48, ZSTD_btultra2}, /* level 16.*/
+ { 14, 15, 15, 6, 3,128, ZSTD_btultra2}, /* level 17.*/
+ { 14, 15, 15, 7, 3,256, ZSTD_btultra2}, /* level 18.*/
+ { 14, 15, 15, 8, 3,256, ZSTD_btultra2}, /* level 19.*/
+ { 14, 15, 15, 8, 3,512, ZSTD_btultra2}, /* level 20.*/
+ { 14, 15, 15, 9, 3,512, ZSTD_btultra2}, /* level 21.*/
+ { 14, 15, 15, 10, 3,999, ZSTD_btultra2}, /* level 22.*/
},
};
@@ -3431,8 +4272,8 @@ ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long l
if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
{ ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
if (compressionLevel < 0) cp.targetLength = (unsigned)(-compressionLevel); /* acceleration factor */
- return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize); }
-
+ return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize);
+ }
}
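A hedged sketch (not part of the patch) of how the level tables above surface through ZSTD_getCParams(); the function sits in the experimental section of zstd.h, and show_cparams is a hypothetical helper:

    #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_getCParams() lives in the experimental section */
    #include <zstd.h>
    #include <stdio.h>

    /* Print a few of the parameters selected for a given level and source size hint. */
    static void show_cparams(int level, unsigned long long srcSizeHint)
    {
        ZSTD_compressionParameters const cp = ZSTD_getCParams(level, srcSizeHint, 0 /* dictSize */);
        printf("level %d, srcSizeHint %llu -> windowLog=%u hashLog=%u targetLength=%u strategy=%d\n",
               level, srcSizeHint, cp.windowLog, cp.hashLog, cp.targetLength, (int)cp.strategy);
    }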
/*! ZSTD_getParams() :
diff --git a/thirdparty/zstd/compress/zstd_compress_internal.h b/thirdparty/zstd/compress/zstd_compress_internal.h
index 81f12ca6df..29bca59859 100644
--- a/thirdparty/zstd/compress/zstd_compress_internal.h
+++ b/thirdparty/zstd/compress/zstd_compress_internal.h
@@ -27,6 +27,7 @@
extern "C" {
#endif
+
/*-*************************************
* Constants
***************************************/
@@ -37,7 +38,8 @@ extern "C" {
It's not a big deal though : candidate will just be sorted again.
   Additionally, candidate position 1 will be lost.
But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
- The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be misdhandled after table re-use with a different strategy */
+   The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy
+ Constant required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */
/*-*************************************
@@ -53,14 +55,22 @@ typedef struct ZSTD_prefixDict_s {
} ZSTD_prefixDict;
typedef struct {
- U32 hufCTable[HUF_CTABLE_SIZE_U32(255)];
+ U32 CTable[HUF_CTABLE_SIZE_U32(255)];
+ HUF_repeat repeatMode;
+} ZSTD_hufCTables_t;
+
+typedef struct {
FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
- HUF_repeat hufCTable_repeatMode;
FSE_repeat offcode_repeatMode;
FSE_repeat matchlength_repeatMode;
FSE_repeat litlength_repeatMode;
+} ZSTD_fseCTables_t;
+
+typedef struct {
+ ZSTD_hufCTables_t huf;
+ ZSTD_fseCTables_t fse;
} ZSTD_entropyCTables_t;
typedef struct {
@@ -76,26 +86,27 @@ typedef struct {
U32 rep[ZSTD_REP_NUM];
} ZSTD_optimal_t;
+typedef enum { zop_dynamic=0, zop_predef } ZSTD_OptPrice_e;
+
typedef struct {
/* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */
- U32* litFreq; /* table of literals statistics, of size 256 */
- U32* litLengthFreq; /* table of litLength statistics, of size (MaxLL+1) */
- U32* matchLengthFreq; /* table of matchLength statistics, of size (MaxML+1) */
- U32* offCodeFreq; /* table of offCode statistics, of size (MaxOff+1) */
- ZSTD_match_t* matchTable; /* list of found matches, of size ZSTD_OPT_NUM+1 */
- ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */
+ unsigned* litFreq; /* table of literals statistics, of size 256 */
+ unsigned* litLengthFreq; /* table of litLength statistics, of size (MaxLL+1) */
+ unsigned* matchLengthFreq; /* table of matchLength statistics, of size (MaxML+1) */
+ unsigned* offCodeFreq; /* table of offCode statistics, of size (MaxOff+1) */
+ ZSTD_match_t* matchTable; /* list of found matches, of size ZSTD_OPT_NUM+1 */
+ ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */
U32 litSum; /* nb of literals */
U32 litLengthSum; /* nb of litLength codes */
U32 matchLengthSum; /* nb of matchLength codes */
U32 offCodeSum; /* nb of offset codes */
- /* begin updated by ZSTD_setLog2Prices */
- U32 log2litSum; /* pow2 to compare log2(litfreq) to */
- U32 log2litLengthSum; /* pow2 to compare log2(llfreq) to */
- U32 log2matchLengthSum; /* pow2 to compare log2(mlfreq) to */
- U32 log2offCodeSum; /* pow2 to compare log2(offreq) to */
- /* end : updated by ZSTD_setLog2Prices */
- U32 staticPrices; /* prices follow a pre-defined cost structure, statistics are irrelevant */
+ U32 litSumBasePrice; /* to compare to log2(litfreq) */
+ U32 litLengthSumBasePrice; /* to compare to log2(llfreq) */
+ U32 matchLengthSumBasePrice;/* to compare to log2(mlfreq) */
+ U32 offCodeSumBasePrice; /* to compare to log2(offreq) */
+ ZSTD_OptPrice_e priceType; /* prices can be determined dynamically, or follow a pre-defined cost structure */
+ const ZSTD_entropyCTables_t* symbolCosts; /* pre-calculated dictionary statistics */
} optState_t;
typedef struct {
@@ -111,17 +122,20 @@ typedef struct {
U32 lowLimit; /* below that point, no more data */
} ZSTD_window_t;
-typedef struct {
- ZSTD_window_t window; /* State for window round buffer management */
- U32 loadedDictEnd; /* index of end of dictionary */
- U32 nextToUpdate; /* index from which to continue table update */
- U32 nextToUpdate3; /* index from which to continue table update */
- U32 hashLog3; /* dispatch table : larger == faster, more memory */
+typedef struct ZSTD_matchState_t ZSTD_matchState_t;
+struct ZSTD_matchState_t {
+ ZSTD_window_t window; /* State for window round buffer management */
+ U32 loadedDictEnd; /* index of end of dictionary */
+ U32 nextToUpdate; /* index from which to continue table update */
+ U32 nextToUpdate3; /* index from which to continue table update */
+ U32 hashLog3; /* dispatch table : larger == faster, more memory */
U32* hashTable;
U32* hashTable3;
U32* chainTable;
optState_t opt; /* optimal parser state */
-} ZSTD_matchState_t;
+ const ZSTD_matchState_t * dictMatchState;
+ ZSTD_compressionParameters cParams;
+};
typedef struct {
ZSTD_compressedBlockState_t* prevCBlock;
@@ -147,7 +161,7 @@ typedef struct {
U32 hashLog; /* Log size of hashTable */
U32 bucketSizeLog; /* Log bucket size for collision resolution, at most 8 */
U32 minMatchLength; /* Minimum match length */
- U32 hashEveryLog; /* Log number of entries to skip */
+ U32 hashRateLog; /* Log number of entries to skip */
U32 windowLog; /* Window log for the LDM */
} ldmParams_t;
@@ -161,7 +175,7 @@ typedef struct {
rawSeq* seq; /* The start of the sequences */
size_t pos; /* The position where reading stopped. <= size. */
size_t size; /* The number of sequences. <= capacity. */
- size_t capacity; /* The capacity of the `seq` pointer */
+ size_t capacity; /* The capacity starting from `seq` pointer */
} rawSeqStore_t;
struct ZSTD_CCtx_params_s {
@@ -170,14 +184,16 @@ struct ZSTD_CCtx_params_s {
ZSTD_frameParameters fParams;
int compressionLevel;
- int disableLiteralCompression;
int forceWindow; /* force back-references to respect limit of
* 1<<wLog, even for dictionary */
+ ZSTD_dictAttachPref_e attachDictPref;
+
/* Multithreading: used to pass parameters to mtctx */
- unsigned nbWorkers;
- unsigned jobSize;
- unsigned overlapSizeLog;
+ int nbWorkers;
+ size_t jobSize;
+ int overlapLog;
+ int rsyncable;
/* Long distance matching parameters */
ldmParams_t ldmParams;
@@ -193,6 +209,8 @@ struct ZSTD_CCtx_s {
ZSTD_CCtx_params requestedParams;
ZSTD_CCtx_params appliedParams;
U32 dictID;
+
+ int workSpaceOversizedDuration;
void* workSpace;
size_t workSpaceSize;
size_t blockSize;
@@ -235,11 +253,15 @@ struct ZSTD_CCtx_s {
#endif
};
+typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e;
+
+typedef enum { ZSTD_noDict = 0, ZSTD_extDict = 1, ZSTD_dictMatchState = 2 } ZSTD_dictMode_e;
+
typedef size_t (*ZSTD_blockCompressor) (
ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
-ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict);
+ void const* src, size_t srcSize);
+ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode);
MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
@@ -280,16 +302,18 @@ MEM_STATIC U32 ZSTD_MLcode(U32 mlBase)
*/
MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t mlBase)
{
-#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG >= 6)
+#if defined(DEBUGLEVEL) && (DEBUGLEVEL >= 6)
static const BYTE* g_start = NULL;
if (g_start==NULL) g_start = (const BYTE*)literals; /* note : index only works for compression within a single segment */
{ U32 const pos = (U32)((const BYTE*)literals - g_start);
- DEBUGLOG(6, "Cpos%7u :%3u literals, match%3u bytes at dist.code%7u",
+ DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u",
pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offsetCode);
}
#endif
+ assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
/* copy Literals */
- assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + 128 KB);
+ assert(seqStorePtr->maxNbLit <= 128 KB);
+ assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit);
ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
seqStorePtr->lit += litLength;
@@ -420,6 +444,11 @@ ZSTD_count_2segments(const BYTE* ip, const BYTE* match,
const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
size_t const matchLength = ZSTD_count(ip, match, vEnd);
if (match + matchLength != mEnd) return matchLength;
+ DEBUGLOG(7, "ZSTD_count_2segments: found a 2-parts match (current length==%zu)", matchLength);
+ DEBUGLOG(7, "distance from match beginning to end dictionary = %zi", mEnd - match);
+ DEBUGLOG(7, "distance from current pos to end buffer = %zi", iEnd - ip);
+ DEBUGLOG(7, "next byte : ip==%02X, istart==%02X", ip[matchLength], *iStart);
+ DEBUGLOG(7, "final match length = %zu", matchLength + ZSTD_count(ip+matchLength, iStart, iEnd));
return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
}
@@ -464,6 +493,64 @@ MEM_STATIC size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
}
}
+/** ZSTD_ipow() :
+ * Return base^exponent.
+ */
+static U64 ZSTD_ipow(U64 base, U64 exponent)
+{
+ U64 power = 1;
+ while (exponent) {
+ if (exponent & 1) power *= base;
+ exponent >>= 1;
+ base *= base;
+ }
+ return power;
+}
+
+#define ZSTD_ROLL_HASH_CHAR_OFFSET 10
+
+/** ZSTD_rollingHash_append() :
+ * Add the buffer to the hash value.
+ */
+static U64 ZSTD_rollingHash_append(U64 hash, void const* buf, size_t size)
+{
+ BYTE const* istart = (BYTE const*)buf;
+ size_t pos;
+ for (pos = 0; pos < size; ++pos) {
+ hash *= prime8bytes;
+ hash += istart[pos] + ZSTD_ROLL_HASH_CHAR_OFFSET;
+ }
+ return hash;
+}
+
+/** ZSTD_rollingHash_compute() :
+ * Compute the rolling hash value of the buffer.
+ */
+MEM_STATIC U64 ZSTD_rollingHash_compute(void const* buf, size_t size)
+{
+ return ZSTD_rollingHash_append(0, buf, size);
+}
+
+/** ZSTD_rollingHash_primePower() :
+ * Compute the primePower to be passed to ZSTD_rollingHash_rotate() for a hash
+ * over a window of length bytes.
+ */
+MEM_STATIC U64 ZSTD_rollingHash_primePower(U32 length)
+{
+ return ZSTD_ipow(prime8bytes, length - 1);
+}
+
+/** ZSTD_rollingHash_rotate() :
+ * Rotate the rolling hash by one byte.
+ */
+MEM_STATIC U64 ZSTD_rollingHash_rotate(U64 hash, BYTE toRemove, BYTE toAdd, U64 primePower)
+{
+ hash -= (toRemove + ZSTD_ROLL_HASH_CHAR_OFFSET) * primePower;
+ hash *= prime8bytes;
+ hash += toAdd + ZSTD_ROLL_HASH_CHAR_OFFSET;
+ return hash;
+}
+
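A hedged self-check of the rolling-hash identity (not part of the patch; the helper name is hypothetical and assumes the surrounding zstd_compress_internal.h context): rotating the hash of buf[0..len-1] by one byte must equal the hash of buf[1..len].

    MEM_STATIC void ZSTD_rollingHash_selfTest(const BYTE* buf, U32 len)
    {
        /* requires len >= 1 and at least len+1 readable bytes at buf */
        U64 const primePower = ZSTD_rollingHash_primePower(len);
        U64 hash = ZSTD_rollingHash_compute(buf, len);          /* hash of buf[0 .. len-1] */
        hash = ZSTD_rollingHash_rotate(hash, buf[0], buf[len], primePower);
        assert(hash == ZSTD_rollingHash_compute(buf + 1, len)); /* hash of buf[1 .. len] */
        (void)hash;
    }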
/*-*************************************
* Round buffer management
***************************************/
@@ -497,6 +584,20 @@ MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window)
}
/**
+ * ZSTD_matchState_dictMode():
+ * Inspects the provided matchState and figures out what dictMode should be
+ * passed to the compressor.
+ */
+MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms)
+{
+ return ZSTD_window_hasExtDict(ms->window) ?
+ ZSTD_extDict :
+ ms->dictMatchState != NULL ?
+ ZSTD_dictMatchState :
+ ZSTD_noDict;
+}
+
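For orientation (an assumption about the call site, which is not shown in this diff): the selector below is meant to feed ZSTD_selectBlockCompressor(), roughly as in the following line; variable names are illustrative.

    ZSTD_blockCompressor const blockCompressor =
        ZSTD_selectBlockCompressor(cParams.strategy, ZSTD_matchState_dictMode(ms));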
+/**
* ZSTD_window_needOverflowCorrection():
* Returns non-zero if the indices are getting too large and need overflow
* protection.
@@ -563,31 +664,44 @@ MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
* ZSTD_window_enforceMaxDist():
* Updates lowLimit so that:
* (srcEnd - base) - lowLimit == maxDist + loadedDictEnd
+ *
* This allows a simple check that index >= lowLimit to see if index is valid.
* This must be called before a block compression call, with srcEnd as the block
* source end.
+ *
* If loadedDictEndPtr is not NULL, we set it to zero once we update lowLimit.
* This is because dictionaries are allowed to be referenced as long as the last
* byte of the dictionary is in the window, but once they are out of range,
* they cannot be referenced. If loadedDictEndPtr is NULL, we use
* loadedDictEnd == 0.
+ *
+ * In normal dict mode, the dict is between lowLimit and dictLimit. In
+ * dictMatchState mode, lowLimit and dictLimit are the same, and the dictionary
+ * is below them. forceWindow and dictMatchState are therefore incompatible.
*/
-MEM_STATIC void ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
- void const* srcEnd, U32 maxDist,
- U32* loadedDictEndPtr)
+MEM_STATIC void
+ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
+ void const* srcEnd,
+ U32 maxDist,
+ U32* loadedDictEndPtr,
+ const ZSTD_matchState_t** dictMatchStatePtr)
{
- U32 const current = (U32)((BYTE const*)srcEnd - window->base);
- U32 loadedDictEnd = loadedDictEndPtr != NULL ? *loadedDictEndPtr : 0;
- if (current > maxDist + loadedDictEnd) {
- U32 const newLowLimit = current - maxDist;
+ U32 const blockEndIdx = (U32)((BYTE const*)srcEnd - window->base);
+ U32 loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;
+ DEBUGLOG(5, "ZSTD_window_enforceMaxDist: blockEndIdx=%u, maxDist=%u",
+ (unsigned)blockEndIdx, (unsigned)maxDist);
+ if (blockEndIdx > maxDist + loadedDictEnd) {
+ U32 const newLowLimit = blockEndIdx - maxDist;
if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit;
if (window->dictLimit < window->lowLimit) {
- DEBUGLOG(5, "Update dictLimit from %u to %u", window->dictLimit,
- window->lowLimit);
+ DEBUGLOG(5, "Update dictLimit to match lowLimit, from %u to %u",
+ (unsigned)window->dictLimit, (unsigned)window->lowLimit);
window->dictLimit = window->lowLimit;
}
if (loadedDictEndPtr)
*loadedDictEndPtr = 0;
+ if (dictMatchStatePtr)
+ *dictMatchStatePtr = NULL;
}
}
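A worked instance of the clamping arithmetic above (illustrative values, not part of the patch):

    U32 const blockEndIdx   = 1000;   /* (srcEnd - base) */
    U32 const maxDist       = 256;    /* 1 << windowLog */
    U32 const loadedDictEnd = 0;      /* no dictionary in range */
    /* 1000 > 256 + 0, so the window is clamped: */
    U32 const newLowLimit   = blockEndIdx - maxDist;   /* == 744 */
    /* any candidate index < 744 now fails the `index >= lowLimit` check */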
@@ -603,12 +717,12 @@ MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
{
BYTE const* const ip = (BYTE const*)src;
U32 contiguous = 1;
+ DEBUGLOG(5, "ZSTD_window_update");
/* Check if blocks follow each other */
if (src != window->nextSrc) {
/* not contiguous */
size_t const distanceFromBase = (size_t)(window->nextSrc - window->base);
- DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u",
- window->dictLimit);
+ DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u", window->dictLimit);
window->lowLimit = window->dictLimit;
assert(distanceFromBase == (size_t)(U32)distanceFromBase); /* should never overflow */
window->dictLimit = (U32)distanceFromBase;
@@ -625,10 +739,44 @@ MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase;
U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx;
window->lowLimit = lowLimitMax;
+ DEBUGLOG(5, "Overlapping extDict and input : new lowLimit = %u", window->lowLimit);
}
return contiguous;
}
+
+/* debug functions */
+#if (DEBUGLEVEL>=2)
+
+MEM_STATIC double ZSTD_fWeight(U32 rawStat)
+{
+ U32 const fp_accuracy = 8;
+ U32 const fp_multiplier = (1 << fp_accuracy);
+ U32 const newStat = rawStat + 1;
+ U32 const hb = ZSTD_highbit32(newStat);
+ U32 const BWeight = hb * fp_multiplier;
+ U32 const FWeight = (newStat << fp_accuracy) >> hb;
+ U32 const weight = BWeight + FWeight;
+ assert(hb + fp_accuracy < 31);
+ return (double)weight / fp_multiplier;
+}
+
+/* display a table content,
+ * listing each element, its frequency, and its predicted bit cost */
+MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max)
+{
+ unsigned u, sum;
+ for (u=0, sum=0; u<=max; u++) sum += table[u];
+ DEBUGLOG(2, "total nb elts: %u", sum);
+ for (u=0; u<=max; u++) {
+ DEBUGLOG(2, "%2u: %5u (%.2f)",
+ u, table[u], ZSTD_fWeight(sum) - ZSTD_fWeight(table[u]) );
+ }
+}
+
+#endif
+
+
#if defined (__cplusplus)
}
#endif
@@ -640,7 +788,7 @@ MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
* ============================================================== */
/* ZSTD_getCParamsFromCCtxParams() :
- * cParams are built depending on compressionLevel, src size hints,
+ * cParams are built depending on compressionLevel, src size hints,
* LDM and manually set compression parameters.
*/
ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
@@ -656,6 +804,8 @@ size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
const ZSTD_CDict* cdict,
ZSTD_CCtx_params params, unsigned long long pledgedSrcSize);
+void ZSTD_resetSeqStore(seqStore_t* ssPtr);
+
/*! ZSTD_compressStream_generic() :
* Private use only. To be called from zstdmt_compress.c in single-thread mode. */
size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
@@ -672,6 +822,7 @@ ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict);
size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
const void* dict, size_t dictSize,
ZSTD_dictContentType_e dictContentType,
+ ZSTD_dictTableLoadMethod_e dtlm,
const ZSTD_CDict* cdict,
ZSTD_CCtx_params params,
unsigned long long pledgedSrcSize);
diff --git a/thirdparty/zstd/compress/zstd_double_fast.c b/thirdparty/zstd/compress/zstd_double_fast.c
index 86e6b39621..47faf6d641 100644
--- a/thirdparty/zstd/compress/zstd_double_fast.c
+++ b/thirdparty/zstd/compress/zstd_double_fast.c
@@ -13,12 +13,12 @@
void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
- ZSTD_compressionParameters const* cParams,
- void const* end)
+ void const* end, ZSTD_dictTableLoadMethod_e dtlm)
{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const hashLarge = ms->hashTable;
U32 const hBitsL = cParams->hashLog;
- U32 const mls = cParams->searchLength;
+ U32 const mls = cParams->minMatch;
U32* const hashSmall = ms->chainTable;
U32 const hBitsS = cParams->chainLog;
const BYTE* const base = ms->window.base;
@@ -40,6 +40,9 @@ void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
hashSmall[smHash] = current + i;
if (i == 0 || hashLarge[lgHash] == 0)
hashLarge[lgHash] = current + i;
+ /* Only load extra positions for ZSTD_dtlm_full */
+ if (dtlm == ZSTD_dtlm_fast)
+ break;
}
}
}
@@ -48,9 +51,10 @@ void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_doubleFast_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize,
- U32 const mls /* template */)
+ void const* src, size_t srcSize,
+ U32 const mls /* template */, ZSTD_dictMode_e const dictMode)
{
+ ZSTD_compressionParameters const* cParams = &ms->cParams;
U32* const hashLong = ms->hashTable;
const U32 hBitsL = cParams->hashLog;
U32* const hashSmall = ms->chainTable;
@@ -59,70 +63,188 @@ size_t ZSTD_compressBlock_doubleFast_generic(
const BYTE* const istart = (const BYTE*)src;
const BYTE* ip = istart;
const BYTE* anchor = istart;
- const U32 lowestIndex = ms->window.dictLimit;
- const BYTE* const lowest = base + lowestIndex;
+ const U32 prefixLowestIndex = ms->window.dictLimit;
+ const BYTE* const prefixLowest = base + prefixLowestIndex;
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - HASH_READ_SIZE;
U32 offset_1=rep[0], offset_2=rep[1];
U32 offsetSaved = 0;
+ const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const ZSTD_compressionParameters* const dictCParams =
+ dictMode == ZSTD_dictMatchState ?
+ &dms->cParams : NULL;
+ const U32* const dictHashLong = dictMode == ZSTD_dictMatchState ?
+ dms->hashTable : NULL;
+ const U32* const dictHashSmall = dictMode == ZSTD_dictMatchState ?
+ dms->chainTable : NULL;
+ const U32 dictStartIndex = dictMode == ZSTD_dictMatchState ?
+ dms->window.dictLimit : 0;
+ const BYTE* const dictBase = dictMode == ZSTD_dictMatchState ?
+ dms->window.base : NULL;
+ const BYTE* const dictStart = dictMode == ZSTD_dictMatchState ?
+ dictBase + dictStartIndex : NULL;
+ const BYTE* const dictEnd = dictMode == ZSTD_dictMatchState ?
+ dms->window.nextSrc : NULL;
+ const U32 dictIndexDelta = dictMode == ZSTD_dictMatchState ?
+ prefixLowestIndex - (U32)(dictEnd - dictBase) :
+ 0;
+ const U32 dictHBitsL = dictMode == ZSTD_dictMatchState ?
+ dictCParams->hashLog : hBitsL;
+ const U32 dictHBitsS = dictMode == ZSTD_dictMatchState ?
+ dictCParams->chainLog : hBitsS;
+ const U32 dictAndPrefixLength = (U32)(ip - prefixLowest + dictEnd - dictStart);
+
+ assert(dictMode == ZSTD_noDict || dictMode == ZSTD_dictMatchState);
+
/* init */
- ip += (ip==lowest);
- { U32 const maxRep = (U32)(ip-lowest);
+ ip += (dictAndPrefixLength == 0);
+ if (dictMode == ZSTD_noDict) {
+ U32 const maxRep = (U32)(ip - prefixLowest);
if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
}
+ if (dictMode == ZSTD_dictMatchState) {
+ /* dictMatchState repCode checks don't currently handle repCode == 0
+ * disabling. */
+ assert(offset_1 <= dictAndPrefixLength);
+ assert(offset_2 <= dictAndPrefixLength);
+ }
/* Main Search Loop */
while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
size_t mLength;
+ U32 offset;
size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
+ size_t const dictHL = ZSTD_hashPtr(ip, dictHBitsL, 8);
+ size_t const dictHS = ZSTD_hashPtr(ip, dictHBitsS, mls);
U32 const current = (U32)(ip-base);
U32 const matchIndexL = hashLong[h2];
- U32 const matchIndexS = hashSmall[h];
+ U32 matchIndexS = hashSmall[h];
const BYTE* matchLong = base + matchIndexL;
const BYTE* match = base + matchIndexS;
+ const U32 repIndex = current + 1 - offset_1;
+ const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
+ && repIndex < prefixLowestIndex) ?
+ dictBase + (repIndex - dictIndexDelta) :
+ base + repIndex;
hashLong[h2] = hashSmall[h] = current; /* update hash tables */
- assert(offset_1 <= current); /* supposed guaranteed by construction */
- if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
- /* favor repcode */
+ /* check dictMatchState repcode */
+ if (dictMode == ZSTD_dictMatchState
+ && ((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
+ mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
+ goto _match_stored;
+ }
+
+ /* check noDict repcode */
+ if ( dictMode == ZSTD_noDict
+ && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
ip++;
ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
- } else {
- U32 offset;
- if ( (matchIndexL > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip)) ) {
+ goto _match_stored;
+ }
+
+ if (matchIndexL > prefixLowestIndex) {
+ /* check prefix long match */
+ if (MEM_read64(matchLong) == MEM_read64(ip)) {
mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
offset = (U32)(ip-matchLong);
- while (((ip>anchor) & (matchLong>lowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
- } else if ( (matchIndexS > lowestIndex) && (MEM_read32(match) == MEM_read32(ip)) ) {
- size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
- U32 const matchIndexL3 = hashLong[hl3];
- const BYTE* matchL3 = base + matchIndexL3;
- hashLong[hl3] = current + 1;
- if ( (matchIndexL3 > lowestIndex) && (MEM_read64(matchL3) == MEM_read64(ip+1)) ) {
+ while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
+ goto _match_found;
+ }
+ } else if (dictMode == ZSTD_dictMatchState) {
+ /* check dictMatchState long match */
+ U32 const dictMatchIndexL = dictHashLong[dictHL];
+ const BYTE* dictMatchL = dictBase + dictMatchIndexL;
+ assert(dictMatchL < dictEnd);
+
+ if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) {
+ mLength = ZSTD_count_2segments(ip+8, dictMatchL+8, iend, dictEnd, prefixLowest) + 8;
+ offset = (U32)(current - dictMatchIndexL - dictIndexDelta);
+ while (((ip>anchor) & (dictMatchL>dictStart)) && (ip[-1] == dictMatchL[-1])) { ip--; dictMatchL--; mLength++; } /* catch up */
+ goto _match_found;
+ }
+ }
+
+ if (matchIndexS > prefixLowestIndex) {
+ /* check prefix short match */
+ if (MEM_read32(match) == MEM_read32(ip)) {
+ goto _search_next_long;
+ }
+ } else if (dictMode == ZSTD_dictMatchState) {
+ /* check dictMatchState short match */
+ U32 const dictMatchIndexS = dictHashSmall[dictHS];
+ match = dictBase + dictMatchIndexS;
+ matchIndexS = dictMatchIndexS + dictIndexDelta;
+
+ if (match > dictStart && MEM_read32(match) == MEM_read32(ip)) {
+ goto _search_next_long;
+ }
+ }
+
+ ip += ((ip-anchor) >> kSearchStrength) + 1;
+ continue;
+
+_search_next_long:
+
+ {
+ size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
+ size_t const dictHLNext = ZSTD_hashPtr(ip+1, dictHBitsL, 8);
+ U32 const matchIndexL3 = hashLong[hl3];
+ const BYTE* matchL3 = base + matchIndexL3;
+ hashLong[hl3] = current + 1;
+
+ /* check prefix long +1 match */
+ if (matchIndexL3 > prefixLowestIndex) {
+ if (MEM_read64(matchL3) == MEM_read64(ip+1)) {
mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
ip++;
offset = (U32)(ip-matchL3);
- while (((ip>anchor) & (matchL3>lowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
- } else {
- mLength = ZSTD_count(ip+4, match+4, iend) + 4;
- offset = (U32)(ip-match);
- while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
+ goto _match_found;
+ }
+ } else if (dictMode == ZSTD_dictMatchState) {
+ /* check dict long +1 match */
+ U32 const dictMatchIndexL3 = dictHashLong[dictHLNext];
+ const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3;
+ assert(dictMatchL3 < dictEnd);
+ if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) {
+ mLength = ZSTD_count_2segments(ip+1+8, dictMatchL3+8, iend, dictEnd, prefixLowest) + 8;
+ ip++;
+ offset = (U32)(current + 1 - dictMatchIndexL3 - dictIndexDelta);
+ while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; } /* catch up */
+ goto _match_found;
}
- } else {
- ip += ((ip-anchor) >> kSearchStrength) + 1;
- continue;
}
+ }
- offset_2 = offset_1;
- offset_1 = offset;
-
- ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+ /* if no long +1 match, explore the short match we found */
+ if (dictMode == ZSTD_dictMatchState && matchIndexS < prefixLowestIndex) {
+ mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4;
+ offset = (U32)(current - matchIndexS);
+ while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ } else {
+ mLength = ZSTD_count(ip+4, match+4, iend) + 4;
+ offset = (U32)(ip - match);
+ while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
}
+ /* fall-through */
+
+_match_found:
+ offset_2 = offset_1;
+ offset_1 = offset;
+
+ ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+
+_match_stored:
/* match found */
ip += mLength;
anchor = ip;
@@ -135,19 +257,44 @@ size_t ZSTD_compressBlock_doubleFast_generic(
hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base);
/* check immediate repcode */
- while ( (ip <= ilimit)
- && ( (offset_2>0)
- & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
- /* store sequence */
- size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
- { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
- hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
- hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
- ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
- ip += rLength;
- anchor = ip;
- continue; /* faster when present ... (?) */
- } } }
+ if (dictMode == ZSTD_dictMatchState) {
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex2 = current2 - offset_2;
+ const BYTE* repMatch2 = dictMode == ZSTD_dictMatchState
+ && repIndex2 < prefixLowestIndex ?
+ dictBase - dictIndexDelta + repIndex2 :
+ base + repIndex2;
+ if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
+ size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
+ U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
+ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
+ hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
+ ip += repLength2;
+ anchor = ip;
+ continue;
+ }
+ break;
+ }
+ }
+
+ if (dictMode == ZSTD_noDict) {
+ while ( (ip <= ilimit)
+ && ( (offset_2>0)
+ & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
+ /* store sequence */
+ size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+ U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */
+ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
+ hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
+ ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
+ ip += rLength;
+ anchor = ip;
+ continue; /* faster when present ... (?) */
+ } } } }
/* save reps for next block */
rep[0] = offset_1 ? offset_1 : offsetSaved;
@@ -160,102 +307,126 @@ size_t ZSTD_compressBlock_doubleFast_generic(
size_t ZSTD_compressBlock_doubleFast(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+ void const* src, size_t srcSize)
+{
+ const U32 mls = ms->cParams.minMatch;
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_noDict);
+ case 5 :
+ return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_noDict);
+ case 6 :
+ return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_noDict);
+ case 7 :
+ return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_noDict);
+ }
+}
+
+
+size_t ZSTD_compressBlock_doubleFast_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
{
- const U32 mls = cParams->searchLength;
+ const U32 mls = ms->cParams.minMatch;
switch(mls)
{
default: /* includes case 3 */
case 4 :
- return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, cParams, src, srcSize, 4);
+ return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_dictMatchState);
case 5 :
- return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, cParams, src, srcSize, 5);
+ return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_dictMatchState);
case 6 :
- return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, cParams, src, srcSize, 6);
+ return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_dictMatchState);
case 7 :
- return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, cParams, src, srcSize, 7);
+ return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_dictMatchState);
}
}
static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize,
+ void const* src, size_t srcSize,
U32 const mls /* template */)
{
+ ZSTD_compressionParameters const* cParams = &ms->cParams;
U32* const hashLong = ms->hashTable;
U32 const hBitsL = cParams->hashLog;
U32* const hashSmall = ms->chainTable;
U32 const hBitsS = cParams->chainLog;
- const BYTE* const base = ms->window.base;
- const BYTE* const dictBase = ms->window.dictBase;
const BYTE* const istart = (const BYTE*)src;
const BYTE* ip = istart;
const BYTE* anchor = istart;
- const U32 lowestIndex = ms->window.lowLimit;
- const BYTE* const dictStart = dictBase + lowestIndex;
- const U32 dictLimit = ms->window.dictLimit;
- const BYTE* const lowPrefixPtr = base + dictLimit;
- const BYTE* const dictEnd = dictBase + dictLimit;
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - 8;
+ const U32 prefixStartIndex = ms->window.dictLimit;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const prefixStart = base + prefixStartIndex;
+ const U32 dictStartIndex = ms->window.lowLimit;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const BYTE* const dictStart = dictBase + dictStartIndex;
+ const BYTE* const dictEnd = dictBase + prefixStartIndex;
U32 offset_1=rep[0], offset_2=rep[1];
+ DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_extDict_generic (srcSize=%zu)", srcSize);
+
/* Search Loop */
while (ip < ilimit) { /* < instead of <=, because (ip+1) */
const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
const U32 matchIndex = hashSmall[hSmall];
- const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base;
+ const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
const BYTE* match = matchBase + matchIndex;
const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
const U32 matchLongIndex = hashLong[hLong];
- const BYTE* matchLongBase = matchLongIndex < dictLimit ? dictBase : base;
+ const BYTE* const matchLongBase = matchLongIndex < prefixStartIndex ? dictBase : base;
const BYTE* matchLong = matchLongBase + matchLongIndex;
const U32 current = (U32)(ip-base);
const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */
- const BYTE* repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE* repMatch = repBase + repIndex;
+ const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
size_t mLength;
hashSmall[hSmall] = hashLong[hLong] = current; /* update hash table */
- if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex))
- && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
- const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
- mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4;
+ if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */
+ & (repIndex > dictStartIndex))
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+ mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
ip++;
ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
} else {
- if ((matchLongIndex > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
- const BYTE* matchEnd = matchLongIndex < dictLimit ? dictEnd : iend;
- const BYTE* lowMatchPtr = matchLongIndex < dictLimit ? dictStart : lowPrefixPtr;
+ if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
+ const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
+ const BYTE* const lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart;
U32 offset;
- mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, lowPrefixPtr) + 8;
+ mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, prefixStart) + 8;
offset = current - matchLongIndex;
while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
offset_2 = offset_1;
offset_1 = offset;
ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
- } else if ((matchIndex > lowestIndex) && (MEM_read32(match) == MEM_read32(ip))) {
+ } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
U32 const matchIndex3 = hashLong[h3];
- const BYTE* const match3Base = matchIndex3 < dictLimit ? dictBase : base;
+ const BYTE* const match3Base = matchIndex3 < prefixStartIndex ? dictBase : base;
const BYTE* match3 = match3Base + matchIndex3;
U32 offset;
hashLong[h3] = current + 1;
- if ( (matchIndex3 > lowestIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
- const BYTE* matchEnd = matchIndex3 < dictLimit ? dictEnd : iend;
- const BYTE* lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr;
- mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, lowPrefixPtr) + 8;
+ if ( (matchIndex3 > dictStartIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
+ const BYTE* const matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend;
+ const BYTE* const lowMatchPtr = matchIndex3 < prefixStartIndex ? dictStart : prefixStart;
+ mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, prefixStart) + 8;
ip++;
offset = current+1 - matchIndex3;
while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
} else {
- const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
- const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
- mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4;
+ const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
+ const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
+ mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
offset = current - matchIndex;
while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
}
@@ -282,12 +453,13 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
while (ip <= ilimit) {
U32 const current2 = (U32)(ip-base);
U32 const repIndex2 = current2 - offset_2;
- const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
- if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */
- && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
- const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
- size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4;
- U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
+ const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
+ if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */
+ & (repIndex2 > dictStartIndex))
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
+ size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
+ U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
@@ -309,19 +481,19 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
size_t ZSTD_compressBlock_doubleFast_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+ void const* src, size_t srcSize)
{
- U32 const mls = cParams->searchLength;
+ U32 const mls = ms->cParams.minMatch;
switch(mls)
{
default: /* includes case 3 */
case 4 :
- return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 4);
+ return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
case 5 :
- return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 5);
+ return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
case 6 :
- return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 6);
+ return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
case 7 :
- return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 7);
+ return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
}
}
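
The dispatcher above follows the same pattern as every other fast/double-fast entry point in this diff: it switches on minMatch and calls the FORCE_INLINE_TEMPLATE generic body with a literal argument, so the compiler emits one specialized copy per value ("template emulation" in C). A minimal standalone sketch of that idiom, not zstd's actual code; the worker's placeholder computation is invented:

#include <stddef.h>
#include <stdint.h>

static inline size_t worker_generic(const uint8_t* src, size_t srcSize, unsigned mls)
{
    /* mls is a literal constant at each call site below, so the compiler can
     * specialize this body per value and fold away any branch on mls. */
    size_t hits = 0, i;
    if (srcSize < mls) return 0;
    for (i = 0; i + mls <= srcSize; i++)
        hits += (src[i] == src[0]);          /* placeholder work */
    return hits;
}

size_t worker_dispatch(const uint8_t* src, size_t srcSize, unsigned mls)
{
    switch (mls) {
    default: /* includes case 3 */
    case 4: return worker_generic(src, srcSize, 4);
    case 5: return worker_generic(src, srcSize, 5);
    case 6: return worker_generic(src, srcSize, 6);
    case 7: return worker_generic(src, srcSize, 7);
    }
}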
diff --git a/thirdparty/zstd/compress/zstd_double_fast.h b/thirdparty/zstd/compress/zstd_double_fast.h
index 6d80b2774c..4fa31acfc0 100644
--- a/thirdparty/zstd/compress/zstd_double_fast.h
+++ b/thirdparty/zstd/compress/zstd_double_fast.h
@@ -19,14 +19,16 @@ extern "C" {
#include "zstd_compress_internal.h" /* ZSTD_CCtx, size_t */
void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
- ZSTD_compressionParameters const* cParams,
- void const* end);
+ void const* end, ZSTD_dictTableLoadMethod_e dtlm);
size_t ZSTD_compressBlock_doubleFast(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_doubleFast_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
size_t ZSTD_compressBlock_doubleFast_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+ void const* src, size_t srcSize);
#if defined (__cplusplus)
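
With cParams now read from ms->cParams, all block compressors declared in this header share the signature (ms, seqStore, rep, src, srcSize). A hedged sketch of why that uniformity is convenient: the caller can pick a variant through a plain function-pointer table keyed by dictionary mode. The types, names and stub bodies below are illustrative stand-ins, not zstd's actual selector:

#include <stddef.h>
#include <stdio.h>

typedef struct matchState_s matchState_t;   /* opaque stand-ins, illustrative only */
typedef struct seqStore_s   seqStore_t;

typedef size_t (*blockCompressor_f)(matchState_t* ms, seqStore_t* seqStore,
                                    unsigned rep[3],
                                    const void* src, size_t srcSize);

typedef enum { MODE_noDict = 0, MODE_extDict, MODE_dictMatchState, MODE_count } mode_e;

/* three hypothetical variants, all sharing the same signature */
static size_t cb_fast(matchState_t* ms, seqStore_t* ss, unsigned rep[3], const void* src, size_t n)
{ (void)ms; (void)ss; (void)rep; (void)src; return n; }
static size_t cb_fast_extDict(matchState_t* ms, seqStore_t* ss, unsigned rep[3], const void* src, size_t n)
{ (void)ms; (void)ss; (void)rep; (void)src; return n; }
static size_t cb_fast_dms(matchState_t* ms, seqStore_t* ss, unsigned rep[3], const void* src, size_t n)
{ (void)ms; (void)ss; (void)rep; (void)src; return n; }

static blockCompressor_f selectCompressor(mode_e mode)
{
    static const blockCompressor_f table[MODE_count] =
        { cb_fast, cb_fast_extDict, cb_fast_dms };
    return table[mode];
}

int main(void)
{
    unsigned rep[3] = { 1, 4, 8 };
    printf("%zu\n", selectCompressor(MODE_noDict)(NULL, NULL, rep, "abc", 3));
    return 0;
}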
diff --git a/thirdparty/zstd/compress/zstd_fast.c b/thirdparty/zstd/compress/zstd_fast.c
index df4d28b340..40ba0f73e6 100644
--- a/thirdparty/zstd/compress/zstd_fast.c
+++ b/thirdparty/zstd/compress/zstd_fast.c
@@ -13,12 +13,12 @@
void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
- ZSTD_compressionParameters const* cParams,
- void const* end)
+ void const* end, ZSTD_dictTableLoadMethod_e dtlm)
{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const hashTable = ms->hashTable;
U32 const hBits = cParams->hashLog;
- U32 const mls = cParams->searchLength;
+ U32 const mls = cParams->minMatch;
const BYTE* const base = ms->window.base;
const BYTE* ip = base + ms->nextToUpdate;
const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
@@ -27,41 +27,83 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
/* Always insert every fastHashFillStep position into the hash table.
* Insert the other positions if their hash entry is empty.
*/
- for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
+ for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
U32 const current = (U32)(ip - base);
- U32 i;
- for (i = 0; i < fastHashFillStep; ++i) {
- size_t const hash = ZSTD_hashPtr(ip + i, hBits, mls);
- if (i == 0 || hashTable[hash] == 0)
- hashTable[hash] = current + i;
- }
- }
+ size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);
+ hashTable[hash0] = current;
+ if (dtlm == ZSTD_dtlm_fast) continue;
+ /* Only load extra positions for ZSTD_dtlm_full */
+ { U32 p;
+ for (p = 1; p < fastHashFillStep; ++p) {
+ size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls);
+ if (hashTable[hash] == 0) { /* not yet filled */
+ hashTable[hash] = current + p;
+ } } } }
}
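
The rewritten fill loop above always records one position per fastHashFillStep and, only for ZSTD_dtlm_full, also records the in-between positions when their slot is still empty. A simplified, self-contained sketch of that policy, assuming a toy 4-byte hash and a 3-byte step (neither is zstd's actual hash or constant):

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

typedef enum { dtlm_fast, dtlm_full } dtlm_e;

#define TOY_HASH_LOG 10

static size_t toy_hash(const uint8_t* p)
{
    uint32_t v;
    memcpy(&v, p, sizeof(v));                       /* read 4 bytes */
    return (size_t)((v * 2654435761u) >> (32 - TOY_HASH_LOG));
}

/* caller must leave at least 4 readable bytes after `end`
 * (zstd reserves a similar margin via HASH_READ_SIZE) */
static void fill_hash_table(uint32_t* table, const uint8_t* base,
                            size_t begin, size_t end, dtlm_e dtlm)
{
    size_t const step = 3;                          /* fastHashFillStep analogue */
    size_t pos;
    for (pos = begin; pos + step < end + 2; pos += step) {
        table[toy_hash(base + pos)] = (uint32_t)pos;        /* always insert */
        if (dtlm == dtlm_fast) continue;
        {   size_t p;                                        /* "full" load only */
            for (p = 1; p < step; p++) {
                size_t const h = toy_hash(base + pos + p);
                if (table[h] == 0) table[h] = (uint32_t)(pos + p);  /* only if empty */
        }   }
    }
}

int main(void)
{
    static uint32_t table[1u << TOY_HASH_LOG];
    static const uint8_t buf[64] = "a dictionary would be loaded here...";
    fill_hash_table(table, buf, 1, 48, dtlm_full);  /* start at 1: slot value 0 means "empty" */
    printf("slot for position 1: %u\n", table[toy_hash(buf + 1)]);
    return 0;
}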
FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_fast_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize,
- U32 const hlog, U32 const stepSize, U32 const mls)
+ U32 const mls, ZSTD_dictMode_e const dictMode)
{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const hashTable = ms->hashTable;
+ U32 const hlog = cParams->hashLog;
+ /* support stepSize of 0 */
+ U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
const BYTE* const base = ms->window.base;
const BYTE* const istart = (const BYTE*)src;
const BYTE* ip = istart;
const BYTE* anchor = istart;
- const U32 lowestIndex = ms->window.dictLimit;
- const BYTE* const lowest = base + lowestIndex;
+ const U32 prefixStartIndex = ms->window.dictLimit;
+ const BYTE* const prefixStart = base + prefixStartIndex;
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - HASH_READ_SIZE;
U32 offset_1=rep[0], offset_2=rep[1];
U32 offsetSaved = 0;
+ const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const ZSTD_compressionParameters* const dictCParams =
+ dictMode == ZSTD_dictMatchState ?
+ &dms->cParams : NULL;
+ const U32* const dictHashTable = dictMode == ZSTD_dictMatchState ?
+ dms->hashTable : NULL;
+ const U32 dictStartIndex = dictMode == ZSTD_dictMatchState ?
+ dms->window.dictLimit : 0;
+ const BYTE* const dictBase = dictMode == ZSTD_dictMatchState ?
+ dms->window.base : NULL;
+ const BYTE* const dictStart = dictMode == ZSTD_dictMatchState ?
+ dictBase + dictStartIndex : NULL;
+ const BYTE* const dictEnd = dictMode == ZSTD_dictMatchState ?
+ dms->window.nextSrc : NULL;
+ const U32 dictIndexDelta = dictMode == ZSTD_dictMatchState ?
+ prefixStartIndex - (U32)(dictEnd - dictBase) :
+ 0;
+ const U32 dictAndPrefixLength = (U32)(ip - prefixStart + dictEnd - dictStart);
+ const U32 dictHLog = dictMode == ZSTD_dictMatchState ?
+ dictCParams->hashLog : hlog;
+
+ assert(dictMode == ZSTD_noDict || dictMode == ZSTD_dictMatchState);
+
+ /* otherwise, we would get index underflow when translating a dict index
+ * into a local index */
+ assert(dictMode != ZSTD_dictMatchState
+ || prefixStartIndex >= (U32)(dictEnd - dictBase));
+
/* init */
- ip += (ip==lowest);
- { U32 const maxRep = (U32)(ip-lowest);
+ ip += (dictAndPrefixLength == 0);
+ if (dictMode == ZSTD_noDict) {
+ U32 const maxRep = (U32)(ip - prefixStart);
if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
}
+ if (dictMode == ZSTD_dictMatchState) {
+ /* dictMatchState repCode checks don't currently handle repCode == 0
+ * disabling. */
+ assert(offset_1 <= dictAndPrefixLength);
+ assert(offset_2 <= dictAndPrefixLength);
+ }
/* Main Search Loop */
while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
@@ -70,26 +112,67 @@ size_t ZSTD_compressBlock_fast_generic(
U32 const current = (U32)(ip-base);
U32 const matchIndex = hashTable[h];
const BYTE* match = base + matchIndex;
+ const U32 repIndex = current + 1 - offset_1;
+ const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
+ && repIndex < prefixStartIndex) ?
+ dictBase + (repIndex - dictIndexDelta) :
+ base + repIndex;
hashTable[h] = current; /* update hash table */
- if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
+ if ( (dictMode == ZSTD_dictMatchState)
+ && ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+ mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
+ } else if ( dictMode == ZSTD_noDict
+ && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
ip++;
ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
- } else {
- if ( (matchIndex <= lowestIndex)
- || (MEM_read32(match) != MEM_read32(ip)) ) {
+ } else if ( (matchIndex <= prefixStartIndex) ) {
+ if (dictMode == ZSTD_dictMatchState) {
+ size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
+ U32 const dictMatchIndex = dictHashTable[dictHash];
+ const BYTE* dictMatch = dictBase + dictMatchIndex;
+ if (dictMatchIndex <= dictStartIndex ||
+ MEM_read32(dictMatch) != MEM_read32(ip)) {
+ assert(stepSize >= 1);
+ ip += ((ip-anchor) >> kSearchStrength) + stepSize;
+ continue;
+ } else {
+ /* found a dict match */
+ U32 const offset = (U32)(current-dictMatchIndex-dictIndexDelta);
+ mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
+ while (((ip>anchor) & (dictMatch>dictStart))
+ && (ip[-1] == dictMatch[-1])) {
+ ip--; dictMatch--; mLength++;
+ } /* catch up */
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+ }
+ } else {
assert(stepSize >= 1);
ip += ((ip-anchor) >> kSearchStrength) + stepSize;
continue;
}
+ } else if (MEM_read32(match) != MEM_read32(ip)) {
+ /* it's not a match, and we're not going to check the dictionary */
+ assert(stepSize >= 1);
+ ip += ((ip-anchor) >> kSearchStrength) + stepSize;
+ continue;
+ } else {
+ /* found a regular match */
+ U32 const offset = (U32)(ip-match);
mLength = ZSTD_count(ip+4, match+4, iend) + 4;
- { U32 const offset = (U32)(ip-match);
- while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
- offset_2 = offset_1;
- offset_1 = offset;
- ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
- } }
+ while (((ip>anchor) & (match>prefixStart))
+ && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+ }
/* match found */
ip += mLength;
@@ -97,21 +180,46 @@ size_t ZSTD_compressBlock_fast_generic(
if (ip <= ilimit) {
/* Fill Table */
+ assert(base+current+2 > istart); /* check base overflow */
hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2; /* here because current+2 could be > iend-8 */
hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
+
/* check immediate repcode */
- while ( (ip <= ilimit)
- && ( (offset_2>0)
- & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
- /* store sequence */
- size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
- { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
- hashTable[ZSTD_hashPtr(ip, hlog, mls)] = (U32)(ip-base);
- ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
- ip += rLength;
- anchor = ip;
- continue; /* faster when present ... (?) */
- } } }
+ if (dictMode == ZSTD_dictMatchState) {
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex2 = current2 - offset_2;
+ const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
+ dictBase - dictIndexDelta + repIndex2 :
+ base + repIndex2;
+ if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
+ size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
+ U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
+ hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
+ ip += repLength2;
+ anchor = ip;
+ continue;
+ }
+ break;
+ }
+ }
+
+ if (dictMode == ZSTD_noDict) {
+ while ( (ip <= ilimit)
+ && ( (offset_2>0)
+ & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
+ /* store sequence */
+ size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+ U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */
+ hashTable[ZSTD_hashPtr(ip, hlog, mls)] = (U32)(ip-base);
+ ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
+ ip += rLength;
+ anchor = ip;
+ continue; /* faster when present ... (?) */
+ } } } }
/* save reps for next block */
rep[0] = offset_1 ? offset_1 : offsetSaved;
@@ -124,42 +232,66 @@ size_t ZSTD_compressBlock_fast_generic(
size_t ZSTD_compressBlock_fast(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+ void const* src, size_t srcSize)
{
- U32 const hlog = cParams->hashLog;
- U32 const mls = cParams->searchLength;
- U32 const stepSize = cParams->targetLength;
+ ZSTD_compressionParameters const* cParams = &ms->cParams;
+ U32 const mls = cParams->minMatch;
+ assert(ms->dictMatchState == NULL);
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_noDict);
+ case 5 :
+ return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_noDict);
+ case 6 :
+ return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_noDict);
+ case 7 :
+ return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_noDict);
+ }
+}
+
+size_t ZSTD_compressBlock_fast_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ ZSTD_compressionParameters const* cParams = &ms->cParams;
+ U32 const mls = cParams->minMatch;
+ assert(ms->dictMatchState != NULL);
switch(mls)
{
default: /* includes case 3 */
case 4 :
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 4);
+ return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_dictMatchState);
case 5 :
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 5);
+ return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_dictMatchState);
case 6 :
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 6);
+ return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_dictMatchState);
case 7 :
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 7);
+ return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_dictMatchState);
}
}
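
The dictMatchState path above translates a rep index that falls below prefixStartIndex into the dictionary buffer by subtracting dictIndexDelta, and the (U32)((prefixStartIndex-1) - repIndex) >= 3 underflow trick rejects rep matches whose 4-byte read would straddle the dictionary/prefix seam. A tiny numeric sketch with invented values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t const prefixStartIndex = 1000;   /* first index of the current prefix   */
    uint32_t const dictContentSize  = 1000;   /* (dictEnd - dictBase) in this sketch */
    uint32_t const dictIndexDelta   = prefixStartIndex - dictContentSize;

    /* a rep index below prefixStartIndex is redirected into the dictionary buffer */
    uint32_t const repIndex = 995;
    if (repIndex < prefixStartIndex)
        printf("read match at dictBase + %u\n", (unsigned)(repIndex - dictIndexDelta));

    /* the underflow guard: the 4-byte read must lie entirely on one side of the seam */
    printf("repIndex=995  accepted: %d\n", (uint32_t)((prefixStartIndex-1) - 995)  >= 3); /* 1 */
    printf("repIndex=997  accepted: %d\n", (uint32_t)((prefixStartIndex-1) - 997)  >= 3); /* 0: bytes 997..1000 cross the boundary */
    printf("repIndex=1005 accepted: %d\n", (uint32_t)((prefixStartIndex-1) - 1005) >= 3); /* 1: wraps around, match is fully in the prefix */
    return 0;
}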
static size_t ZSTD_compressBlock_fast_extDict_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- void const* src, size_t srcSize,
- U32 const hlog, U32 const stepSize, U32 const mls)
+ void const* src, size_t srcSize, U32 const mls)
{
- U32* hashTable = ms->hashTable;
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hlog = cParams->hashLog;
+ /* support stepSize of 0 */
+ U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
const BYTE* const base = ms->window.base;
const BYTE* const dictBase = ms->window.dictBase;
const BYTE* const istart = (const BYTE*)src;
const BYTE* ip = istart;
const BYTE* anchor = istart;
- const U32 lowestIndex = ms->window.lowLimit;
- const BYTE* const dictStart = dictBase + lowestIndex;
- const U32 dictLimit = ms->window.dictLimit;
- const BYTE* const lowPrefixPtr = base + dictLimit;
- const BYTE* const dictEnd = dictBase + dictLimit;
+ const U32 dictStartIndex = ms->window.lowLimit;
+ const BYTE* const dictStart = dictBase + dictStartIndex;
+ const U32 prefixStartIndex = ms->window.dictLimit;
+ const BYTE* const prefixStart = base + prefixStartIndex;
+ const BYTE* const dictEnd = dictBase + prefixStartIndex;
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - 8;
U32 offset_1=rep[0], offset_2=rep[1];
@@ -167,33 +299,34 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
/* Search Loop */
while (ip < ilimit) { /* < instead of <=, because (ip+1) */
const size_t h = ZSTD_hashPtr(ip, hlog, mls);
- const U32 matchIndex = hashTable[h];
- const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base;
- const BYTE* match = matchBase + matchIndex;
- const U32 current = (U32)(ip-base);
- const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */
- const BYTE* repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE* repMatch = repBase + repIndex;
+ const U32 matchIndex = hashTable[h];
+ const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
+ const BYTE* match = matchBase + matchIndex;
+ const U32 current = (U32)(ip-base);
+ const U32 repIndex = current + 1 - offset_1;
+ const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
size_t mLength;
hashTable[h] = current; /* update hash table */
+ assert(offset_1 <= current +1); /* check repIndex */
- if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex))
+ if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex))
&& (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
- const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
- mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4;
+ const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+ mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
ip++;
ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
} else {
- if ( (matchIndex < lowestIndex) ||
+ if ( (matchIndex < dictStartIndex) ||
(MEM_read32(match) != MEM_read32(ip)) ) {
assert(stepSize >= 1);
ip += ((ip-anchor) >> kSearchStrength) + stepSize;
continue;
}
- { const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
- const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
+ { const BYTE* matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
+ const BYTE* lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
U32 offset;
- mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4;
+ mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
offset = current - matchIndex;
offset_2 = offset_1;
@@ -213,11 +346,11 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
while (ip <= ilimit) {
U32 const current2 = (U32)(ip-base);
U32 const repIndex2 = current2 - offset_2;
- const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
- if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */
+ const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
+ if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (repIndex2 > dictStartIndex)) /* intentional overflow */
&& (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
- const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
- size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4;
+ const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
+ size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
@@ -239,21 +372,20 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
size_t ZSTD_compressBlock_fast_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+ void const* src, size_t srcSize)
{
- U32 const hlog = cParams->hashLog;
- U32 const mls = cParams->searchLength;
- U32 const stepSize = cParams->targetLength;
+ ZSTD_compressionParameters const* cParams = &ms->cParams;
+ U32 const mls = cParams->minMatch;
switch(mls)
{
default: /* includes case 3 */
case 4 :
- return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 4);
+ return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
case 5 :
- return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 5);
+ return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
case 6 :
- return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 6);
+ return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
case 7 :
- return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 7);
+ return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
}
}
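
Both fast searchers above clamp stepSize with targetLength + !(targetLength) so a zero targetLength still advances by one, and they accelerate through unmatched data with ip += ((ip-anchor) >> kSearchStrength) + stepSize: the farther the search has gone without a match, the larger the stride. A small sketch of that skip schedule; kSearchStrength = 8 is an assumption of this sketch:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    size_t const kSearchStrength = 8;          /* assumed value for this sketch */
    unsigned const targetLength  = 0;
    unsigned const stepSize = targetLength + !targetLength;   /* clamp 0 -> 1 */
    size_t anchor = 0, ip = 0, probes = 0;

    while (ip < 4096) {                        /* pretend every probe misses */
        ip += ((ip - anchor) >> kSearchStrength) + stepSize;
        probes++;
    }
    printf("probes needed to skip 4096 incompressible bytes: %zu\n", probes);
    return 0;
}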
diff --git a/thirdparty/zstd/compress/zstd_fast.h b/thirdparty/zstd/compress/zstd_fast.h
index f0438ad5b4..b74a88c57c 100644
--- a/thirdparty/zstd/compress/zstd_fast.h
+++ b/thirdparty/zstd/compress/zstd_fast.h
@@ -19,14 +19,16 @@ extern "C" {
#include "zstd_compress_internal.h"
void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
- ZSTD_compressionParameters const* cParams,
- void const* end);
+ void const* end, ZSTD_dictTableLoadMethod_e dtlm);
size_t ZSTD_compressBlock_fast(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_fast_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
size_t ZSTD_compressBlock_fast_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+ void const* src, size_t srcSize);
#if defined (__cplusplus)
}
diff --git a/thirdparty/zstd/compress/zstd_lazy.c b/thirdparty/zstd/compress/zstd_lazy.c
index 9f158123f0..53f998a437 100644
--- a/thirdparty/zstd/compress/zstd_lazy.c
+++ b/thirdparty/zstd/compress/zstd_lazy.c
@@ -16,11 +16,12 @@
* Binary Tree search
***************************************/
-void ZSTD_updateDUBT(
- ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+static void
+ZSTD_updateDUBT(ZSTD_matchState_t* ms,
const BYTE* ip, const BYTE* iend,
U32 mls)
{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const hashTable = ms->hashTable;
U32 const hashLog = cParams->hashLog;
@@ -59,14 +60,16 @@ void ZSTD_updateDUBT(
* sort one already inserted but unsorted position
* assumption : current >= btlow == (current - btmask)
* doesn't fail */
-static void ZSTD_insertDUBT1(
- ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+static void
+ZSTD_insertDUBT1(ZSTD_matchState_t* ms,
U32 current, const BYTE* inputEnd,
- U32 nbCompares, U32 btLow, int extDict)
+ U32 nbCompares, U32 btLow,
+ const ZSTD_dictMode_e dictMode)
{
- U32* const bt = ms->chainTable;
- U32 const btLog = cParams->chainLog - 1;
- U32 const btMask = (1 << btLog) - 1;
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const bt = ms->chainTable;
+ U32 const btLog = cParams->chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
size_t commonLengthSmaller=0, commonLengthLarger=0;
const BYTE* const base = ms->window.base;
const BYTE* const dictBase = ms->window.dictBase;
@@ -78,7 +81,7 @@ static void ZSTD_insertDUBT1(
const BYTE* match;
U32* smallerPtr = bt + 2*(current&btMask);
U32* largerPtr = smallerPtr + 1;
- U32 matchIndex = *smallerPtr;
+ U32 matchIndex = *smallerPtr; /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */
U32 dummy32; /* to be nullified at the end */
U32 const windowLow = ms->window.lowLimit;
@@ -91,11 +94,16 @@ static void ZSTD_insertDUBT1(
U32* const nextPtr = bt + 2*(matchIndex & btMask);
size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
assert(matchIndex < current);
+ /* note : all candidates are now supposed sorted,
+ * but it's still possible to have nextPtr[1] == ZSTD_DUBT_UNSORTED_MARK
+ * when a real index has the same value as ZSTD_DUBT_UNSORTED_MARK */
- if ( (!extDict)
+ if ( (dictMode != ZSTD_extDict)
|| (matchIndex+matchLength >= dictLimit) /* both in current segment*/
|| (current < dictLimit) /* both in extDict */) {
- const BYTE* const mBase = !extDict || ((matchIndex+matchLength) >= dictLimit) ? base : dictBase;
+ const BYTE* const mBase = ( (dictMode != ZSTD_extDict)
+ || (matchIndex+matchLength >= dictLimit)) ?
+ base : dictBase;
assert( (matchIndex+matchLength >= dictLimit) /* might be wrong if extDict is incorrectly set to 0 */
|| (current < dictLimit) );
match = mBase + matchIndex;
@@ -104,7 +112,7 @@ static void ZSTD_insertDUBT1(
match = dictBase + matchIndex;
matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
if (matchIndex+matchLength >= dictLimit)
- match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
+ match = base + matchIndex; /* preparation for next read of match[matchLength] */
}
DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ",
@@ -138,13 +146,92 @@ static void ZSTD_insertDUBT1(
}
-static size_t ZSTD_DUBT_findBestMatch (
- ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
- const BYTE* const ip, const BYTE* const iend,
- size_t* offsetPtr,
- U32 const mls,
- U32 const extDict)
+static size_t
+ZSTD_DUBT_findBetterDictMatch (
+ ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iend,
+ size_t* offsetPtr,
+ size_t bestLength,
+ U32 nbCompares,
+ U32 const mls,
+ const ZSTD_dictMode_e dictMode)
+{
+ const ZSTD_matchState_t * const dms = ms->dictMatchState;
+ const ZSTD_compressionParameters* const dmsCParams = &dms->cParams;
+ const U32 * const dictHashTable = dms->hashTable;
+ U32 const hashLog = dmsCParams->hashLog;
+ size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
+ U32 dictMatchIndex = dictHashTable[h];
+
+ const BYTE* const base = ms->window.base;
+ const BYTE* const prefixStart = base + ms->window.dictLimit;
+ U32 const current = (U32)(ip-base);
+ const BYTE* const dictBase = dms->window.base;
+ const BYTE* const dictEnd = dms->window.nextSrc;
+ U32 const dictHighLimit = (U32)(dms->window.nextSrc - dms->window.base);
+ U32 const dictLowLimit = dms->window.lowLimit;
+ U32 const dictIndexDelta = ms->window.lowLimit - dictHighLimit;
+
+ U32* const dictBt = dms->chainTable;
+ U32 const btLog = dmsCParams->chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
+ U32 const btLow = (btMask >= dictHighLimit - dictLowLimit) ? dictLowLimit : dictHighLimit - btMask;
+
+ size_t commonLengthSmaller=0, commonLengthLarger=0;
+
+ (void)dictMode;
+ assert(dictMode == ZSTD_dictMatchState);
+
+ while (nbCompares-- && (dictMatchIndex > dictLowLimit)) {
+ U32* const nextPtr = dictBt + 2*(dictMatchIndex & btMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ const BYTE* match = dictBase + dictMatchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+ if (dictMatchIndex+matchLength >= dictHighLimit)
+ match = base + dictMatchIndex + dictIndexDelta; /* to prepare for next usage of match[matchLength] */
+
+ if (matchLength > bestLength) {
+ U32 matchIndex = dictMatchIndex + dictIndexDelta;
+ if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) {
+ DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)",
+ current, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, ZSTD_REP_MOVE + current - matchIndex, dictMatchIndex, matchIndex);
+ bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
+ }
+ if (ip+matchLength == iend) { /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */
+ break; /* drop, to guarantee consistency (miss a little bit of compression) */
+ }
+ }
+
+ if (match[matchLength] < ip[matchLength]) {
+ if (dictMatchIndex <= btLow) { break; } /* beyond tree size, stop the search */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ dictMatchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
+ } else {
+ /* match is larger than current */
+ if (dictMatchIndex <= btLow) { break; } /* beyond tree size, stop the search */
+ commonLengthLarger = matchLength;
+ dictMatchIndex = nextPtr[0];
+ }
+ }
+
+ if (bestLength >= MINMATCH) {
+ U32 const mIndex = current - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
+ DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
+ current, (U32)bestLength, (U32)*offsetPtr, mIndex);
+ }
+ return bestLength;
+
+}
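
ZSTD_DUBT_findBetterDictMatch above only adopts a dictionary candidate when the extra match length outweighs the extra offset cost, weighting one byte of length as four "points" against the growth in the offset's log2 bit-cost. A standalone sketch of that comparison with invented values (a simplification of the test, not zstd's code):

#include <stdio.h>
#include <stdint.h>

static unsigned highbit32(uint32_t v)   /* position of the highest set bit */
{
    unsigned n = 0;
    while (v >>= 1) n++;
    return n;
}

static int better(uint32_t newLen, uint32_t bestLen,
                  uint32_t newOffset, uint32_t bestOffset)
{
    return (4 * (int)(newLen - bestLen))
         > (int)(highbit32(newOffset + 1) - highbit32(bestOffset + 1));
}

int main(void)
{
    /* 1 extra byte of match vs. ~10 extra bits of offset cost: rejected */
    printf("%d\n", better(18, 17, 1u << 16, 1u << 6));
    /* 4 extra bytes easily pay for the same offset growth: accepted */
    printf("%d\n", better(21, 17, 1u << 16, 1u << 6));
    return 0;
}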
+
+
+static size_t
+ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iend,
+ size_t* offsetPtr,
+ U32 const mls,
+ const ZSTD_dictMode_e dictMode)
{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const hashTable = ms->hashTable;
U32 const hashLog = cParams->hashLog;
size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
@@ -175,7 +262,7 @@ static size_t ZSTD_DUBT_findBestMatch (
&& (nbCandidates > 1) ) {
DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted",
matchIndex);
- *unsortedMark = previousCandidate;
+ *unsortedMark = previousCandidate; /* the unsortedMark becomes a reversed chain, to move up back to original position */
previousCandidate = matchIndex;
matchIndex = *nextCandidate;
nextCandidate = bt + 2*(matchIndex&btMask);
@@ -183,11 +270,13 @@ static size_t ZSTD_DUBT_findBestMatch (
nbCandidates --;
}
+ /* nullify last candidate if it's still unsorted
+ * simplification, detrimental to compression ratio, beneficial for speed */
if ( (matchIndex > unsortLimit)
&& (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) {
DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u",
matchIndex);
- *nextCandidate = *unsortedMark = 0; /* nullify next candidate if it's still unsorted (note : simplification, detrimental to compression ratio, beneficial for speed) */
+ *nextCandidate = *unsortedMark = 0;
}
/* batch sort stacked candidates */
@@ -195,21 +284,21 @@ static size_t ZSTD_DUBT_findBestMatch (
while (matchIndex) { /* will end on matchIndex == 0 */
U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1;
U32 const nextCandidateIdx = *nextCandidateIdxPtr;
- ZSTD_insertDUBT1(ms, cParams, matchIndex, iend,
- nbCandidates, unsortLimit, extDict);
+ ZSTD_insertDUBT1(ms, matchIndex, iend,
+ nbCandidates, unsortLimit, dictMode);
matchIndex = nextCandidateIdx;
nbCandidates++;
}
/* find longest match */
- { size_t commonLengthSmaller=0, commonLengthLarger=0;
+ { size_t commonLengthSmaller = 0, commonLengthLarger = 0;
const BYTE* const dictBase = ms->window.dictBase;
const U32 dictLimit = ms->window.dictLimit;
const BYTE* const dictEnd = dictBase + dictLimit;
const BYTE* const prefixStart = base + dictLimit;
U32* smallerPtr = bt + 2*(current&btMask);
U32* largerPtr = bt + 2*(current&btMask) + 1;
- U32 matchEndIdx = current+8+1;
+ U32 matchEndIdx = current + 8 + 1;
U32 dummy32; /* to be nullified at the end */
size_t bestLength = 0;
@@ -221,7 +310,7 @@ static size_t ZSTD_DUBT_findBestMatch (
size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
const BYTE* match;
- if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
+ if ((dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit)) {
match = base + matchIndex;
matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
} else {
@@ -237,6 +326,11 @@ static size_t ZSTD_DUBT_findBestMatch (
if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
+ if (dictMode == ZSTD_dictMatchState) {
+ nbCompares = 0; /* in addition to avoiding checking any
+ * further in this loop, make sure we
+ * skip checking in the dictionary. */
+ }
break; /* drop, to guarantee consistency (miss a little bit of compression) */
}
}
@@ -259,6 +353,13 @@ static size_t ZSTD_DUBT_findBestMatch (
*smallerPtr = *largerPtr = 0;
+ if (dictMode == ZSTD_dictMatchState && nbCompares) {
+ bestLength = ZSTD_DUBT_findBetterDictMatch(
+ ms, ip, iend,
+ offsetPtr, bestLength, nbCompares,
+ mls, dictMode);
+ }
+
assert(matchEndIdx > current+8); /* ensure nextToUpdate is increased */
ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */
if (bestLength >= MINMATCH) {
@@ -272,61 +373,64 @@ static size_t ZSTD_DUBT_findBestMatch (
/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
-static size_t ZSTD_BtFindBestMatch (
- ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
- const BYTE* const ip, const BYTE* const iLimit,
- size_t* offsetPtr,
- const U32 mls /* template */)
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 mls /* template */,
+ const ZSTD_dictMode_e dictMode)
{
DEBUGLOG(7, "ZSTD_BtFindBestMatch");
if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */
- ZSTD_updateDUBT(ms, cParams, ip, iLimit, mls);
- return ZSTD_DUBT_findBestMatch(ms, cParams, ip, iLimit, offsetPtr, mls, 0);
+ ZSTD_updateDUBT(ms, ip, iLimit, mls);
+ return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode);
}
-static size_t ZSTD_BtFindBestMatch_selectMLS (
- ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
- const BYTE* ip, const BYTE* const iLimit,
- size_t* offsetPtr)
+static size_t
+ZSTD_BtFindBestMatch_selectMLS ( ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr)
{
- switch(cParams->searchLength)
+ switch(ms->cParams.minMatch)
{
default : /* includes case 3 */
- case 4 : return ZSTD_BtFindBestMatch(ms, cParams, ip, iLimit, offsetPtr, 4);
- case 5 : return ZSTD_BtFindBestMatch(ms, cParams, ip, iLimit, offsetPtr, 5);
+ case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
+ case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
case 7 :
- case 6 : return ZSTD_BtFindBestMatch(ms, cParams, ip, iLimit, offsetPtr, 6);
+ case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
}
}
-/** Tree updater, providing best match */
-static size_t ZSTD_BtFindBestMatch_extDict (
- ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
- const BYTE* const ip, const BYTE* const iLimit,
- size_t* offsetPtr,
- const U32 mls)
+static size_t ZSTD_BtFindBestMatch_dictMatchState_selectMLS (
+ ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr)
{
- DEBUGLOG(7, "ZSTD_BtFindBestMatch_extDict");
- if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */
- ZSTD_updateDUBT(ms, cParams, ip, iLimit, mls);
- return ZSTD_DUBT_findBestMatch(ms, cParams, ip, iLimit, offsetPtr, mls, 1);
+ switch(ms->cParams.minMatch)
+ {
+ default : /* includes case 3 */
+ case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
+ case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
+ case 7 :
+ case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
+ }
}
-static size_t ZSTD_BtFindBestMatch_selectMLS_extDict (
- ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+static size_t ZSTD_BtFindBestMatch_extDict_selectMLS (
+ ZSTD_matchState_t* ms,
const BYTE* ip, const BYTE* const iLimit,
size_t* offsetPtr)
{
- switch(cParams->searchLength)
+ switch(ms->cParams.minMatch)
{
default : /* includes case 3 */
- case 4 : return ZSTD_BtFindBestMatch_extDict(ms, cParams, ip, iLimit, offsetPtr, 4);
- case 5 : return ZSTD_BtFindBestMatch_extDict(ms, cParams, ip, iLimit, offsetPtr, 5);
+ case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
+ case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
case 7 :
- case 6 : return ZSTD_BtFindBestMatch_extDict(ms, cParams, ip, iLimit, offsetPtr, 6);
+ case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
}
}
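
The DUBT searchers keep, for every tracked position, two consecutive U32 slots at bt[2*(pos & btMask)]. On insertion a position is only chained to the previous bucket head and tagged unsorted (ZSTD_DUBT_UNSORTED_MARK, i.e. index 1 is sacrificed as the marker); placing it in the sorted tree is deferred until a search actually reaches it. A minimal sketch of just that reservation step; the table sizes are this sketch's assumptions:

#include <stdint.h>

#define BT_LOG        12
#define BT_MASK       ((1u << BT_LOG) - 1)
#define UNSORTED_MARK 1u   /* index 1 is sacrificed as the marker, as in zstd */

static uint32_t bt[2u << BT_LOG];        /* two link slots per tracked position */

static void dubt_reserve(uint32_t* hashTable, uint32_t h, uint32_t pos)
{
    uint32_t* const node = bt + 2 * (pos & BT_MASK);
    node[0] = hashTable[h];      /* remember the previous head (candidate list) */
    node[1] = UNSORTED_MARK;     /* flag: not yet sorted into the tree          */
    hashTable[h] = pos;          /* this position becomes the bucket's head     */
}

int main(void)
{
    static uint32_t hashTable[1u << 10];
    dubt_reserve(hashTable, 42, 1000);
    dubt_reserve(hashTable, 42, 2000);   /* 2000 heads bucket 42, links back to 1000 */
    return (int)(bt[2 * (2000 & BT_MASK)] == 1000) - 1;   /* 0 on success */
}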
@@ -335,12 +439,13 @@ static size_t ZSTD_BtFindBestMatch_selectMLS_extDict (
/* *********************************
* Hash Chain
***********************************/
-#define NEXT_IN_CHAIN(d, mask) chainTable[(d) & mask]
+#define NEXT_IN_CHAIN(d, mask) chainTable[(d) & (mask)]
/* Update chains up to ip (excluded)
Assumption : always within prefix (i.e. not within extDict) */
static U32 ZSTD_insertAndFindFirstIndex_internal(
- ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+ ZSTD_matchState_t* ms,
+ const ZSTD_compressionParameters* const cParams,
const BYTE* ip, U32 const mls)
{
U32* const hashTable = ms->hashTable;
@@ -362,22 +467,21 @@ static U32 ZSTD_insertAndFindFirstIndex_internal(
return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
}
-U32 ZSTD_insertAndFindFirstIndex(
- ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
- const BYTE* ip)
-{
- return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, cParams->searchLength);
+U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch);
}
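
The hash-chain structure updated by ZSTD_insertAndFindFirstIndex works as follows: hashTable[h] holds the newest position for bucket h, and chainTable[pos & chainMask] holds the previous position that hashed to the same bucket, so a search walks candidates from newest to oldest. A self-contained sketch with a toy hash (the hash and table sizes are invented):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define HC_HASH_LOG   10
#define HC_CHAIN_LOG  12
#define HC_CHAIN_MASK ((1u << HC_CHAIN_LOG) - 1)

static uint32_t hashTable[1u << HC_HASH_LOG];    /* bucket -> newest position     */
static uint32_t chainTable[1u << HC_CHAIN_LOG];  /* position -> previous position */

static uint32_t toy_hash(const uint8_t* p)
{
    uint32_t v; memcpy(&v, p, 4);
    return (v * 2654435761u) >> (32 - HC_HASH_LOG);
}

static void insert_position(const uint8_t* base, uint32_t pos)
{
    uint32_t const h = toy_hash(base + pos);
    chainTable[pos & HC_CHAIN_MASK] = hashTable[h];   /* link to the previous head */
    hashTable[h] = pos;                               /* new head of the chain     */
}

static void walk_chain(const uint8_t* base, uint32_t pos, unsigned maxAttempts)
{
    uint32_t m = hashTable[toy_hash(base + pos)];
    while (m != 0 && maxAttempts--) {                 /* 0 is reserved for "empty" */
        printf("candidate at position %u\n", m);
        m = chainTable[m & HC_CHAIN_MASK];            /* next (older) candidate    */
    }
}

int main(void)
{
    static const uint8_t data[] = "abcdabcdabcdabcd";
    uint32_t pos;
    for (pos = 1; pos + 4 < sizeof(data); pos++)      /* skip pos 0, reserved above */
        insert_position(data, pos);
    walk_chain(data, 12, 8);                          /* expected: 12, 8, 4 (all "abcd") */
    return 0;
}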
/* inlining is important to hardwire a hot branch (template emulation) */
FORCE_INLINE_TEMPLATE
size_t ZSTD_HcFindBestMatch_generic (
- ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+ ZSTD_matchState_t* ms,
const BYTE* const ip, const BYTE* const iLimit,
size_t* offsetPtr,
- const U32 mls, const U32 extDict)
+ const U32 mls, const ZSTD_dictMode_e dictMode)
{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const chainTable = ms->chainTable;
const U32 chainSize = (1 << cParams->chainLog);
const U32 chainMask = chainSize-1;
@@ -397,8 +501,9 @@ size_t ZSTD_HcFindBestMatch_generic (
for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) {
size_t currentMl=0;
- if ((!extDict) || matchIndex >= dictLimit) {
+ if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
const BYTE* const match = base + matchIndex;
+ assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */
if (match[ml] == ip[ml]) /* potentially better */
currentMl = ZSTD_count(ip, match, iLimit);
} else {
@@ -419,38 +524,87 @@ size_t ZSTD_HcFindBestMatch_generic (
matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
}
+ if (dictMode == ZSTD_dictMatchState) {
+ const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const U32* const dmsChainTable = dms->chainTable;
+ const U32 dmsChainSize = (1 << dms->cParams.chainLog);
+ const U32 dmsChainMask = dmsChainSize - 1;
+ const U32 dmsLowestIndex = dms->window.dictLimit;
+ const BYTE* const dmsBase = dms->window.base;
+ const BYTE* const dmsEnd = dms->window.nextSrc;
+ const U32 dmsSize = (U32)(dmsEnd - dmsBase);
+ const U32 dmsIndexDelta = dictLimit - dmsSize;
+ const U32 dmsMinChain = dmsSize > dmsChainSize ? dmsSize - dmsChainSize : 0;
+
+ matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)];
+
+ for ( ; (matchIndex>dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) {
+ size_t currentMl=0;
+ const BYTE* const match = dmsBase + matchIndex;
+ assert(match+4 <= dmsEnd);
+ if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;
+
+ /* save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
+ *offsetPtr = current - (matchIndex + dmsIndexDelta) + ZSTD_REP_MOVE;
+ if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
+ }
+
+ if (matchIndex <= dmsMinChain) break;
+ matchIndex = dmsChainTable[matchIndex & dmsChainMask];
+ }
+ }
+
return ml;
}
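
ZSTD_count_2segments, used throughout the extDict and dictMatchState paths above, counts matching bytes against the dictionary segment up to its end and, if that segment is exhausted while still matching, continues the comparison from the start of the prefix as though the two segments were contiguous. A simplified reimplementation sketch (not zstd's code) with a small worked example:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static size_t count_2segments(const uint8_t* ip, const uint8_t* match,
                              const uint8_t* iEnd, const uint8_t* mEnd,
                              const uint8_t* iStart)
{
    size_t const seg   = (size_t)(mEnd - match);
    size_t const room  = (size_t)(iEnd - ip);
    size_t const limit = seg < room ? seg : room;
    size_t matched = 0;
    while (matched < limit && ip[matched] == match[matched]) matched++;
    if (match + matched != mEnd) return matched;     /* mismatch inside the dict segment */
    /* dict segment fully matched: continue as if the prefix followed it directly */
    {   size_t extra = 0;
        while (ip + matched + extra < iEnd && ip[matched + extra] == iStart[extra]) extra++;
        return matched + extra;
    }
}

int main(void)
{
    const uint8_t dictSeg[] = "abcab";   /* pretend this is the tail of the dictionary */
    const uint8_t prefix[]  = "abxy";    /* current segment, starts right after dictEnd */
    const uint8_t input[]   = "cababz";  /* data being compressed */
    /* match starts 2 bytes before dictEnd ("ab"), then continues with "ab" at prefix start */
    size_t const len = count_2segments(input + 1, dictSeg + 3,
                                       input + sizeof(input) - 1,
                                       dictSeg + sizeof(dictSeg) - 1,
                                       prefix);
    printf("matched %zu bytes across the segment boundary\n", len);   /* 4 */
    return 0;
}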
FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS (
- ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+ ZSTD_matchState_t* ms,
const BYTE* ip, const BYTE* const iLimit,
size_t* offsetPtr)
{
- switch(cParams->searchLength)
+ switch(ms->cParams.minMatch)
{
default : /* includes case 3 */
- case 4 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 4, 0);
- case 5 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 5, 0);
+ case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
+ case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
case 7 :
- case 6 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 6, 0);
+ case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
+ }
+}
+
+
+static size_t ZSTD_HcFindBestMatch_dictMatchState_selectMLS (
+ ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr)
+{
+ switch(ms->cParams.minMatch)
+ {
+ default : /* includes case 3 */
+ case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
+ case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
+ case 7 :
+ case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
}
}
FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
- ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+ ZSTD_matchState_t* ms,
const BYTE* ip, const BYTE* const iLimit,
- size_t* const offsetPtr)
+ size_t* offsetPtr)
{
- switch(cParams->searchLength)
+ switch(ms->cParams.minMatch)
{
default : /* includes case 3 */
- case 4 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 4, 1);
- case 5 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 5, 1);
+ case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
+ case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
case 7 :
- case 6 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 6, 1);
+ case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
}
}
@@ -462,30 +616,55 @@ FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_lazy_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore,
U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams,
const void* src, size_t srcSize,
- const U32 searchMethod, const U32 depth)
+ const U32 searchMethod, const U32 depth,
+ ZSTD_dictMode_e const dictMode)
{
const BYTE* const istart = (const BYTE*)src;
const BYTE* ip = istart;
const BYTE* anchor = istart;
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - 8;
- const BYTE* const base = ms->window.base + ms->window.dictLimit;
+ const BYTE* const base = ms->window.base;
+ const U32 prefixLowestIndex = ms->window.dictLimit;
+ const BYTE* const prefixLowest = base + prefixLowestIndex;
typedef size_t (*searchMax_f)(
- ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+ ZSTD_matchState_t* ms,
const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
- searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
+ searchMax_f const searchMax = dictMode == ZSTD_dictMatchState ?
+ (searchMethod ? ZSTD_BtFindBestMatch_dictMatchState_selectMLS : ZSTD_HcFindBestMatch_dictMatchState_selectMLS) :
+ (searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS);
U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;
+ const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const U32 dictLowestIndex = dictMode == ZSTD_dictMatchState ?
+ dms->window.dictLimit : 0;
+ const BYTE* const dictBase = dictMode == ZSTD_dictMatchState ?
+ dms->window.base : NULL;
+ const BYTE* const dictLowest = dictMode == ZSTD_dictMatchState ?
+ dictBase + dictLowestIndex : NULL;
+ const BYTE* const dictEnd = dictMode == ZSTD_dictMatchState ?
+ dms->window.nextSrc : NULL;
+ const U32 dictIndexDelta = dictMode == ZSTD_dictMatchState ?
+ prefixLowestIndex - (U32)(dictEnd - dictBase) :
+ 0;
+ const U32 dictAndPrefixLength = (U32)(ip - prefixLowest + dictEnd - dictLowest);
+
/* init */
- ip += (ip==base);
+ ip += (dictAndPrefixLength == 0);
ms->nextToUpdate3 = ms->nextToUpdate;
- { U32 const maxRep = (U32)(ip-base);
+ if (dictMode == ZSTD_noDict) {
+ U32 const maxRep = (U32)(ip - prefixLowest);
if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
}
+ if (dictMode == ZSTD_dictMatchState) {
+ /* dictMatchState repCode checks don't currently handle repCode == 0
+ * disabling. */
+ assert(offset_1 <= dictAndPrefixLength);
+ assert(offset_2 <= dictAndPrefixLength);
+ }
/* Match Loop */
while (ip < ilimit) {
@@ -494,15 +673,28 @@ size_t ZSTD_compressBlock_lazy_generic(
const BYTE* start=ip+1;
/* check repCode */
- if ((offset_1>0) & (MEM_read32(ip+1) == MEM_read32(ip+1 - offset_1))) {
- /* repcode : we take it */
+ if (dictMode == ZSTD_dictMatchState) {
+ const U32 repIndex = (U32)(ip - base) + 1 - offset_1;
+ const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
+ && repIndex < prefixLowestIndex) ?
+ dictBase + (repIndex - dictIndexDelta) :
+ base + repIndex;
+ if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
+ matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
+ if (depth==0) goto _storeSequence;
+ }
+ }
+ if ( dictMode == ZSTD_noDict
+ && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
if (depth==0) goto _storeSequence;
}
/* first search (depth 0) */
- { size_t offsetFound = 99999999;
- size_t const ml2 = searchMax(ms, cParams, ip, iend, &offsetFound);
+ { size_t offsetFound = 999999999;
+ size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
if (ml2 > matchLength)
matchLength = ml2, start = ip, offset=offsetFound;
}
@@ -516,15 +708,31 @@ size_t ZSTD_compressBlock_lazy_generic(
if (depth>=1)
while (ip<ilimit) {
ip ++;
- if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+ if ( (dictMode == ZSTD_noDict)
+ && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
int const gain2 = (int)(mlRep * 3);
int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
if ((mlRep >= 4) && (gain2 > gain1))
matchLength = mlRep, offset = 0, start = ip;
}
- { size_t offset2=99999999;
- size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
+ if (dictMode == ZSTD_dictMatchState) {
+ const U32 repIndex = (U32)(ip - base) - offset_1;
+ const BYTE* repMatch = repIndex < prefixLowestIndex ?
+ dictBase + (repIndex - dictIndexDelta) :
+ base + repIndex;
+ if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
+ const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
+ size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
+ int const gain2 = (int)(mlRep * 3);
+ int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
+ if ((mlRep >= 4) && (gain2 > gain1))
+ matchLength = mlRep, offset = 0, start = ip;
+ }
+ }
+ { size_t offset2=999999999;
+ size_t const ml2 = searchMax(ms, ip, iend, &offset2);
int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
if ((ml2 >= 4) && (gain2 > gain1)) {
@@ -535,15 +743,31 @@ size_t ZSTD_compressBlock_lazy_generic(
/* let's find an even better one */
if ((depth==2) && (ip<ilimit)) {
ip ++;
- if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
- size_t const ml2 = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
- int const gain2 = (int)(ml2 * 4);
+ if ( (dictMode == ZSTD_noDict)
+ && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+ size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
+ int const gain2 = (int)(mlRep * 4);
int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
- if ((ml2 >= 4) && (gain2 > gain1))
- matchLength = ml2, offset = 0, start = ip;
+ if ((mlRep >= 4) && (gain2 > gain1))
+ matchLength = mlRep, offset = 0, start = ip;
}
- { size_t offset2=99999999;
- size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
+ if (dictMode == ZSTD_dictMatchState) {
+ const U32 repIndex = (U32)(ip - base) - offset_1;
+ const BYTE* repMatch = repIndex < prefixLowestIndex ?
+ dictBase + (repIndex - dictIndexDelta) :
+ base + repIndex;
+ if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
+ const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
+ size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
+ int const gain2 = (int)(mlRep * 4);
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
+ if ((mlRep >= 4) && (gain2 > gain1))
+ matchLength = mlRep, offset = 0, start = ip;
+ }
+ }
+ { size_t offset2=999999999;
+ size_t const ml2 = searchMax(ms, ip, iend, &offset2);
int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
if ((ml2 >= 4) && (gain2 > gain1)) {
@@ -560,9 +784,17 @@ size_t ZSTD_compressBlock_lazy_generic(
*/
/* catch up */
if (offset) {
- while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > base))
- && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) ) /* only search for offset within prefix */
- { start--; matchLength++; }
+ if (dictMode == ZSTD_noDict) {
+ while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > prefixLowest))
+ && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) ) /* only search for offset within prefix */
+ { start--; matchLength++; }
+ }
+ if (dictMode == ZSTD_dictMatchState) {
+ U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
+ const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex;
+ const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest;
+ while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */
+ }
offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
}
/* store sequence */
@@ -573,16 +805,39 @@ _storeSequence:
}
/* check immediate repcode */
- while ( ((ip <= ilimit) & (offset_2>0))
- && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
- /* store sequence */
- matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
- offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
- ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
- ip += matchLength;
- anchor = ip;
- continue; /* faster when present ... (?) */
- } }
+ if (dictMode == ZSTD_dictMatchState) {
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex = current2 - offset_2;
+ const BYTE* repMatch = dictMode == ZSTD_dictMatchState
+ && repIndex < prefixLowestIndex ?
+ dictBase - dictIndexDelta + repIndex :
+ base + repIndex;
+ if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional overflow */)
+ && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend;
+ matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4;
+ offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
+ ip += matchLength;
+ anchor = ip;
+ continue;
+ }
+ break;
+ }
+ }
+
+ if (dictMode == ZSTD_noDict) {
+ while ( ((ip <= ilimit) & (offset_2>0))
+ && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
+ /* store sequence */
+ matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+ offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
+ ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
+ ip += matchLength;
+ anchor = ip;
+ continue; /* faster when present ... (?) */
+ } } }
/* Save reps for next block */
rep[0] = offset_1 ? offset_1 : savedOffset;
@@ -595,30 +850,58 @@ _storeSequence:
size_t ZSTD_compressBlock_btlazy2(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+ void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 1, 2);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 1, 2, ZSTD_noDict);
}
size_t ZSTD_compressBlock_lazy2(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+ void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 2);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 2, ZSTD_noDict);
}
size_t ZSTD_compressBlock_lazy(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+ void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 1);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 1, ZSTD_noDict);
}
size_t ZSTD_compressBlock_greedy(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 0, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_btlazy2_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 1, 2, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_lazy2_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 2, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_lazy_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 1, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_greedy_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 0);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 0, ZSTD_dictMatchState);
}
@@ -626,7 +909,6 @@ FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_lazy_extDict_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore,
U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams,
const void* src, size_t srcSize,
const U32 searchMethod, const U32 depth)
{
@@ -644,9 +926,9 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
const BYTE* const dictStart = dictBase + lowestIndex;
typedef size_t (*searchMax_f)(
- ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+ ZSTD_matchState_t* ms,
const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
- searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS;
+ searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS;
U32 offset_1 = rep[0], offset_2 = rep[1];
@@ -674,8 +956,8 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
} }
/* first search (depth 0) */
- { size_t offsetFound = 99999999;
- size_t const ml2 = searchMax(ms, cParams, ip, iend, &offsetFound);
+ { size_t offsetFound = 999999999;
+ size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
if (ml2 > matchLength)
matchLength = ml2, start = ip, offset=offsetFound;
}
@@ -707,8 +989,8 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
} }
/* search match, depth 1 */
- { size_t offset2=99999999;
- size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
+ { size_t offset2=999999999;
+ size_t const ml2 = searchMax(ms, ip, iend, &offset2);
int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
if ((ml2 >= 4) && (gain2 > gain1)) {
@@ -737,8 +1019,8 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
} }
/* search match, depth 2 */
- { size_t offset2=99999999;
- size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
+ { size_t offset2=999999999;
+ size_t const ml2 = searchMax(ms, ip, iend, &offset2);
int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
if ((ml2 >= 4) && (gain2 > gain1)) {
@@ -794,31 +1076,31 @@ _storeSequence:
size_t ZSTD_compressBlock_greedy_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+ void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 0);
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 0);
}
size_t ZSTD_compressBlock_lazy_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+ void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 1);
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 1);
}
size_t ZSTD_compressBlock_lazy2_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+ void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 2);
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 2);
}
size_t ZSTD_compressBlock_btlazy2_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+ void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 1, 2);
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 1, 2);
}
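Editor's note: the depth-1/depth-2 tests above keep a longer candidate only when its "gain" (roughly 4 bits saved per matched byte minus the offset's bit cost) beats the match already in hand, which receives a small continuation bonus (+4 at depth 1, +7 at depth 2). A minimal standalone restatement of that rule, not the library code; highbit32() below stands in for ZSTD_highbit32():

    #include <stdio.h>

    /* stand-in for ZSTD_highbit32(): index of the highest set bit */
    static unsigned highbit32(unsigned v) { unsigned n = 0; while (v >>= 1) n++; return n; }

    /* keep the new candidate (ml2, offset2) only if its estimated gain beats
     * the current match (matchLength, offset), which gets a +4 bonus (depth 1) */
    static int prefer_new_match(size_t ml2, size_t offset2,
                                size_t matchLength, size_t offset)
    {
        int const gain2 = (int)(ml2 * 4 - highbit32((unsigned)offset2 + 1));
        int const gain1 = (int)(matchLength * 4 - highbit32((unsigned)offset + 1) + 4);
        return (ml2 >= 4) && (gain2 > gain1);
    }

    int main(void)
    {
        printf("%d\n", prefer_new_match(6, 1000, 5, 8));  /* 0 : 15 <= 21, keep the short near match */
        printf("%d\n", prefer_new_match(9, 1000, 5, 8));  /* 1 : 27 >  21, switch to the longer one */
        return 0;
    }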
diff --git a/thirdparty/zstd/compress/zstd_lazy.h b/thirdparty/zstd/compress/zstd_lazy.h
index bda064f199..ef85a6df9c 100644
--- a/thirdparty/zstd/compress/zstd_lazy.h
+++ b/thirdparty/zstd/compress/zstd_lazy.h
@@ -17,37 +17,48 @@ extern "C" {
#include "zstd_compress_internal.h"
-U32 ZSTD_insertAndFindFirstIndex(
- ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
- const BYTE* ip);
+U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip);
void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue); /*! used in ZSTD_reduceIndex(). pre-emptively increase value of ZSTD_DUBT_UNSORTED_MARK */
size_t ZSTD_compressBlock_btlazy2(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+ void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+ void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+ void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+ void const* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_btlazy2_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+ void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+ void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+ void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btlazy2_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+ void const* src, size_t srcSize);
#if defined (__cplusplus)
}
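Editor's note: the header now exposes one entry point per (strategy, dictionary-mode) pair, all sharing the reduced signature, since compression parameters now travel inside ZSTD_matchState_t. A hypothetical sketch of how a caller could pick among the lazy variants by dictionary mode; the library's own dispatch, ZSTD_selectBlockCompressor(), also keys on cParams->strategy, so this is illustrative only:

    #include "zstd_compress_internal.h"
    #include "zstd_lazy.h"

    typedef size_t (*lazyCompressor_f)(ZSTD_matchState_t* ms, seqStore_t* seqStore,
                                       U32 rep[ZSTD_REP_NUM],
                                       void const* src, size_t srcSize);

    /* illustrative selector: strategy fixed to "lazy", only the dict mode varies */
    static lazyCompressor_f selectLazyCompressor(ZSTD_dictMode_e dictMode)
    {
        switch (dictMode) {
        default:
        case ZSTD_noDict:         return ZSTD_compressBlock_lazy;
        case ZSTD_extDict:        return ZSTD_compressBlock_lazy_extDict;
        case ZSTD_dictMatchState: return ZSTD_compressBlock_lazy_dictMatchState;
        }
    }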
diff --git a/thirdparty/zstd/compress/zstd_ldm.c b/thirdparty/zstd/compress/zstd_ldm.c
index bffd8a3dfa..58eb2ffe4d 100644
--- a/thirdparty/zstd/compress/zstd_ldm.c
+++ b/thirdparty/zstd/compress/zstd_ldm.c
@@ -9,6 +9,7 @@
#include "zstd_ldm.h"
+#include "debug.h"
#include "zstd_fast.h" /* ZSTD_fillHashTable() */
#include "zstd_double_fast.h" /* ZSTD_fillDoubleHashTable() */
@@ -20,7 +21,7 @@
void ZSTD_ldm_adjustParameters(ldmParams_t* params,
ZSTD_compressionParameters const* cParams)
{
- U32 const windowLog = cParams->windowLog;
+ params->windowLog = cParams->windowLog;
ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
@@ -33,12 +34,13 @@ void ZSTD_ldm_adjustParameters(ldmParams_t* params,
params->minMatchLength = minMatch;
}
if (params->hashLog == 0) {
- params->hashLog = MAX(ZSTD_HASHLOG_MIN, windowLog - LDM_HASH_RLOG);
+ params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG);
assert(params->hashLog <= ZSTD_HASHLOG_MAX);
}
- if (params->hashEveryLog == 0) {
- params->hashEveryLog =
- windowLog < params->hashLog ? 0 : windowLog - params->hashLog;
+ if (params->hashRateLog == 0) {
+ params->hashRateLog = params->windowLog < params->hashLog
+ ? 0
+ : params->windowLog - params->hashLog;
}
params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
}
@@ -117,20 +119,20 @@ static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
*
* Gets the small hash, checksum, and tag from the rollingHash.
*
- * If the tag matches (1 << ldmParams.hashEveryLog)-1, then
+ * If the tag matches (1 << ldmParams.hashRateLog)-1, then
* creates an ldmEntry from the offset, and inserts it into the hash table.
*
* hBits is the length of the small hash, which is the most significant hBits
* of rollingHash. The checksum is the next 32 most significant bits, followed
- * by ldmParams.hashEveryLog bits that make up the tag. */
+ * by ldmParams.hashRateLog bits that make up the tag. */
static void ZSTD_ldm_makeEntryAndInsertByTag(ldmState_t* ldmState,
U64 const rollingHash,
U32 const hBits,
U32 const offset,
ldmParams_t const ldmParams)
{
- U32 const tag = ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashEveryLog);
- U32 const tagMask = ((U32)1 << ldmParams.hashEveryLog) - 1;
+ U32 const tag = ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashRateLog);
+ U32 const tagMask = ((U32)1 << ldmParams.hashRateLog) - 1;
if (tag == tagMask) {
U32 const hash = ZSTD_ldm_getSmallHash(rollingHash, hBits);
U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
@@ -141,56 +143,6 @@ static void ZSTD_ldm_makeEntryAndInsertByTag(ldmState_t* ldmState,
}
}
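Editor's note: the comment above splits the rolling hash into a small hash (top hBits), a 32-bit checksum, and a hashRateLog-bit tag, and an entry is stored only when the tag is all-ones. A minimal sketch of that gating decision; the 27/20 figures below are made up for the example, but the default hashRateLog = windowLog - hashLog follows from ZSTD_ldm_adjustParameters() above:

    /* cf. ZSTD_ldm_makeEntryAndInsertByTag(): a position is inserted only when
     * its tag (the hashRateLog bits just below the checksum) is all-ones,
     * i.e. about 1 position in 2^hashRateLog */
    static int ldm_shouldInsert(unsigned tag, unsigned hashRateLog)
    {
        unsigned const tagMask = (1u << hashRateLog) - 1;
        return tag == tagMask;
    }
    /* example: windowLog 27 and hashLog 20 leave the default hashRateLog at
     * 27 - 20 = 7, so roughly one position in 128 enters the LDM table */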
-/** ZSTD_ldm_getRollingHash() :
- * Get a 64-bit hash using the first len bytes from buf.
- *
- * Giving bytes s = s_1, s_2, ... s_k, the hash is defined to be
- * H(s) = s_1*(a^(k-1)) + s_2*(a^(k-2)) + ... + s_k*(a^0)
- *
- * where the constant a is defined to be prime8bytes.
- *
- * The implementation adds an offset to each byte, so
- * H(s) = (s_1 + HASH_CHAR_OFFSET)*(a^(k-1)) + ... */
-static U64 ZSTD_ldm_getRollingHash(const BYTE* buf, U32 len)
-{
- U64 ret = 0;
- U32 i;
- for (i = 0; i < len; i++) {
- ret *= prime8bytes;
- ret += buf[i] + LDM_HASH_CHAR_OFFSET;
- }
- return ret;
-}
-
-/** ZSTD_ldm_ipow() :
- * Return base^exp. */
-static U64 ZSTD_ldm_ipow(U64 base, U64 exp)
-{
- U64 ret = 1;
- while (exp) {
- if (exp & 1) { ret *= base; }
- exp >>= 1;
- base *= base;
- }
- return ret;
-}
-
-U64 ZSTD_ldm_getHashPower(U32 minMatchLength) {
- DEBUGLOG(4, "ZSTD_ldm_getHashPower: mml=%u", minMatchLength);
- assert(minMatchLength >= ZSTD_LDM_MINMATCH_MIN);
- return ZSTD_ldm_ipow(prime8bytes, minMatchLength - 1);
-}
-
-/** ZSTD_ldm_updateHash() :
- * Updates hash by removing toRemove and adding toAdd. */
-static U64 ZSTD_ldm_updateHash(U64 hash, BYTE toRemove, BYTE toAdd, U64 hashPower)
-{
- hash -= ((toRemove + LDM_HASH_CHAR_OFFSET) * hashPower);
- hash *= prime8bytes;
- hash += toAdd + LDM_HASH_CHAR_OFFSET;
- return hash;
-}
-
/** ZSTD_ldm_countBackwardsMatch() :
* Returns the number of bytes that match backwards before pIn and pMatch.
*
@@ -216,21 +168,18 @@ static size_t ZSTD_ldm_countBackwardsMatch(
* The tables for the other strategies are filled within their
* block compressors. */
static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
- ZSTD_compressionParameters const* cParams,
void const* end)
{
const BYTE* const iend = (const BYTE*)end;
- switch(cParams->strategy)
+ switch(ms->cParams.strategy)
{
case ZSTD_fast:
- ZSTD_fillHashTable(ms, cParams, iend);
- ms->nextToUpdate = (U32)(iend - ms->window.base);
+ ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast);
break;
case ZSTD_dfast:
- ZSTD_fillDoubleHashTable(ms, cParams, iend);
- ms->nextToUpdate = (U32)(iend - ms->window.base);
+ ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast);
break;
case ZSTD_greedy:
@@ -239,6 +188,7 @@ static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
case ZSTD_btlazy2:
case ZSTD_btopt:
case ZSTD_btultra:
+ case ZSTD_btultra2:
break;
default:
assert(0); /* not possible : not a valid strategy id */
@@ -262,9 +212,9 @@ static U64 ZSTD_ldm_fillLdmHashTable(ldmState_t* state,
const BYTE* cur = lastHashed + 1;
while (cur < iend) {
- rollingHash = ZSTD_ldm_updateHash(rollingHash, cur[-1],
- cur[ldmParams.minMatchLength-1],
- state->hashPower);
+ rollingHash = ZSTD_rollingHash_rotate(rollingHash, cur[-1],
+ cur[ldmParams.minMatchLength-1],
+ state->hashPower);
ZSTD_ldm_makeEntryAndInsertByTag(state,
rollingHash, hBits,
(U32)(cur - base), ldmParams);
@@ -298,8 +248,8 @@ static size_t ZSTD_ldm_generateSequences_internal(
U64 const hashPower = ldmState->hashPower;
U32 const hBits = params->hashLog - params->bucketSizeLog;
U32 const ldmBucketSize = 1U << params->bucketSizeLog;
- U32 const hashEveryLog = params->hashEveryLog;
- U32 const ldmTagMask = (1U << params->hashEveryLog) - 1;
+ U32 const hashRateLog = params->hashRateLog;
+ U32 const ldmTagMask = (1U << params->hashRateLog) - 1;
/* Prefix and extDict parameters */
U32 const dictLimit = ldmState->window.dictLimit;
U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit;
@@ -325,16 +275,16 @@ static size_t ZSTD_ldm_generateSequences_internal(
size_t forwardMatchLength = 0, backwardMatchLength = 0;
ldmEntry_t* bestEntry = NULL;
if (ip != istart) {
- rollingHash = ZSTD_ldm_updateHash(rollingHash, lastHashed[0],
- lastHashed[minMatchLength],
- hashPower);
+ rollingHash = ZSTD_rollingHash_rotate(rollingHash, lastHashed[0],
+ lastHashed[minMatchLength],
+ hashPower);
} else {
- rollingHash = ZSTD_ldm_getRollingHash(ip, minMatchLength);
+ rollingHash = ZSTD_rollingHash_compute(ip, minMatchLength);
}
lastHashed = ip;
/* Do not insert and do not look for a match */
- if (ZSTD_ldm_getTag(rollingHash, hBits, hashEveryLog) != ldmTagMask) {
+ if (ZSTD_ldm_getTag(rollingHash, hBits, hashRateLog) != ldmTagMask) {
ip++;
continue;
}
@@ -508,7 +458,7 @@ size_t ZSTD_ldm_generateSequences(
* * Try invalidation after the sequence generation and test the
* offset against maxDist directly.
*/
- ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, NULL);
+ ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, NULL, NULL);
/* 3. Generate the sequences for the chunk, and get newLeftoverSize. */
newLeftoverSize = ZSTD_ldm_generateSequences_internal(
ldmState, sequences, params, chunkStart, chunkSize);
@@ -591,19 +541,19 @@ static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize,
- int const extDict)
+ void const* src, size_t srcSize)
{
- unsigned const minMatch = cParams->searchLength;
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ unsigned const minMatch = cParams->minMatch;
ZSTD_blockCompressor const blockCompressor =
- ZSTD_selectBlockCompressor(cParams->strategy, extDict);
- BYTE const* const base = ms->window.base;
+ ZSTD_selectBlockCompressor(cParams->strategy, ZSTD_matchState_dictMode(ms));
/* Input bounds */
BYTE const* const istart = (BYTE const*)src;
BYTE const* const iend = istart + srcSize;
/* Input positions */
BYTE const* ip = istart;
+ DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize);
assert(rawSeqStore->pos <= rawSeqStore->size);
assert(rawSeqStore->size <= rawSeqStore->capacity);
/* Loop through each sequence and apply the block compressor to the lits */
@@ -621,14 +571,13 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
/* Fill tables for block compressor */
ZSTD_ldm_limitTableUpdate(ms, ip);
- ZSTD_ldm_fillFastTables(ms, cParams, ip);
+ ZSTD_ldm_fillFastTables(ms, ip);
/* Run the block compressor */
+ DEBUGLOG(5, "calling block compressor on segment of size %u", sequence.litLength);
{
size_t const newLitLength =
- blockCompressor(ms, seqStore, rep, cParams, ip,
- sequence.litLength);
+ blockCompressor(ms, seqStore, rep, ip, sequence.litLength);
ip += sequence.litLength;
- ms->nextToUpdate = (U32)(ip - base);
/* Update the repcodes */
for (i = ZSTD_REP_NUM - 1; i > 0; i--)
rep[i] = rep[i-1];
@@ -642,12 +591,7 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
}
/* Fill the tables for the block compressor */
ZSTD_ldm_limitTableUpdate(ms, ip);
- ZSTD_ldm_fillFastTables(ms, cParams, ip);
+ ZSTD_ldm_fillFastTables(ms, ip);
/* Compress the last literals */
- {
- size_t const lastLiterals = blockCompressor(ms, seqStore, rep, cParams,
- ip, iend - ip);
- ms->nextToUpdate = (U32)(iend - base);
- return lastLiterals;
- }
+ return blockCompressor(ms, seqStore, rep, ip, iend - ip);
}
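Editor's note: this file drops its local rolling-hash helpers in favour of shared ZSTD_rollingHash_compute()/ZSTD_rollingHash_rotate(), assumed to implement the same recurrence the removed code documents: H(s) = (s_1+off)*a^(k-1) + ... + (s_k+off), updated in O(1) when the window slides by one byte. A standalone demonstration of that identity; the multiplier and offset are arbitrary stand-ins for prime8bytes and LDM_HASH_CHAR_OFFSET (the identity holds for any multiplier, arithmetic being mod 2^64):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static const uint64_t kMul    = 0x9E3779B185EBCA87ULL;  /* stand-in multiplier */
    static const uint64_t kOffset = 10;                      /* stand-in per-byte offset */

    static uint64_t ipow(uint64_t base, uint64_t exp)   /* base^exp mod 2^64 */
    {
        uint64_t r = 1;
        while (exp) { if (exp & 1) r *= base; base *= base; exp >>= 1; }
        return r;
    }

    /* H(s) = (s_1+off)*a^(k-1) + ... + (s_k+off), cf. the removed ZSTD_ldm_getRollingHash() */
    static uint64_t rh_compute(const uint8_t* buf, unsigned len)
    {
        uint64_t h = 0;
        unsigned i;
        for (i = 0; i < len; i++) h = h * kMul + buf[i] + kOffset;
        return h;
    }

    /* slide the window one byte right in O(1), cf. the removed ZSTD_ldm_updateHash() */
    static uint64_t rh_rotate(uint64_t h, uint8_t out, uint8_t in, uint64_t power)
    {
        h -= (out + kOffset) * power;   /* remove the leaving byte's contribution */
        h *= kMul;                      /* promote every remaining byte by one power of a */
        h += in + kOffset;              /* append the entering byte */
        return h;
    }

    int main(void)
    {
        const uint8_t data[] = "abcdefgh";
        unsigned const mml = 4;                        /* window length, cf. minMatchLength */
        uint64_t const power = ipow(kMul, mml - 1);    /* a^(mml-1), cf. the removed ZSTD_ldm_getHashPower() */
        uint64_t h = rh_compute(data, mml);            /* hash of "abcd" */
        h = rh_rotate(h, data[0], data[mml], power);   /* now the hash of "bcde" */
        assert(h == rh_compute(data + 1, mml));
        printf("rolling-hash rotation matches full recompute\n");
        return 0;
    }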
diff --git a/thirdparty/zstd/compress/zstd_ldm.h b/thirdparty/zstd/compress/zstd_ldm.h
index 0c3789ff13..a47846128b 100644
--- a/thirdparty/zstd/compress/zstd_ldm.h
+++ b/thirdparty/zstd/compress/zstd_ldm.h
@@ -21,7 +21,7 @@ extern "C" {
* Long distance matching
***************************************/
-#define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_DEFAULTMAX
+#define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_LIMIT_DEFAULT
/**
* ZSTD_ldm_generateSequences():
@@ -61,9 +61,7 @@ size_t ZSTD_ldm_generateSequences(
*/
size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams,
- void const* src, size_t srcSize,
- int const extDict);
+ void const* src, size_t srcSize);
/**
* ZSTD_ldm_skipSequences():
@@ -88,12 +86,8 @@ size_t ZSTD_ldm_getTableSize(ldmParams_t params);
*/
size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize);
-/** ZSTD_ldm_getTableSize() :
- * Return prime8bytes^(minMatchLength-1) */
-U64 ZSTD_ldm_getHashPower(U32 minMatchLength);
-
/** ZSTD_ldm_adjustParameters() :
- * If the params->hashEveryLog is not set, set it to its default value based on
+ * If the params->hashRateLog is not set, set it to its default value based on
* windowLog and params->hashLog.
*
* Ensures that params->bucketSizeLog is <= params->hashLog (setting it to
diff --git a/thirdparty/zstd/compress/zstd_opt.c b/thirdparty/zstd/compress/zstd_opt.c
index f63f0c5852..44de6e97fd 100644
--- a/thirdparty/zstd/compress/zstd_opt.c
+++ b/thirdparty/zstd/compress/zstd_opt.c
@@ -9,139 +9,245 @@
*/
#include "zstd_compress_internal.h"
+#include "hist.h"
#include "zstd_opt.h"
-#define ZSTD_LITFREQ_ADD 2 /* scaling factor for litFreq, so that frequencies adapt faster to new stats. Also used for matchSum (?) */
+#define ZSTD_LITFREQ_ADD 2 /* scaling factor for litFreq, so that frequencies adapt faster to new stats */
#define ZSTD_FREQ_DIV 4 /* log factor when using previous stats to init next stats */
#define ZSTD_MAX_PRICE (1<<30)
+#define ZSTD_PREDEF_THRESHOLD 1024 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */
+
/*-*************************************
* Price functions for optimal parser
***************************************/
-static void ZSTD_setLog2Prices(optState_t* optPtr)
+
+#if 0 /* approximation at bit level */
+# define BITCOST_ACCURACY 0
+# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
+# define WEIGHT(stat) ((void)opt, ZSTD_bitWeight(stat))
+#elif 0 /* fractional bit accuracy */
+# define BITCOST_ACCURACY 8
+# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
+# define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat))
+#else /* opt==approx, ultra==accurate */
+# define BITCOST_ACCURACY 8
+# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
+# define WEIGHT(stat,opt) (opt ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat))
+#endif
+
+MEM_STATIC U32 ZSTD_bitWeight(U32 stat)
{
- optPtr->log2litSum = ZSTD_highbit32(optPtr->litSum+1);
- optPtr->log2litLengthSum = ZSTD_highbit32(optPtr->litLengthSum+1);
- optPtr->log2matchLengthSum = ZSTD_highbit32(optPtr->matchLengthSum+1);
- optPtr->log2offCodeSum = ZSTD_highbit32(optPtr->offCodeSum+1);
+ return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER);
}
+MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat)
+{
+ U32 const stat = rawStat + 1;
+ U32 const hb = ZSTD_highbit32(stat);
+ U32 const BWeight = hb * BITCOST_MULTIPLIER;
+ U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb;
+ U32 const weight = BWeight + FWeight;
+ assert(hb + BITCOST_ACCURACY < 31);
+ return weight;
+}
-static void ZSTD_rescaleFreqs(optState_t* const optPtr,
- const BYTE* const src, size_t const srcSize)
+#if (DEBUGLEVEL>=2)
+/* debugging function,
+ * @return price in bytes as fractional value
+ * for debug messages only */
+MEM_STATIC double ZSTD_fCost(U32 price)
{
- optPtr->staticPrices = 0;
+ return (double)price / (BITCOST_MULTIPLIER*8);
+}
+#endif
- if (optPtr->litLengthSum == 0) { /* first init */
- unsigned u;
- if (srcSize <= 1024) optPtr->staticPrices = 1;
+static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
+{
+ optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel);
+ optPtr->litLengthSumBasePrice = WEIGHT(optPtr->litLengthSum, optLevel);
+ optPtr->matchLengthSumBasePrice = WEIGHT(optPtr->matchLengthSum, optLevel);
+ optPtr->offCodeSumBasePrice = WEIGHT(optPtr->offCodeSum, optLevel);
+}
- assert(optPtr->litFreq!=NULL);
- for (u=0; u<=MaxLit; u++)
- optPtr->litFreq[u] = 0;
- for (u=0; u<srcSize; u++)
- optPtr->litFreq[src[u]]++;
- optPtr->litSum = 0;
- for (u=0; u<=MaxLit; u++) {
- optPtr->litFreq[u] = 1 + (optPtr->litFreq[u] >> ZSTD_FREQ_DIV);
- optPtr->litSum += optPtr->litFreq[u];
- }
- for (u=0; u<=MaxLL; u++)
- optPtr->litLengthFreq[u] = 1;
- optPtr->litLengthSum = MaxLL+1;
- for (u=0; u<=MaxML; u++)
- optPtr->matchLengthFreq[u] = 1;
- optPtr->matchLengthSum = MaxML+1;
- for (u=0; u<=MaxOff; u++)
- optPtr->offCodeFreq[u] = 1;
- optPtr->offCodeSum = (MaxOff+1);
-
- } else {
- unsigned u;
-
- optPtr->litSum = 0;
- for (u=0; u<=MaxLit; u++) {
- optPtr->litFreq[u] = 1 + (optPtr->litFreq[u] >> (ZSTD_FREQ_DIV+1));
- optPtr->litSum += optPtr->litFreq[u];
- }
- optPtr->litLengthSum = 0;
- for (u=0; u<=MaxLL; u++) {
- optPtr->litLengthFreq[u] = 1 + (optPtr->litLengthFreq[u]>>(ZSTD_FREQ_DIV+1));
- optPtr->litLengthSum += optPtr->litLengthFreq[u];
- }
- optPtr->matchLengthSum = 0;
- for (u=0; u<=MaxML; u++) {
- optPtr->matchLengthFreq[u] = 1 + (optPtr->matchLengthFreq[u]>>ZSTD_FREQ_DIV);
- optPtr->matchLengthSum += optPtr->matchLengthFreq[u];
+/* ZSTD_downscaleStat() :
+ * reduce all elements in table by a factor 2^(ZSTD_FREQ_DIV+malus)
+ * return the resulting sum of elements */
+static U32 ZSTD_downscaleStat(unsigned* table, U32 lastEltIndex, int malus)
+{
+ U32 s, sum=0;
+ DEBUGLOG(5, "ZSTD_downscaleStat (nbElts=%u)", (unsigned)lastEltIndex+1);
+ assert(ZSTD_FREQ_DIV+malus > 0 && ZSTD_FREQ_DIV+malus < 31);
+ for (s=0; s<lastEltIndex+1; s++) {
+ table[s] = 1 + (table[s] >> (ZSTD_FREQ_DIV+malus));
+ sum += table[s];
+ }
+ return sum;
+}
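Editor's note: ZSTD_downscaleStat() is how statistics survive across blocks: each count is divided by 2^(ZSTD_FREQ_DIV+malus) and floored at 1, so older blocks fade without any symbol ever becoming "impossible". A standalone trace with ZSTD_FREQ_DIV = 4 as defined above:

    #include <stdio.h>

    /* self-contained copy of the downscaling rule: large counts shrink by ~16x
     * (or ~32x with malus = 1) and no count ever drops to zero */
    static unsigned downscale(unsigned count, int malus)
    {
        return 1 + (count >> (4 + malus));
    }

    int main(void)
    {
        printf("%u %u %u\n", downscale(4000, 0), downscale(3, 0), downscale(4000, 1));
        /* prints: 251 1 126 */
        return 0;
    }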
+
+/* ZSTD_rescaleFreqs() :
+ * if first block (detected by optPtr->litLengthSum == 0) : init statistics
+ * take hints from dictionary if there is one
+ * or init from zero, using src for literals stats, or flat 1 for match symbols
+ * otherwise downscale existing stats, to be used as seed for next block.
+ */
+static void
+ZSTD_rescaleFreqs(optState_t* const optPtr,
+ const BYTE* const src, size_t const srcSize,
+ int const optLevel)
+{
+ DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize);
+ optPtr->priceType = zop_dynamic;
+
+ if (optPtr->litLengthSum == 0) { /* first block : init */
+ if (srcSize <= ZSTD_PREDEF_THRESHOLD) { /* heuristic */
+ DEBUGLOG(5, "(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef");
+ optPtr->priceType = zop_predef;
}
- optPtr->offCodeSum = 0;
- for (u=0; u<=MaxOff; u++) {
- optPtr->offCodeFreq[u] = 1 + (optPtr->offCodeFreq[u]>>ZSTD_FREQ_DIV);
- optPtr->offCodeSum += optPtr->offCodeFreq[u];
+
+ assert(optPtr->symbolCosts != NULL);
+ if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) {
+ /* huffman table presumed generated by dictionary */
+ optPtr->priceType = zop_dynamic;
+
+ assert(optPtr->litFreq != NULL);
+ optPtr->litSum = 0;
+ { unsigned lit;
+ for (lit=0; lit<=MaxLit; lit++) {
+ U32 const scaleLog = 11; /* scale to 2K */
+ U32 const bitCost = HUF_getNbBits(optPtr->symbolCosts->huf.CTable, lit);
+ assert(bitCost <= scaleLog);
+ optPtr->litFreq[lit] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
+ optPtr->litSum += optPtr->litFreq[lit];
+ } }
+
+ { unsigned ll;
+ FSE_CState_t llstate;
+ FSE_initCState(&llstate, optPtr->symbolCosts->fse.litlengthCTable);
+ optPtr->litLengthSum = 0;
+ for (ll=0; ll<=MaxLL; ll++) {
+ U32 const scaleLog = 10; /* scale to 1K */
+ U32 const bitCost = FSE_getMaxNbBits(llstate.symbolTT, ll);
+ assert(bitCost < scaleLog);
+ optPtr->litLengthFreq[ll] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
+ optPtr->litLengthSum += optPtr->litLengthFreq[ll];
+ } }
+
+ { unsigned ml;
+ FSE_CState_t mlstate;
+ FSE_initCState(&mlstate, optPtr->symbolCosts->fse.matchlengthCTable);
+ optPtr->matchLengthSum = 0;
+ for (ml=0; ml<=MaxML; ml++) {
+ U32 const scaleLog = 10;
+ U32 const bitCost = FSE_getMaxNbBits(mlstate.symbolTT, ml);
+ assert(bitCost < scaleLog);
+ optPtr->matchLengthFreq[ml] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
+ optPtr->matchLengthSum += optPtr->matchLengthFreq[ml];
+ } }
+
+ { unsigned of;
+ FSE_CState_t ofstate;
+ FSE_initCState(&ofstate, optPtr->symbolCosts->fse.offcodeCTable);
+ optPtr->offCodeSum = 0;
+ for (of=0; of<=MaxOff; of++) {
+ U32 const scaleLog = 10;
+ U32 const bitCost = FSE_getMaxNbBits(ofstate.symbolTT, of);
+ assert(bitCost < scaleLog);
+ optPtr->offCodeFreq[of] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
+ optPtr->offCodeSum += optPtr->offCodeFreq[of];
+ } }
+
+ } else { /* not a dictionary */
+
+ assert(optPtr->litFreq != NULL);
+ { unsigned lit = MaxLit;
+ HIST_count_simple(optPtr->litFreq, &lit, src, srcSize); /* use raw first block to init statistics */
+ }
+ optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
+
+ { unsigned ll;
+ for (ll=0; ll<=MaxLL; ll++)
+ optPtr->litLengthFreq[ll] = 1;
+ }
+ optPtr->litLengthSum = MaxLL+1;
+
+ { unsigned ml;
+ for (ml=0; ml<=MaxML; ml++)
+ optPtr->matchLengthFreq[ml] = 1;
+ }
+ optPtr->matchLengthSum = MaxML+1;
+
+ { unsigned of;
+ for (of=0; of<=MaxOff; of++)
+ optPtr->offCodeFreq[of] = 1;
+ }
+ optPtr->offCodeSum = MaxOff+1;
+
}
+
+ } else { /* new block : re-use previous statistics, scaled down */
+
+ optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
+ optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0);
+ optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0);
+ optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0);
}
- ZSTD_setLog2Prices(optPtr);
+ ZSTD_setBasePrices(optPtr, optLevel);
}
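Editor's note: when the first block starts with a valid dictionary entropy table, the parser seeds its frequencies from the dictionary's bit costs rather than from the raw input: a symbol coded in bitCost bits gets pseudo-frequency 2^(scaleLog - bitCost), so the initial statistics agree with the dictionary. A minimal restatement of that rule (scaleLog is 11 for literals and 10 for the FSE-coded LL/ML/OF codes, as in the block above):

    /* a symbol the dictionary codes in bitCost bits gets pseudo-frequency
     * 2^(scaleLog - bitCost); the floor of 1 keeps its price finite */
    static unsigned seedFreqFromBitCost(unsigned bitCost, unsigned scaleLog)
    {
        return bitCost ? 1u << (scaleLog - bitCost) : 1u;
    }
    /* e.g. seedFreqFromBitCost(2, 11) == 512, seedFreqFromBitCost(11, 11) == 1 */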
-
/* ZSTD_rawLiteralsCost() :
- * cost of literals (only) in given segment (which length can be null)
- * does not include cost of literalLength symbol */
+ * price of literals (only) in specified segment (which length can be 0).
+ * does not include price of literalLength symbol */
static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
- const optState_t* const optPtr)
+ const optState_t* const optPtr,
+ int optLevel)
{
- if (optPtr->staticPrices) return (litLength*6); /* 6 bit per literal - no statistic used */
if (litLength == 0) return 0;
-
- /* literals */
- { U32 u;
- U32 cost = litLength * optPtr->log2litSum;
- for (u=0; u < litLength; u++)
- cost -= ZSTD_highbit32(optPtr->litFreq[literals[u]]+1);
- return cost;
+ if (optPtr->priceType == zop_predef)
+ return (litLength*6) * BITCOST_MULTIPLIER; /* 6 bit per literal - no statistic used */
+
+ /* dynamic statistics */
+ { U32 price = litLength * optPtr->litSumBasePrice;
+ U32 u;
+ for (u=0; u < litLength; u++) {
+ assert(WEIGHT(optPtr->litFreq[literals[u]], optLevel) <= optPtr->litSumBasePrice); /* literal cost should never be negative */
+ price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel);
+ }
+ return price;
}
}
/* ZSTD_litLengthPrice() :
* cost of literalLength symbol */
-static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr)
+static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel)
{
- if (optPtr->staticPrices) return ZSTD_highbit32((U32)litLength+1);
+ if (optPtr->priceType == zop_predef) return WEIGHT(litLength, optLevel);
- /* literal Length */
+ /* dynamic statistics */
{ U32 const llCode = ZSTD_LLcode(litLength);
- U32 const price = LL_bits[llCode] + optPtr->log2litLengthSum - ZSTD_highbit32(optPtr->litLengthFreq[llCode]+1);
- return price;
+ return (LL_bits[llCode] * BITCOST_MULTIPLIER)
+ + optPtr->litLengthSumBasePrice
+ - WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
}
}
-/* ZSTD_litLengthPrice() :
- * cost of the literal part of a sequence,
- * including literals themselves, and literalLength symbol */
-static U32 ZSTD_fullLiteralsCost(const BYTE* const literals, U32 const litLength,
- const optState_t* const optPtr)
-{
- return ZSTD_rawLiteralsCost(literals, litLength, optPtr)
- + ZSTD_litLengthPrice(litLength, optPtr);
-}
-
/* ZSTD_litLengthContribution() :
* @return ( cost(litlength) - cost(0) )
* this value can then be added to rawLiteralsCost()
* to provide a cost which is directly comparable to a match ending at same position */
-static int ZSTD_litLengthContribution(U32 const litLength, const optState_t* const optPtr)
+static int ZSTD_litLengthContribution(U32 const litLength, const optState_t* const optPtr, int optLevel)
{
- if (optPtr->staticPrices) return ZSTD_highbit32(litLength+1);
+ if (optPtr->priceType >= zop_predef) return WEIGHT(litLength, optLevel);
- /* literal Length */
+ /* dynamic statistics */
{ U32 const llCode = ZSTD_LLcode(litLength);
- int const contribution = LL_bits[llCode]
- + ZSTD_highbit32(optPtr->litLengthFreq[0]+1)
- - ZSTD_highbit32(optPtr->litLengthFreq[llCode]+1);
+ int const contribution = (LL_bits[llCode] * BITCOST_MULTIPLIER)
+ + WEIGHT(optPtr->litLengthFreq[0], optLevel) /* note: log2litLengthSum cancel out */
+ - WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
#if 1
return contribution;
#else
@@ -155,10 +261,11 @@ static int ZSTD_litLengthContribution(U32 const litLength, const optState_t* con
* which can be compared to the ending cost of a match
* should a new match start at this position */
static int ZSTD_literalsContribution(const BYTE* const literals, U32 const litLength,
- const optState_t* const optPtr)
+ const optState_t* const optPtr,
+ int optLevel)
{
- int const contribution = ZSTD_rawLiteralsCost(literals, litLength, optPtr)
- + ZSTD_litLengthContribution(litLength, optPtr);
+ int const contribution = ZSTD_rawLiteralsCost(literals, litLength, optPtr, optLevel)
+ + ZSTD_litLengthContribution(litLength, optPtr, optLevel);
return contribution;
}
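Editor's note: prices are expressed in 1/256ths of a bit (BITCOST_ACCURACY = 8). ZSTD_bitWeight() keeps only the integer part of log2, ZSTD_fracWeight() refines it with a linear fractional term so that price differences track log2 ratios more closely, and ZSTD_rawLiteralsCost() charges each literal roughly WEIGHT(litSum) - WEIGHT(litFreq[lit]). A standalone trace of both weightings for one literal (copies of the two functions above; the counts are illustrative):

    #include <stdio.h>

    #define BITCOST_ACCURACY   8
    #define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)

    static unsigned highbit32(unsigned v) { unsigned n = 0; while (v >>= 1) n++; return n; }

    /* copies of the two weight functions above; results are in 1/256ths of a bit */
    static unsigned bitWeight (unsigned stat) { return highbit32(stat + 1) * BITCOST_MULTIPLIER; }
    static unsigned fracWeight(unsigned rawStat)
    {
        unsigned const stat = rawStat + 1;
        unsigned const hb   = highbit32(stat);
        return hb * BITCOST_MULTIPLIER + ((stat << BITCOST_ACCURACY) >> hb);
    }

    int main(void)
    {
        /* a literal seen 5 times in a pool of 1000 costs about WEIGHT(sum) - WEIGHT(freq) */
        unsigned const sum = 1000, freq = 5;
        printf("bitWeight  (approx)  : %.2f bits\n",
               (double)(bitWeight(sum)  - bitWeight(freq))  / BITCOST_MULTIPLIER);   /* 7.00 */
        printf("fracWeight (accurate): %.2f bits\n",
               (double)(fracWeight(sum) - fracWeight(freq)) / BITCOST_MULTIPLIER);   /* ~7.45 */
        return 0;
    }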
@@ -166,31 +273,38 @@ static int ZSTD_literalsContribution(const BYTE* const literals, U32 const litLe
* Provides the cost of the match part (offset + matchLength) of a sequence
* Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence.
* optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) */
-FORCE_INLINE_TEMPLATE U32 ZSTD_getMatchPrice(
- U32 const offset, U32 const matchLength,
- const optState_t* const optPtr,
- int const optLevel)
+FORCE_INLINE_TEMPLATE U32
+ZSTD_getMatchPrice(U32 const offset,
+ U32 const matchLength,
+ const optState_t* const optPtr,
+ int const optLevel)
{
U32 price;
U32 const offCode = ZSTD_highbit32(offset+1);
U32 const mlBase = matchLength - MINMATCH;
assert(matchLength >= MINMATCH);
- if (optPtr->staticPrices) /* fixed scheme, do not use statistics */
- return ZSTD_highbit32((U32)mlBase+1) + 16 + offCode;
+ if (optPtr->priceType == zop_predef) /* fixed scheme, do not use statistics */
+ return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER);
- price = offCode + optPtr->log2offCodeSum - ZSTD_highbit32(optPtr->offCodeFreq[offCode]+1);
- if ((optLevel<2) /*static*/ && offCode >= 20) price += (offCode-19)*2; /* handicap for long distance offsets, favor decompression speed */
+ /* dynamic statistics */
+ price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel));
+ if ((optLevel<2) /*static*/ && offCode >= 20)
+ price += (offCode-19)*2 * BITCOST_MULTIPLIER; /* handicap for long distance offsets, favor decompression speed */
/* match Length */
{ U32 const mlCode = ZSTD_MLcode(mlBase);
- price += ML_bits[mlCode] + optPtr->log2matchLengthSum - ZSTD_highbit32(optPtr->matchLengthFreq[mlCode]+1);
+ price += (ML_bits[mlCode] * BITCOST_MULTIPLIER) + (optPtr->matchLengthSumBasePrice - WEIGHT(optPtr->matchLengthFreq[mlCode], optLevel));
}
+ price += BITCOST_MULTIPLIER / 5; /* heuristic : make matches a bit more costly to favor fewer sequences -> faster decompression speed */
+
DEBUGLOG(8, "ZSTD_getMatchPrice(ml:%u) = %u", matchLength, price);
return price;
}
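Editor's note: one detail of ZSTD_getMatchPrice() worth isolating is the long-distance handicap: at optLevel < 2, once offCode = ZSTD_highbit32(offset+1) reaches 20, the price grows by (offCode-19)*2 bits, deliberately steering the approximate parser toward nearer matches for cache-friendlier decompression. A small sketch of just that term (highbit32() again stands in for ZSTD_highbit32()):

    static unsigned highbit32(unsigned v) { unsigned n = 0; while (v >>= 1) n++; return n; }

    /* extra bits charged to a match at the given offset, per the rule above */
    static unsigned longOffsetHandicapBits(unsigned offset, int optLevel)
    {
        unsigned const offCode = highbit32(offset + 1);
        return (optLevel < 2 && offCode >= 20) ? (offCode - 19) * 2 : 0;
    }
    /* e.g. longOffsetHandicapBits(1u << 21, 0) == 4, longOffsetHandicapBits(1u << 21, 2) == 0 */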
+/* ZSTD_updateStats() :
+ * assumption : literals + litLength <= iend */
static void ZSTD_updateStats(optState_t* const optPtr,
U32 litLength, const BYTE* literals,
U32 offsetCode, U32 matchLength)
@@ -269,10 +383,11 @@ static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms, const BYTE*
* ip : assumed <= iend-8 .
* @return : nb of positions added */
static U32 ZSTD_insertBt1(
- ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+ ZSTD_matchState_t* ms,
const BYTE* const ip, const BYTE* const iend,
- U32 const mls, U32 const extDict)
+ U32 const mls, const int extDict)
{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const hashTable = ms->hashTable;
U32 const hashLog = cParams->hashLog;
size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
@@ -308,7 +423,8 @@ static U32 ZSTD_insertBt1(
assert(ip <= iend-8); /* required for h calculation */
hashTable[h] = current; /* Update Hash Table */
- while (nbCompares-- && (matchIndex > windowLow)) {
+ assert(windowLow > 0);
+ while (nbCompares-- && (matchIndex >= windowLow)) {
U32* const nextPtr = bt + 2*(matchIndex & btMask);
size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
assert(matchIndex < current);
@@ -334,8 +450,8 @@ static U32 ZSTD_insertBt1(
}
#endif
- if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
- assert(matchIndex+matchLength >= dictLimit); /* might be wrong if extDict is incorrectly set to 0 */
+ if (!extDict || (matchIndex+matchLength >= dictLimit)) {
+ assert(matchIndex+matchLength >= dictLimit); /* might be wrong if actually extDict */
match = base + matchIndex;
matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
} else {
@@ -379,35 +495,36 @@ static U32 ZSTD_insertBt1(
FORCE_INLINE_TEMPLATE
void ZSTD_updateTree_internal(
- ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+ ZSTD_matchState_t* ms,
const BYTE* const ip, const BYTE* const iend,
- const U32 mls, const U32 extDict)
+ const U32 mls, const ZSTD_dictMode_e dictMode)
{
const BYTE* const base = ms->window.base;
U32 const target = (U32)(ip - base);
U32 idx = ms->nextToUpdate;
- DEBUGLOG(7, "ZSTD_updateTree_internal, from %u to %u (extDict:%u)",
- idx, target, extDict);
+ DEBUGLOG(6, "ZSTD_updateTree_internal, from %u to %u (dictMode:%u)",
+ idx, target, dictMode);
while(idx < target)
- idx += ZSTD_insertBt1(ms, cParams, base+idx, iend, mls, extDict);
+ idx += ZSTD_insertBt1(ms, base+idx, iend, mls, dictMode == ZSTD_extDict);
ms->nextToUpdate = target;
}
-void ZSTD_updateTree(
- ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
- const BYTE* ip, const BYTE* iend)
-{
- ZSTD_updateTree_internal(ms, cParams, ip, iend, cParams->searchLength, 0 /*extDict*/);
+void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) {
+ ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict);
}
FORCE_INLINE_TEMPLATE
U32 ZSTD_insertBtAndGetAllMatches (
- ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
- const BYTE* const ip, const BYTE* const iLimit, int const extDict,
- U32 rep[ZSTD_REP_NUM], U32 const ll0,
- ZSTD_match_t* matches, const U32 lengthToBeat, U32 const mls /* template */)
+ ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode,
+ U32 rep[ZSTD_REP_NUM],
+ U32 const ll0, /* tells if associated literal length is 0 or not. This value must be 0 or 1 */
+ ZSTD_match_t* matches,
+ const U32 lengthToBeat,
+ U32 const mls /* template */)
{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
const BYTE* const base = ms->window.base;
U32 const current = (U32)(ip-base);
@@ -426,6 +543,7 @@ U32 ZSTD_insertBtAndGetAllMatches (
const BYTE* const prefixStart = base + dictLimit;
U32 const btLow = btMask >= current ? 0 : current - btMask;
U32 const windowLow = ms->window.lowLimit;
+ U32 const matchLow = windowLow ? windowLow : 1;
U32* smallerPtr = bt + 2*(current&btMask);
U32* largerPtr = bt + 2*(current&btMask) + 1;
U32 matchEndIdx = current+8+1; /* farthest referenced position of any match => detects repetitive patterns */
@@ -433,10 +551,24 @@ U32 ZSTD_insertBtAndGetAllMatches (
U32 mnum = 0;
U32 nbCompares = 1U << cParams->searchLog;
+ const ZSTD_matchState_t* dms = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL;
+ const ZSTD_compressionParameters* const dmsCParams =
+ dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL;
+ const BYTE* const dmsBase = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL;
+ const BYTE* const dmsEnd = dictMode == ZSTD_dictMatchState ? dms->window.nextSrc : NULL;
+ U32 const dmsHighLimit = dictMode == ZSTD_dictMatchState ? (U32)(dmsEnd - dmsBase) : 0;
+ U32 const dmsLowLimit = dictMode == ZSTD_dictMatchState ? dms->window.lowLimit : 0;
+ U32 const dmsIndexDelta = dictMode == ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0;
+ U32 const dmsHashLog = dictMode == ZSTD_dictMatchState ? dmsCParams->hashLog : hashLog;
+ U32 const dmsBtLog = dictMode == ZSTD_dictMatchState ? dmsCParams->chainLog - 1 : btLog;
+ U32 const dmsBtMask = dictMode == ZSTD_dictMatchState ? (1U << dmsBtLog) - 1 : 0;
+ U32 const dmsBtLow = dictMode == ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit;
+
size_t bestLength = lengthToBeat-1;
- DEBUGLOG(7, "ZSTD_insertBtAndGetAllMatches");
+ DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", current);
/* check repCode */
+ assert(ll0 <= 1); /* necessarily 1 or 0 */
{ U32 const lastR = ZSTD_REP_NUM + ll0;
U32 repCode;
for (repCode = ll0; repCode < lastR; repCode++) {
@@ -449,18 +581,26 @@ U32 ZSTD_insertBtAndGetAllMatches (
repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch;
}
} else { /* repIndex < dictLimit || repIndex >= current */
- const BYTE* const repMatch = dictBase + repIndex;
+ const BYTE* const repMatch = dictMode == ZSTD_dictMatchState ?
+ dmsBase + repIndex - dmsIndexDelta :
+ dictBase + repIndex;
assert(current >= windowLow);
- if ( extDict /* this case only valid in extDict mode */
+ if ( dictMode == ZSTD_extDict
&& ( ((repOffset-1) /*intentional overflow*/ < current - windowLow) /* equivalent to `current > repIndex >= windowLow` */
& (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */)
&& (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch;
+ }
+ if (dictMode == ZSTD_dictMatchState
+ && ( ((repOffset-1) /*intentional overflow*/ < current - (dmsLowLimit + dmsIndexDelta)) /* equivalent to `current > repIndex >= dmsLowLimit` */
+ & ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */
+ && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
+ repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch;
} }
/* save longer solution */
if (repLen > bestLength) {
- DEBUGLOG(8, "found rep-match %u of length %u",
- repCode - ll0, (U32)repLen);
+ DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u",
+ repCode, ll0, repOffset, repLen);
bestLength = repLen;
matches[mnum].off = repCode - ll0;
matches[mnum].len = (U32)repLen;
@@ -473,10 +613,10 @@ U32 ZSTD_insertBtAndGetAllMatches (
/* HC3 match finder */
if ((mls == 3) /*static*/ && (bestLength < mls)) {
U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, ip);
- if ((matchIndex3 > windowLow)
+ if ((matchIndex3 >= matchLow)
& (current - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) {
size_t mlen;
- if ((!extDict) /*static*/ || (matchIndex3 >= dictLimit)) {
+ if ((dictMode == ZSTD_noDict) /*static*/ || (dictMode == ZSTD_dictMatchState) /*static*/ || (matchIndex3 >= dictLimit)) {
const BYTE* const match = base + matchIndex3;
mlen = ZSTD_count(ip, match, iLimit);
} else {
@@ -498,17 +638,21 @@ U32 ZSTD_insertBtAndGetAllMatches (
(ip+mlen == iLimit) ) { /* best possible length */
ms->nextToUpdate = current+1; /* skip insertion */
return 1;
- } } } }
+ }
+ }
+ }
+ /* no dictMatchState lookup: dicts don't have a populated HC3 table */
+ }
hashTable[h] = current; /* Update Hash Table */
- while (nbCompares-- && (matchIndex > windowLow)) {
+ while (nbCompares-- && (matchIndex >= matchLow)) {
U32* const nextPtr = bt + 2*(matchIndex & btMask);
size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
const BYTE* match;
assert(current > matchIndex);
- if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
+ if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) {
assert(matchIndex+matchLength >= dictLimit); /* ensure the condition is correct when !extDict */
match = base + matchIndex;
matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit);
@@ -520,8 +664,8 @@ U32 ZSTD_insertBtAndGetAllMatches (
}
if (matchLength > bestLength) {
- DEBUGLOG(8, "found match of length %u at distance %u",
- (U32)matchLength, current - matchIndex);
+ DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)",
+ (U32)matchLength, current - matchIndex, current - matchIndex + ZSTD_REP_MOVE);
assert(matchEndIdx > matchIndex);
if (matchLength > matchEndIdx - matchIndex)
matchEndIdx = matchIndex + (U32)matchLength;
@@ -529,9 +673,10 @@ U32 ZSTD_insertBtAndGetAllMatches (
matches[mnum].off = (current - matchIndex) + ZSTD_REP_MOVE;
matches[mnum].len = (U32)matchLength;
mnum++;
- if (matchLength > ZSTD_OPT_NUM) break;
- if (ip+matchLength == iLimit) { /* equal : no way to know if inf or sup */
- break; /* drop, to preserve bt consistency (miss a little bit of compression) */
+ if ( (matchLength > ZSTD_OPT_NUM)
+ | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
+ if (dictMode == ZSTD_dictMatchState) nbCompares = 0; /* break should also skip searching dms */
+ break; /* drop, to preserve bt consistency (miss a little bit of compression) */
}
}
@@ -552,6 +697,47 @@ U32 ZSTD_insertBtAndGetAllMatches (
*smallerPtr = *largerPtr = 0;
+ if (dictMode == ZSTD_dictMatchState && nbCompares) {
+ size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls);
+ U32 dictMatchIndex = dms->hashTable[dmsH];
+ const U32* const dmsBt = dms->chainTable;
+ commonLengthSmaller = commonLengthLarger = 0;
+ while (nbCompares-- && (dictMatchIndex > dmsLowLimit)) {
+ const U32* const nextPtr = dmsBt + 2*(dictMatchIndex & dmsBtMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ const BYTE* match = dmsBase + dictMatchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dmsEnd, prefixStart);
+ if (dictMatchIndex+matchLength >= dmsHighLimit)
+ match = base + dictMatchIndex + dmsIndexDelta; /* to prepare for next usage of match[matchLength] */
+
+ if (matchLength > bestLength) {
+ matchIndex = dictMatchIndex + dmsIndexDelta;
+ DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)",
+ (U32)matchLength, current - matchIndex, current - matchIndex + ZSTD_REP_MOVE);
+ if (matchLength > matchEndIdx - matchIndex)
+ matchEndIdx = matchIndex + (U32)matchLength;
+ bestLength = matchLength;
+ matches[mnum].off = (current - matchIndex) + ZSTD_REP_MOVE;
+ matches[mnum].len = (U32)matchLength;
+ mnum++;
+ if ( (matchLength > ZSTD_OPT_NUM)
+ | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
+ break; /* drop, to guarantee consistency (miss a little bit of compression) */
+ }
+ }
+
+ if (dictMatchIndex <= dmsBtLow) { break; } /* beyond tree size, stop the search */
+ if (match[matchLength] < ip[matchLength]) {
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ dictMatchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
+ } else {
+ /* match is larger than current */
+ commonLengthLarger = matchLength;
+ dictMatchIndex = nextPtr[0];
+ }
+ }
+ }
+
assert(matchEndIdx > current+8);
ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */
return mnum;
@@ -559,23 +745,24 @@ U32 ZSTD_insertBtAndGetAllMatches (
FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches (
- ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
- const BYTE* ip, const BYTE* const iHighLimit, int const extDict,
+ ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* const iHighLimit, const ZSTD_dictMode_e dictMode,
U32 rep[ZSTD_REP_NUM], U32 const ll0,
ZSTD_match_t* matches, U32 const lengthToBeat)
{
- U32 const matchLengthSearch = cParams->searchLength;
- DEBUGLOG(7, "ZSTD_BtGetAllMatches");
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32 const matchLengthSearch = cParams->minMatch;
+ DEBUGLOG(8, "ZSTD_BtGetAllMatches");
if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */
- ZSTD_updateTree_internal(ms, cParams, ip, iHighLimit, matchLengthSearch, extDict);
+ ZSTD_updateTree_internal(ms, ip, iHighLimit, matchLengthSearch, dictMode);
switch(matchLengthSearch)
{
- case 3 : return ZSTD_insertBtAndGetAllMatches(ms, cParams, ip, iHighLimit, extDict, rep, ll0, matches, lengthToBeat, 3);
+ case 3 : return ZSTD_insertBtAndGetAllMatches(ms, ip, iHighLimit, dictMode, rep, ll0, matches, lengthToBeat, 3);
default :
- case 4 : return ZSTD_insertBtAndGetAllMatches(ms, cParams, ip, iHighLimit, extDict, rep, ll0, matches, lengthToBeat, 4);
- case 5 : return ZSTD_insertBtAndGetAllMatches(ms, cParams, ip, iHighLimit, extDict, rep, ll0, matches, lengthToBeat, 5);
+ case 4 : return ZSTD_insertBtAndGetAllMatches(ms, ip, iHighLimit, dictMode, rep, ll0, matches, lengthToBeat, 4);
+ case 5 : return ZSTD_insertBtAndGetAllMatches(ms, ip, iHighLimit, dictMode, rep, ll0, matches, lengthToBeat, 5);
case 7 :
- case 6 : return ZSTD_insertBtAndGetAllMatches(ms, cParams, ip, iHighLimit, extDict, rep, ll0, matches, lengthToBeat, 6);
+ case 6 : return ZSTD_insertBtAndGetAllMatches(ms, ip, iHighLimit, dictMode, rep, ll0, matches, lengthToBeat, 6);
}
}
@@ -587,7 +774,7 @@ typedef struct repcodes_s {
U32 rep[3];
} repcodes_t;
-repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)
+static repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)
{
repcodes_t newReps;
if (offset >= ZSTD_REP_NUM) { /* full offset */
@@ -609,65 +796,35 @@ repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)
}
-typedef struct {
- const BYTE* anchor;
- U32 litlen;
- U32 rawLitCost;
-} cachedLiteralPrice_t;
-
-static U32 ZSTD_rawLiteralsCost_cached(
- cachedLiteralPrice_t* const cachedLitPrice,
- const BYTE* const anchor, U32 const litlen,
- const optState_t* const optStatePtr)
+static U32 ZSTD_totalLen(ZSTD_optimal_t sol)
{
- U32 startCost;
- U32 remainingLength;
- const BYTE* startPosition;
-
- if (anchor == cachedLitPrice->anchor) {
- startCost = cachedLitPrice->rawLitCost;
- startPosition = anchor + cachedLitPrice->litlen;
- assert(litlen >= cachedLitPrice->litlen);
- remainingLength = litlen - cachedLitPrice->litlen;
- } else {
- startCost = 0;
- startPosition = anchor;
- remainingLength = litlen;
- }
-
- { U32 const rawLitCost = startCost + ZSTD_rawLiteralsCost(startPosition, remainingLength, optStatePtr);
- cachedLitPrice->anchor = anchor;
- cachedLitPrice->litlen = litlen;
- cachedLitPrice->rawLitCost = rawLitCost;
- return rawLitCost;
- }
+ return sol.litlen + sol.mlen;
}
-static U32 ZSTD_fullLiteralsCost_cached(
- cachedLiteralPrice_t* const cachedLitPrice,
- const BYTE* const anchor, U32 const litlen,
- const optState_t* const optStatePtr)
-{
- return ZSTD_rawLiteralsCost_cached(cachedLitPrice, anchor, litlen, optStatePtr)
- + ZSTD_litLengthPrice(litlen, optStatePtr);
-}
+#if 0 /* debug */
-static int ZSTD_literalsContribution_cached(
- cachedLiteralPrice_t* const cachedLitPrice,
- const BYTE* const anchor, U32 const litlen,
- const optState_t* const optStatePtr)
+static void
+listStats(const U32* table, int lastEltID)
{
- int const contribution = ZSTD_rawLiteralsCost_cached(cachedLitPrice, anchor, litlen, optStatePtr)
- + ZSTD_litLengthContribution(litlen, optStatePtr);
- return contribution;
+ int const nbElts = lastEltID + 1;
+ int enb;
+ for (enb=0; enb < nbElts; enb++) {
+ (void)table;
+ //RAWLOG(2, "%3i:%3i, ", enb, table[enb]);
+ RAWLOG(2, "%4i,", table[enb]);
+ }
+ RAWLOG(2, " \n");
}
-FORCE_INLINE_TEMPLATE
-size_t ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,seqStore_t* seqStore,
- U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams,
- const void* src, size_t srcSize,
- const int optLevel, const int extDict)
+#endif
+
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
+ seqStore_t* seqStore,
+ U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize,
+ const int optLevel,
+ const ZSTD_dictMode_e dictMode)
{
optState_t* const optStatePtr = &ms->opt;
const BYTE* const istart = (const BYTE*)src;
@@ -677,72 +834,77 @@ size_t ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,seqStore_t* seqStore
const BYTE* const ilimit = iend - 8;
const BYTE* const base = ms->window.base;
const BYTE* const prefixStart = base + ms->window.dictLimit;
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
- U32 const minMatch = (cParams->searchLength == 3) ? 3 : 4;
+ U32 const minMatch = (cParams->minMatch == 3) ? 3 : 4;
ZSTD_optimal_t* const opt = optStatePtr->priceTable;
ZSTD_match_t* const matches = optStatePtr->matchTable;
- cachedLiteralPrice_t cachedLitPrice;
+ ZSTD_optimal_t lastSequence;
/* init */
- DEBUGLOG(5, "ZSTD_compressBlock_opt_generic");
+ DEBUGLOG(5, "ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u",
+ (U32)(ip - base), ms->window.dictLimit, ms->nextToUpdate);
+ assert(optLevel <= 2);
ms->nextToUpdate3 = ms->nextToUpdate;
- ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize);
+ ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize, optLevel);
ip += (ip==prefixStart);
- memset(&cachedLitPrice, 0, sizeof(cachedLitPrice));
/* Match Loop */
while (ip < ilimit) {
U32 cur, last_pos = 0;
- U32 best_mlen, best_off;
/* find first match */
{ U32 const litlen = (U32)(ip - anchor);
U32 const ll0 = !litlen;
- U32 const nbMatches = ZSTD_BtGetAllMatches(ms, cParams, ip, iend, extDict, rep, ll0, matches, minMatch);
+ U32 const nbMatches = ZSTD_BtGetAllMatches(ms, ip, iend, dictMode, rep, ll0, matches, minMatch);
if (!nbMatches) { ip++; continue; }
/* initialize opt[0] */
{ U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
- opt[0].mlen = 1;
+ opt[0].mlen = 0; /* means is_a_literal */
opt[0].litlen = litlen;
+ opt[0].price = ZSTD_literalsContribution(anchor, litlen, optStatePtr, optLevel);
/* large match -> immediate encoding */
{ U32 const maxML = matches[nbMatches-1].len;
- DEBUGLOG(7, "found %u matches of maxLength=%u and offset=%u at cPos=%u => start new serie",
- nbMatches, maxML, matches[nbMatches-1].off, (U32)(ip-prefixStart));
+ U32 const maxOffset = matches[nbMatches-1].off;
+ DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new serie",
+ nbMatches, maxML, maxOffset, (U32)(ip-prefixStart));
if (maxML > sufficient_len) {
- best_mlen = maxML;
- best_off = matches[nbMatches-1].off;
- DEBUGLOG(7, "large match (%u>%u), immediate encoding",
- best_mlen, sufficient_len);
+ lastSequence.litlen = litlen;
+ lastSequence.mlen = maxML;
+ lastSequence.off = maxOffset;
+ DEBUGLOG(6, "large match (%u>%u), immediate encoding",
+ maxML, sufficient_len);
cur = 0;
- last_pos = 1;
+ last_pos = ZSTD_totalLen(lastSequence);
goto _shortestPath;
} }
/* set prices for first matches starting position == 0 */
- { U32 const literalsPrice = ZSTD_fullLiteralsCost_cached(&cachedLitPrice, anchor, litlen, optStatePtr);
+ { U32 const literalsPrice = opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
U32 pos;
U32 matchNb;
- for (pos = 0; pos < minMatch; pos++) {
- opt[pos].mlen = 1;
- opt[pos].price = ZSTD_MAX_PRICE;
+ for (pos = 1; pos < minMatch; pos++) {
+ opt[pos].price = ZSTD_MAX_PRICE; /* mlen, litlen and price will be fixed during forward scanning */
}
for (matchNb = 0; matchNb < nbMatches; matchNb++) {
U32 const offset = matches[matchNb].off;
U32 const end = matches[matchNb].len;
repcodes_t const repHistory = ZSTD_updateRep(rep, offset, ll0);
for ( ; pos <= end ; pos++ ) {
- U32 const matchPrice = literalsPrice + ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel);
- DEBUGLOG(7, "rPos:%u => set initial price : %u",
- pos, matchPrice);
+ U32 const matchPrice = ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel);
+ U32 const sequencePrice = literalsPrice + matchPrice;
+ DEBUGLOG(7, "rPos:%u => set initial price : %.2f",
+ pos, ZSTD_fCost(sequencePrice));
opt[pos].mlen = pos;
opt[pos].off = offset;
opt[pos].litlen = litlen;
- opt[pos].price = matchPrice;
+ opt[pos].price = sequencePrice;
+ ZSTD_STATIC_ASSERT(sizeof(opt[pos].rep) == sizeof(repHistory));
memcpy(opt[pos].rep, &repHistory, sizeof(repHistory));
} }
last_pos = pos-1;
@@ -753,55 +915,67 @@ size_t ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,seqStore_t* seqStore
for (cur = 1; cur <= last_pos; cur++) {
const BYTE* const inr = ip + cur;
assert(cur < ZSTD_OPT_NUM);
+ DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur)
/* Fix current position with one literal if cheaper */
- { U32 const litlen = (opt[cur-1].mlen == 1) ? opt[cur-1].litlen + 1 : 1;
- int price; /* note : contribution can be negative */
- if (cur > litlen) {
- price = opt[cur - litlen].price + ZSTD_literalsContribution(inr-litlen, litlen, optStatePtr);
- } else {
- price = ZSTD_literalsContribution_cached(&cachedLitPrice, anchor, litlen, optStatePtr);
- }
+ { U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1;
+ int const price = opt[cur-1].price
+ + ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)
+ + ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)
+ - ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);
assert(price < 1000000000); /* overflow check */
if (price <= opt[cur].price) {
- DEBUGLOG(7, "rPos:%u : better price (%u<%u) using literal",
- cur, price, opt[cur].price);
- opt[cur].mlen = 1;
+ DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
+ inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,
+ opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]);
+ opt[cur].mlen = 0;
opt[cur].off = 0;
opt[cur].litlen = litlen;
opt[cur].price = price;
memcpy(opt[cur].rep, opt[cur-1].rep, sizeof(opt[cur].rep));
- } }
+ } else {
+ DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)",
+ inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price),
+ opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]);
+ }
+ }
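/* A worked view of the incremental update above (illustrative sketch) :
 * extending the path ending at cur-1 by one literal costs
 *   price(cur) = price(cur-1) + rawLiteralsCost(ip+cur-1, 1)
 *              + litLengthPrice(litlen) - litLengthPrice(litlen-1)
 * i.e. the raw cost of the new byte plus the marginal cost of growing
 * the literal run from litlen-1 to litlen literals. */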
/* last match must start at a minimum distance of 8 from oend */
if (inr > ilimit) continue;
if (cur == last_pos) break;
- if ( (optLevel==0) /*static*/
- && (opt[cur+1].price <= opt[cur].price) )
+ if ( (optLevel==0) /*static_test*/
+ && (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) {
+ DEBUGLOG(7, "move to next rPos:%u : price is <=", cur+1);
continue; /* skip unpromising positions; about ~+6% speed, -0.01 ratio */
+ }
- { U32 const ll0 = (opt[cur].mlen != 1);
- U32 const litlen = (opt[cur].mlen == 1) ? opt[cur].litlen : 0;
- U32 const previousPrice = (cur > litlen) ? opt[cur-litlen].price : 0;
- U32 const basePrice = previousPrice + ZSTD_fullLiteralsCost(inr-litlen, litlen, optStatePtr);
- U32 const nbMatches = ZSTD_BtGetAllMatches(ms, cParams, inr, iend, extDict, opt[cur].rep, ll0, matches, minMatch);
+ { U32 const ll0 = (opt[cur].mlen != 0);
+ U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0;
+ U32 const previousPrice = opt[cur].price;
+ U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
+ U32 const nbMatches = ZSTD_BtGetAllMatches(ms, inr, iend, dictMode, opt[cur].rep, ll0, matches, minMatch);
U32 matchNb;
- if (!nbMatches) continue;
+ if (!nbMatches) {
+ DEBUGLOG(7, "rPos:%u : no match found", cur);
+ continue;
+ }
{ U32 const maxML = matches[nbMatches-1].len;
- DEBUGLOG(7, "rPos:%u, found %u matches, of maxLength=%u",
- cur, nbMatches, maxML);
+ DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of maxLength=%u",
+ inr-istart, cur, nbMatches, maxML);
if ( (maxML > sufficient_len)
- | (cur + maxML >= ZSTD_OPT_NUM) ) {
- best_mlen = maxML;
- best_off = matches[nbMatches-1].off;
- last_pos = cur + 1;
+ || (cur + maxML >= ZSTD_OPT_NUM) ) {
+ lastSequence.mlen = maxML;
+ lastSequence.off = matches[nbMatches-1].off;
+ lastSequence.litlen = litlen;
+ cur -= (opt[cur].mlen==0) ? opt[cur].litlen : 0; /* last sequence is actually only literals, fix cur to last match - note : may underflow, in which case, it's first sequence, and it's okay */
+ last_pos = cur + ZSTD_totalLen(lastSequence);
+ if (cur > ZSTD_OPT_NUM) cur = 0; /* underflow => first match */
goto _shortestPath;
- }
- }
+ } }
/* set prices using matches found at position == cur */
for (matchNb = 0; matchNb < nbMatches; matchNb++) {
@@ -811,81 +985,97 @@ size_t ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,seqStore_t* seqStore
U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch;
U32 mlen;
- DEBUGLOG(7, "testing match %u => offCode=%u, mlen=%u, llen=%u",
+ DEBUGLOG(7, "testing match %u => offCode=%4u, mlen=%2u, llen=%2u",
matchNb, matches[matchNb].off, lastML, litlen);
- for (mlen = lastML; mlen >= startML; mlen--) {
+ for (mlen = lastML; mlen >= startML; mlen--) { /* scan downward */
U32 const pos = cur + mlen;
int const price = basePrice + ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);
if ((pos > last_pos) || (price < opt[pos].price)) {
- DEBUGLOG(7, "rPos:%u => new better price (%u<%u)",
- pos, price, opt[pos].price);
- while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; }
+ DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)",
+ pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
+ while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; } /* fill empty positions */
opt[pos].mlen = mlen;
opt[pos].off = offset;
opt[pos].litlen = litlen;
opt[pos].price = price;
+ ZSTD_STATIC_ASSERT(sizeof(opt[pos].rep) == sizeof(repHistory));
memcpy(opt[pos].rep, &repHistory, sizeof(repHistory));
} else {
- if (optLevel==0) break; /* gets ~+10% speed for about -0.01 ratio loss */
+ DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)",
+ pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
+ if (optLevel==0) break; /* early update abort; gets ~+10% speed for about -0.01 ratio loss */
}
} } }
} /* for (cur = 1; cur <= last_pos; cur++) */
- best_mlen = opt[last_pos].mlen;
- best_off = opt[last_pos].off;
- cur = last_pos - best_mlen;
+ lastSequence = opt[last_pos];
+ cur = last_pos > ZSTD_totalLen(lastSequence) ? last_pos - ZSTD_totalLen(lastSequence) : 0; /* single sequence, and it starts before `ip` */
+ assert(cur < ZSTD_OPT_NUM); /* overflow check */
_shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */
- assert(opt[0].mlen == 1);
-
- /* reverse traversal */
- DEBUGLOG(7, "start reverse traversal (last_pos:%u, cur:%u)",
- last_pos, cur);
- { U32 selectedMatchLength = best_mlen;
- U32 selectedOffset = best_off;
- U32 pos = cur;
- while (1) {
- U32 const mlen = opt[pos].mlen;
- U32 const off = opt[pos].off;
- opt[pos].mlen = selectedMatchLength;
- opt[pos].off = selectedOffset;
- selectedMatchLength = mlen;
- selectedOffset = off;
- if (mlen > pos) break;
- pos -= mlen;
- } }
-
- /* save sequences */
- { U32 pos;
- for (pos=0; pos < last_pos; ) {
- U32 const llen = (U32)(ip - anchor);
- U32 const mlen = opt[pos].mlen;
- U32 const offset = opt[pos].off;
- if (mlen == 1) { ip++; pos++; continue; } /* literal position => move on */
- pos += mlen; ip += mlen;
-
- /* repcodes update : like ZSTD_updateRep(), but update in place */
- if (offset >= ZSTD_REP_NUM) { /* full offset */
- rep[2] = rep[1];
- rep[1] = rep[0];
- rep[0] = offset - ZSTD_REP_MOVE;
- } else { /* repcode */
- U32 const repCode = offset + (llen==0);
- if (repCode) { /* note : if repCode==0, no change */
- U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
- if (repCode >= 2) rep[2] = rep[1];
- rep[1] = rep[0];
- rep[0] = currentOffset;
+ assert(opt[0].mlen == 0);
+
+ { U32 const storeEnd = cur + 1;
+ U32 storeStart = storeEnd;
+ U32 seqPos = cur;
+
+ DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)",
+ last_pos, cur); (void)last_pos;
+ assert(storeEnd < ZSTD_OPT_NUM);
+ DEBUGLOG(6, "last sequence copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
+ storeEnd, lastSequence.litlen, lastSequence.mlen, lastSequence.off);
+ opt[storeEnd] = lastSequence;
+ while (seqPos > 0) {
+ U32 const backDist = ZSTD_totalLen(opt[seqPos]);
+ storeStart--;
+ DEBUGLOG(6, "sequence from rPos=%u copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
+ seqPos, storeStart, opt[seqPos].litlen, opt[seqPos].mlen, opt[seqPos].off);
+ opt[storeStart] = opt[seqPos];
+ seqPos = (seqPos > backDist) ? seqPos - backDist : 0;
+ }
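/* Illustrative sketch of the back-walk above, assuming ZSTD_totalLen(sol) is
 * sol.litlen + sol.mlen (its definition is not shown in this hunk) : if
 * opt[last_pos] describes (litlen=2, mlen=6), its sequence spans 8 positions,
 * so the previous sequence to copy sits at seqPos - 8, and so on until
 * seqPos reaches 0. */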
+
+ /* save sequences */
+ DEBUGLOG(6, "sending selected sequences into seqStore")
+ { U32 storePos;
+ for (storePos=storeStart; storePos <= storeEnd; storePos++) {
+ U32 const llen = opt[storePos].litlen;
+ U32 const mlen = opt[storePos].mlen;
+ U32 const offCode = opt[storePos].off;
+ U32 const advance = llen + mlen;
+ DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u",
+ anchor - istart, (unsigned)llen, (unsigned)mlen);
+
+ if (mlen==0) { /* only literals => must be last "sequence", actually starting a new stream of sequences */
+ assert(storePos == storeEnd); /* must be last sequence */
+ ip = anchor + llen; /* last "sequence" is a bunch of literals => don't progress anchor */
+ continue; /* will finish */
}
- }
- ZSTD_updateStats(optStatePtr, llen, anchor, offset, mlen);
- ZSTD_storeSeq(seqStore, llen, anchor, offset, mlen-MINMATCH);
- anchor = ip;
- } }
- ZSTD_setLog2Prices(optStatePtr);
+ /* repcodes update : like ZSTD_updateRep(), but update in place */
+ if (offCode >= ZSTD_REP_NUM) { /* full offset */
+ rep[2] = rep[1];
+ rep[1] = rep[0];
+ rep[0] = offCode - ZSTD_REP_MOVE;
+ } else { /* repcode */
+ U32 const repCode = offCode + (llen==0);
+ if (repCode) { /* note : if repCode==0, no change */
+ U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
+ if (repCode >= 2) rep[2] = rep[1];
+ rep[1] = rep[0];
+ rep[0] = currentOffset;
+ } }
+
+ assert(anchor + llen <= iend);
+ ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen);
+ ZSTD_storeSeq(seqStore, llen, anchor, offCode, mlen-MINMATCH);
+ anchor += advance;
+ ip = anchor;
+ } }
+ ZSTD_setBasePrices(optStatePtr, optLevel);
+ }
+
} /* while (ip < ilimit) */
/* Return the last literals size */
@@ -895,29 +1085,133 @@ _shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */
size_t ZSTD_compressBlock_btopt(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+ const void* src, size_t srcSize)
{
DEBUGLOG(5, "ZSTD_compressBlock_btopt");
- return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, cParams, src, srcSize, 0 /*optLevel*/, 0 /*extDict*/);
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_noDict);
+}
+
+
+/* used in 2-pass strategy */
+static U32 ZSTD_upscaleStat(unsigned* table, U32 lastEltIndex, int bonus)
+{
+ U32 s, sum=0;
+ assert(ZSTD_FREQ_DIV+bonus >= 0);
+ for (s=0; s<lastEltIndex+1; s++) {
+ table[s] <<= ZSTD_FREQ_DIV+bonus;
+ table[s]--;
+ sum += table[s];
+ }
+ return sum;
+}
+
+/* used in 2-pass strategy */
+MEM_STATIC void ZSTD_upscaleStats(optState_t* optPtr)
+{
+ optPtr->litSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0);
+ optPtr->litLengthSum = ZSTD_upscaleStat(optPtr->litLengthFreq, MaxLL, 0);
+ optPtr->matchLengthSum = ZSTD_upscaleStat(optPtr->matchLengthFreq, MaxML, 0);
+ optPtr->offCodeSum = ZSTD_upscaleStat(optPtr->offCodeFreq, MaxOff, 0);
+}
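/* Worked example (illustrative) : with bonus=0, ZSTD_upscaleStat() turns each count c
 * into (c << ZSTD_FREQ_DIV) - 1 ; assuming ZSTD_FREQ_DIV == 4, a symbol seen 3 times
 * during the first pass is re-seeded with weight 47, so the statistics gathered by
 * the seeding pass outweigh the default priors used on a fresh block. */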
+
+/* ZSTD_initStats_ultra():
+ * make a first compression pass, just to seed stats with more accurate starting values.
+ * only works on first block, with no dictionary and no ldm.
+ * this function cannot error, hence its contract must be respected.
+ */
+static void
+ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
+ seqStore_t* seqStore,
+ U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ U32 tmpRep[ZSTD_REP_NUM]; /* updated rep codes will sink here */
+ memcpy(tmpRep, rep, sizeof(tmpRep));
+
+ DEBUGLOG(4, "ZSTD_initStats_ultra (srcSize=%zu)", srcSize);
+ assert(ms->opt.litLengthSum == 0); /* first block */
+ assert(seqStore->sequences == seqStore->sequencesStart); /* no ldm */
+ assert(ms->window.dictLimit == ms->window.lowLimit); /* no dictionary */
+ assert(ms->window.dictLimit - ms->nextToUpdate <= 1); /* no prefix (note: intentional overflow, defined as 2-complement) */
+
+ ZSTD_compressBlock_opt_generic(ms, seqStore, tmpRep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict); /* generate stats into ms->opt*/
+
+ /* invalidate first scan from history */
+ ZSTD_resetSeqStore(seqStore);
+ ms->window.base -= srcSize;
+ ms->window.dictLimit += (U32)srcSize;
+ ms->window.lowLimit = ms->window.dictLimit;
+ ms->nextToUpdate = ms->window.dictLimit;
+ ms->nextToUpdate3 = ms->window.dictLimit;
+
+ /* reinforce the weight of collected statistics */
+ ZSTD_upscaleStats(&ms->opt);
}
size_t ZSTD_compressBlock_btultra(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+ const void* src, size_t srcSize)
+{
+ DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize);
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_btultra2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ U32 const current = (U32)((const BYTE*)src - ms->window.base);
+ DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize);
+
+ /* 2-pass strategy:
+ * this strategy makes a first pass over first block to collect statistics
+ * and seed next round's statistics with it.
+ * After 1st pass, function forgets everything, and starts a new block.
+ * Consequently, this can only work if no data has been previously loaded in tables,
+ * aka, no dictionary, no prefix, no ldm preprocessing.
+ * The compression ratio gain is generally small (~0.5% on first block),
+ * the cost is 2x cpu time on first block. */
+ assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
+ if ( (ms->opt.litLengthSum==0) /* first block */
+ && (seqStore->sequences == seqStore->sequencesStart) /* no ldm */
+ && (ms->window.dictLimit == ms->window.lowLimit) /* no dictionary */
+ && (current == ms->window.dictLimit) /* start of frame, nothing already loaded nor skipped */
+ && (srcSize > ZSTD_PREDEF_THRESHOLD)
+ ) {
+ ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize);
+ }
+
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_btopt_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_btultra_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
{
- return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, cParams, src, srcSize, 2 /*optLevel*/, 0 /*extDict*/);
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_btopt_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+ const void* src, size_t srcSize)
{
- return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, cParams, src, srcSize, 0 /*optLevel*/, 1 /*extDict*/);
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_extDict);
}
size_t ZSTD_compressBlock_btultra_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+ const void* src, size_t srcSize)
{
- return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, cParams, src, srcSize, 2 /*optLevel*/, 1 /*extDict*/);
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_extDict);
}
+
+/* note : no btultra2 variant for extDict nor dictMatchState,
+ * because btultra2 is not meant to work with dictionaries
+ * and only applies to the first block (no prefix) */
diff --git a/thirdparty/zstd/compress/zstd_opt.h b/thirdparty/zstd/compress/zstd_opt.h
index b8dc389f31..094f747665 100644
--- a/thirdparty/zstd/compress/zstd_opt.h
+++ b/thirdparty/zstd/compress/zstd_opt.h
@@ -17,23 +17,37 @@ extern "C" {
#include "zstd_compress_internal.h"
-void ZSTD_updateTree(
- ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
- const BYTE* ip, const BYTE* iend); /* used in ZSTD_loadDictionaryContent() */
+/* used in ZSTD_loadDictionaryContent() */
+void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend);
size_t ZSTD_compressBlock_btopt(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+ void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btultra(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btultra2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+
+size_t ZSTD_compressBlock_btopt_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btultra_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btopt_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+ void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btultra_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+ void const* src, size_t srcSize);
+
+ /* note : no btultra2 variant for extDict nor dictMatchState,
+ * because btultra2 is not meant to work with dictionaries
+ * and only applies to the first block (no prefix) */
#if defined (__cplusplus)
}
diff --git a/thirdparty/zstd/compress/zstdmt_compress.c b/thirdparty/zstd/compress/zstdmt_compress.c
index c7a205d8c7..2cbd6ffade 100644
--- a/thirdparty/zstd/compress/zstdmt_compress.c
+++ b/thirdparty/zstd/compress/zstdmt_compress.c
@@ -9,21 +9,19 @@
*/
-/* ====== Tuning parameters ====== */
-#define ZSTDMT_NBWORKERS_MAX 200
-#define ZSTDMT_JOBSIZE_MAX (MEM_32bits() ? (512 MB) : (2 GB)) /* note : limited by `jobSize` type, which is `unsigned` */
-#define ZSTDMT_OVERLAPLOG_DEFAULT 6
-
-
/* ====== Compiler specifics ====== */
#if defined(_MSC_VER)
# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */
#endif
+/* ====== Constants ====== */
+#define ZSTDMT_OVERLAPLOG_DEFAULT 0
+
+
/* ====== Dependencies ====== */
#include <string.h> /* memcpy, memset */
-#include <limits.h> /* INT_MAX */
+#include <limits.h> /* INT_MAX, UINT_MAX */
#include "pool.h" /* threadpool */
#include "threading.h" /* mutex */
#include "zstd_compress_internal.h" /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */
@@ -37,18 +35,19 @@
#define ZSTD_RESIZE_SEQPOOL 0
/* ====== Debug ====== */
-#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=2)
+#if defined(DEBUGLEVEL) && (DEBUGLEVEL>=2) \
+ && !defined(_MSC_VER) \
+ && !defined(__MINGW32__)
# include <stdio.h>
# include <unistd.h>
# include <sys/times.h>
-# define DEBUGLOGRAW(l, ...) if (l<=ZSTD_DEBUG) { fprintf(stderr, __VA_ARGS__); }
# define DEBUG_PRINTHEX(l,p,n) { \
unsigned debug_u; \
for (debug_u=0; debug_u<(n); debug_u++) \
- DEBUGLOGRAW(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \
- DEBUGLOGRAW(l, " \n"); \
+ RAWLOG(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \
+ RAWLOG(l, " \n"); \
}
static unsigned long long GetCurrentClockTimeMicroseconds(void)
@@ -56,13 +55,13 @@ static unsigned long long GetCurrentClockTimeMicroseconds(void)
static clock_t _ticksPerSecond = 0;
if (_ticksPerSecond <= 0) _ticksPerSecond = sysconf(_SC_CLK_TCK);
- { struct tms junk; clock_t newTicks = (clock_t) times(&junk);
- return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond); }
-}
+ { struct tms junk; clock_t newTicks = (clock_t) times(&junk);
+ return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond);
+} }
#define MUTEX_WAIT_TIME_DLEVEL 6
#define ZSTD_PTHREAD_MUTEX_LOCK(mutex) { \
- if (ZSTD_DEBUG >= MUTEX_WAIT_TIME_DLEVEL) { \
+ if (DEBUGLEVEL >= MUTEX_WAIT_TIME_DLEVEL) { \
unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \
ZSTD_pthread_mutex_lock(mutex); \
{ unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \
@@ -160,6 +159,25 @@ static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* const bufPool, size_t const
ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
}
+
+static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, U32 nbWorkers)
+{
+ unsigned const maxNbBuffers = 2*nbWorkers + 3;
+ if (srcBufPool==NULL) return NULL;
+ if (srcBufPool->totalBuffers >= maxNbBuffers) /* good enough */
+ return srcBufPool;
+ /* need a larger buffer pool */
+ { ZSTD_customMem const cMem = srcBufPool->cMem;
+ size_t const bSize = srcBufPool->bufferSize; /* forward parameters */
+ ZSTDMT_bufferPool* newBufPool;
+ ZSTDMT_freeBufferPool(srcBufPool);
+ newBufPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
+ if (newBufPool==NULL) return newBufPool;
+ ZSTDMT_setBufferSize(newBufPool, bSize);
+ return newBufPool;
+ }
+}
+
/** ZSTDMT_getBuffer() :
* assumption : bufPool must be valid
* @return : a buffer, with start pointer and size
@@ -229,8 +247,8 @@ static buffer_t ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buffer)
/* store buffer for later re-use, up to pool capacity */
static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)
{
- if (buf.start == NULL) return; /* compatible with release on NULL */
DEBUGLOG(5, "ZSTDMT_releaseBuffer");
+ if (buf.start == NULL) return; /* compatible with release on NULL */
ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
if (bufPool->nbBuffers < bufPool->totalBuffers) {
bufPool->bTable[bufPool->nbBuffers++] = buf; /* stored for later use */
@@ -300,7 +318,8 @@ static void ZSTDMT_setNbSeq(ZSTDMT_seqPool* const seqPool, size_t const nbSeq)
static ZSTDMT_seqPool* ZSTDMT_createSeqPool(unsigned nbWorkers, ZSTD_customMem cMem)
{
- ZSTDMT_seqPool* seqPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
+ ZSTDMT_seqPool* const seqPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
+ if (seqPool == NULL) return NULL;
ZSTDMT_setNbSeq(seqPool, 0);
return seqPool;
}
@@ -310,6 +329,10 @@ static void ZSTDMT_freeSeqPool(ZSTDMT_seqPool* seqPool)
ZSTDMT_freeBufferPool(seqPool);
}
+static ZSTDMT_seqPool* ZSTDMT_expandSeqPool(ZSTDMT_seqPool* pool, U32 nbWorkers)
+{
+ return ZSTDMT_expandBufferPool(pool, nbWorkers);
+}
/* ===== CCtx Pool ===== */
@@ -317,8 +340,8 @@ static void ZSTDMT_freeSeqPool(ZSTDMT_seqPool* seqPool)
typedef struct {
ZSTD_pthread_mutex_t poolMutex;
- unsigned totalCCtx;
- unsigned availCCtx;
+ int totalCCtx;
+ int availCCtx;
ZSTD_customMem cMem;
ZSTD_CCtx* cctx[1]; /* variable size */
} ZSTDMT_CCtxPool;
@@ -326,16 +349,16 @@ typedef struct {
/* note : all CCtx borrowed from the pool should be released back to the pool _before_ freeing the pool */
static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
{
- unsigned u;
- for (u=0; u<pool->totalCCtx; u++)
- ZSTD_freeCCtx(pool->cctx[u]); /* note : compatible with free on NULL */
+ int cid;
+ for (cid=0; cid<pool->totalCCtx; cid++)
+ ZSTD_freeCCtx(pool->cctx[cid]); /* note : compatible with free on NULL */
ZSTD_pthread_mutex_destroy(&pool->poolMutex);
ZSTD_free(pool, pool->cMem);
}
/* ZSTDMT_createCCtxPool() :
* implies nbWorkers >= 1 , checked by caller ZSTDMT_createCCtx() */
-static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(unsigned nbWorkers,
+static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(int nbWorkers,
ZSTD_customMem cMem)
{
ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_calloc(
@@ -355,6 +378,18 @@ static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(unsigned nbWorkers,
return cctxPool;
}
+static ZSTDMT_CCtxPool* ZSTDMT_expandCCtxPool(ZSTDMT_CCtxPool* srcPool,
+ int nbWorkers)
+{
+ if (srcPool==NULL) return NULL;
+ if (nbWorkers <= srcPool->totalCCtx) return srcPool; /* good enough */
+ /* need a larger cctx pool */
+ { ZSTD_customMem const cMem = srcPool->cMem;
+ ZSTDMT_freeCCtxPool(srcPool);
+ return ZSTDMT_createCCtxPool(nbWorkers, cMem);
+ }
+}
+
/* only works during initialization phase, not during compression */
static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool)
{
@@ -425,17 +460,16 @@ typedef struct {
ZSTD_window_t ldmWindow; /* A thread-safe copy of ldmState.window */
} serialState_t;
-static int ZSTDMT_serialState_reset(serialState_t* serialState, ZSTDMT_seqPool* seqPool, ZSTD_CCtx_params params)
+static int ZSTDMT_serialState_reset(serialState_t* serialState, ZSTDMT_seqPool* seqPool, ZSTD_CCtx_params params, size_t jobSize)
{
/* Adjust parameters */
if (params.ldmParams.enableLdm) {
DEBUGLOG(4, "LDM window size = %u KB", (1U << params.cParams.windowLog) >> 10);
- params.ldmParams.windowLog = params.cParams.windowLog;
ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
- assert(params.ldmParams.hashEveryLog < 32);
+ assert(params.ldmParams.hashRateLog < 32);
serialState->ldmState.hashPower =
- ZSTD_ldm_getHashPower(params.ldmParams.minMatchLength);
+ ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength);
} else {
memset(&params.ldmParams, 0, sizeof(params.ldmParams));
}
@@ -453,7 +487,7 @@ static int ZSTDMT_serialState_reset(serialState_t* serialState, ZSTDMT_seqPool*
serialState->params.ldmParams.hashLog -
serialState->params.ldmParams.bucketSizeLog;
/* Size the seq pool tables */
- ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, params.jobSize));
+ ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, jobSize));
/* Reset the window */
ZSTD_window_clear(&serialState->ldmState.window);
serialState->ldmWindow = serialState->ldmState.window;
@@ -473,6 +507,7 @@ static int ZSTDMT_serialState_reset(serialState_t* serialState, ZSTDMT_seqPool*
memset(serialState->ldmState.bucketOffsets, 0, bucketSize);
}
serialState->params = params;
+ serialState->params.jobSize = (U32)jobSize;
return 0;
}
@@ -505,6 +540,7 @@ static void ZSTDMT_serialState_update(serialState_t* serialState,
/* Wait for our turn */
ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
while (serialState->nextJobID < jobID) {
+ DEBUGLOG(5, "wait for serialState->cond");
ZSTD_pthread_cond_wait(&serialState->cond, &serialState->mutex);
}
/* A future job may error and skip our job */
@@ -514,6 +550,7 @@ static void ZSTDMT_serialState_update(serialState_t* serialState,
size_t error;
assert(seqStore.seq != NULL && seqStore.pos == 0 &&
seqStore.size == 0 && seqStore.capacity > 0);
+ assert(src.size <= serialState->params.jobSize);
ZSTD_window_update(&serialState->ldmState.window, src.start, src.size);
error = ZSTD_ldm_generateSequences(
&serialState->ldmState, &seqStore,
@@ -593,14 +630,32 @@ typedef struct {
unsigned frameChecksumNeeded; /* used only by mtctx */
} ZSTDMT_jobDescription;
+#define JOB_ERROR(e) { \
+ ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex); \
+ job->cSize = e; \
+ ZSTD_pthread_mutex_unlock(&job->job_mutex); \
+ goto _endJob; \
+}
+
/* ZSTDMT_compressionJob() is a POOL_function type */
-void ZSTDMT_compressionJob(void* jobDescription)
+static void ZSTDMT_compressionJob(void* jobDescription)
{
ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription;
ZSTD_CCtx_params jobParams = job->params; /* do not modify job->params ! copy it, modify the copy */
ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool);
rawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool);
buffer_t dstBuff = job->dstBuff;
+ size_t lastCBlockSize = 0;
+
+ /* resources */
+ if (cctx==NULL) JOB_ERROR(ERROR(memory_allocation));
+ if (dstBuff.start == NULL) { /* streaming job : doesn't provide a dstBuffer */
+ dstBuff = ZSTDMT_getBuffer(job->bufPool);
+ if (dstBuff.start==NULL) JOB_ERROR(ERROR(memory_allocation));
+ job->dstBuff = dstBuff; /* this value can be read in ZSTDMT_flush, when it copies the whole job */
+ }
+ if (jobParams.ldmParams.enableLdm && rawSeqStore.seq == NULL)
+ JOB_ERROR(ERROR(memory_allocation));
/* Don't compute the checksum for chunks, since we compute it externally,
* but write it in the header.
@@ -609,47 +664,31 @@ void ZSTDMT_compressionJob(void* jobDescription)
/* Don't run LDM for the chunks, since we handle it externally */
jobParams.ldmParams.enableLdm = 0;
- /* ressources */
- if (cctx==NULL) {
- job->cSize = ERROR(memory_allocation);
- goto _endJob;
- }
- if (dstBuff.start == NULL) { /* streaming job : doesn't provide a dstBuffer */
- dstBuff = ZSTDMT_getBuffer(job->bufPool);
- if (dstBuff.start==NULL) {
- job->cSize = ERROR(memory_allocation);
- goto _endJob;
- }
- job->dstBuff = dstBuff; /* this value can be read in ZSTDMT_flush, when it copies the whole job */
- }
/* init */
if (job->cdict) {
- size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, job->cdict, jobParams, job->fullFrameSize);
+ size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, jobParams, job->fullFrameSize);
assert(job->firstJob); /* only allowed for first job */
- if (ZSTD_isError(initError)) { job->cSize = initError; goto _endJob; }
+ if (ZSTD_isError(initError)) JOB_ERROR(initError);
} else { /* srcStart points at reloaded section */
U64 const pledgedSrcSize = job->firstJob ? job->fullFrameSize : job->src.size;
- { size_t const forceWindowError = ZSTD_CCtxParam_setParameter(&jobParams, ZSTD_p_forceMaxWindow, !job->firstJob);
- if (ZSTD_isError(forceWindowError)) {
- job->cSize = forceWindowError;
- goto _endJob;
- } }
+ { size_t const forceWindowError = ZSTD_CCtxParam_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob);
+ if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError);
+ }
{ size_t const initError = ZSTD_compressBegin_advanced_internal(cctx,
job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, /* load dictionary in "content-only" mode (no header analysis) */
+ ZSTD_dtlm_fast,
NULL, /*cdict*/
jobParams, pledgedSrcSize);
- if (ZSTD_isError(initError)) {
- job->cSize = initError;
- goto _endJob;
- } } }
+ if (ZSTD_isError(initError)) JOB_ERROR(initError);
+ } }
/* Perform serial step as early as possible, but after CCtx initialization */
ZSTDMT_serialState_update(job->serial, cctx, rawSeqStore, job->src, job->jobID);
if (!job->firstJob) { /* flush and overwrite frame header when it's not first job */
size_t const hSize = ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0);
- if (ZSTD_isError(hSize)) { job->cSize = hSize; /* save error code */ goto _endJob; }
+ if (ZSTD_isError(hSize)) JOB_ERROR(hSize);
DEBUGLOG(5, "ZSTDMT_compressionJob: flush and overwrite %u bytes of frame header (not first job)", (U32)hSize);
ZSTD_invalidateRepCodes(cctx);
}
@@ -667,7 +706,7 @@ void ZSTDMT_compressionJob(void* jobDescription)
assert(job->cSize == 0);
for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) {
size_t const cSize = ZSTD_compressContinue(cctx, op, oend-op, ip, chunkSize);
- if (ZSTD_isError(cSize)) { job->cSize = cSize; goto _endJob; }
+ if (ZSTD_isError(cSize)) JOB_ERROR(cSize);
ip += chunkSize;
op += cSize; assert(op < oend);
/* stats */
@@ -680,18 +719,16 @@ void ZSTDMT_compressionJob(void* jobDescription)
ZSTD_pthread_mutex_unlock(&job->job_mutex);
}
/* last block */
- assert(chunkSize > 0); assert((chunkSize & (chunkSize - 1)) == 0); /* chunkSize must be power of 2 for mask==(chunkSize-1) to work */
+ assert(chunkSize > 0);
+ assert((chunkSize & (chunkSize - 1)) == 0); /* chunkSize must be power of 2 for mask==(chunkSize-1) to work */
if ((nbChunks > 0) | job->lastJob /*must output a "last block" flag*/ ) {
size_t const lastBlockSize1 = job->src.size & (chunkSize-1);
size_t const lastBlockSize = ((lastBlockSize1==0) & (job->src.size>=chunkSize)) ? chunkSize : lastBlockSize1;
size_t const cSize = (job->lastJob) ?
ZSTD_compressEnd (cctx, op, oend-op, ip, lastBlockSize) :
ZSTD_compressContinue(cctx, op, oend-op, ip, lastBlockSize);
- if (ZSTD_isError(cSize)) { job->cSize = cSize; goto _endJob; }
- /* stats */
- ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
- job->cSize += cSize;
- ZSTD_pthread_mutex_unlock(&job->job_mutex);
+ if (ZSTD_isError(cSize)) JOB_ERROR(cSize);
+ lastCBlockSize = cSize;
} }
_endJob:
@@ -704,7 +741,9 @@ _endJob:
ZSTDMT_releaseCCtx(job->cctxPool, cctx);
/* report */
ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
- job->consumed = job->src.size;
+ if (ZSTD_isError(job->cSize)) assert(lastCBlockSize == 0);
+ job->cSize += lastCBlockSize;
+ job->consumed = job->src.size; /* when job->consumed == job->src.size , compression job is presumed completed */
ZSTD_pthread_cond_signal(&job->job_cond);
ZSTD_pthread_mutex_unlock(&job->job_mutex);
}
@@ -736,6 +775,14 @@ typedef struct {
static const roundBuff_t kNullRoundBuff = {NULL, 0, 0};
+#define RSYNC_LENGTH 32
+
+typedef struct {
+ U64 hash;
+ U64 hitMask;
+ U64 primePower;
+} rsyncState_t;
+
struct ZSTDMT_CCtx_s {
POOL_ctx* factory;
ZSTDMT_jobDescription* jobs;
@@ -745,10 +792,11 @@ struct ZSTDMT_CCtx_s {
ZSTD_CCtx_params params;
size_t targetSectionSize;
size_t targetPrefixSize;
- roundBuff_t roundBuff;
+ int jobReady; /* 1 => one job is already prepared, but pool has shortage of workers. Don't create a new job. */
inBuff_t inBuff;
- int jobReady; /* 1 => one job is already prepared, but pool has shortage of workers. Don't create another one. */
+ roundBuff_t roundBuff;
serialState_t serial;
+ rsyncState_t rsync;
unsigned singleBlockingThread;
unsigned jobIDMask;
unsigned doneJobID;
@@ -798,13 +846,27 @@ static ZSTDMT_jobDescription* ZSTDMT_createJobsTable(U32* nbJobsPtr, ZSTD_custom
return jobTable;
}
+static size_t ZSTDMT_expandJobsTable (ZSTDMT_CCtx* mtctx, U32 nbWorkers) {
+ U32 nbJobs = nbWorkers + 2;
+ if (nbJobs > mtctx->jobIDMask+1) { /* need more job capacity */
+ ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
+ mtctx->jobIDMask = 0;
+ mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, mtctx->cMem);
+ if (mtctx->jobs==NULL) return ERROR(memory_allocation);
+ assert((nbJobs != 0) && ((nbJobs & (nbJobs - 1)) == 0)); /* ensure nbJobs is a power of 2 */
+ mtctx->jobIDMask = nbJobs - 1;
+ }
+ return 0;
+}
+
+
/* ZSTDMT_CCtxParam_setNbWorkers():
* Internal use only */
size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers)
{
if (nbWorkers > ZSTDMT_NBWORKERS_MAX) nbWorkers = ZSTDMT_NBWORKERS_MAX;
params->nbWorkers = nbWorkers;
- params->overlapSizeLog = ZSTDMT_OVERLAPLOG_DEFAULT;
+ params->overlapLog = ZSTDMT_OVERLAPLOG_DEFAULT;
params->jobSize = 0;
return nbWorkers;
}
@@ -875,7 +937,7 @@ static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* mtctx)
unsigned const jobID = mtctx->doneJobID & mtctx->jobIDMask;
ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex);
while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) {
- DEBUGLOG(5, "waiting for jobCompleted signal from job %u", mtctx->doneJobID); /* we want to block when waiting for data to flush */
+ DEBUGLOG(4, "waiting for jobCompleted signal from job %u", mtctx->doneJobID); /* we want to block when waiting for data to flush */
ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex);
}
ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex);
@@ -914,40 +976,64 @@ size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx)
}
/* Internal only */
-size_t ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params,
- ZSTDMT_parameter parameter, unsigned value) {
+size_t
+ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params,
+ ZSTDMT_parameter parameter,
+ int value)
+{
DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter");
switch(parameter)
{
case ZSTDMT_p_jobSize :
- DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter : set jobSize to %u", value);
- if ( (value > 0) /* value==0 => automatic job size */
- & (value < ZSTDMT_JOBSIZE_MIN) )
+ DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter : set jobSize to %i", value);
+ if ( value != 0 /* default */
+ && value < ZSTDMT_JOBSIZE_MIN)
value = ZSTDMT_JOBSIZE_MIN;
+ assert(value >= 0);
+ if (value > ZSTDMT_JOBSIZE_MAX) value = ZSTDMT_JOBSIZE_MAX;
params->jobSize = value;
return value;
- case ZSTDMT_p_overlapSectionLog :
- if (value > 9) value = 9;
- DEBUGLOG(4, "ZSTDMT_p_overlapSectionLog : %u", value);
- params->overlapSizeLog = (value >= 9) ? 9 : value;
+
+ case ZSTDMT_p_overlapLog :
+ DEBUGLOG(4, "ZSTDMT_p_overlapLog : %i", value);
+ if (value < ZSTD_OVERLAPLOG_MIN) value = ZSTD_OVERLAPLOG_MIN;
+ if (value > ZSTD_OVERLAPLOG_MAX) value = ZSTD_OVERLAPLOG_MAX;
+ params->overlapLog = value;
+ return value;
+
+ case ZSTDMT_p_rsyncable :
+ value = (value != 0);
+ params->rsyncable = value;
return value;
+
default :
return ERROR(parameter_unsupported);
}
}
-size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, unsigned value)
+size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value)
{
DEBUGLOG(4, "ZSTDMT_setMTCtxParameter");
- switch(parameter)
- {
- case ZSTDMT_p_jobSize :
- return ZSTDMT_CCtxParam_setMTCtxParameter(&mtctx->params, parameter, value);
- case ZSTDMT_p_overlapSectionLog :
- return ZSTDMT_CCtxParam_setMTCtxParameter(&mtctx->params, parameter, value);
- default :
+ return ZSTDMT_CCtxParam_setMTCtxParameter(&mtctx->params, parameter, value);
+}
+
+size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value)
+{
+ switch (parameter) {
+ case ZSTDMT_p_jobSize:
+ assert(mtctx->params.jobSize <= INT_MAX);
+ *value = (int)(mtctx->params.jobSize);
+ break;
+ case ZSTDMT_p_overlapLog:
+ *value = mtctx->params.overlapLog;
+ break;
+ case ZSTDMT_p_rsyncable:
+ *value = mtctx->params.rsyncable;
+ break;
+ default:
return ERROR(parameter_unsupported);
}
+ return 0;
}
/* Sets parameters relevant to the compression job,
@@ -960,13 +1046,30 @@ static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(ZSTD_CCtx_params const params)
jobParams.cParams = params.cParams;
jobParams.fParams = params.fParams;
jobParams.compressionLevel = params.compressionLevel;
- jobParams.disableLiteralCompression = params.disableLiteralCompression;
return jobParams;
}
+
+/* ZSTDMT_resize() :
+ * @return : error code if fails, 0 on success */
+static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers)
+{
+ if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation);
+ CHECK_F( ZSTDMT_expandJobsTable(mtctx, nbWorkers) );
+ mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, nbWorkers);
+ if (mtctx->bufPool == NULL) return ERROR(memory_allocation);
+ mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers);
+ if (mtctx->cctxPool == NULL) return ERROR(memory_allocation);
+ mtctx->seqPool = ZSTDMT_expandSeqPool(mtctx->seqPool, nbWorkers);
+ if (mtctx->seqPool == NULL) return ERROR(memory_allocation);
+ ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);
+ return 0;
+}
+
+
/*! ZSTDMT_updateCParams_whileCompressing() :
- * Updates only a selected set of compression parameters, to remain compatible with current frame.
+ * Updates a selected set of compression parameters, remaining compatible with currently active frame.
* New parameters will be applied to next compression job. */
void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams)
{
@@ -981,38 +1084,36 @@ void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_p
}
}
-/* ZSTDMT_getNbWorkers():
- * @return nb threads currently active in mtctx.
- * mtctx must be valid */
-unsigned ZSTDMT_getNbWorkers(const ZSTDMT_CCtx* mtctx)
-{
- assert(mtctx != NULL);
- return mtctx->params.nbWorkers;
-}
-
/* ZSTDMT_getFrameProgression():
* tells how much data has been consumed (input) and produced (output) for current frame.
* able to count progression inside worker threads.
- * Note : mutex will be acquired during statistics collection. */
+ * Note : mutex will be acquired during statistics collection inside workers. */
ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx)
{
ZSTD_frameProgression fps;
- DEBUGLOG(6, "ZSTDMT_getFrameProgression");
- fps.consumed = mtctx->consumed;
- fps.produced = mtctx->produced;
+ DEBUGLOG(5, "ZSTDMT_getFrameProgression");
fps.ingested = mtctx->consumed + mtctx->inBuff.filled;
+ fps.consumed = mtctx->consumed;
+ fps.produced = fps.flushed = mtctx->produced;
+ fps.currentJobID = mtctx->nextJobID;
+ fps.nbActiveWorkers = 0;
{ unsigned jobNb;
unsigned lastJobNb = mtctx->nextJobID + mtctx->jobReady; assert(mtctx->jobReady <= 1);
DEBUGLOG(6, "ZSTDMT_getFrameProgression: jobs: from %u to <%u (jobReady:%u)",
mtctx->doneJobID, lastJobNb, mtctx->jobReady)
for (jobNb = mtctx->doneJobID ; jobNb < lastJobNb ; jobNb++) {
unsigned const wJobID = jobNb & mtctx->jobIDMask;
- ZSTD_pthread_mutex_lock(&mtctx->jobs[wJobID].job_mutex);
- { size_t const cResult = mtctx->jobs[wJobID].cSize;
+ ZSTDMT_jobDescription* jobPtr = &mtctx->jobs[wJobID];
+ ZSTD_pthread_mutex_lock(&jobPtr->job_mutex);
+ { size_t const cResult = jobPtr->cSize;
size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
- fps.consumed += mtctx->jobs[wJobID].consumed;
- fps.ingested += mtctx->jobs[wJobID].src.size;
+ size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed;
+ assert(flushed <= produced);
+ fps.ingested += jobPtr->src.size;
+ fps.consumed += jobPtr->consumed;
fps.produced += produced;
+ fps.flushed += flushed;
+ fps.nbActiveWorkers += (jobPtr->consumed < jobPtr->src.size);
}
ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
}
@@ -1021,26 +1122,98 @@ ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx)
}
+size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx)
+{
+ size_t toFlush;
+ unsigned const jobID = mtctx->doneJobID;
+ assert(jobID <= mtctx->nextJobID);
+ if (jobID == mtctx->nextJobID) return 0; /* no active job => nothing to flush */
+
+ /* look into oldest non-fully-flushed job */
+ { unsigned const wJobID = jobID & mtctx->jobIDMask;
+ ZSTDMT_jobDescription* const jobPtr = &mtctx->jobs[wJobID];
+ ZSTD_pthread_mutex_lock(&jobPtr->job_mutex);
+ { size_t const cResult = jobPtr->cSize;
+ size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
+ size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed;
+ assert(flushed <= produced);
+ toFlush = produced - flushed;
+ if (toFlush==0 && (jobPtr->consumed >= jobPtr->src.size)) {
+ /* doneJobID is not-fully-flushed, but toFlush==0 : doneJobID should be compressing some more data */
+ assert(jobPtr->consumed < jobPtr->src.size);
+ }
+ }
+ ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
+ }
+
+ return toFlush;
+}
+
+
/* ------------------------------------------ */
/* ===== Multi-threaded compression ===== */
/* ------------------------------------------ */
-static size_t ZSTDMT_computeTargetJobLog(ZSTD_CCtx_params const params)
+static unsigned ZSTDMT_computeTargetJobLog(ZSTD_CCtx_params const params)
{
if (params.ldmParams.enableLdm)
+ /* In Long Range Mode, the windowLog is typically oversized.
+ * In which case, it's preferable to determine the jobSize
+ * based on chainLog instead. */
return MAX(21, params.cParams.chainLog + 4);
return MAX(20, params.cParams.windowLog + 2);
}
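/* Worked example (illustrative) : with LDM disabled and windowLog = 23,
 * the target job log is MAX(20, 23+2) = 25, i.e. jobs of about 32 MB ;
 * with LDM enabled and chainLog = 24, it is MAX(21, 24+4) = 28 (~256 MB). */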
-static size_t ZSTDMT_computeOverlapLog(ZSTD_CCtx_params const params)
+static int ZSTDMT_overlapLog_default(ZSTD_strategy strat)
{
- unsigned const overlapRLog = (params.overlapSizeLog>9) ? 0 : 9-params.overlapSizeLog;
- if (params.ldmParams.enableLdm)
- return (MIN(params.cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2) - overlapRLog);
- return overlapRLog >= 9 ? 0 : (params.cParams.windowLog - overlapRLog);
+ switch(strat)
+ {
+ case ZSTD_btultra2:
+ return 9;
+ case ZSTD_btultra:
+ case ZSTD_btopt:
+ return 8;
+ case ZSTD_btlazy2:
+ case ZSTD_lazy2:
+ return 7;
+ case ZSTD_lazy:
+ case ZSTD_greedy:
+ case ZSTD_dfast:
+ case ZSTD_fast:
+ default:;
+ }
+ return 6;
+}
+
+static int ZSTDMT_overlapLog(int ovlog, ZSTD_strategy strat)
+{
+ assert(0 <= ovlog && ovlog <= 9);
+ if (ovlog == 0) return ZSTDMT_overlapLog_default(strat);
+ return ovlog;
+}
+
+static size_t ZSTDMT_computeOverlapSize(ZSTD_CCtx_params const params)
+{
+ int const overlapRLog = 9 - ZSTDMT_overlapLog(params.overlapLog, params.cParams.strategy);
+ int ovLog = (overlapRLog >= 8) ? 0 : (params.cParams.windowLog - overlapRLog);
+ assert(0 <= overlapRLog && overlapRLog <= 8);
+ if (params.ldmParams.enableLdm) {
+ /* In Long Range Mode, the windowLog is typically oversized.
+ * In which case, it's preferable to determine the jobSize
+ * based on chainLog instead.
+ * Then, ovLog becomes a fraction of the jobSize, rather than windowSize */
+ ovLog = MIN(params.cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2)
+ - overlapRLog;
+ }
+ assert(0 <= ovLog && ovLog <= 30);
+ DEBUGLOG(4, "overlapLog : %i", params.overlapLog);
+ DEBUGLOG(4, "overlap size : %i", 1 << ovLog);
+ return (ovLog==0) ? 0 : (size_t)1 << ovLog;
}
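/* Worked example (illustrative) : with overlapLog = 0 (automatic) and strategy
 * ZSTD_btopt, the default overlapLog is 8, so overlapRLog = 1 and the overlap is
 * windowSize/2 ; with ZSTD_fast the default is 6, giving overlapRLog = 3 and an
 * overlap of windowSize/8. */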
-static unsigned ZSTDMT_computeNbJobs(ZSTD_CCtx_params params, size_t srcSize, unsigned nbWorkers) {
+static unsigned
+ZSTDMT_computeNbJobs(ZSTD_CCtx_params params, size_t srcSize, unsigned nbWorkers)
+{
assert(nbWorkers>0);
{ size_t const jobSizeTarget = (size_t)1 << ZSTDMT_computeTargetJobLog(params);
size_t const jobMaxSize = jobSizeTarget << 2;
@@ -1063,7 +1236,7 @@ static size_t ZSTDMT_compress_advanced_internal(
ZSTD_CCtx_params params)
{
ZSTD_CCtx_params const jobParams = ZSTDMT_initJobCCtxParams(params);
- size_t const overlapSize = (size_t)1 << ZSTDMT_computeOverlapLog(params);
+ size_t const overlapSize = ZSTDMT_computeOverlapSize(params);
unsigned const nbJobs = ZSTDMT_computeNbJobs(params, srcSize, params.nbWorkers);
size_t const proposedJobSize = (srcSize + (nbJobs-1)) / nbJobs;
size_t const avgJobSize = (((proposedJobSize-1) & 0x1FFFF) < 0x7FFF) ? proposedJobSize + 0xFFFF : proposedJobSize; /* avoid too small last block */
@@ -1087,18 +1260,10 @@ static size_t ZSTDMT_compress_advanced_internal(
assert(avgJobSize >= 256 KB); /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), required to compress directly into Dst (no additional buffer) */
ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(avgJobSize) );
- if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params))
+ if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, avgJobSize))
return ERROR(memory_allocation);
- if (nbJobs > mtctx->jobIDMask+1) { /* enlarge job table */
- U32 jobsTableSize = nbJobs;
- ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
- mtctx->jobIDMask = 0;
- mtctx->jobs = ZSTDMT_createJobsTable(&jobsTableSize, mtctx->cMem);
- if (mtctx->jobs==NULL) return ERROR(memory_allocation);
- assert((jobsTableSize != 0) && ((jobsTableSize & (jobsTableSize - 1)) == 0)); /* ensure jobsTableSize is a power of 2 */
- mtctx->jobIDMask = jobsTableSize - 1;
- }
+ CHECK_F( ZSTDMT_expandJobsTable(mtctx, nbJobs) ); /* only expands if necessary */
{ unsigned u;
for (u=0; u<nbJobs; u++) {
@@ -1182,16 +1347,17 @@ static size_t ZSTDMT_compress_advanced_internal(
}
size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- const ZSTD_CDict* cdict,
- ZSTD_parameters params,
- unsigned overlapLog)
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict,
+ ZSTD_parameters params,
+ int overlapLog)
{
ZSTD_CCtx_params cctxParams = mtctx->params;
cctxParams.cParams = params.cParams;
cctxParams.fParams = params.fParams;
- cctxParams.overlapSizeLog = overlapLog;
+ assert(ZSTD_OVERLAPLOG_MIN <= overlapLog && overlapLog <= ZSTD_OVERLAPLOG_MAX);
+ cctxParams.overlapLog = overlapLog;
return ZSTDMT_compress_advanced_internal(mtctx,
dst, dstCapacity,
src, srcSize,
@@ -1204,8 +1370,8 @@ size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
const void* src, size_t srcSize,
int compressionLevel)
{
- U32 const overlapLog = (compressionLevel >= ZSTD_maxCLevel()) ? 9 : ZSTDMT_OVERLAPLOG_DEFAULT;
ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, 0);
+ int const overlapLog = ZSTDMT_overlapLog_default(params.cParams.strategy);
params.fParams.contentSizeFlag = 1;
return ZSTDMT_compress_advanced(mtctx, dst, dstCapacity, src, srcSize, NULL, params, overlapLog);
}
@@ -1221,18 +1387,19 @@ size_t ZSTDMT_initCStream_internal(
const ZSTD_CDict* cdict, ZSTD_CCtx_params params,
unsigned long long pledgedSrcSize)
{
- DEBUGLOG(4, "ZSTDMT_initCStream_internal (pledgedSrcSize=%u, nbWorkers=%u, cctxPool=%u, disableLiteralCompression=%i)",
- (U32)pledgedSrcSize, params.nbWorkers, mtctx->cctxPool->totalCCtx, params.disableLiteralCompression);
- /* params are supposed to be fully validated at this point */
+ DEBUGLOG(4, "ZSTDMT_initCStream_internal (pledgedSrcSize=%u, nbWorkers=%u, cctxPool=%u)",
+ (U32)pledgedSrcSize, params.nbWorkers, mtctx->cctxPool->totalCCtx);
+
+ /* params are supposed to be partially validated at this point */
assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
assert(!((dict) && (cdict))); /* either dict or cdict, not both */
- assert(mtctx->cctxPool->totalCCtx == params.nbWorkers);
/* init */
- if (params.jobSize == 0) {
- params.jobSize = 1U << ZSTDMT_computeTargetJobLog(params);
- }
- if (params.jobSize > ZSTDMT_JOBSIZE_MAX) params.jobSize = ZSTDMT_JOBSIZE_MAX;
+ if (params.nbWorkers != mtctx->params.nbWorkers)
+ CHECK_F( ZSTDMT_resize(mtctx, params.nbWorkers) );
+
+ if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN;
+ if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = ZSTDMT_JOBSIZE_MAX;
mtctx->singleBlockingThread = (pledgedSrcSize <= ZSTDMT_JOBSIZE_MIN); /* do not trigger multi-threading when srcSize is too small */
if (mtctx->singleBlockingThread) {
@@ -1267,12 +1434,24 @@ size_t ZSTDMT_initCStream_internal(
mtctx->cdict = cdict;
}
- mtctx->targetPrefixSize = (size_t)1 << ZSTDMT_computeOverlapLog(params);
- DEBUGLOG(4, "overlapLog=%u => %u KB", params.overlapSizeLog, (U32)(mtctx->targetPrefixSize>>10));
+ mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(params);
+ DEBUGLOG(4, "overlapLog=%i => %u KB", params.overlapLog, (U32)(mtctx->targetPrefixSize>>10));
mtctx->targetSectionSize = params.jobSize;
- if (mtctx->targetSectionSize < ZSTDMT_JOBSIZE_MIN) mtctx->targetSectionSize = ZSTDMT_JOBSIZE_MIN;
+ if (mtctx->targetSectionSize == 0) {
+ mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(params);
+ }
+ if (params.rsyncable) {
+ /* Aim for the targetSectionSize as the average job size. */
+ U32 const jobSizeMB = (U32)(mtctx->targetSectionSize >> 20);
+ U32 const rsyncBits = ZSTD_highbit32(jobSizeMB) + 20;
+ assert(jobSizeMB >= 1);
+ DEBUGLOG(4, "rsyncLog = %u", rsyncBits);
+ mtctx->rsync.hash = 0;
+ mtctx->rsync.hitMask = (1ULL << rsyncBits) - 1;
+ mtctx->rsync.primePower = ZSTD_rollingHash_primePower(RSYNC_LENGTH);
+ }
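/* Worked example (illustrative) : with a 32 MB targetSectionSize, jobSizeMB = 32,
 * ZSTD_highbit32(32) = 5, so rsyncBits = 25 and hitMask selects 25 bits ;
 * a synchronization point is then expected about once every 2^25 bytes (~32 MB),
 * matching the target job size on average. */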
if (mtctx->targetSectionSize < mtctx->targetPrefixSize) mtctx->targetSectionSize = mtctx->targetPrefixSize; /* job size must be >= overlap size */
- DEBUGLOG(4, "Job Size : %u KB (note : set to %u)", (U32)(mtctx->targetSectionSize>>10), params.jobSize);
+ DEBUGLOG(4, "Job Size : %u KB (note : set to %u)", (U32)(mtctx->targetSectionSize>>10), (U32)params.jobSize);
DEBUGLOG(4, "inBuff Size : %u KB", (U32)(mtctx->targetSectionSize>>10));
ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize));
{
@@ -1312,7 +1491,7 @@ size_t ZSTDMT_initCStream_internal(
mtctx->allJobsCompleted = 0;
mtctx->consumed = 0;
mtctx->produced = 0;
- if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params))
+ if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, mtctx->targetSectionSize))
return ERROR(memory_allocation);
return 0;
}
@@ -1420,7 +1599,7 @@ static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* mtctx, size_t srcSize, ZS
mtctx->jobs[jobID].jobID = mtctx->nextJobID;
mtctx->jobs[jobID].firstJob = (mtctx->nextJobID==0);
mtctx->jobs[jobID].lastJob = endFrame;
- mtctx->jobs[jobID].frameChecksumNeeded = endFrame && (mtctx->nextJobID>0) && mtctx->params.fParams.checksumFlag;
+ mtctx->jobs[jobID].frameChecksumNeeded = mtctx->params.fParams.checksumFlag && endFrame && (mtctx->nextJobID>0);
mtctx->jobs[jobID].dstFlushed = 0;
/* Update the round buffer pos and clear the input buffer to be reset */
@@ -1468,6 +1647,8 @@ static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* mtctx, size_t srcSize, ZS
/*! ZSTDMT_flushProduced() :
+ * flush whatever data has been produced but not yet flushed in current job.
+ * move to next job if current one is fully flushed.
* `output` : `pos` will be updated with amount of data flushed .
* `blockToFlush` : if >0, the function will block and wait if there is no data available to flush .
* @return : amount of data remaining within internal buffer, 0 if no more, 1 if unknown but > 0, or an error code */
@@ -1496,7 +1677,7 @@ static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, u
/* try to flush something */
{ size_t cSize = mtctx->jobs[wJobID].cSize; /* shared */
size_t const srcConsumed = mtctx->jobs[wJobID].consumed; /* shared */
- size_t const srcSize = mtctx->jobs[wJobID].src.size; /* read-only, could be done after mutex lock, but no-declaration-after-statement */
+ size_t const srcSize = mtctx->jobs[wJobID].src.size; /* read-only, could be done after mutex lock, but no-declaration-after-statement */
ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
if (ZSTD_isError(cSize)) {
DEBUGLOG(5, "ZSTDMT_flushProduced: job %u : compression error detected : %s",
@@ -1516,6 +1697,7 @@ static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, u
mtctx->jobs[wJobID].cSize += 4; /* can write this shared value, as worker is no longer active */
mtctx->jobs[wJobID].frameChecksumNeeded = 0;
}
+
if (cSize > 0) { /* compression is ongoing or completed */
size_t const toFlush = MIN(cSize - mtctx->jobs[wJobID].dstFlushed, output->size - output->pos);
DEBUGLOG(5, "ZSTDMT_flushProduced: Flushing %u bytes from job %u (completion:%u/%u, generated:%u)",
@@ -1529,11 +1711,12 @@ static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, u
output->pos += toFlush;
mtctx->jobs[wJobID].dstFlushed += toFlush; /* can write : this value is only used by mtctx */
- if ( (srcConsumed == srcSize) /* job completed */
+ if ( (srcConsumed == srcSize) /* job is completed */
&& (mtctx->jobs[wJobID].dstFlushed == cSize) ) { /* output buffer fully flushed => free this job position */
DEBUGLOG(5, "Job %u completed (%u bytes), moving to next one",
mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[wJobID].dstBuff);
+ DEBUGLOG(5, "dstBuffer released");
mtctx->jobs[wJobID].dstBuff = g_nullBuffer;
mtctx->jobs[wJobID].cSize = 0; /* ensure this job slot is considered "not started" in future check */
mtctx->consumed += srcSize;
@@ -1610,6 +1793,7 @@ static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window)
range_t extDict;
range_t prefix;
+ DEBUGLOG(5, "ZSTDMT_doesOverlapWindow");
extDict.start = window.dictBase + window.lowLimit;
extDict.size = window.dictLimit - window.lowLimit;
@@ -1630,12 +1814,13 @@ static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, buffer_t buffer)
{
if (mtctx->params.ldmParams.enableLdm) {
ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex;
+ DEBUGLOG(5, "ZSTDMT_waitForLdmComplete");
DEBUGLOG(5, "source [0x%zx, 0x%zx)",
(size_t)buffer.start,
(size_t)buffer.start + buffer.capacity);
ZSTD_PTHREAD_MUTEX_LOCK(mutex);
while (ZSTDMT_doesOverlapWindow(buffer, mtctx->serial.ldmWindow)) {
- DEBUGLOG(6, "Waiting for LDM to finish...");
+ DEBUGLOG(5, "Waiting for LDM to finish...");
ZSTD_pthread_cond_wait(&mtctx->serial.ldmWindowCond, mutex);
}
DEBUGLOG(6, "Done waiting for LDM to finish");
@@ -1655,6 +1840,7 @@ static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx)
size_t const target = mtctx->targetSectionSize;
buffer_t buffer;
+ DEBUGLOG(5, "ZSTDMT_tryGetInputRange");
assert(mtctx->inBuff.buffer.start == NULL);
assert(mtctx->roundBuff.capacity >= target);
@@ -1668,7 +1854,7 @@ static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx)
buffer.start = start;
buffer.capacity = prefixSize;
if (ZSTDMT_isOverlapped(buffer, inUse)) {
- DEBUGLOG(6, "Waiting for buffer...");
+ DEBUGLOG(5, "Waiting for buffer...");
return 0;
}
ZSTDMT_waitForLdmComplete(mtctx, buffer);
@@ -1680,7 +1866,7 @@ static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx)
buffer.capacity = target;
if (ZSTDMT_isOverlapped(buffer, inUse)) {
- DEBUGLOG(6, "Waiting for buffer...");
+ DEBUGLOG(5, "Waiting for buffer...");
return 0;
}
assert(!ZSTDMT_isOverlapped(buffer, mtctx->inBuff.prefix));
@@ -1701,6 +1887,89 @@ static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx)
return 1;
}
+typedef struct {
+ size_t toLoad; /* The number of bytes to load from the input. */
+ int flush; /* Boolean declaring if we must flush because we found a synchronization point. */
+} syncPoint_t;
+
+/**
+ * Searches through the input for a synchronization point. If one is found, we
+ * will instruct the caller to flush, and return the number of bytes to load.
+ * Otherwise, we will load as many bytes as possible and instruct the caller
+ * to continue as normal.
+ */
+static syncPoint_t
+findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input)
+{
+ BYTE const* const istart = (BYTE const*)input.src + input.pos;
+ U64 const primePower = mtctx->rsync.primePower;
+ U64 const hitMask = mtctx->rsync.hitMask;
+
+ syncPoint_t syncPoint;
+ U64 hash;
+ BYTE const* prev;
+ size_t pos;
+
+ syncPoint.toLoad = MIN(input.size - input.pos, mtctx->targetSectionSize - mtctx->inBuff.filled);
+ syncPoint.flush = 0;
+ if (!mtctx->params.rsyncable)
+ /* Rsync is disabled. */
+ return syncPoint;
+ if (mtctx->inBuff.filled + syncPoint.toLoad < RSYNC_LENGTH)
+ /* Not enough to compute the hash.
+ * We will miss any synchronization points in this RSYNC_LENGTH byte
+ * window. However, since it depends only on the internal buffers, if the
+ * state is already synchronized, we will remain synchronized.
+ * Additionally, the probability that we miss a synchronization point is
+ * low: RSYNC_LENGTH / targetSectionSize.
+ */
+ return syncPoint;
+ /* Initialize the loop variables. */
+ if (mtctx->inBuff.filled >= RSYNC_LENGTH) {
+ /* We have enough bytes buffered to initialize the hash.
+ * Start scanning at the beginning of the input.
+ */
+ pos = 0;
+ prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH;
+ hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH);
+ } else {
+ /* We don't have enough bytes buffered to initialize the hash, but
+ * we know we have at least RSYNC_LENGTH bytes total.
+ * Start scanning after the first RSYNC_LENGTH bytes, minus the bytes
+ * already buffered.
+ */
+ pos = RSYNC_LENGTH - mtctx->inBuff.filled;
+ prev = (BYTE const*)mtctx->inBuff.buffer.start - pos;
+ hash = ZSTD_rollingHash_compute(mtctx->inBuff.buffer.start, mtctx->inBuff.filled);
+ hash = ZSTD_rollingHash_append(hash, istart, pos);
+ }
+ /* Starting with the hash of the previous RSYNC_LENGTH bytes, roll
+ * through the input. If we hit a synchronization point, then cut the
+ * job off, and tell the compressor to flush the job. Otherwise, load
+ * all the bytes and continue as normal.
+ * If we go too long without a synchronization point (targetSectionSize)
+ * then a block will be emitted anyway, but this is okay, since if we
+ * are already synchronized we will remain synchronized.
+ */
+ for (; pos < syncPoint.toLoad; ++pos) {
+ BYTE const toRemove = pos < RSYNC_LENGTH ? prev[pos] : istart[pos - RSYNC_LENGTH];
+ /* if (pos >= RSYNC_LENGTH) assert(ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); */
+ hash = ZSTD_rollingHash_rotate(hash, toRemove, istart[pos], primePower);
+ if ((hash & hitMask) == hitMask) {
+ syncPoint.toLoad = pos + 1;
+ syncPoint.flush = 1;
+ break;
+ }
+ }
+ return syncPoint;
+}
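The scan above is the heart of rsyncable mode: a rolling hash over the trailing RSYNC_LENGTH bytes is tested against hitMask, and a hit ends the current job so that identical input produces identical job boundaries regardless of how the stream was split across calls. A minimal standalone sketch of the same content-defined-chunking idea follows; the window size, mask, and multiplicative hash are hypothetical stand-ins, not zstd's ZSTD_rollingHash_* implementation.

#include <stddef.h>

#define WINDOW   32                              /* stand-in for RSYNC_LENGTH */
#define HIT_MASK ((1ULL << 15) - 1)              /* ~1 boundary per 32 KB on average */
#define PRIME    0x9E3779B185EBCA87ULL           /* arbitrary odd 64-bit multiplier */

/* Return the length of the first content-defined chunk in src[0..size). */
static size_t findBoundary(const unsigned char* src, size_t size)
{
    unsigned long long hash = 0, power = 1;
    size_t i;
    for (i = 0; i < WINDOW && i < size; i++) {   /* prime the first window */
        hash = hash * PRIME + src[i];
        if (i) power *= PRIME;                   /* ends up as PRIME^(WINDOW-1) */
    }
    for (; i < size; i++) {                      /* roll forward one byte at a time */
        hash -= src[i - WINDOW] * power;         /* drop the oldest byte */
        hash  = hash * PRIME + src[i];           /* add the newest byte */
        if ((hash & HIT_MASK) == HIT_MASK)
            return i + 1;                        /* cut the chunk right after this byte */
    }
    return size;                                 /* no hit: take everything */
}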
+
+size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx)
+{
+ size_t hintInSize = mtctx->targetSectionSize - mtctx->inBuff.filled;
+ if (hintInSize==0) hintInSize = mtctx->targetSectionSize;
+ return hintInSize;
+}
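ZSTDMT_nextInputSizeHint() simply reports how many more bytes the current input buffer would like to receive before the next job can start. A hedged sketch of a read loop sized by that hint (the file handle, staging buffers, and error handling are illustrative only, and partial consumption of the input is ignored for brevity):

#include <stdio.h>
#include "zstdmt_compress.h"

/* Sketch: one streaming step sized by the hint; the caller owns mtctx and both buffers. */
static size_t feedOneSlice(ZSTDMT_CCtx* mtctx, FILE* fin,
                           void* srcBuf, size_t srcCap,
                           void* dstBuf, size_t dstCap)
{
    size_t const hint   = ZSTDMT_nextInputSizeHint(mtctx);
    size_t const toRead = hint < srcCap ? hint : srcCap;
    ZSTD_inBuffer  in  = { srcBuf, fread(srcBuf, 1, toRead, fin), 0 };
    ZSTD_outBuffer out = { dstBuf, dstCap, 0 };
    return ZSTDMT_compressStream(mtctx, &out, &in);  /* caller checks ZSTD_isError() and out.pos */
}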
/** ZSTDMT_compressStream_generic() :
* internal use only - exposed to be invoked from zstd_compress.c
@@ -1727,7 +1996,8 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
}
/* single-pass shortcut (note : synchronous-mode) */
- if ( (mtctx->nextJobID == 0) /* just started */
+ if ( (!mtctx->params.rsyncable) /* rsyncable mode is disabled */
+ && (mtctx->nextJobID == 0) /* just started */
&& (mtctx->inBuff.filled == 0) /* nothing buffered */
&& (!mtctx->jobReady) /* no job already created */
&& (endOp == ZSTD_e_end) /* end order */
@@ -1753,18 +2023,23 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
/* It is only possible for this operation to fail if there are
* still compression jobs ongoing.
*/
+ DEBUGLOG(5, "ZSTDMT_tryGetInputRange failed");
assert(mtctx->doneJobID != mtctx->nextJobID);
- }
+ } else
+ DEBUGLOG(5, "ZSTDMT_tryGetInputRange completed successfully : mtctx->inBuff.buffer.start = %p", mtctx->inBuff.buffer.start);
}
if (mtctx->inBuff.buffer.start != NULL) {
- size_t const toLoad = MIN(input->size - input->pos, mtctx->targetSectionSize - mtctx->inBuff.filled);
+ syncPoint_t const syncPoint = findSynchronizationPoint(mtctx, *input);
+ if (syncPoint.flush && endOp == ZSTD_e_continue) {
+ endOp = ZSTD_e_flush;
+ }
assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize);
DEBUGLOG(5, "ZSTDMT_compressStream_generic: adding %u bytes on top of %u to buffer of size %u",
- (U32)toLoad, (U32)mtctx->inBuff.filled, (U32)mtctx->targetSectionSize);
- memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, toLoad);
- input->pos += toLoad;
- mtctx->inBuff.filled += toLoad;
- forwardInputProgress = toLoad>0;
+ (U32)syncPoint.toLoad, (U32)mtctx->inBuff.filled, (U32)mtctx->targetSectionSize);
+ memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, syncPoint.toLoad);
+ input->pos += syncPoint.toLoad;
+ mtctx->inBuff.filled += syncPoint.toLoad;
+ forwardInputProgress = syncPoint.toLoad>0;
}
if ((input->pos < input->size) && (endOp == ZSTD_e_end))
endOp = ZSTD_e_flush; /* can't end now : not all input consumed */
@@ -1782,6 +2057,7 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
/* check for potential compressed data ready to be flushed */
{ size_t const remainingToFlush = ZSTDMT_flushProduced(mtctx, output, !forwardInputProgress, endOp); /* block if there was no forward input progress */
if (input->pos < input->size) return MAX(remainingToFlush, 1); /* input not consumed : do not end flush yet */
+ DEBUGLOG(5, "end of ZSTDMT_compressStream_generic: remainingToFlush = %u", (U32)remainingToFlush);
return remainingToFlush;
}
}
diff --git a/thirdparty/zstd/compress/zstdmt_compress.h b/thirdparty/zstd/compress/zstdmt_compress.h
index f79e3b4418..ee771681fa 100644
--- a/thirdparty/zstd/compress/zstdmt_compress.h
+++ b/thirdparty/zstd/compress/zstdmt_compress.h
@@ -28,6 +28,16 @@
#include "zstd.h" /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */
+/* === Constants === */
+#ifndef ZSTDMT_NBWORKERS_MAX
+# define ZSTDMT_NBWORKERS_MAX 200
+#endif
+#ifndef ZSTDMT_JOBSIZE_MIN
+# define ZSTDMT_JOBSIZE_MIN (1 MB)
+#endif
+#define ZSTDMT_JOBSIZE_MAX (MEM_32bits() ? (512 MB) : (1024 MB))
+
+
/* === Memory management === */
typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx;
ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers);
@@ -52,6 +62,7 @@ ZSTDLIB_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
ZSTDLIB_API size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel);
ZSTDLIB_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize); /**< if srcSize is not known at reset time, use ZSTD_CONTENTSIZE_UNKNOWN. Note: for compatibility with older programs, 0 means the same as ZSTD_CONTENTSIZE_UNKNOWN, but it will change in the future to mean "empty" */
+ZSTDLIB_API size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx);
ZSTDLIB_API size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
ZSTDLIB_API size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output); /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
@@ -60,16 +71,12 @@ ZSTDLIB_API size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output);
/* === Advanced functions and parameters === */
-#ifndef ZSTDMT_JOBSIZE_MIN
-# define ZSTDMT_JOBSIZE_MIN (1U << 20) /* 1 MB - Minimum size of each compression job */
-#endif
-
ZSTDLIB_API size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const ZSTD_CDict* cdict,
ZSTD_parameters params,
- unsigned overlapLog);
+ int overlapLog);
ZSTDLIB_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
const void* dict, size_t dictSize, /* dict can be released after init, a local copy is preserved within zcs */
@@ -84,8 +91,9 @@ ZSTDLIB_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
/* ZSTDMT_parameter :
* List of parameters that can be set using ZSTDMT_setMTCtxParameter() */
typedef enum {
- ZSTDMT_p_jobSize, /* Each job is compressed in parallel. By default, this value is dynamically determined depending on compression parameters. Can be set explicitly here. */
- ZSTDMT_p_overlapSectionLog /* Each job may reload a part of previous job to enhance compressionr ratio; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window. This is a "sticky" parameter : its value will be re-used on next compression job */
+ ZSTDMT_p_jobSize, /* Each job is compressed in parallel. By default, this value is dynamically determined depending on compression parameters. Can be set explicitly here. */
+ ZSTDMT_p_overlapLog, /* Each job may reload a part of the previous job to enhance compression ratio; 0 == no overlap, 6 (default) == use 1/8th of window, >=9 == use full window. This is a "sticky" parameter : its value will be re-used on next compression job */
+ ZSTDMT_p_rsyncable /* Enables rsyncable mode. */
} ZSTDMT_parameter;
/* ZSTDMT_setMTCtxParameter() :
@@ -93,7 +101,12 @@ typedef enum {
* The function must be called typically after ZSTD_createCCtx() but __before ZSTDMT_init*() !__
* Parameters not explicitly reset by ZSTDMT_init*() remain the same in consecutive compression sessions.
* @return : 0, or an error code (which can be tested using ZSTD_isError()) */
-ZSTDLIB_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, unsigned value);
+ZSTDLIB_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value);
+
+/* ZSTDMT_getMTCtxParameter() :
+ * Query the ZSTDMT_CCtx for a parameter value.
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()) */
+ZSTDLIB_API size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value);
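A hedged sketch exercising the new parameter pair; applications would normally reach rsyncable mode through the higher-level ZSTD_CCtx API instead, so this is purely illustrative:

/* Sketch: enable rsyncable mode and read it back
 * (call order per the note above: after ZSTDMT_createCCtx(), before any ZSTDMT_init*()). */
ZSTDMT_CCtx* const mtctx = ZSTDMT_createCCtx(4);       /* 4 worker threads */
if (mtctx != NULL) {
    int rsync = 0;
    size_t err = ZSTDMT_setMTCtxParameter(mtctx, ZSTDMT_p_rsyncable, 1);
    if (!ZSTD_isError(err))
        err = ZSTDMT_getMTCtxParameter(mtctx, ZSTDMT_p_rsyncable, &rsync);
    /* on success, rsync now reads back as 1 */
    ZSTDMT_freeCCtx(mtctx);
}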
/*! ZSTDMT_compressStream_generic() :
@@ -114,11 +127,21 @@ ZSTDLIB_API size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
* === Not exposed in libzstd. Never invoke directly ===
* ======================================================== */
-size_t ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params, ZSTDMT_parameter parameter, unsigned value);
-
-/* ZSTDMT_CCtxParam_setNbWorkers()
- * Set nbWorkers, and clamp it.
- * Also reset jobSize and overlapLog */
+ /*! ZSTDMT_toFlushNow()
+ * Tell how many bytes are ready to be flushed immediately.
+ * Probe the oldest active job (not yet entirely flushed) and check its output buffer.
+ * If it returns 0, it means either there is no active job,
+ * or the oldest job is still active but everything it has produced has been flushed so far,
+ * in which case flushing is limited by the speed of the oldest job. */
+size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx);
+
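A hedged illustration of how the probe could drive a non-blocking drain; writeOutput() and the destination buffer are hypothetical, and real callers live inside zstd_compress.c rather than application code:

/* Flush only when the oldest job already has bytes ready. */
while (ZSTDMT_toFlushNow(mtctx) > 0) {
    ZSTD_outBuffer out = { dstBuf, dstCap, 0 };        /* dstBuf/dstCap: caller-owned */
    size_t const remaining = ZSTDMT_flushStream(mtctx, &out);
    if (ZSTD_isError(remaining)) break;                /* handle error */
    writeOutput(dstBuf, out.pos);                      /* hypothetical sink */
    if (remaining == 0) break;                         /* everything flushed */
}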
+/*! ZSTDMT_CCtxParam_setMTCtxParameter()
+ * like ZSTDMT_setMTCtxParameter(), but into a ZSTD_CCtx_params */
+size_t ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params, ZSTDMT_parameter parameter, int value);
+
+/*! ZSTDMT_CCtxParam_setNbWorkers()
+ * Set nbWorkers, and clamp it.
+ * Also reset jobSize and overlapLog */
size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers);
/*! ZSTDMT_updateCParams_whileCompressing() :
@@ -126,14 +149,9 @@ size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorker
* New parameters will be applied to next compression job. */
void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams);
-/* ZSTDMT_getNbWorkers():
- * @return nb threads currently active in mtctx.
- * mtctx must be valid */
-unsigned ZSTDMT_getNbWorkers(const ZSTDMT_CCtx* mtctx);
-
-/* ZSTDMT_getFrameProgression():
- * tells how much data has been consumed (input) and produced (output) for current frame.
- * able to count progression inside worker threads.
+/*! ZSTDMT_getFrameProgression():
+ * tells how much data has been consumed (input) and produced (output) for current frame.
+ * able to count progression inside worker threads.
*/
ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx);
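A hedged sketch of progress reporting built on this query; the field names follow the ZSTD_frameProgression definition in zstd.h, while the formatting is purely illustrative:

/* Print a one-line progress update; usable while jobs are still in flight. */
{   ZSTD_frameProgression const fp = ZSTDMT_getFrameProgression(mtctx);
    fprintf(stderr, "\ringested: %llu  consumed: %llu  produced: %llu",
            fp.ingested, fp.consumed, fp.produced);
}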