Diffstat (limited to 'thirdparty')
-rw-r--r--  thirdparty/README.md                                   |    2
-rw-r--r--  thirdparty/zstd/common/compiler.h                      |    2
-rw-r--r--  thirdparty/zstd/common/fse.h                           |    2
-rw-r--r--  thirdparty/zstd/common/threading.c                     |    4
-rw-r--r--  thirdparty/zstd/common/xxhash.c                        |    4
-rw-r--r--  thirdparty/zstd/common/zstd_internal.h                 |   57
-rw-r--r--  thirdparty/zstd/compress/fse_compress.c                |    4
-rw-r--r--  thirdparty/zstd/compress/zstd_compress.c               |  719
-rw-r--r--  thirdparty/zstd/compress/zstd_compress_internal.h      |   25
-rw-r--r--  thirdparty/zstd/compress/zstd_fast.c                   |  327
-rw-r--r--  thirdparty/zstd/compress/zstd_lazy.h                   |    2
-rw-r--r--  thirdparty/zstd/compress/zstd_ldm.c                    |    2
-rw-r--r--  thirdparty/zstd/compress/zstd_opt.c                    |   38
-rw-r--r--  thirdparty/zstd/compress/zstdmt_compress.c             |   90
-rw-r--r--  thirdparty/zstd/compress/zstdmt_compress.h             |   69
-rw-r--r--  thirdparty/zstd/decompress/zstd_ddict.c                |    8
-rw-r--r--  thirdparty/zstd/decompress/zstd_decompress.c           |  421
-rw-r--r--  thirdparty/zstd/decompress/zstd_decompress_block.c     |   98
-rw-r--r--  thirdparty/zstd/decompress/zstd_decompress_internal.h  |    7
-rw-r--r--  thirdparty/zstd/zstd.h                                 | 1021
20 files changed, 1720 insertions, 1182 deletions
diff --git a/thirdparty/README.md b/thirdparty/README.md
index 6997a9ba96..ebdd0b5d04 100644
--- a/thirdparty/README.md
+++ b/thirdparty/README.md
@@ -559,7 +559,7 @@ Files extracted from upstream source:
## zstd
- Upstream: https://github.com/facebook/zstd
-- Version: 1.3.8
+- Version: 1.4.0
- License: BSD-3-Clause
Files extracted from upstream source:
diff --git a/thirdparty/zstd/common/compiler.h b/thirdparty/zstd/common/compiler.h
index 7f561282ca..0836e3ed27 100644
--- a/thirdparty/zstd/common/compiler.h
+++ b/thirdparty/zstd/common/compiler.h
@@ -40,7 +40,7 @@
/**
* FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
- * parameters. They must be inlined for the compiler to elimininate the constant
+ * parameters. They must be inlined for the compiler to eliminate the constant
* branches.
*/
#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
diff --git a/thirdparty/zstd/common/fse.h b/thirdparty/zstd/common/fse.h
index f72c519b25..811c670bdd 100644
--- a/thirdparty/zstd/common/fse.h
+++ b/thirdparty/zstd/common/fse.h
@@ -358,7 +358,7 @@ size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size
typedef enum {
FSE_repeat_none, /**< Cannot use the previous table */
FSE_repeat_check, /**< Can use the previous table but it must be checked */
- FSE_repeat_valid /**< Can use the previous table and it is asumed to be valid */
+ FSE_repeat_valid /**< Can use the previous table and it is assumed to be valid */
} FSE_repeat;
/* *****************************************
diff --git a/thirdparty/zstd/common/threading.c b/thirdparty/zstd/common/threading.c
index 8be8c8da94..f3d4fa8418 100644
--- a/thirdparty/zstd/common/threading.c
+++ b/thirdparty/zstd/common/threading.c
@@ -14,8 +14,8 @@
* This file will hold wrapper for systems, which do not support pthreads
*/
-/* create fake symbol to avoid empty trnaslation unit warning */
-int g_ZSTD_threading_useles_symbol;
+/* create fake symbol to avoid empty translation unit warning */
+int g_ZSTD_threading_useless_symbol;
#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
diff --git a/thirdparty/zstd/common/xxhash.c b/thirdparty/zstd/common/xxhash.c
index 532b816192..30599aaae4 100644
--- a/thirdparty/zstd/common/xxhash.c
+++ b/thirdparty/zstd/common/xxhash.c
@@ -66,10 +66,10 @@
/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
/*!XXH_FORCE_NATIVE_FORMAT :
- * By default, xxHash library provides endian-independant Hash values, based on little-endian convention.
+ * By default, xxHash library provides endian-independent Hash values, based on little-endian convention.
* Results are therefore identical for little-endian and big-endian CPU.
* This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format.
- * Should endian-independance be of no importance for your application, you may set the #define below to 1,
+ * Should endian-independence be of no importance for your application, you may set the #define below to 1,
* to improve speed for Big-endian CPU.
* This option has no impact on Little_Endian CPU.
*/
diff --git a/thirdparty/zstd/common/zstd_internal.h b/thirdparty/zstd/common/zstd_internal.h
index edeb74b9c3..31f756ab58 100644
--- a/thirdparty/zstd/common/zstd_internal.h
+++ b/thirdparty/zstd/common/zstd_internal.h
@@ -53,8 +53,50 @@ extern "C" {
#undef MAX
#define MIN(a,b) ((a)<(b) ? (a) : (b))
#define MAX(a,b) ((a)>(b) ? (a) : (b))
-#define CHECK_F(f) { size_t const errcod = f; if (ERR_isError(errcod)) return errcod; } /* check and Forward error code */
-#define CHECK_E(f, e) { size_t const errcod = f; if (ERR_isError(errcod)) return ERROR(e); } /* check and send Error code */
+
+/**
+ * Return the specified error if the condition evaluates to true.
+ *
+ * In debug modes, prints additional information. In order to do that
+ * (particularly, printing the conditional that failed), this can't just wrap
+ * RETURN_ERROR().
+ */
+#define RETURN_ERROR_IF(cond, err, ...) \
+ if (cond) { \
+ RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", __FILE__, __LINE__, ZSTD_QUOTE(cond), ZSTD_QUOTE(ERROR(err))); \
+ RAWLOG(3, ": " __VA_ARGS__); \
+ RAWLOG(3, "\n"); \
+ return ERROR(err); \
+ }
+
+/**
+ * Unconditionally return the specified error.
+ *
+ * In debug modes, prints additional information.
+ */
+#define RETURN_ERROR(err, ...) \
+ do { \
+ RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", __FILE__, __LINE__, ZSTD_QUOTE(ERROR(err))); \
+ RAWLOG(3, ": " __VA_ARGS__); \
+ RAWLOG(3, "\n"); \
+ return ERROR(err); \
+ } while(0);
+
+/**
+ * If the provided expression evaluates to an error code, returns that error code.
+ *
+ * In debug modes, prints additional information.
+ */
+#define FORWARD_IF_ERROR(err, ...) \
+ do { \
+ size_t const err_code = (err); \
+ if (ERR_isError(err_code)) { \
+ RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", __FILE__, __LINE__, ZSTD_QUOTE(err), ERR_getErrorName(err_code)); \
+ RAWLOG(3, ": " __VA_ARGS__); \
+ RAWLOG(3, "\n"); \
+ return err_code; \
+ } \
+ } while(0);
/*-*************************************
@@ -200,6 +242,17 @@ typedef struct {
U32 longLengthPos;
} seqStore_t;
+/**
+ * Contains the compressed frame size and an upper-bound for the decompressed frame size.
+ * Note: before using `compressedSize`, check for errors using ZSTD_isError().
+ * similarly, before using `decompressedBound`, check for errors using:
+ * `decompressedBound != ZSTD_CONTENTSIZE_ERROR`
+ */
+typedef struct {
+ size_t compressedSize;
+ unsigned long long decompressedBound;
+} ZSTD_frameSizeInfo; /* decompress & legacy */
+
const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */
void ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
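The CHECK_F/CHECK_E pair is replaced here by RETURN_ERROR_IF, RETURN_ERROR and FORWARD_IF_ERROR, which log the failing check in debug builds and otherwise return or forward the error code exactly as before. A minimal, self-contained sketch of the pattern (stand-in MY_* names, not the upstream macros, which additionally rely on RAWLOG and ZSTD_QUOTE):

#include <stdio.h>
#include <string.h>

#define MY_ERROR_dstSize_tooSmall ((size_t)-70)           /* stand-in error code */
#define MY_isError(code)          ((code) > (size_t)-100) /* same trick as ERR_isError() */

/* Return a specific error when a condition holds, logging the failed check. */
#define MY_RETURN_ERROR_IF(cond, err, msg) \
    do { if (cond) { fprintf(stderr, "check failed: %s\n", msg); return MY_ERROR_##err; } } while (0)

/* Propagate an error code returned by a callee, unchanged. */
#define MY_FORWARD_IF_ERROR(expr) \
    do { size_t const errcod = (expr); if (MY_isError(errcod)) return errcod; } while (0)

static size_t copyBlock(char* dst, size_t dstCapacity, const char* src, size_t srcSize)
{
    MY_RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall, "not enough room");
    memcpy(dst, src, srcSize);
    return srcSize;
}

static size_t copyTwice(char* dst, size_t dstCapacity, const char* src, size_t srcSize)
{
    MY_FORWARD_IF_ERROR(copyBlock(dst, dstCapacity, src, srcSize));
    MY_FORWARD_IF_ERROR(copyBlock(dst + srcSize, dstCapacity - srcSize, src, srcSize));
    return 2 * srcSize;
}

int main(void)
{
    char buf[8];
    size_t const ok  = copyTwice(buf, sizeof(buf), "abc", 3);   /* succeeds, returns 6 */
    size_t const bad = copyTwice(buf, sizeof(buf), "abcde", 5); /* second copy forwards the error */
    printf("%zu %d\n", ok, (int)MY_isError(bad));
    return 0;
}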
diff --git a/thirdparty/zstd/compress/fse_compress.c b/thirdparty/zstd/compress/fse_compress.c
index 60f357bbd2..68b47e1093 100644
--- a/thirdparty/zstd/compress/fse_compress.c
+++ b/thirdparty/zstd/compress/fse_compress.c
@@ -129,9 +129,9 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct,
{ U32 position = 0;
U32 symbol;
for (symbol=0; symbol<=maxSymbolValue; symbol++) {
- int nbOccurences;
+ int nbOccurrences;
int const freq = normalizedCounter[symbol];
- for (nbOccurences=0; nbOccurences<freq; nbOccurences++) {
+ for (nbOccurrences=0; nbOccurrences<freq; nbOccurrences++) {
tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
position = (position + step) & tableMask;
while (position > highThreshold)
diff --git a/thirdparty/zstd/compress/zstd_compress.c b/thirdparty/zstd/compress/zstd_compress.c
index c2c9d3bc55..2e163c8bf3 100644
--- a/thirdparty/zstd/compress/zstd_compress.c
+++ b/thirdparty/zstd/compress/zstd_compress.c
@@ -103,12 +103,31 @@ ZSTD_CCtx* ZSTD_initStaticCCtx(void *workspace, size_t workspaceSize)
return cctx;
}
+/**
+ * Clears and frees all of the dictionaries in the CCtx.
+ */
+static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx)
+{
+ ZSTD_free(cctx->localDict.dictBuffer, cctx->customMem);
+ ZSTD_freeCDict(cctx->localDict.cdict);
+ memset(&cctx->localDict, 0, sizeof(cctx->localDict));
+ memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));
+ cctx->cdict = NULL;
+}
+
+static size_t ZSTD_sizeof_localDict(ZSTD_localDict dict)
+{
+ size_t const bufferSize = dict.dictBuffer != NULL ? dict.dictSize : 0;
+ size_t const cdictSize = ZSTD_sizeof_CDict(dict.cdict);
+ return bufferSize + cdictSize;
+}
+
static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
{
assert(cctx != NULL);
assert(cctx->staticSize == 0);
ZSTD_free(cctx->workSpace, cctx->customMem); cctx->workSpace = NULL;
- ZSTD_freeCDict(cctx->cdictLocal); cctx->cdictLocal = NULL;
+ ZSTD_clearAllDicts(cctx);
#ifdef ZSTD_MULTITHREAD
ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL;
#endif
@@ -117,7 +136,8 @@ static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
{
if (cctx==NULL) return 0; /* support free on NULL */
- if (cctx->staticSize) return ERROR(memory_allocation); /* not compatible with static CCtx */
+ RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
+ "not compatible with static CCtx");
ZSTD_freeCCtxContent(cctx);
ZSTD_free(cctx, cctx->customMem);
return 0;
@@ -139,7 +159,7 @@ size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
{
if (cctx==NULL) return 0; /* support sizeof on NULL */
return sizeof(*cctx) + cctx->workSpaceSize
- + ZSTD_sizeof_CDict(cctx->cdictLocal)
+ + ZSTD_sizeof_localDict(cctx->localDict)
+ ZSTD_sizeof_mtctx(cctx);
}
@@ -195,7 +215,7 @@ size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
}
size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
- if (!cctxParams) { return ERROR(GENERIC); }
+ RETURN_ERROR_IF(!cctxParams, GENERIC);
memset(cctxParams, 0, sizeof(*cctxParams));
cctxParams->compressionLevel = compressionLevel;
cctxParams->fParams.contentSizeFlag = 1;
@@ -204,8 +224,8 @@ size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel)
size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
{
- if (!cctxParams) { return ERROR(GENERIC); }
- CHECK_F( ZSTD_checkCParams(params.cParams) );
+ RETURN_ERROR_IF(!cctxParams, GENERIC);
+ FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );
memset(cctxParams, 0, sizeof(*cctxParams));
cctxParams->cParams = params.cParams;
cctxParams->fParams = params.fParams;
@@ -359,6 +379,12 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
bounds.upperBound = ZSTD_dictForceCopy; /* note : how to ensure at compile time that this is the highest value enum ? */
return bounds;
+ case ZSTD_c_literalCompressionMode:
+ ZSTD_STATIC_ASSERT(ZSTD_lcm_auto < ZSTD_lcm_huffman && ZSTD_lcm_huffman < ZSTD_lcm_uncompressed);
+ bounds.lowerBound = ZSTD_lcm_auto;
+ bounds.upperBound = ZSTD_lcm_uncompressed;
+ return bounds;
+
default:
{ ZSTD_bounds const boundError = { ERROR(parameter_unsupported), 0, 0 };
return boundError;
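From the caller's side, the bounds added here are reachable through the public ZSTD_cParam_getBounds() / ZSTD_CCtx_setParameter() API. A usage sketch against the 1.4.0 headers (ZSTD_c_literalCompressionMode and the ZSTD_lcm_* enum are experimental and only visible with ZSTD_STATIC_LINKING_ONLY; error handling kept minimal):

#define ZSTD_STATIC_LINKING_ONLY   /* exposes ZSTD_c_literalCompressionMode / ZSTD_lcm_* */
#include <zstd.h>

static size_t configureCCtx(ZSTD_CCtx* cctx, int requestedLevel)
{
    /* Ask the library for the valid range instead of hard-coding limits. */
    ZSTD_bounds const levelBounds = ZSTD_cParam_getBounds(ZSTD_c_compressionLevel);
    if (ZSTD_isError(levelBounds.error)) return levelBounds.error;
    if (requestedLevel < levelBounds.lowerBound) requestedLevel = levelBounds.lowerBound;
    if (requestedLevel > levelBounds.upperBound) requestedLevel = levelBounds.upperBound;

    {   size_t const r = ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, requestedLevel);
        if (ZSTD_isError(r)) return r;
    }
    /* New in this version: choose how literals are encoded (auto / huffman / uncompressed). */
    {   size_t const r = ZSTD_CCtx_setParameter(cctx, ZSTD_c_literalCompressionMode, ZSTD_lcm_auto);
        if (ZSTD_isError(r)) return r;
    }
    return 0;
}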
@@ -378,10 +404,22 @@ static int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
return 1;
}
-#define BOUNDCHECK(cParam, val) { \
- if (!ZSTD_cParam_withinBounds(cParam,val)) { \
- return ERROR(parameter_outOfBound); \
-} }
+/* ZSTD_cParam_clampBounds:
+ * Clamps the value into the bounded range.
+ */
+static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value)
+{
+ ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
+ if (ZSTD_isError(bounds.error)) return bounds.error;
+ if (*value < bounds.lowerBound) *value = bounds.lowerBound;
+ if (*value > bounds.upperBound) *value = bounds.upperBound;
+ return 0;
+}
+
+#define BOUNDCHECK(cParam, val) { \
+ RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \
+ parameter_outOfBound); \
+}
static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
@@ -413,6 +451,7 @@ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
case ZSTD_c_ldmBucketSizeLog:
case ZSTD_c_ldmHashRateLog:
case ZSTD_c_forceAttachDict:
+ case ZSTD_c_literalCompressionMode:
default:
return 0;
}
@@ -425,18 +464,17 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
if (ZSTD_isUpdateAuthorized(param)) {
cctx->cParamsChanged = 1;
} else {
- return ERROR(stage_wrong);
+ RETURN_ERROR(stage_wrong);
} }
switch(param)
{
- case ZSTD_c_format :
- return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
+ case ZSTD_c_nbWorkers:
+ RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported,
+ "MT not compatible with static alloc");
+ break;
case ZSTD_c_compressionLevel:
- if (cctx->cdict) return ERROR(stage_wrong);
- return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
-
case ZSTD_c_windowLog:
case ZSTD_c_hashLog:
case ZSTD_c_chainLog:
@@ -444,49 +482,32 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
case ZSTD_c_minMatch:
case ZSTD_c_targetLength:
case ZSTD_c_strategy:
- if (cctx->cdict) return ERROR(stage_wrong);
- return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
-
+ case ZSTD_c_ldmHashRateLog:
+ case ZSTD_c_format:
case ZSTD_c_contentSizeFlag:
case ZSTD_c_checksumFlag:
case ZSTD_c_dictIDFlag:
- return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
-
- case ZSTD_c_forceMaxWindow : /* Force back-references to remain < windowSize,
- * even when referencing into Dictionary content.
- * default : 0 when using a CDict, 1 when using a Prefix */
- return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
-
+ case ZSTD_c_forceMaxWindow:
case ZSTD_c_forceAttachDict:
- return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
-
- case ZSTD_c_nbWorkers:
- if ((value!=0) && cctx->staticSize) {
- return ERROR(parameter_unsupported); /* MT not compatible with static alloc */
- }
- return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
-
+ case ZSTD_c_literalCompressionMode:
case ZSTD_c_jobSize:
case ZSTD_c_overlapLog:
case ZSTD_c_rsyncable:
- return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
-
case ZSTD_c_enableLongDistanceMatching:
case ZSTD_c_ldmHashLog:
case ZSTD_c_ldmMinMatch:
case ZSTD_c_ldmBucketSizeLog:
- case ZSTD_c_ldmHashRateLog:
- if (cctx->cdict) return ERROR(stage_wrong);
- return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
+ break;
- default: return ERROR(parameter_unsupported);
+ default: RETURN_ERROR(parameter_unsupported);
}
+ return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value);
}
-size_t ZSTD_CCtxParam_setParameter(ZSTD_CCtx_params* CCtxParams,
- ZSTD_cParameter param, int value)
+size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
+ ZSTD_cParameter param, int value)
{
- DEBUGLOG(4, "ZSTD_CCtxParam_setParameter (%i, %i)", (int)param, value);
+ DEBUGLOG(4, "ZSTD_CCtxParams_setParameter (%i, %i)", (int)param, value);
switch(param)
{
case ZSTD_c_format :
@@ -495,11 +516,9 @@ size_t ZSTD_CCtxParam_setParameter(ZSTD_CCtx_params* CCtxParams,
return (size_t)CCtxParams->format;
case ZSTD_c_compressionLevel : {
- int cLevel = value;
- if (cLevel > ZSTD_maxCLevel()) cLevel = ZSTD_maxCLevel();
- if (cLevel < ZSTD_minCLevel()) cLevel = ZSTD_minCLevel();
- if (cLevel) { /* 0 : does not change current level */
- CCtxParams->compressionLevel = cLevel;
+ FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value));
+ if (value) { /* 0 : does not change current level */
+ CCtxParams->compressionLevel = value;
}
if (CCtxParams->compressionLevel >= 0) return CCtxParams->compressionLevel;
return 0; /* return type (size_t) cannot represent negative values */
@@ -573,33 +592,55 @@ size_t ZSTD_CCtxParam_setParameter(ZSTD_CCtx_params* CCtxParams,
return CCtxParams->attachDictPref;
}
+ case ZSTD_c_literalCompressionMode : {
+ const ZSTD_literalCompressionMode_e lcm = (ZSTD_literalCompressionMode_e)value;
+ BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm);
+ CCtxParams->literalCompressionMode = lcm;
+ return CCtxParams->literalCompressionMode;
+ }
+
case ZSTD_c_nbWorkers :
#ifndef ZSTD_MULTITHREAD
- if (value!=0) return ERROR(parameter_unsupported);
+ RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
return 0;
#else
- return ZSTDMT_CCtxParam_setNbWorkers(CCtxParams, value);
+ FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value));
+ CCtxParams->nbWorkers = value;
+ return CCtxParams->nbWorkers;
#endif
case ZSTD_c_jobSize :
#ifndef ZSTD_MULTITHREAD
- return ERROR(parameter_unsupported);
+ RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
+ return 0;
#else
- return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_jobSize, value);
+ /* Adjust to the minimum non-default value. */
+ if (value != 0 && value < ZSTDMT_JOBSIZE_MIN)
+ value = ZSTDMT_JOBSIZE_MIN;
+ FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value));
+ assert(value >= 0);
+ CCtxParams->jobSize = value;
+ return CCtxParams->jobSize;
#endif
case ZSTD_c_overlapLog :
#ifndef ZSTD_MULTITHREAD
- return ERROR(parameter_unsupported);
+ RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
+ return 0;
#else
- return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_overlapLog, value);
+ FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value));
+ CCtxParams->overlapLog = value;
+ return CCtxParams->overlapLog;
#endif
case ZSTD_c_rsyncable :
#ifndef ZSTD_MULTITHREAD
- return ERROR(parameter_unsupported);
+ RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
+ return 0;
#else
- return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_rsyncable, value);
+ FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value));
+ CCtxParams->rsyncable = value;
+ return CCtxParams->rsyncable;
#endif
case ZSTD_c_enableLongDistanceMatching :
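One practical consequence of the relaxed checks in this hunk: a value of 0 for the multithreading parameters is always accepted, and only non-zero values fail on builds without ZSTD_MULTITHREAD. A small sketch of a tolerant caller (public 1.4.0 API):

#include <zstd.h>

/* Request background workers, falling back to single-threaded compression
 * when the library was built without ZSTD_MULTITHREAD. */
static void requestWorkers(ZSTD_CCtx* cctx, int nbWorkers)
{
    size_t const r = ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, nbWorkers);
    if (ZSTD_isError(r)) {
        /* Setting 0 is accepted even on single-threaded builds. */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 0);
    }
}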
@@ -625,21 +666,21 @@ size_t ZSTD_CCtxParam_setParameter(ZSTD_CCtx_params* CCtxParams,
return CCtxParams->ldmParams.bucketSizeLog;
case ZSTD_c_ldmHashRateLog :
- if (value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)
- return ERROR(parameter_outOfBound);
+ RETURN_ERROR_IF(value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN,
+ parameter_outOfBound);
CCtxParams->ldmParams.hashRateLog = value;
return CCtxParams->ldmParams.hashRateLog;
- default: return ERROR(parameter_unsupported);
+ default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
}
}
size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value)
{
- return ZSTD_CCtxParam_getParameter(&cctx->requestedParams, param, value);
+ return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value);
}
-size_t ZSTD_CCtxParam_getParameter(
+size_t ZSTD_CCtxParams_getParameter(
ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, int* value)
{
switch(param)
@@ -686,6 +727,9 @@ size_t ZSTD_CCtxParam_getParameter(
case ZSTD_c_forceAttachDict :
*value = CCtxParams->attachDictPref;
break;
+ case ZSTD_c_literalCompressionMode :
+ *value = CCtxParams->literalCompressionMode;
+ break;
case ZSTD_c_nbWorkers :
#ifndef ZSTD_MULTITHREAD
assert(CCtxParams->nbWorkers == 0);
@@ -694,7 +738,7 @@ size_t ZSTD_CCtxParam_getParameter(
break;
case ZSTD_c_jobSize :
#ifndef ZSTD_MULTITHREAD
- return ERROR(parameter_unsupported);
+ RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
#else
assert(CCtxParams->jobSize <= INT_MAX);
*value = (int)CCtxParams->jobSize;
@@ -702,14 +746,14 @@ size_t ZSTD_CCtxParam_getParameter(
#endif
case ZSTD_c_overlapLog :
#ifndef ZSTD_MULTITHREAD
- return ERROR(parameter_unsupported);
+ RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
#else
*value = CCtxParams->overlapLog;
break;
#endif
case ZSTD_c_rsyncable :
#ifndef ZSTD_MULTITHREAD
- return ERROR(parameter_unsupported);
+ RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
#else
*value = CCtxParams->rsyncable;
break;
@@ -729,7 +773,7 @@ size_t ZSTD_CCtxParam_getParameter(
case ZSTD_c_ldmHashRateLog :
*value = CCtxParams->ldmParams.hashRateLog;
break;
- default: return ERROR(parameter_unsupported);
+ default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
}
return 0;
}
@@ -745,8 +789,8 @@ size_t ZSTD_CCtx_setParametersUsingCCtxParams(
ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
{
DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
- if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
- if (cctx->cdict) return ERROR(stage_wrong);
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
+ RETURN_ERROR_IF(cctx->cdict, stage_wrong);
cctx->requestedParams = *params;
return 0;
@@ -755,33 +799,71 @@ size_t ZSTD_CCtx_setParametersUsingCCtxParams(
ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
{
DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
- if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
return 0;
}
+/**
+ * Initializes the local dict using the requested parameters.
+ * NOTE: This does not use the pledged src size, because it may be used for more
+ * than one compression.
+ */
+static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
+{
+ ZSTD_localDict* const dl = &cctx->localDict;
+ ZSTD_compressionParameters const cParams = ZSTD_getCParamsFromCCtxParams(
+ &cctx->requestedParams, 0, dl->dictSize);
+ if (dl->dict == NULL) {
+ /* No local dictionary. */
+ assert(dl->dictBuffer == NULL);
+ assert(dl->cdict == NULL);
+ assert(dl->dictSize == 0);
+ return 0;
+ }
+ if (dl->cdict != NULL) {
+ assert(cctx->cdict == dl->cdict);
+ /* Local dictionary already initialized. */
+ return 0;
+ }
+ assert(dl->dictSize > 0);
+ assert(cctx->cdict == NULL);
+ assert(cctx->prefixDict.dict == NULL);
+
+ dl->cdict = ZSTD_createCDict_advanced(
+ dl->dict,
+ dl->dictSize,
+ ZSTD_dlm_byRef,
+ dl->dictContentType,
+ cParams,
+ cctx->customMem);
+ RETURN_ERROR_IF(!dl->cdict, memory_allocation);
+ cctx->cdict = dl->cdict;
+ return 0;
+}
+
size_t ZSTD_CCtx_loadDictionary_advanced(
ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
{
- if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
- if (cctx->staticSize) return ERROR(memory_allocation); /* no malloc for static CCtx */
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
+ RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
+ "no malloc for static CCtx");
DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
- ZSTD_freeCDict(cctx->cdictLocal); /* in case one already exists */
- if (dict==NULL || dictSize==0) { /* no dictionary mode */
- cctx->cdictLocal = NULL;
- cctx->cdict = NULL;
+ ZSTD_clearAllDicts(cctx); /* in case one already exists */
+ if (dict == NULL || dictSize == 0) /* no dictionary mode */
+ return 0;
+ if (dictLoadMethod == ZSTD_dlm_byRef) {
+ cctx->localDict.dict = dict;
} else {
- ZSTD_compressionParameters const cParams =
- ZSTD_getCParamsFromCCtxParams(&cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, dictSize);
- cctx->cdictLocal = ZSTD_createCDict_advanced(
- dict, dictSize,
- dictLoadMethod, dictContentType,
- cParams, cctx->customMem);
- cctx->cdict = cctx->cdictLocal;
- if (cctx->cdictLocal == NULL)
- return ERROR(memory_allocation);
+ void* dictBuffer = ZSTD_malloc(dictSize, cctx->customMem);
+ RETURN_ERROR_IF(!dictBuffer, memory_allocation);
+ memcpy(dictBuffer, dict, dictSize);
+ cctx->localDict.dictBuffer = dictBuffer;
+ cctx->localDict.dict = dictBuffer;
}
+ cctx->localDict.dictSize = dictSize;
+ cctx->localDict.dictContentType = dictContentType;
return 0;
}
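Seen from the API side, the localDict change means ZSTD_CCtx_loadDictionary() now only records (or copies) the dictionary bytes, and the matching CDict is built lazily when the first compression starts, then reused for every following frame. A usage sketch with the public 1.4.0 API (error handling simplified):

#include <zstd.h>

static size_t compressTwoFrames(void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize,
                                const void* dict, size_t dictSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t written = 0;
    if (cctx == NULL) return (size_t)-1;  /* allocation failure, simplified */

    /* Dictionary content is copied into the CCtx; the CDict itself is only
     * created when the first ZSTD_compress2() call actually needs it. */
    { size_t const r = ZSTD_CCtx_loadDictionary(cctx, dict, dictSize);
      if (ZSTD_isError(r)) { ZSTD_freeCCtx(cctx); return r; } }

    { size_t const c1 = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
      if (ZSTD_isError(c1)) { ZSTD_freeCCtx(cctx); return c1; }
      written += c1; }

    /* The same dictionary is reused for the next frame without reloading it. */
    { size_t const c2 = ZSTD_compress2(cctx, (char*)dst + written, dstCapacity - written, src, srcSize);
      if (ZSTD_isError(c2)) { ZSTD_freeCCtx(cctx); return c2; }
      written += c2; }

    ZSTD_freeCCtx(cctx);
    return written;
}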
@@ -801,9 +883,10 @@ ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, s
size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
{
- if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
+ /* Free the existing local cdict (if any) to save memory. */
+ ZSTD_clearAllDicts(cctx);
cctx->cdict = cdict;
- memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* exclusive */
return 0;
}
@@ -815,8 +898,8 @@ size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSiz
size_t ZSTD_CCtx_refPrefix_advanced(
ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
{
- if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
- cctx->cdict = NULL; /* prefix discards any prior cdict */
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
+ ZSTD_clearAllDicts(cctx);
cctx->prefixDict.dict = prefix;
cctx->prefixDict.dictSize = prefixSize;
cctx->prefixDict.dictContentType = dictContentType;
@@ -834,8 +917,8 @@ size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
}
if ( (reset == ZSTD_reset_parameters)
|| (reset == ZSTD_reset_session_and_parameters) ) {
- if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
- cctx->cdict = NULL;
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
+ ZSTD_clearAllDicts(cctx);
return ZSTD_CCtxParams_reset(&cctx->requestedParams);
}
return 0;
@@ -888,10 +971,11 @@ static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
}
/** ZSTD_adjustCParams_internal() :
- optimize `cPar` for a given input (`srcSize` and `dictSize`).
- mostly downsizing to reduce memory consumption and initialization latency.
- Both `srcSize` and `dictSize` are optional (use 0 if unknown).
- Note : cPar is assumed validated. Use ZSTD_checkCParams() to ensure this condition. */
+ * optimize `cPar` for a specified input (`srcSize` and `dictSize`).
+ * mostly downsize to reduce memory consumption and initialization latency.
+ * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known.
+ * note : for the time being, `srcSize==0` means "unknown" too, for compatibility with older convention.
+ * condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */
static ZSTD_compressionParameters
ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
unsigned long long srcSize,
@@ -901,7 +985,7 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
static const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
assert(ZSTD_checkCParams(cPar)==0);
- if (dictSize && (srcSize+1<2) /* srcSize unknown */ )
+ if (dictSize && (srcSize+1<2) /* ZSTD_CONTENTSIZE_UNKNOWN and 0 mean "unknown" */ )
srcSize = minSrcSize; /* presumed small when there is a dictionary */
else if (srcSize == 0)
srcSize = ZSTD_CONTENTSIZE_UNKNOWN; /* 0 == unknown : presumed large */
@@ -922,7 +1006,7 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
}
if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
- cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* required for frame header */
+ cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* minimum wlog required for valid frame header */
return cPar;
}
@@ -932,7 +1016,7 @@ ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
unsigned long long srcSize,
size_t dictSize)
{
- cPar = ZSTD_clampCParams(cPar);
+ cPar = ZSTD_clampCParams(cPar); /* resulting cPar is necessarily valid (all parameters within range) */
return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize);
}
@@ -973,8 +1057,7 @@ ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
- /* Estimate CCtx size is supported for single-threaded compression only. */
- if (params->nbWorkers > 0) { return ERROR(GENERIC); }
+ RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
{ ZSTD_compressionParameters const cParams =
ZSTD_getCParamsFromCCtxParams(params, 0, 0);
size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
@@ -1022,10 +1105,12 @@ size_t ZSTD_estimateCCtxSize(int compressionLevel)
size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
- if (params->nbWorkers > 0) { return ERROR(GENERIC); }
- { size_t const CCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(params);
- size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << params->cParams.windowLog);
- size_t const inBuffSize = ((size_t)1 << params->cParams.windowLog) + blockSize;
+ RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
+ { ZSTD_compressionParameters const cParams =
+ ZSTD_getCParamsFromCCtxParams(params, 0, 0);
+ size_t const CCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(params);
+ size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
+ size_t const inBuffSize = ((size_t)1 << cParams.windowLog) + blockSize;
size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;
size_t const streamingSize = inBuffSize + outBuffSize;
@@ -1367,13 +1452,13 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
DEBUGLOG(4, "Need to resize workSpaceSize from %zuKB to %zuKB",
zc->workSpaceSize >> 10,
neededSpace >> 10);
- /* static cctx : no resize, error out */
- if (zc->staticSize) return ERROR(memory_allocation);
+
+ RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize");
zc->workSpaceSize = 0;
ZSTD_free(zc->workSpace, zc->customMem);
zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
- if (zc->workSpace == NULL) return ERROR(memory_allocation);
+ RETURN_ERROR_IF(zc->workSpace == NULL, memory_allocation);
zc->workSpaceSize = neededSpace;
zc->workSpaceOversizedDuration = 0;
@@ -1644,7 +1729,7 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
ZSTD_buffered_policy_e zbuff)
{
DEBUGLOG(5, "ZSTD_copyCCtx_internal");
- if (srcCCtx->stage!=ZSTDcs_init) return ERROR(stage_wrong);
+ RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong);
memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
{ ZSTD_CCtx_params params = dstCCtx->requestedParams;
@@ -1777,7 +1862,8 @@ static void ZSTD_reduceIndex (ZSTD_CCtx* zc, const U32 reducerValue)
static size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
{
U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
- if (srcSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall);
+ RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity,
+ dstSize_tooSmall);
MEM_writeLE24(dst, cBlockHeader24);
memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
return ZSTD_blockHeaderSize + srcSize;
@@ -1788,7 +1874,7 @@ static size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void
BYTE* const ostart = (BYTE* const)dst;
U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
- if (srcSize + flSize > dstCapacity) return ERROR(dstSize_tooSmall);
+ RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall);
switch(flSize)
{
@@ -1878,7 +1964,7 @@ static size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
}
- if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall); /* not enough space for compression */
+ RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression");
{ HUF_repeat repeat = prevHuf->repeatMode;
int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
@@ -1960,7 +2046,7 @@ void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
* If x == 0: Return 0
* Else: Return floor(-log2(x / 256) * 256)
*/
-static unsigned const kInverseProbabiltyLog256[256] = {
+static unsigned const kInverseProbabilityLog256[256] = {
0, 2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162,
1130, 1100, 1073, 1047, 1024, 1001, 980, 960, 941, 923, 906, 889,
874, 859, 844, 830, 817, 804, 791, 779, 768, 756, 745, 734,
@@ -1999,7 +2085,7 @@ static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t
if (count[s] != 0 && norm == 0)
norm = 1;
assert(count[s] < total);
- cost += count[s] * kInverseProbabiltyLog256[norm];
+ cost += count[s] * kInverseProbabilityLog256[norm];
}
return cost >> 8;
}
@@ -2022,7 +2108,7 @@ static size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
unsigned const norm256 = normAcc << shift;
assert(norm256 > 0);
assert(norm256 < 256);
- cost += count[s] * kInverseProbabiltyLog256[norm256];
+ cost += count[s] * kInverseProbabilityLog256[norm256];
}
return cost >> 8;
}
@@ -2050,21 +2136,17 @@ static size_t ZSTD_fseBitCost(
unsigned s;
FSE_CState_t cstate;
FSE_initCState(&cstate, ctable);
- if (ZSTD_getFSEMaxSymbolValue(ctable) < max) {
- DEBUGLOG(5, "Repeat FSE_CTable has maxSymbolValue %u < %u",
+ RETURN_ERROR_IF(ZSTD_getFSEMaxSymbolValue(ctable) < max, GENERIC,
+ "Repeat FSE_CTable has maxSymbolValue %u < %u",
ZSTD_getFSEMaxSymbolValue(ctable), max);
- return ERROR(GENERIC);
- }
for (s = 0; s <= max; ++s) {
unsigned const tableLog = cstate.stateLog;
unsigned const badCost = (tableLog + 1) << kAccuracyLog;
unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);
if (count[s] == 0)
continue;
- if (bitCost >= badCost) {
- DEBUGLOG(5, "Repeat FSE_CTable has Prob[%u] == 0", s);
- return ERROR(GENERIC);
- }
+ RETURN_ERROR_IF(bitCost >= badCost, GENERIC,
+ "Repeat FSE_CTable has Prob[%u] == 0", s);
cost += count[s] * bitCost;
}
return cost >> kAccuracyLog;
@@ -2080,7 +2162,7 @@ static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
BYTE wksp[FSE_NCOUNTBOUND];
S16 norm[MaxSeq + 1];
const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
- CHECK_F(FSE_normalizeCount(norm, tableLog, count, nbSeq, max));
+ FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max));
return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
}
@@ -2186,15 +2268,15 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
switch (type) {
case set_rle:
- CHECK_F(FSE_buildCTable_rle(nextCTable, (BYTE)max));
- if (dstCapacity==0) return ERROR(dstSize_tooSmall);
+ FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max));
+ RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall);
*op = codeTable[0];
return 1;
case set_repeat:
memcpy(nextCTable, prevCTable, prevCTableSize);
return 0;
case set_basic:
- CHECK_F(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, workspace, workspaceSize)); /* note : could be pre-calculated */
+ FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, workspace, workspaceSize)); /* note : could be pre-calculated */
return 0;
case set_compressed: {
S16 norm[MaxSeq + 1];
@@ -2205,14 +2287,14 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
nbSeq_1--;
}
assert(nbSeq_1 > 1);
- CHECK_F(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max));
+ FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max));
{ size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
- if (FSE_isError(NCountSize)) return NCountSize;
- CHECK_F(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, workspace, workspaceSize));
+ FORWARD_IF_ERROR(NCountSize);
+ FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, workspace, workspaceSize));
return NCountSize;
}
}
- default: return assert(0), ERROR(GENERIC);
+ default: assert(0); RETURN_ERROR(GENERIC);
}
}
@@ -2229,7 +2311,9 @@ ZSTD_encodeSequences_body(
FSE_CState_t stateOffsetBits;
FSE_CState_t stateLitLength;
- CHECK_E(BIT_initCStream(&blockStream, dst, dstCapacity), dstSize_tooSmall); /* not enough space remaining */
+ RETURN_ERROR_IF(
+ ERR_isError(BIT_initCStream(&blockStream, dst, dstCapacity)),
+ dstSize_tooSmall, "not enough space remaining");
DEBUGLOG(6, "available space for bitstream : %i (dstCapacity=%u)",
(int)(blockStream.endPtr - blockStream.startPtr),
(unsigned)dstCapacity);
@@ -2303,7 +2387,7 @@ ZSTD_encodeSequences_body(
FSE_flushCState(&blockStream, &stateLitLength);
{ size_t const streamSize = BIT_closeCStream(&blockStream);
- if (streamSize==0) return ERROR(dstSize_tooSmall); /* not enough space */
+ RETURN_ERROR_IF(streamSize==0, dstSize_tooSmall, "not enough space");
return streamSize;
}
}
@@ -2368,6 +2452,21 @@ static size_t ZSTD_encodeSequences(
sequences, nbSeq, longOffsets);
}
+static int ZSTD_disableLiteralsCompression(const ZSTD_CCtx_params* cctxParams)
+{
+ switch (cctxParams->literalCompressionMode) {
+ case ZSTD_lcm_huffman:
+ return 0;
+ case ZSTD_lcm_uncompressed:
+ return 1;
+ default:
+ assert(0 /* impossible: pre-validated */);
+ /* fall-through */
+ case ZSTD_lcm_auto:
+ return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
+ }
+}
+
/* ZSTD_compressSequences_internal():
* actually compresses both literals and sequences */
MEM_STATIC size_t
@@ -2403,22 +2502,22 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
/* Compress literals */
{ const BYTE* const literals = seqStorePtr->litStart;
size_t const litSize = seqStorePtr->lit - literals;
- int const disableLiteralCompression = (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
size_t const cSize = ZSTD_compressLiterals(
&prevEntropy->huf, &nextEntropy->huf,
- cctxParams->cParams.strategy, disableLiteralCompression,
+ cctxParams->cParams.strategy,
+ ZSTD_disableLiteralsCompression(cctxParams),
op, dstCapacity,
literals, litSize,
workspace, wkspSize,
bmi2);
- if (ZSTD_isError(cSize))
- return cSize;
+ FORWARD_IF_ERROR(cSize);
assert(cSize <= dstCapacity);
op += cSize;
}
/* Sequences Header */
- if ((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/) return ERROR(dstSize_tooSmall);
+ RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
+ dstSize_tooSmall);
if (nbSeq < 0x7F)
*op++ = (BYTE)nbSeq;
else if (nbSeq < LONGNBSEQ)
@@ -2452,7 +2551,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
prevEntropy->fse.litlengthCTable, sizeof(prevEntropy->fse.litlengthCTable),
workspace, wkspSize);
- if (ZSTD_isError(countSize)) return countSize;
+ FORWARD_IF_ERROR(countSize);
if (LLtype == set_compressed)
lastNCount = op;
op += countSize;
@@ -2474,7 +2573,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
prevEntropy->fse.offcodeCTable, sizeof(prevEntropy->fse.offcodeCTable),
workspace, wkspSize);
- if (ZSTD_isError(countSize)) return countSize;
+ FORWARD_IF_ERROR(countSize);
if (Offtype == set_compressed)
lastNCount = op;
op += countSize;
@@ -2494,7 +2593,7 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
prevEntropy->fse.matchlengthCTable, sizeof(prevEntropy->fse.matchlengthCTable),
workspace, wkspSize);
- if (ZSTD_isError(countSize)) return countSize;
+ FORWARD_IF_ERROR(countSize);
if (MLtype == set_compressed)
lastNCount = op;
op += countSize;
@@ -2509,10 +2608,10 @@ ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
CTable_LitLength, llCodeTable,
sequences, nbSeq,
longOffsets, bmi2);
- if (ZSTD_isError(bitstreamSize)) return bitstreamSize;
+ FORWARD_IF_ERROR(bitstreamSize);
op += bitstreamSize;
/* zstd versions <= 1.3.4 mistakenly report corruption when
- * FSE_readNCount() recieves a buffer < 4 bytes.
+ * FSE_readNCount() receives a buffer < 4 bytes.
* Fixed by https://github.com/facebook/zstd/pull/1146.
* This can happen when the last set_compressed table present is 2
* bytes and the bitstream is only one byte.
@@ -2552,7 +2651,7 @@ ZSTD_compressSequences(seqStore_t* seqStorePtr,
*/
if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
return 0; /* block not compressed */
- if (ZSTD_isError(cSize)) return cSize;
+ FORWARD_IF_ERROR(cSize);
/* Check compressibility */
{ size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
@@ -2641,7 +2740,10 @@ static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
goto out; /* don't even attempt compression below a certain srcSize */
}
ZSTD_resetSeqStore(&(zc->seqStore));
- ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy; /* required for optimal parser to read stats from dictionary */
+ /* required for optimal parser to read stats from dictionary */
+ ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;
+ /* tell the optimal parser how we expect to compress literals */
+ ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode;
/* a gap between an attached dict and the current window is not safe,
* they must remain adjacent,
@@ -2679,7 +2781,7 @@ static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
ldmSeqStore.seq = zc->ldmSequences;
ldmSeqStore.capacity = zc->maxNbLdmSequences;
/* Updates ldmSeqStore.size */
- CHECK_F(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
+ FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
&zc->appliedParams.ldmParams,
src, srcSize));
/* Updates ldmSeqStore.pos */
@@ -2752,8 +2854,9 @@ static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
- if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE)
- return ERROR(dstSize_tooSmall); /* not enough space to store compressed block */
+ RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE,
+ dstSize_tooSmall,
+ "not enough space to store compressed block");
if (remaining < blockSize) blockSize = remaining;
if (ZSTD_window_needOverflowCorrection(ms->window, ip + blockSize)) {
@@ -2774,11 +2877,11 @@ static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
{ size_t cSize = ZSTD_compressBlock_internal(cctx,
op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
ip, blockSize);
- if (ZSTD_isError(cSize)) return cSize;
+ FORWARD_IF_ERROR(cSize);
if (cSize == 0) { /* block is not compressible */
cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
- if (ZSTD_isError(cSize)) return cSize;
+ FORWARD_IF_ERROR(cSize);
} else {
U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
MEM_writeLE24(op, cBlockHeader24);
@@ -2811,11 +2914,11 @@ static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
BYTE const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
U32 const fcsCode = params.fParams.contentSizeFlag ?
(pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0; /* 0-3 */
- BYTE const frameHeaderDecriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
+ BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
size_t pos=0;
assert(!(params.fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
- if (dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX) return ERROR(dstSize_tooSmall);
+ RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall);
DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
!params.fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
@@ -2823,7 +2926,7 @@ static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
pos = 4;
}
- op[pos++] = frameHeaderDecriptionByte;
+ op[pos++] = frameHeaderDescriptionByte;
if (!singleSegment) op[pos++] = windowLogByte;
switch(dictIDSizeCode)
{
@@ -2847,11 +2950,11 @@ static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
/* ZSTD_writeLastEmptyBlock() :
* output an empty Block with end-of-frame mark to complete a frame
* @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
- * or an error code if `dstCapcity` is too small (<ZSTD_blockHeaderSize)
+ * or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
*/
size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
{
- if (dstCapacity < ZSTD_blockHeaderSize) return ERROR(dstSize_tooSmall);
+ RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall);
{ U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1); /* 0 size */
MEM_writeLE24(dst, cBlockHeader24);
return ZSTD_blockHeaderSize;
@@ -2860,10 +2963,9 @@ size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
{
- if (cctx->stage != ZSTDcs_init)
- return ERROR(stage_wrong);
- if (cctx->appliedParams.ldmParams.enableLdm)
- return ERROR(parameter_unsupported);
+ RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong);
+ RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm,
+ parameter_unsupported);
cctx->externSeqStore.seq = seq;
cctx->externSeqStore.size = nbSeq;
cctx->externSeqStore.capacity = nbSeq;
@@ -2882,12 +2984,13 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
cctx->stage, (unsigned)srcSize);
- if (cctx->stage==ZSTDcs_created) return ERROR(stage_wrong); /* missing init (ZSTD_compressBegin) */
+ RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong,
+ "missing init (ZSTD_compressBegin)");
if (frame && (cctx->stage==ZSTDcs_init)) {
fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams,
cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
- if (ZSTD_isError(fhSize)) return fhSize;
+ FORWARD_IF_ERROR(fhSize);
dstCapacity -= fhSize;
dst = (char*)dst + fhSize;
cctx->stage = ZSTDcs_ongoing;
@@ -2922,17 +3025,18 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
{ size_t const cSize = frame ?
ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize);
- if (ZSTD_isError(cSize)) return cSize;
+ FORWARD_IF_ERROR(cSize);
cctx->consumedSrcSize += srcSize;
cctx->producedCSize += (cSize + fhSize);
assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */
ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
- if (cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne) {
- DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize >= %u",
- (unsigned)cctx->pledgedSrcSizePlusOne-1, (unsigned)cctx->consumedSrcSize);
- return ERROR(srcSize_wrong);
- }
+ RETURN_ERROR_IF(
+ cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne,
+ srcSize_wrong,
+ "error : pledgedSrcSize = %u, while realSrcSize >= %u",
+ (unsigned)cctx->pledgedSrcSizePlusOne-1,
+ (unsigned)cctx->consumedSrcSize);
}
return cSize + fhSize;
}
@@ -2957,7 +3061,7 @@ size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
- if (srcSize > blockSizeMax) return ERROR(srcSize_wrong);
+ RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong);
return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
}
@@ -3020,9 +3124,9 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
NOTE: This behavior is not standard and could be improved in the future. */
static size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) {
U32 s;
- if (dictMaxSymbolValue < maxSymbolValue) return ERROR(dictionary_corrupted);
+ RETURN_ERROR_IF(dictMaxSymbolValue < maxSymbolValue, dictionary_corrupted);
for (s = 0; s <= maxSymbolValue; ++s) {
- if (normalizedCounter[s] == 0) return ERROR(dictionary_corrupted);
+ RETURN_ERROR_IF(normalizedCounter[s] == 0, dictionary_corrupted);
}
return 0;
}
@@ -3060,53 +3164,56 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
{ unsigned maxSymbolValue = 255;
size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr, dictEnd-dictPtr);
- if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted);
- if (maxSymbolValue < 255) return ERROR(dictionary_corrupted);
+ RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted);
+ RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted);
dictPtr += hufHeaderSize;
}
{ unsigned offcodeLog;
size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
- if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
- if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
+ RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted);
+ RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted);
/* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
/* fill all offset symbols to avoid garbage at end of table */
- CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.offcodeCTable,
- offcodeNCount, MaxOff, offcodeLog,
- workspace, HUF_WORKSPACE_SIZE),
- dictionary_corrupted);
+ RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
+ bs->entropy.fse.offcodeCTable,
+ offcodeNCount, MaxOff, offcodeLog,
+ workspace, HUF_WORKSPACE_SIZE)),
+ dictionary_corrupted);
dictPtr += offcodeHeaderSize;
}
{ short matchlengthNCount[MaxML+1];
unsigned matchlengthMaxValue = MaxML, matchlengthLog;
size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
- if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
- if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
+ RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted);
+ RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted);
/* Every match length code must have non-zero probability */
- CHECK_F( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
- CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.matchlengthCTable,
- matchlengthNCount, matchlengthMaxValue, matchlengthLog,
- workspace, HUF_WORKSPACE_SIZE),
- dictionary_corrupted);
+ FORWARD_IF_ERROR( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
+ RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
+ bs->entropy.fse.matchlengthCTable,
+ matchlengthNCount, matchlengthMaxValue, matchlengthLog,
+ workspace, HUF_WORKSPACE_SIZE)),
+ dictionary_corrupted);
dictPtr += matchlengthHeaderSize;
}
{ short litlengthNCount[MaxLL+1];
unsigned litlengthMaxValue = MaxLL, litlengthLog;
size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
- if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
- if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
+ RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted);
+ RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted);
/* Every literal length code must have non-zero probability */
- CHECK_F( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
- CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.litlengthCTable,
- litlengthNCount, litlengthMaxValue, litlengthLog,
- workspace, HUF_WORKSPACE_SIZE),
- dictionary_corrupted);
+ FORWARD_IF_ERROR( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
+ RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
+ bs->entropy.fse.litlengthCTable,
+ litlengthNCount, litlengthMaxValue, litlengthLog,
+ workspace, HUF_WORKSPACE_SIZE)),
+ dictionary_corrupted);
dictPtr += litlengthHeaderSize;
}
- if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
+ RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted);
bs->rep[0] = MEM_readLE32(dictPtr+0);
bs->rep[1] = MEM_readLE32(dictPtr+4);
bs->rep[2] = MEM_readLE32(dictPtr+8);
@@ -3119,19 +3226,19 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
}
/* All offset values <= dictContentSize + 128 KB must be representable */
- CHECK_F (ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
+ FORWARD_IF_ERROR(ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
/* All repCodes must be <= dictContentSize and != 0*/
{ U32 u;
for (u=0; u<3; u++) {
- if (bs->rep[u] == 0) return ERROR(dictionary_corrupted);
- if (bs->rep[u] > dictContentSize) return ERROR(dictionary_corrupted);
+ RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted);
+ RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted);
} }
bs->entropy.huf.repeatMode = HUF_repeat_valid;
bs->entropy.fse.offcode_repeatMode = FSE_repeat_valid;
bs->entropy.fse.matchlength_repeatMode = FSE_repeat_valid;
bs->entropy.fse.litlength_repeatMode = FSE_repeat_valid;
- CHECK_F(ZSTD_loadDictionaryContent(ms, params, dictPtr, dictContentSize, dtlm));
+ FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(ms, params, dictPtr, dictContentSize, dtlm));
return dictID;
}
}
@@ -3161,8 +3268,7 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
DEBUGLOG(4, "raw content dictionary detected");
return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm);
}
- if (dictContentType == ZSTD_dct_fullDict)
- return ERROR(dictionary_wrong);
+ RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong);
assert(0); /* impossible */
}
@@ -3189,13 +3295,13 @@ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
}
- CHECK_F( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
+ FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
ZSTDcrp_continue, zbuff) );
{
size_t const dictID = ZSTD_compress_insertDictionary(
cctx->blockState.prevCBlock, &cctx->blockState.matchState,
&params, dict, dictSize, dictContentType, dtlm, cctx->entropyWorkspace);
- if (ZSTD_isError(dictID)) return dictID;
+ FORWARD_IF_ERROR(dictID);
assert(dictID <= (size_t)(U32)-1);
cctx->dictID = (U32)dictID;
}
@@ -3212,7 +3318,7 @@ size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
{
DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params.cParams.windowLog);
/* compression parameters verification and optimization */
- CHECK_F( ZSTD_checkCParams(params.cParams) );
+ FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );
return ZSTD_compressBegin_internal(cctx,
dict, dictSize, dictContentType, dtlm,
cdict,
@@ -3260,12 +3366,12 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
size_t fhSize = 0;
DEBUGLOG(4, "ZSTD_writeEpilogue");
- if (cctx->stage == ZSTDcs_created) return ERROR(stage_wrong); /* init missing */
+ RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");
/* special case : empty frame */
if (cctx->stage == ZSTDcs_init) {
fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams, 0, 0);
- if (ZSTD_isError(fhSize)) return fhSize;
+ FORWARD_IF_ERROR(fhSize);
dstCapacity -= fhSize;
op += fhSize;
cctx->stage = ZSTDcs_ongoing;
@@ -3274,7 +3380,7 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
if (cctx->stage != ZSTDcs_ending) {
/* write one last empty block, make it the "last" block */
U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
- if (dstCapacity<4) return ERROR(dstSize_tooSmall);
+ RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall);
MEM_writeLE32(op, cBlockHeader24);
op += ZSTD_blockHeaderSize;
dstCapacity -= ZSTD_blockHeaderSize;
@@ -3282,7 +3388,7 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
if (cctx->appliedParams.fParams.checksumFlag) {
U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
- if (dstCapacity<4) return ERROR(dstSize_tooSmall);
+ RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall);
DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
MEM_writeLE32(op, checksum);
op += 4;
@@ -3300,18 +3406,20 @@ size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
size_t const cSize = ZSTD_compressContinue_internal(cctx,
dst, dstCapacity, src, srcSize,
1 /* frame mode */, 1 /* last chunk */);
- if (ZSTD_isError(cSize)) return cSize;
+ FORWARD_IF_ERROR(cSize);
endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
- if (ZSTD_isError(endResult)) return endResult;
+ FORWARD_IF_ERROR(endResult);
assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */
ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
DEBUGLOG(4, "end of frame : controlling src size");
- if (cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1) {
- DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize = %u",
- (unsigned)cctx->pledgedSrcSizePlusOne-1, (unsigned)cctx->consumedSrcSize);
- return ERROR(srcSize_wrong);
- } }
+ RETURN_ERROR_IF(
+ cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1,
+ srcSize_wrong,
+ "error : pledgedSrcSize = %u, while realSrcSize = %u",
+ (unsigned)cctx->pledgedSrcSizePlusOne-1,
+ (unsigned)cctx->consumedSrcSize);
+ }
return cSize + endResult;
}
@@ -3339,7 +3447,7 @@ size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
ZSTD_parameters params)
{
DEBUGLOG(4, "ZSTD_compress_advanced");
- CHECK_F(ZSTD_checkCParams(params.cParams));
+ FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams));
return ZSTD_compress_internal(cctx,
dst, dstCapacity,
src, srcSize,
@@ -3356,7 +3464,7 @@ size_t ZSTD_compress_advanced_internal(
ZSTD_CCtx_params params)
{
DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
- CHECK_F( ZSTD_compressBegin_internal(cctx,
+ FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
params, srcSize, ZSTDb_not_buffered) );
return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
@@ -3440,7 +3548,7 @@ static size_t ZSTD_initCDict_internal(
void* const internalBuffer = ZSTD_malloc(dictSize, cdict->customMem);
cdict->dictBuffer = internalBuffer;
cdict->dictContent = internalBuffer;
- if (!internalBuffer) return ERROR(memory_allocation);
+ RETURN_ERROR_IF(!internalBuffer, memory_allocation);
memcpy(internalBuffer, dictBuffer, dictSize);
}
cdict->dictContentSize = dictSize;
@@ -3466,7 +3574,7 @@ static size_t ZSTD_initCDict_internal(
&cdict->cBlockState, &cdict->matchState, &params,
cdict->dictContent, cdict->dictContentSize,
dictContentType, ZSTD_dtlm_full, cdict->workspace);
- if (ZSTD_isError(dictID)) return dictID;
+ FORWARD_IF_ERROR(dictID);
assert(dictID <= (size_t)(U32)-1);
cdict->dictID = (U32)dictID;
}
@@ -3596,7 +3704,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(
ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
{
DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced");
- if (cdict==NULL) return ERROR(dictionary_wrong);
+ RETURN_ERROR_IF(cdict==NULL, dictionary_wrong);
{ ZSTD_CCtx_params params = cctx->requestedParams;
params.cParams = ZSTD_getCParamsFromCDict(cdict);
/* Increase window log to fit the entire dictionary and source if the
@@ -3632,7 +3740,7 @@ size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
const void* src, size_t srcSize,
const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
{
- CHECK_F (ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize)); /* will check if cdict != NULL */
+ FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize)); /* will check if cdict != NULL */
return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
}
@@ -3700,7 +3808,7 @@ static size_t ZSTD_resetCStream_internal(ZSTD_CStream* cctx,
assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
assert(!((dict) && (cdict))); /* either dict or cdict, not both */
- CHECK_F( ZSTD_compressBegin_internal(cctx,
+ FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
dict, dictSize, dictContentType, ZSTD_dtlm_fast,
cdict,
params, pledgedSrcSize,
@@ -3718,13 +3826,17 @@ static size_t ZSTD_resetCStream_internal(ZSTD_CStream* cctx,
/* ZSTD_resetCStream():
* pledgedSrcSize == 0 means "unknown" */
-size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
+size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss)
{
- ZSTD_CCtx_params params = zcs->requestedParams;
+ /* temporary : 0 interpreted as "unknown" during transition period.
+ * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
+ * 0 will be interpreted as "empty" in the future.
+ */
+ U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize);
- if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
- params.fParams.contentSizeFlag = 1;
- return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dct_auto, zcs->cdict, params, pledgedSrcSize);
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
+ return 0;
}
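
With this change, resetting a stream is just a session reset plus a pledged-size declaration through the parameter API; 0 is still remapped to ZSTD_CONTENTSIZE_UNKNOWN during the transition period. A caller-side sketch of the equivalent explicit sequence (uses only the public v1.4 API, not code from this patch):

/* Equivalent caller-side reset, spelled out with the parameter API
 * (sketch of what ZSTD_resetCStream() now does internally). */
ZSTD_CCtx* const zcs = ZSTD_createCCtx();             /* ZSTD_CStream is the same object as ZSTD_CCtx */
/* ... first streaming session ... */
ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);        /* end the session, keep parameters and dictionary */
ZSTD_CCtx_setPledgedSrcSize(zcs, ZSTD_CONTENTSIZE_UNKNOWN);   /* or the exact size, if known */
/* the next ZSTD_compressStream2() call starts a new frame */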
/*! ZSTD_initCStream_internal() :
@@ -3736,32 +3848,18 @@ size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
ZSTD_CCtx_params params, unsigned long long pledgedSrcSize)
{
DEBUGLOG(4, "ZSTD_initCStream_internal");
- params.cParams = ZSTD_getCParamsFromCCtxParams(&params, pledgedSrcSize, dictSize);
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
+ zcs->requestedParams = params;
assert(!((dict) && (cdict))); /* either dict or cdict, not both */
-
- if (dict && dictSize >= 8) {
- DEBUGLOG(4, "loading dictionary of size %u", (unsigned)dictSize);
- if (zcs->staticSize) { /* static CCtx : never uses malloc */
- /* incompatible with internal cdict creation */
- return ERROR(memory_allocation);
- }
- ZSTD_freeCDict(zcs->cdictLocal);
- zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
- ZSTD_dlm_byCopy, ZSTD_dct_auto,
- params.cParams, zcs->customMem);
- zcs->cdict = zcs->cdictLocal;
- if (zcs->cdictLocal == NULL) return ERROR(memory_allocation);
+ if (dict) {
+ FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) );
} else {
- if (cdict) {
- params.cParams = ZSTD_getCParamsFromCDict(cdict); /* cParams are enforced from cdict; it includes windowLog */
- }
- ZSTD_freeCDict(zcs->cdictLocal);
- zcs->cdictLocal = NULL;
- zcs->cdict = cdict;
+ /* Dictionary is cleared if !cdict */
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) );
}
-
- return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dct_auto, zcs->cdict, params, pledgedSrcSize);
+ return 0;
}
/* ZSTD_initCStream_usingCDict_advanced() :
@@ -3772,22 +3870,20 @@ size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
unsigned long long pledgedSrcSize)
{
DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
- if (!cdict) return ERROR(dictionary_wrong); /* cannot handle NULL cdict (does not know what to do) */
- { ZSTD_CCtx_params params = zcs->requestedParams;
- params.cParams = ZSTD_getCParamsFromCDict(cdict);
- params.fParams = fParams;
- return ZSTD_initCStream_internal(zcs,
- NULL, 0, cdict,
- params, pledgedSrcSize);
- }
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
+ zcs->requestedParams.fParams = fParams;
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) );
+ return 0;
}
/* note : cdict must outlive compression session */
size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
{
- ZSTD_frameParameters const fParams = { 0 /* contentSizeFlag */, 0 /* checksum */, 0 /* hideDictID */ };
DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
- return ZSTD_initCStream_usingCDict_advanced(zcs, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN); /* note : will check that cdict != NULL */
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) );
+ return 0;
}
@@ -3797,33 +3893,53 @@ size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
* dict is loaded with default parameters ZSTD_dm_auto and ZSTD_dlm_byCopy. */
size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
const void* dict, size_t dictSize,
- ZSTD_parameters params, unsigned long long pledgedSrcSize)
+ ZSTD_parameters params, unsigned long long pss)
{
- DEBUGLOG(4, "ZSTD_initCStream_advanced: pledgedSrcSize=%u, flag=%u",
- (unsigned)pledgedSrcSize, params.fParams.contentSizeFlag);
- CHECK_F( ZSTD_checkCParams(params.cParams) );
- if ((pledgedSrcSize==0) && (params.fParams.contentSizeFlag==0)) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN; /* for compatibility with older programs relying on this behavior. Users should now specify ZSTD_CONTENTSIZE_UNKNOWN. This line will be removed in the future. */
+ /* for compatibility with older programs relying on this behavior.
+ * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN.
+ * This line will be removed in the future.
+ */
+ U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
+ DEBUGLOG(4, "ZSTD_initCStream_advanced");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
+ FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );
zcs->requestedParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
- return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL /*cdict*/, zcs->requestedParams, pledgedSrcSize);
+ FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) );
+ return 0;
}
size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
{
- ZSTD_CCtxParams_init(&zcs->requestedParams, compressionLevel);
- return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL, zcs->requestedParams, ZSTD_CONTENTSIZE_UNKNOWN);
+ DEBUGLOG(4, "ZSTD_initCStream_usingDict");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+ FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) );
+ FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) );
+ return 0;
}
size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
{
- U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss; /* temporary : 0 interpreted as "unknown" during transition period. Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. `0` will be interpreted as "empty" in the future */
- ZSTD_CCtxParams_init(&zcs->requestedParams, compressionLevel);
- return ZSTD_initCStream_internal(zcs, NULL, 0, NULL, zcs->requestedParams, pledgedSrcSize);
+ /* temporary : 0 interpreted as "unknown" during transition period.
+ * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
+ * 0 will be interpreted as "empty" in the future.
+ */
+ U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
+ DEBUGLOG(4, "ZSTD_initCStream_srcSize");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) );
+ FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) );
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
+ return 0;
}
size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
{
DEBUGLOG(4, "ZSTD_initCStream");
- return ZSTD_initCStream_srcSize(zcs, compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN);
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) );
+ FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) );
+ return 0;
}
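
All the ZSTD_initCStream*() wrappers above now reduce to the same reset / set-parameter / load-dictionary sequence, so new code can skip them and use the parameter API directly. A hedged caller-side sketch (`zcs`, `dictBuffer`, `dictSize` are caller-provided names, not from this patch):

/* New-style streaming init (sketch): this is what the wrappers above now do on the caller's behalf. */
ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, 19);
ZSTD_CCtx_loadDictionary(zcs, dictBuffer, dictSize);   /* copied now, actually loaded at first use */
/* then drive the stream with ZSTD_compressStream2(zcs, &out, &in, ZSTD_e_continue / ZSTD_e_end) */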
/*====== Compression ======*/
@@ -3847,10 +3963,10 @@ static size_t ZSTD_limitCopy(void* dst, size_t dstCapacity,
* internal function for all *compressStream*() variants
* non-static, because can be called from zstdmt_compress.c
* @return : hint size for next input */
-size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
- ZSTD_outBuffer* output,
- ZSTD_inBuffer* input,
- ZSTD_EndDirective const flushMode)
+static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
+ ZSTD_outBuffer* output,
+ ZSTD_inBuffer* input,
+ ZSTD_EndDirective const flushMode)
{
const char* const istart = (const char*)input->src;
const char* const iend = istart + input->size;
@@ -3873,8 +3989,7 @@ size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
switch(zcs->streamStage)
{
case zcss_init:
- /* call ZSTD_initCStream() first ! */
- return ERROR(init_missing);
+ RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!");
case zcss_load:
if ( (flushMode == ZSTD_e_end)
@@ -3884,7 +3999,7 @@ size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
size_t const cSize = ZSTD_compressEnd(zcs,
op, oend-op, ip, iend-ip);
DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
- if (ZSTD_isError(cSize)) return cSize;
+ FORWARD_IF_ERROR(cSize);
ip = iend;
op += cSize;
zcs->frameEnded = 1;
@@ -3925,7 +4040,7 @@ size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
zcs->inBuff + zcs->inToCompress, iSize) :
ZSTD_compressContinue(zcs, cDst, oSize,
zcs->inBuff + zcs->inToCompress, iSize);
- if (ZSTD_isError(cSize)) return cSize;
+ FORWARD_IF_ERROR(cSize);
zcs->frameEnded = lastBlock;
/* prepare next block */
zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
@@ -4001,7 +4116,7 @@ static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx)
size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
{
- CHECK_F( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) );
+ FORWARD_IF_ERROR( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) );
return ZSTD_nextInputSizeHint_MTorST(zcs);
}
@@ -4013,14 +4128,15 @@ size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
{
DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
/* check conditions */
- if (output->pos > output->size) return ERROR(GENERIC);
- if (input->pos > input->size) return ERROR(GENERIC);
+ RETURN_ERROR_IF(output->pos > output->size, GENERIC);
+ RETURN_ERROR_IF(input->pos > input->size, GENERIC);
assert(cctx!=NULL);
/* transparent initialization stage */
if (cctx->streamStage == zcss_init) {
ZSTD_CCtx_params params = cctx->requestedParams;
ZSTD_prefixDict const prefixDict = cctx->prefixDict;
+ FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) ); /* Init the local dict if present. */
memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* single usage */
assert(prefixDict.dict==NULL || cctx->cdict==NULL); /* only one can be set */
DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
@@ -4039,11 +4155,11 @@ size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
DEBUGLOG(4, "ZSTD_compressStream2: creating new mtctx for nbWorkers=%u",
params.nbWorkers);
cctx->mtctx = ZSTDMT_createCCtx_advanced(params.nbWorkers, cctx->customMem);
- if (cctx->mtctx == NULL) return ERROR(memory_allocation);
+ RETURN_ERROR_IF(cctx->mtctx == NULL, memory_allocation);
}
/* mt compression */
DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbWorkers=%u", params.nbWorkers);
- CHECK_F( ZSTDMT_initCStream_internal(
+ FORWARD_IF_ERROR( ZSTDMT_initCStream_internal(
cctx->mtctx,
prefixDict.dict, prefixDict.dictSize, ZSTD_dct_rawContent,
cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) );
@@ -4051,7 +4167,7 @@ size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
cctx->appliedParams.nbWorkers = params.nbWorkers;
} else
#endif
- { CHECK_F( ZSTD_resetCStream_internal(cctx,
+ { FORWARD_IF_ERROR( ZSTD_resetCStream_internal(cctx,
prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType,
cctx->cdict,
params, cctx->pledgedSrcSizePlusOne-1) );
@@ -4063,20 +4179,30 @@ size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
/* compression stage */
#ifdef ZSTD_MULTITHREAD
if (cctx->appliedParams.nbWorkers > 0) {
+ int const forceMaxProgress = (endOp == ZSTD_e_flush || endOp == ZSTD_e_end);
+ size_t flushMin;
+ assert(forceMaxProgress || endOp == ZSTD_e_continue /* Protection for a new flush type */);
if (cctx->cParamsChanged) {
ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams);
cctx->cParamsChanged = 0;
}
- { size_t const flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
+ do {
+ flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
if ( ZSTD_isError(flushMin)
|| (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */
ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
}
- DEBUGLOG(5, "completed ZSTD_compressStream2 delegating to ZSTDMT_compressStream_generic");
- return flushMin;
- } }
+ FORWARD_IF_ERROR(flushMin);
+ } while (forceMaxProgress && flushMin != 0 && output->pos < output->size);
+ DEBUGLOG(5, "completed ZSTD_compressStream2 delegating to ZSTDMT_compressStream_generic");
+ /* Either we don't require maximum forward progress, we've finished the
+ * flush, or we are out of output space.
+ */
+ assert(!forceMaxProgress || flushMin == 0 || output->pos == output->size);
+ return flushMin;
+ }
#endif
- CHECK_F( ZSTD_compressStream_generic(cctx, output, input, endOp) );
+ FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) );
DEBUGLOG(5, "completed ZSTD_compressStream2");
return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
}
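
The forceMaxProgress loop means a flush or end request keeps pumping the MT workers until either the flush completes or the output buffer fills, so the caller-visible contract is unchanged: call again while the return value is non-zero. Drain-loop sketch (caller-side; `cctx`, `dstChunk`, `dstChunkSize`, `fout` are assumed caller-provided):

/* End-of-stream drain loop (sketch): 0 means the frame is fully flushed,
 * > 0 means more output space is needed, errors satisfy ZSTD_isError(). */
size_t remaining;
do {
    ZSTD_outBuffer out = { dstChunk, dstChunkSize, 0 };
    ZSTD_inBuffer  in  = { NULL, 0, 0 };               /* no further input */
    remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
    if (ZSTD_isError(remaining)) { /* handle error: ZSTD_getErrorName(remaining) */ break; }
    fwrite(dstChunk, 1, out.pos, fout);                /* consume whatever was produced */
} while (remaining != 0);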
@@ -4107,10 +4233,10 @@ size_t ZSTD_compress2(ZSTD_CCtx* cctx,
dst, dstCapacity, &oPos,
src, srcSize, &iPos,
ZSTD_e_end);
- if (ZSTD_isError(result)) return result;
+ FORWARD_IF_ERROR(result);
if (result != 0) { /* compression not completed, due to lack of output space */
assert(oPos == dstCapacity);
- return ERROR(dstSize_tooSmall);
+ RETURN_ERROR(dstSize_tooSmall);
}
assert(iPos == srcSize); /* all input is expected consumed */
return oPos;
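
ZSTD_compress2() is the one-shot form of the same path and fails with dstSize_tooSmall when the frame does not fit, so sizing the destination with ZSTD_compressBound() is the usual pattern. Minimal sketch (`cctx`, `src`, `srcSize` assumed caller-provided):

/* One-shot compression with the advanced API (sketch). */
size_t const bound = ZSTD_compressBound(srcSize);      /* worst-case compressed size */
void*  const dst   = malloc(bound);
size_t const cSize = ZSTD_compress2(cctx, dst, bound, src, srcSize);
if (ZSTD_isError(cSize)) { /* e.g. report ZSTD_getErrorName(cSize) */ }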
@@ -4132,7 +4258,7 @@ size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
{
ZSTD_inBuffer input = { NULL, 0, 0 };
size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end);
- CHECK_F( remainingToFlush );
+ FORWARD_IF_ERROR( remainingToFlush );
if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush; /* minimal estimation */
/* single thread mode : attempt to calculate remaining to flush more precisely */
{ size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
@@ -4151,7 +4277,7 @@ int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }
static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
-{ /* "default" - guarantees a monotonically increasing memory budget */
+{ /* "default" - for any srcSize > 256 KB */
/* W, C, H, S, L, TL, strat */
{ 19, 12, 13, 1, 6, 1, ZSTD_fast }, /* base for negative levels */
{ 19, 13, 14, 1, 7, 0, ZSTD_fast }, /* level 1 */
@@ -4258,13 +4384,13 @@ static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEV
};
/*! ZSTD_getCParams() :
-* @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
-* Size values are optional, provide 0 if not known or unused */
+ * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
+ * Size values are optional, provide 0 if not known or unused */
ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
{
size_t const addedSize = srcSizeHint ? 0 : 500;
- U64 const rSize = srcSizeHint+dictSize ? srcSizeHint+dictSize+addedSize : (U64)-1;
- U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); /* intentional underflow for srcSizeHint == 0 */
+ U64 const rSize = srcSizeHint+dictSize ? srcSizeHint+dictSize+addedSize : ZSTD_CONTENTSIZE_UNKNOWN; /* intentional overflow for srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN */
+ U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
int row = compressionLevel;
DEBUGLOG(5, "ZSTD_getCParams (cLevel=%i)", compressionLevel);
if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
@@ -4272,13 +4398,14 @@ ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long l
if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
{ ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
if (compressionLevel < 0) cp.targetLength = (unsigned)(-compressionLevel); /* acceleration factor */
- return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize);
+ return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize); /* refine parameters based on srcSize & dictSize */
}
}
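
The table is picked by counting how many size thresholds the estimated source (srcSizeHint + dictSize + addedSize) falls under: tableID 0 selects the default (> 256 KB) table, tableID 3 the <= 16 KB table. A small caller-side sketch of inspecting the result (not part of this patch):

/* Sketch: parameters chosen for level 3 and a ~100 KB size hint.
 * 100 KB is <= 256 KB and <= 128 KB but not <= 16 KB, so tableID == 2
 * and the "<= 128 KB" table is used before ZSTD_adjustCParams_internal() refines it. */
ZSTD_compressionParameters const cp = ZSTD_getCParams(3, 100 * 1024, 0);
printf("wlog=%u clog=%u hlog=%u strat=%d\n", cp.windowLog, cp.chainLog, cp.hashLog, (int)cp.strategy);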
/*! ZSTD_getParams() :
-* same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`).
-* All fields of `ZSTD_frameParameters` are set to default (0) */
+ * same idea as ZSTD_getCParams()
+ * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
+ * Fields of `ZSTD_frameParameters` are set to default values */
ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
ZSTD_parameters params;
ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSizeHint, dictSize);
diff --git a/thirdparty/zstd/compress/zstd_compress_internal.h b/thirdparty/zstd/compress/zstd_compress_internal.h
index 29bca59859..cc3cbb9da9 100644
--- a/thirdparty/zstd/compress/zstd_compress_internal.h
+++ b/thirdparty/zstd/compress/zstd_compress_internal.h
@@ -36,9 +36,9 @@ extern "C" {
#define ZSTD_DUBT_UNSORTED_MARK 1 /* For btlazy2 strategy, index 1 now means "unsorted".
It could be confused for a real successor at index "1", if sorted as larger than its predecessor.
It's not a big deal though : candidate will just be sorted again.
- Additionnally, candidate position 1 will be lost.
+ Additionally, candidate position 1 will be lost.
But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
- The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be misdhandled after table re-use with a different strategy
+ The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy
Constant required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */
@@ -55,6 +55,14 @@ typedef struct ZSTD_prefixDict_s {
} ZSTD_prefixDict;
typedef struct {
+ void* dictBuffer;
+ void const* dict;
+ size_t dictSize;
+ ZSTD_dictContentType_e dictContentType;
+ ZSTD_CDict* cdict;
+} ZSTD_localDict;
+
+typedef struct {
U32 CTable[HUF_CTABLE_SIZE_U32(255)];
HUF_repeat repeatMode;
} ZSTD_hufCTables_t;
@@ -107,6 +115,7 @@ typedef struct {
U32 offCodeSumBasePrice; /* to compare to log2(offreq) */
ZSTD_OptPrice_e priceType; /* prices can be determined dynamically, or follow a pre-defined cost structure */
const ZSTD_entropyCTables_t* symbolCosts; /* pre-calculated dictionary statistics */
+ ZSTD_literalCompressionMode_e literalCompressionMode;
} optState_t;
typedef struct {
@@ -188,6 +197,7 @@ struct ZSTD_CCtx_params_s {
* 1<<wLog, even for dictionary */
ZSTD_dictAttachPref_e attachDictPref;
+ ZSTD_literalCompressionMode_e literalCompressionMode;
/* Multithreading: used to pass parameters to mtctx */
int nbWorkers;
@@ -243,7 +253,7 @@ struct ZSTD_CCtx_s {
U32 frameEnded;
/* Dictionary */
- ZSTD_CDict* cdictLocal;
+ ZSTD_localDict localDict;
const ZSTD_CDict* cdict;
ZSTD_prefixDict prefixDict; /* single-usage dictionary */
@@ -806,13 +816,6 @@ size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
void ZSTD_resetSeqStore(seqStore_t* ssPtr);
-/*! ZSTD_compressStream_generic() :
- * Private use only. To be called from zstdmt_compress.c in single-thread mode. */
-size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
- ZSTD_outBuffer* output,
- ZSTD_inBuffer* input,
- ZSTD_EndDirective const flushMode);
-
/*! ZSTD_getCParamsFromCDict() :
* as the name implies */
ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict);
@@ -839,7 +842,7 @@ size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
/* ZSTD_writeLastEmptyBlock() :
* output an empty Block with end-of-frame mark to complete a frame
* @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
- * or an error code if `dstCapcity` is too small (<ZSTD_blockHeaderSize)
+ * or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
*/
size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);
diff --git a/thirdparty/zstd/compress/zstd_fast.c b/thirdparty/zstd/compress/zstd_fast.c
index 40ba0f73e6..ed997b441c 100644
--- a/thirdparty/zstd/compress/zstd_fast.c
+++ b/thirdparty/zstd/compress/zstd_fast.c
@@ -45,7 +45,155 @@ FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_fast_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize,
- U32 const mls, ZSTD_dictMode_e const dictMode)
+ U32 const mls)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hlog = cParams->hashLog;
+ /* support stepSize of 0 */
+ size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const istart = (const BYTE*)src;
+ /* We check ip0 (ip + 0) and ip1 (ip + 1) each loop */
+ const BYTE* ip0 = istart;
+ const BYTE* ip1;
+ const BYTE* anchor = istart;
+ const U32 prefixStartIndex = ms->window.dictLimit;
+ const BYTE* const prefixStart = base + prefixStartIndex;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - HASH_READ_SIZE;
+ U32 offset_1=rep[0], offset_2=rep[1];
+ U32 offsetSaved = 0;
+
+ /* init */
+ ip0 += (ip0 == prefixStart);
+ ip1 = ip0 + 1;
+ {
+ U32 const maxRep = (U32)(ip0 - prefixStart);
+ if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
+ if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
+ }
+
+ /* Main Search Loop */
+ while (ip1 < ilimit) { /* < instead of <=, because check at ip0+2 */
+ size_t mLength;
+ BYTE const* ip2 = ip0 + 2;
+ size_t const h0 = ZSTD_hashPtr(ip0, hlog, mls);
+ U32 const val0 = MEM_read32(ip0);
+ size_t const h1 = ZSTD_hashPtr(ip1, hlog, mls);
+ U32 const val1 = MEM_read32(ip1);
+ U32 const current0 = (U32)(ip0-base);
+ U32 const current1 = (U32)(ip1-base);
+ U32 const matchIndex0 = hashTable[h0];
+ U32 const matchIndex1 = hashTable[h1];
+ BYTE const* repMatch = ip2-offset_1;
+ const BYTE* match0 = base + matchIndex0;
+ const BYTE* match1 = base + matchIndex1;
+ U32 offcode;
+ hashTable[h0] = current0; /* update hash table */
+ hashTable[h1] = current1; /* update hash table */
+
+ assert(ip0 + 1 == ip1);
+
+ if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) {
+ mLength = ip2[-1] == repMatch[-1] ? 1 : 0;
+ ip0 = ip2 - mLength;
+ match0 = repMatch - mLength;
+ offcode = 0;
+ goto _match;
+ }
+ if ((matchIndex0 > prefixStartIndex) && MEM_read32(match0) == val0) {
+ /* found a regular match */
+ goto _offset;
+ }
+ if ((matchIndex1 > prefixStartIndex) && MEM_read32(match1) == val1) {
+ /* found a regular match after one literal */
+ ip0 = ip1;
+ match0 = match1;
+ goto _offset;
+ }
+ {
+ size_t const step = ((ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
+ assert(step >= 2);
+ ip0 += step;
+ ip1 += step;
+ continue;
+ }
+_offset: /* Requires: ip0, match0 */
+ /* Compute the offset code */
+ offset_2 = offset_1;
+ offset_1 = (U32)(ip0-match0);
+ offcode = offset_1 + ZSTD_REP_MOVE;
+ mLength = 0;
+ /* Count the backwards match length */
+ while (((ip0>anchor) & (match0>prefixStart))
+ && (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } /* catch up */
+
+_match: /* Requires: ip0, match0, offcode */
+ /* Count the forward length */
+ mLength += ZSTD_count(ip0+mLength+4, match0+mLength+4, iend) + 4;
+ ZSTD_storeSeq(seqStore, ip0-anchor, anchor, offcode, mLength-MINMATCH);
+ /* match found */
+ ip0 += mLength;
+ anchor = ip0;
+ ip1 = ip0 + 1;
+
+ if (ip0 <= ilimit) {
+ /* Fill Table */
+ assert(base+current0+2 > istart); /* check base overflow */
+ hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */
+ hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
+
+ while ( (ip0 <= ilimit)
+ && ( (offset_2>0)
+ & (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) )) {
+ /* store sequence */
+ size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4;
+ U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */
+ hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
+ ip0 += rLength;
+ ip1 = ip0 + 1;
+ ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
+ anchor = ip0;
+ continue; /* faster when present (confirmed on gcc-8) ... (?) */
+ }
+ }
+ }
+
+ /* save reps for next block */
+ rep[0] = offset_1 ? offset_1 : offsetSaved;
+ rep[1] = offset_2 ? offset_2 : offsetSaved;
+
+ /* Return the last literals size */
+ return iend - anchor;
+}
+
+
+size_t ZSTD_compressBlock_fast(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ ZSTD_compressionParameters const* cParams = &ms->cParams;
+ U32 const mls = cParams->minMatch;
+ assert(ms->dictMatchState == NULL);
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4);
+ case 5 :
+ return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5);
+ case 6 :
+ return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6);
+ case 7 :
+ return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7);
+ }
+}
+
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_fast_dictMatchState_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize, U32 const mls)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const hashTable = ms->hashTable;
@@ -64,46 +212,26 @@ size_t ZSTD_compressBlock_fast_generic(
U32 offsetSaved = 0;
const ZSTD_matchState_t* const dms = ms->dictMatchState;
- const ZSTD_compressionParameters* const dictCParams =
- dictMode == ZSTD_dictMatchState ?
- &dms->cParams : NULL;
- const U32* const dictHashTable = dictMode == ZSTD_dictMatchState ?
- dms->hashTable : NULL;
- const U32 dictStartIndex = dictMode == ZSTD_dictMatchState ?
- dms->window.dictLimit : 0;
- const BYTE* const dictBase = dictMode == ZSTD_dictMatchState ?
- dms->window.base : NULL;
- const BYTE* const dictStart = dictMode == ZSTD_dictMatchState ?
- dictBase + dictStartIndex : NULL;
- const BYTE* const dictEnd = dictMode == ZSTD_dictMatchState ?
- dms->window.nextSrc : NULL;
- const U32 dictIndexDelta = dictMode == ZSTD_dictMatchState ?
- prefixStartIndex - (U32)(dictEnd - dictBase) :
- 0;
+ const ZSTD_compressionParameters* const dictCParams = &dms->cParams ;
+ const U32* const dictHashTable = dms->hashTable;
+ const U32 dictStartIndex = dms->window.dictLimit;
+ const BYTE* const dictBase = dms->window.base;
+ const BYTE* const dictStart = dictBase + dictStartIndex;
+ const BYTE* const dictEnd = dms->window.nextSrc;
+ const U32 dictIndexDelta = prefixStartIndex - (U32)(dictEnd - dictBase);
const U32 dictAndPrefixLength = (U32)(ip - prefixStart + dictEnd - dictStart);
- const U32 dictHLog = dictMode == ZSTD_dictMatchState ?
- dictCParams->hashLog : hlog;
-
- assert(dictMode == ZSTD_noDict || dictMode == ZSTD_dictMatchState);
+ const U32 dictHLog = dictCParams->hashLog;
/* otherwise, we would get index underflow when translating a dict index
* into a local index */
- assert(dictMode != ZSTD_dictMatchState
- || prefixStartIndex >= (U32)(dictEnd - dictBase));
+ assert(prefixStartIndex >= (U32)(dictEnd - dictBase));
/* init */
ip += (dictAndPrefixLength == 0);
- if (dictMode == ZSTD_noDict) {
- U32 const maxRep = (U32)(ip - prefixStart);
- if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
- if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
- }
- if (dictMode == ZSTD_dictMatchState) {
- /* dictMatchState repCode checks don't currently handle repCode == 0
- * disabling. */
- assert(offset_1 <= dictAndPrefixLength);
- assert(offset_2 <= dictAndPrefixLength);
- }
+ /* dictMatchState repCode checks don't currently handle repCode == 0
+ * disabling. */
+ assert(offset_1 <= dictAndPrefixLength);
+ assert(offset_2 <= dictAndPrefixLength);
/* Main Search Loop */
while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
@@ -113,50 +241,37 @@ size_t ZSTD_compressBlock_fast_generic(
U32 const matchIndex = hashTable[h];
const BYTE* match = base + matchIndex;
const U32 repIndex = current + 1 - offset_1;
- const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
- && repIndex < prefixStartIndex) ?
+ const BYTE* repMatch = (repIndex < prefixStartIndex) ?
dictBase + (repIndex - dictIndexDelta) :
base + repIndex;
hashTable[h] = current; /* update hash table */
- if ( (dictMode == ZSTD_dictMatchState)
- && ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
+ if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
&& (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
ip++;
ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
- } else if ( dictMode == ZSTD_noDict
- && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
- mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
- ip++;
- ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
} else if ( (matchIndex <= prefixStartIndex) ) {
- if (dictMode == ZSTD_dictMatchState) {
- size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
- U32 const dictMatchIndex = dictHashTable[dictHash];
- const BYTE* dictMatch = dictBase + dictMatchIndex;
- if (dictMatchIndex <= dictStartIndex ||
- MEM_read32(dictMatch) != MEM_read32(ip)) {
- assert(stepSize >= 1);
- ip += ((ip-anchor) >> kSearchStrength) + stepSize;
- continue;
- } else {
- /* found a dict match */
- U32 const offset = (U32)(current-dictMatchIndex-dictIndexDelta);
- mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
- while (((ip>anchor) & (dictMatch>dictStart))
- && (ip[-1] == dictMatch[-1])) {
- ip--; dictMatch--; mLength++;
- } /* catch up */
- offset_2 = offset_1;
- offset_1 = offset;
- ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
- }
- } else {
+ size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
+ U32 const dictMatchIndex = dictHashTable[dictHash];
+ const BYTE* dictMatch = dictBase + dictMatchIndex;
+ if (dictMatchIndex <= dictStartIndex ||
+ MEM_read32(dictMatch) != MEM_read32(ip)) {
assert(stepSize >= 1);
ip += ((ip-anchor) >> kSearchStrength) + stepSize;
continue;
+ } else {
+ /* found a dict match */
+ U32 const offset = (U32)(current-dictMatchIndex-dictIndexDelta);
+ mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
+ while (((ip>anchor) & (dictMatch>dictStart))
+ && (ip[-1] == dictMatch[-1])) {
+ ip--; dictMatch--; mLength++;
+ } /* catch up */
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
}
} else if (MEM_read32(match) != MEM_read32(ip)) {
/* it's not a match, and we're not going to check the dictionary */
@@ -185,41 +300,27 @@ size_t ZSTD_compressBlock_fast_generic(
hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
/* check immediate repcode */
- if (dictMode == ZSTD_dictMatchState) {
- while (ip <= ilimit) {
- U32 const current2 = (U32)(ip-base);
- U32 const repIndex2 = current2 - offset_2;
- const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
- dictBase - dictIndexDelta + repIndex2 :
- base + repIndex2;
- if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
- && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
- const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
- size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
- U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
- ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
- hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
- ip += repLength2;
- anchor = ip;
- continue;
- }
- break;
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex2 = current2 - offset_2;
+ const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
+ dictBase - dictIndexDelta + repIndex2 :
+ base + repIndex2;
+ if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
+ size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
+ U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
+ hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
+ ip += repLength2;
+ anchor = ip;
+ continue;
}
+ break;
}
-
- if (dictMode == ZSTD_noDict) {
- while ( (ip <= ilimit)
- && ( (offset_2>0)
- & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
- /* store sequence */
- size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
- U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */
- hashTable[ZSTD_hashPtr(ip, hlog, mls)] = (U32)(ip-base);
- ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
- ip += rLength;
- anchor = ip;
- continue; /* faster when present ... (?) */
- } } } }
+ }
+ }
/* save reps for next block */
rep[0] = offset_1 ? offset_1 : offsetSaved;
@@ -229,28 +330,6 @@ size_t ZSTD_compressBlock_fast_generic(
return iend - anchor;
}
-
-size_t ZSTD_compressBlock_fast(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- void const* src, size_t srcSize)
-{
- ZSTD_compressionParameters const* cParams = &ms->cParams;
- U32 const mls = cParams->minMatch;
- assert(ms->dictMatchState == NULL);
- switch(mls)
- {
- default: /* includes case 3 */
- case 4 :
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_noDict);
- case 5 :
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_noDict);
- case 6 :
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_noDict);
- case 7 :
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_noDict);
- }
-}
-
size_t ZSTD_compressBlock_fast_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
@@ -262,13 +341,13 @@ size_t ZSTD_compressBlock_fast_dictMatchState(
{
default: /* includes case 3 */
case 4 :
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4);
case 5 :
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5);
case 6 :
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6);
case 7 :
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7);
}
}
diff --git a/thirdparty/zstd/compress/zstd_lazy.h b/thirdparty/zstd/compress/zstd_lazy.h
index ef85a6df9c..bb1763069f 100644
--- a/thirdparty/zstd/compress/zstd_lazy.h
+++ b/thirdparty/zstd/compress/zstd_lazy.h
@@ -19,7 +19,7 @@ extern "C" {
U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip);
-void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue); /*! used in ZSTD_reduceIndex(). pre-emptively increase value of ZSTD_DUBT_UNSORTED_MARK */
+void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue); /*! used in ZSTD_reduceIndex(). preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */
size_t ZSTD_compressBlock_btlazy2(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
diff --git a/thirdparty/zstd/compress/zstd_ldm.c b/thirdparty/zstd/compress/zstd_ldm.c
index 58eb2ffe4d..784d20f3ab 100644
--- a/thirdparty/zstd/compress/zstd_ldm.c
+++ b/thirdparty/zstd/compress/zstd_ldm.c
@@ -429,7 +429,7 @@ size_t ZSTD_ldm_generateSequences(
*/
assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
/* The input could be very large (in zstdmt), so it must be broken up into
- * chunks to enforce the maximmum distance and handle overflow correction.
+ * chunks to enforce the maximum distance and handle overflow correction.
*/
assert(sequences->pos <= sequences->size);
assert(sequences->size <= sequences->capacity);
diff --git a/thirdparty/zstd/compress/zstd_opt.c b/thirdparty/zstd/compress/zstd_opt.c
index 44de6e97fd..efb69d3267 100644
--- a/thirdparty/zstd/compress/zstd_opt.c
+++ b/thirdparty/zstd/compress/zstd_opt.c
@@ -64,9 +64,15 @@ MEM_STATIC double ZSTD_fCost(U32 price)
}
#endif
+static int ZSTD_compressedLiterals(optState_t const* const optPtr)
+{
+ return optPtr->literalCompressionMode != ZSTD_lcm_uncompressed;
+}
+
static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
{
- optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel);
+ if (ZSTD_compressedLiterals(optPtr))
+ optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel);
optPtr->litLengthSumBasePrice = WEIGHT(optPtr->litLengthSum, optLevel);
optPtr->matchLengthSumBasePrice = WEIGHT(optPtr->matchLengthSum, optLevel);
optPtr->offCodeSumBasePrice = WEIGHT(optPtr->offCodeSum, optLevel);
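
The optimal parser now asks ZSTD_compressedLiterals() before touching literal statistics, so uncompressed-literal mode skips both pricing and frequency updates. The mode reaches optState_t through the new literalCompressionMode context parameter; a hedged caller sketch, assuming it is exposed as the experimental ZSTD_c_literalCompressionMode parameter behind ZSTD_STATIC_LINKING_ONLY in this release:

/* Sketch (experimental API assumed): force uncompressed literals, so
 * ZSTD_rawLiteralsCost() below prices each literal at a flat 8 bits. */
ZSTD_CCtx_setParameter(cctx, ZSTD_c_literalCompressionMode, ZSTD_lcm_uncompressed);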
@@ -99,6 +105,7 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
const BYTE* const src, size_t const srcSize,
int const optLevel)
{
+ int const compressedLiterals = ZSTD_compressedLiterals(optPtr);
DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize);
optPtr->priceType = zop_dynamic;
@@ -113,9 +120,10 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
/* huffman table presumed generated by dictionary */
optPtr->priceType = zop_dynamic;
- assert(optPtr->litFreq != NULL);
- optPtr->litSum = 0;
- { unsigned lit;
+ if (compressedLiterals) {
+ unsigned lit;
+ assert(optPtr->litFreq != NULL);
+ optPtr->litSum = 0;
for (lit=0; lit<=MaxLit; lit++) {
U32 const scaleLog = 11; /* scale to 2K */
U32 const bitCost = HUF_getNbBits(optPtr->symbolCosts->huf.CTable, lit);
@@ -163,10 +171,11 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
} else { /* not a dictionary */
assert(optPtr->litFreq != NULL);
- { unsigned lit = MaxLit;
+ if (compressedLiterals) {
+ unsigned lit = MaxLit;
HIST_count_simple(optPtr->litFreq, &lit, src, srcSize); /* use raw first block to init statistics */
+ optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
}
- optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
{ unsigned ll;
for (ll=0; ll<=MaxLL; ll++)
@@ -190,7 +199,8 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
} else { /* new block : re-use previous statistics, scaled down */
- optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
+ if (compressedLiterals)
+ optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0);
optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0);
optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0);
@@ -207,6 +217,10 @@ static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
int optLevel)
{
if (litLength == 0) return 0;
+
+ if (!ZSTD_compressedLiterals(optPtr))
+ return (litLength << 3) * BITCOST_MULTIPLIER; /* Uncompressed - 8 bytes per literal. */
+
if (optPtr->priceType == zop_predef)
return (litLength*6) * BITCOST_MULTIPLIER; /* 6 bit per literal - no statistic used */
@@ -310,7 +324,8 @@ static void ZSTD_updateStats(optState_t* const optPtr,
U32 offsetCode, U32 matchLength)
{
/* literals */
- { U32 u;
+ if (ZSTD_compressedLiterals(optPtr)) {
+ U32 u;
for (u=0; u < litLength; u++)
optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
@@ -870,7 +885,7 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
/* large match -> immediate encoding */
{ U32 const maxML = matches[nbMatches-1].len;
U32 const maxOffset = matches[nbMatches-1].off;
- DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new serie",
+ DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series",
nbMatches, maxML, maxOffset, (U32)(ip-prefixStart));
if (maxML > sufficient_len) {
@@ -1108,7 +1123,8 @@ static U32 ZSTD_upscaleStat(unsigned* table, U32 lastEltIndex, int bonus)
/* used in 2-pass strategy */
MEM_STATIC void ZSTD_upscaleStats(optState_t* optPtr)
{
- optPtr->litSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0);
+ if (ZSTD_compressedLiterals(optPtr))
+ optPtr->litSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0);
optPtr->litLengthSum = ZSTD_upscaleStat(optPtr->litLengthFreq, MaxLL, 0);
optPtr->matchLengthSum = ZSTD_upscaleStat(optPtr->matchLengthFreq, MaxML, 0);
optPtr->offCodeSum = ZSTD_upscaleStat(optPtr->offCodeFreq, MaxOff, 0);
@@ -1117,7 +1133,7 @@ MEM_STATIC void ZSTD_upscaleStats(optState_t* optPtr)
/* ZSTD_initStats_ultra():
* make a first compression pass, just to seed stats with more accurate starting values.
* only works on first block, with no dictionary and no ldm.
- * this function cannot error, hence its constract must be respected.
+ * this function cannot error, hence its contract must be respected.
*/
static void
ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
diff --git a/thirdparty/zstd/compress/zstdmt_compress.c b/thirdparty/zstd/compress/zstdmt_compress.c
index 2cbd6ffade..38fbb90768 100644
--- a/thirdparty/zstd/compress/zstdmt_compress.c
+++ b/thirdparty/zstd/compress/zstdmt_compress.c
@@ -22,6 +22,7 @@
/* ====== Dependencies ====== */
#include <string.h> /* memcpy, memset */
#include <limits.h> /* INT_MAX, UINT_MAX */
+#include "mem.h" /* MEM_STATIC */
#include "pool.h" /* threadpool */
#include "threading.h" /* mutex */
#include "zstd_compress_internal.h" /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */
@@ -456,7 +457,7 @@ typedef struct {
* Must be acquired after the main mutex when acquiring both.
*/
ZSTD_pthread_mutex_t ldmWindowMutex;
- ZSTD_pthread_cond_t ldmWindowCond; /* Signaled when ldmWindow is udpated */
+ ZSTD_pthread_cond_t ldmWindowCond; /* Signaled when ldmWindow is updated */
ZSTD_window_t ldmWindow; /* A thread-safe copy of ldmState.window */
} serialState_t;
@@ -647,7 +648,7 @@ static void ZSTDMT_compressionJob(void* jobDescription)
buffer_t dstBuff = job->dstBuff;
size_t lastCBlockSize = 0;
- /* ressources */
+ /* resources */
if (cctx==NULL) JOB_ERROR(ERROR(memory_allocation));
if (dstBuff.start == NULL) { /* streaming job : doesn't provide a dstBuffer */
dstBuff = ZSTDMT_getBuffer(job->bufPool);
@@ -672,7 +673,7 @@ static void ZSTDMT_compressionJob(void* jobDescription)
if (ZSTD_isError(initError)) JOB_ERROR(initError);
} else { /* srcStart points at reloaded section */
U64 const pledgedSrcSize = job->firstJob ? job->fullFrameSize : job->src.size;
- { size_t const forceWindowError = ZSTD_CCtxParam_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob);
+ { size_t const forceWindowError = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob);
if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError);
}
{ size_t const initError = ZSTD_compressBegin_advanced_internal(cctx,
@@ -864,14 +865,10 @@ static size_t ZSTDMT_expandJobsTable (ZSTDMT_CCtx* mtctx, U32 nbWorkers) {
* Internal use only */
size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers)
{
- if (nbWorkers > ZSTDMT_NBWORKERS_MAX) nbWorkers = ZSTDMT_NBWORKERS_MAX;
- params->nbWorkers = nbWorkers;
- params->overlapLog = ZSTDMT_OVERLAPLOG_DEFAULT;
- params->jobSize = 0;
- return nbWorkers;
+ return ZSTD_CCtxParams_setParameter(params, ZSTD_c_nbWorkers, (int)nbWorkers);
}
-ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem)
+MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem)
{
ZSTDMT_CCtx* mtctx;
U32 nbJobs = nbWorkers + 2;
@@ -906,6 +903,17 @@ ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem)
return mtctx;
}
+ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem)
+{
+#ifdef ZSTD_MULTITHREAD
+ return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem);
+#else
+ (void)nbWorkers;
+ (void)cMem;
+ return NULL;
+#endif
+}
+
ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers)
{
return ZSTDMT_createCCtx_advanced(nbWorkers, ZSTD_defaultCMem);
@@ -986,26 +994,13 @@ ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params,
{
case ZSTDMT_p_jobSize :
DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter : set jobSize to %i", value);
- if ( value != 0 /* default */
- && value < ZSTDMT_JOBSIZE_MIN)
- value = ZSTDMT_JOBSIZE_MIN;
- assert(value >= 0);
- if (value > ZSTDMT_JOBSIZE_MAX) value = ZSTDMT_JOBSIZE_MAX;
- params->jobSize = value;
- return value;
-
+ return ZSTD_CCtxParams_setParameter(params, ZSTD_c_jobSize, value);
case ZSTDMT_p_overlapLog :
DEBUGLOG(4, "ZSTDMT_p_overlapLog : %i", value);
- if (value < ZSTD_OVERLAPLOG_MIN) value = ZSTD_OVERLAPLOG_MIN;
- if (value > ZSTD_OVERLAPLOG_MAX) value = ZSTD_OVERLAPLOG_MAX;
- params->overlapLog = value;
- return value;
-
+ return ZSTD_CCtxParams_setParameter(params, ZSTD_c_overlapLog, value);
case ZSTDMT_p_rsyncable :
- value = (value != 0);
- params->rsyncable = value;
- return value;
-
+ DEBUGLOG(4, "ZSTD_p_rsyncable : %i", value);
+ return ZSTD_CCtxParams_setParameter(params, ZSTD_c_rsyncable, value);
default :
return ERROR(parameter_unsupported);
}
@@ -1021,32 +1016,29 @@ size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter,
{
switch (parameter) {
case ZSTDMT_p_jobSize:
- assert(mtctx->params.jobSize <= INT_MAX);
- *value = (int)(mtctx->params.jobSize);
- break;
+ return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_jobSize, value);
case ZSTDMT_p_overlapLog:
- *value = mtctx->params.overlapLog;
- break;
+ return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_overlapLog, value);
case ZSTDMT_p_rsyncable:
- *value = mtctx->params.rsyncable;
- break;
+ return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_rsyncable, value);
default:
return ERROR(parameter_unsupported);
}
- return 0;
}
/* Sets parameters relevant to the compression job,
* initializing others to default values. */
static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(ZSTD_CCtx_params const params)
{
- ZSTD_CCtx_params jobParams;
- memset(&jobParams, 0, sizeof(jobParams));
-
- jobParams.cParams = params.cParams;
- jobParams.fParams = params.fParams;
- jobParams.compressionLevel = params.compressionLevel;
-
+ ZSTD_CCtx_params jobParams = params;
+ /* Clear parameters related to multithreading */
+ jobParams.forceWindow = 0;
+ jobParams.nbWorkers = 0;
+ jobParams.jobSize = 0;
+ jobParams.overlapLog = 0;
+ jobParams.rsyncable = 0;
+ memset(&jobParams.ldmParams, 0, sizeof(ldmParams_t));
+ memset(&jobParams.customMem, 0, sizeof(ZSTD_customMem));
return jobParams;
}
@@ -1056,7 +1048,7 @@ static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(ZSTD_CCtx_params const params)
static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers)
{
if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation);
- CHECK_F( ZSTDMT_expandJobsTable(mtctx, nbWorkers) );
+ FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) );
mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, nbWorkers);
if (mtctx->bufPool == NULL) return ERROR(memory_allocation);
mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers);
@@ -1263,7 +1255,7 @@ static size_t ZSTDMT_compress_advanced_internal(
if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, avgJobSize))
return ERROR(memory_allocation);
- CHECK_F( ZSTDMT_expandJobsTable(mtctx, nbJobs) ); /* only expands if necessary */
+ FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbJobs) ); /* only expands if necessary */
{ unsigned u;
for (u=0; u<nbJobs; u++) {
@@ -1396,7 +1388,7 @@ size_t ZSTDMT_initCStream_internal(
/* init */
if (params.nbWorkers != mtctx->params.nbWorkers)
- CHECK_F( ZSTDMT_resize(mtctx, params.nbWorkers) );
+ FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, params.nbWorkers) );
if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN;
if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = ZSTDMT_JOBSIZE_MAX;
@@ -1547,7 +1539,7 @@ size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel) {
/* ZSTDMT_writeLastEmptyBlock()
* Write a single empty block with an end-of-frame to finish a frame.
* Job must be created from streaming variant.
- * This function is always successfull if expected conditions are fulfilled.
+ * This function is always successful if expected conditions are fulfilled.
*/
static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job)
{
@@ -1987,7 +1979,7 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
assert(input->pos <= input->size);
if (mtctx->singleBlockingThread) { /* delegate to single-thread (synchronous) */
- return ZSTD_compressStream_generic(mtctx->cctxPool->cctx[0], output, input, endOp);
+ return ZSTD_compressStream2(mtctx->cctxPool->cctx[0], output, input, endOp);
}
if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) {
@@ -2051,7 +2043,7 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
|| ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) { /* must finish the frame with a zero-size block */
size_t const jobSize = mtctx->inBuff.filled;
assert(mtctx->inBuff.filled <= mtctx->targetSectionSize);
- CHECK_F( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) );
+ FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) );
}
/* check for potential compressed data ready to be flushed */
@@ -2065,7 +2057,7 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
{
- CHECK_F( ZSTDMT_compressStream_generic(mtctx, output, input, ZSTD_e_continue) );
+ FORWARD_IF_ERROR( ZSTDMT_compressStream_generic(mtctx, output, input, ZSTD_e_continue) );
/* recommended next input size : fill current input buffer */
return mtctx->targetSectionSize - mtctx->inBuff.filled; /* note : could be zero when input buffer is fully filled and no more availability to create new job */
@@ -2082,7 +2074,7 @@ static size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* ou
|| ((endFrame==ZSTD_e_end) && !mtctx->frameEnded)) { /* need a last 0-size block to end frame */
DEBUGLOG(5, "ZSTDMT_flushStream_internal : create a new job (%u bytes, end:%u)",
(U32)srcSize, (U32)endFrame);
- CHECK_F( ZSTDMT_createCompressionJob(mtctx, srcSize, endFrame) );
+ FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, srcSize, endFrame) );
}
/* check if there is any data available to flush */
diff --git a/thirdparty/zstd/compress/zstdmt_compress.h b/thirdparty/zstd/compress/zstdmt_compress.h
index ee771681fa..12e6bcb3a3 100644
--- a/thirdparty/zstd/compress/zstdmt_compress.h
+++ b/thirdparty/zstd/compress/zstdmt_compress.h
@@ -17,10 +17,25 @@
/* Note : This is an internal API.
- * Some methods are still exposed (ZSTDLIB_API),
+ * These APIs used to be exposed with ZSTDLIB_API,
* because it used to be the only way to invoke MT compression.
- * Now, it's recommended to use ZSTD_compress_generic() instead.
- * These methods will stop being exposed in a future version */
+ * Now, it's recommended to use ZSTD_compress2 and ZSTD_compressStream2()
+ * instead.
+ *
+ * If you depend on these APIs and can't switch, then define
+ * ZSTD_LEGACY_MULTITHREADED_API when making the dynamic library.
+ * However, we may completely remove these functions in a future
+ * release, so please switch soon.
+ *
+ * This API requires ZSTD_MULTITHREAD to be defined during compilation,
+ * otherwise ZSTDMT_createCCtx*() will fail.
+ */
+
+#ifdef ZSTD_LEGACY_MULTITHREADED_API
+# define ZSTDMT_API ZSTDLIB_API
+#else
+# define ZSTDMT_API
+#endif
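
In short: the ZSTDMT_* entry points are no longer exported unless ZSTD_LEGACY_MULTITHREADED_API is defined when building the library, and the supported path is a regular context with workers enabled. Migration sketch (caller-side; `out` and `in` are ordinary ZSTD_outBuffer/ZSTD_inBuffer values):

/* Sketch: replace ZSTDMT_compressStream() with a plain CCtx plus nbWorkers.
 * nbWorkers > 0 requires a library built with ZSTD_MULTITHREAD. */
ZSTD_CCtx* const cctx = ZSTD_createCCtx();
ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);     /* 0 = single-thread blocking mode */
size_t const hint = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_continue);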
/* === Dependencies === */
#include <stddef.h> /* size_t */
@@ -40,17 +55,19 @@
/* === Memory management === */
typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx;
-ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers);
-ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers,
+/* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */
+ZSTDMT_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers);
+/* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */
+ZSTDMT_API ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers,
ZSTD_customMem cMem);
-ZSTDLIB_API size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx);
+ZSTDMT_API size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx);
-ZSTDLIB_API size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx);
+ZSTDMT_API size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx);
/* === Simple one-pass compression function === */
-ZSTDLIB_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
+ZSTDMT_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
int compressionLevel);
@@ -59,31 +76,31 @@ ZSTDLIB_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
/* === Streaming functions === */
-ZSTDLIB_API size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel);
-ZSTDLIB_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize); /**< if srcSize is not known at reset time, use ZSTD_CONTENTSIZE_UNKNOWN. Note: for compatibility with older programs, 0 means the same as ZSTD_CONTENTSIZE_UNKNOWN, but it will change in the future to mean "empty" */
+ZSTDMT_API size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel);
+ZSTDMT_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize); /**< if srcSize is not known at reset time, use ZSTD_CONTENTSIZE_UNKNOWN. Note: for compatibility with older programs, 0 means the same as ZSTD_CONTENTSIZE_UNKNOWN, but it will change in the future to mean "empty" */
-ZSTDLIB_API size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx);
-ZSTDLIB_API size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+ZSTDMT_API size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx);
+ZSTDMT_API size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
-ZSTDLIB_API size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output); /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
-ZSTDLIB_API size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output); /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
+ZSTDMT_API size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output); /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
+ZSTDMT_API size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output); /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
/* === Advanced functions and parameters === */
-ZSTDLIB_API size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- const ZSTD_CDict* cdict,
- ZSTD_parameters params,
- int overlapLog);
+ZSTDMT_API size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict,
+ ZSTD_parameters params,
+ int overlapLog);
-ZSTDLIB_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
+ZSTDMT_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
const void* dict, size_t dictSize, /* dict can be released after init, a local copy is preserved within zcs */
ZSTD_parameters params,
unsigned long long pledgedSrcSize); /* pledgedSrcSize is optional and can be zero == unknown */
-ZSTDLIB_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
+ZSTDMT_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
const ZSTD_CDict* cdict,
ZSTD_frameParameters fparams,
unsigned long long pledgedSrcSize); /* note : zero means empty */
@@ -92,7 +109,7 @@ ZSTDLIB_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
* List of parameters that can be set using ZSTDMT_setMTCtxParameter() */
typedef enum {
ZSTDMT_p_jobSize, /* Each job is compressed in parallel. By default, this value is dynamically determined depending on compression parameters. Can be set explicitly here. */
- ZSTDMT_p_overlapLog, /* Each job may reload a part of previous job to enhance compressionr ratio; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window. This is a "sticky" parameter : its value will be re-used on next compression job */
+ ZSTDMT_p_overlapLog, /* Each job may reload a part of previous job to enhance compression ratio; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window. This is a "sticky" parameter : its value will be re-used on next compression job */
ZSTDMT_p_rsyncable /* Enables rsyncable mode. */
} ZSTDMT_parameter;
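The tunables documented in this enum have public counterparts, so the same knobs remain reachable once the ZSTDMT_* symbols stop being exported. A short sketch, assuming ZSTD_c_jobSize and ZSTD_c_overlapLog correspond to ZSTDMT_p_jobSize and ZSTDMT_p_overlapLog (the values are arbitrary examples):

    #include <zstd.h>

    static void tune_mt_params(ZSTD_CCtx* cctx)
    {
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 2);      /* enables MT mode */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_jobSize, 1 << 20);  /* ~1 MiB per job */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_overlapLog, 6);     /* default: 1/8th of the window */
    }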
@@ -101,12 +118,12 @@ typedef enum {
* The function must be called typically after ZSTD_createCCtx() but __before ZSTDMT_init*() !__
* Parameters not explicitly reset by ZSTDMT_init*() remain the same in consecutive compression sessions.
* @return : 0, or an error code (which can be tested using ZSTD_isError()) */
-ZSTDLIB_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value);
+ZSTDMT_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value);
/* ZSTDMT_getMTCtxParameter() :
* Query the ZSTDMT_CCtx for a parameter value.
* @return : 0, or an error code (which can be tested using ZSTD_isError()) */
-ZSTDLIB_API size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value);
+ZSTDMT_API size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value);
/*! ZSTDMT_compressStream_generic() :
@@ -116,7 +133,7 @@ ZSTDLIB_API size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter
* 0 if fully flushed
* or an error code
* note : needs to be init using any ZSTD_initCStream*() variant */
-ZSTDLIB_API size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
+ZSTDMT_API size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
ZSTD_outBuffer* output,
ZSTD_inBuffer* input,
ZSTD_EndDirective endOp);
diff --git a/thirdparty/zstd/decompress/zstd_ddict.c b/thirdparty/zstd/decompress/zstd_ddict.c
index 2ad0440684..0af3d23bfe 100644
--- a/thirdparty/zstd/decompress/zstd_ddict.c
+++ b/thirdparty/zstd/decompress/zstd_ddict.c
@@ -105,9 +105,9 @@ ZSTD_loadEntropy_intoDDict(ZSTD_DDict* ddict,
ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_FRAMEIDSIZE);
/* load entropy tables */
- CHECK_E( ZSTD_loadDEntropy(&ddict->entropy,
- ddict->dictContent, ddict->dictSize),
- dictionary_corrupted );
+ RETURN_ERROR_IF(ZSTD_isError(ZSTD_loadDEntropy(
+ &ddict->entropy, ddict->dictContent, ddict->dictSize)),
+ dictionary_corrupted);
ddict->entropyPresent = 1;
return 0;
}
@@ -133,7 +133,7 @@ static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict,
ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
/* parse dictionary content */
- CHECK_F( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) );
+ FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) );
return 0;
}
diff --git a/thirdparty/zstd/decompress/zstd_decompress.c b/thirdparty/zstd/decompress/zstd_decompress.c
index feef1ef67a..675596f5aa 100644
--- a/thirdparty/zstd/decompress/zstd_decompress.c
+++ b/thirdparty/zstd/decompress/zstd_decompress.c
@@ -106,6 +106,7 @@ static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
dctx->ddictLocal = NULL;
dctx->dictEnd = NULL;
dctx->ddictIsCold = 0;
+ dctx->dictUses = ZSTD_dont_use;
dctx->inBuff = NULL;
dctx->inBuffSize = 0;
dctx->outBuffSize = 0;
@@ -147,13 +148,20 @@ ZSTD_DCtx* ZSTD_createDCtx(void)
return ZSTD_createDCtx_advanced(ZSTD_defaultCMem);
}
+static void ZSTD_clearDict(ZSTD_DCtx* dctx)
+{
+ ZSTD_freeDDict(dctx->ddictLocal);
+ dctx->ddictLocal = NULL;
+ dctx->ddict = NULL;
+ dctx->dictUses = ZSTD_dont_use;
+}
+
size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
{
if (dctx==NULL) return 0; /* support free on NULL */
- if (dctx->staticSize) return ERROR(memory_allocation); /* not compatible with static DCtx */
+ RETURN_ERROR_IF(dctx->staticSize, memory_allocation, "not compatible with static DCtx");
{ ZSTD_customMem const cMem = dctx->customMem;
- ZSTD_freeDDict(dctx->ddictLocal);
- dctx->ddictLocal = NULL;
+ ZSTD_clearDict(dctx);
ZSTD_free(dctx->inBuff, cMem);
dctx->inBuff = NULL;
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
@@ -203,7 +211,7 @@ unsigned ZSTD_isFrame(const void* buffer, size_t size)
static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format)
{
size_t const minInputSize = ZSTD_startingInputLength(format);
- if (srcSize < minInputSize) return ERROR(srcSize_wrong);
+ RETURN_ERROR_IF(srcSize < minInputSize, srcSize_wrong);
{ BYTE const fhd = ((const BYTE*)src)[minInputSize-1];
U32 const dictID= fhd & 3;
@@ -238,7 +246,7 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s
memset(zfhPtr, 0, sizeof(*zfhPtr)); /* not strictly necessary, but static analyzer do not understand that zfhPtr is only going to be read only if return value is zero, since they are 2 different signals */
if (srcSize < minInputSize) return minInputSize;
- if (src==NULL) return ERROR(GENERIC); /* invalid parameter */
+ RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter");
if ( (format != ZSTD_f_zstd1_magicless)
&& (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {
@@ -251,7 +259,7 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s
zfhPtr->frameType = ZSTD_skippableFrame;
return 0;
}
- return ERROR(prefix_unknown);
+ RETURN_ERROR(prefix_unknown);
}
/* ensure there is enough `srcSize` to fully read/decode frame header */
@@ -269,14 +277,13 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s
U64 windowSize = 0;
U32 dictID = 0;
U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN;
- if ((fhdByte & 0x08) != 0)
- return ERROR(frameParameter_unsupported); /* reserved bits, must be zero */
+ RETURN_ERROR_IF((fhdByte & 0x08) != 0, frameParameter_unsupported,
+ "reserved bits, must be zero");
if (!singleSegment) {
BYTE const wlByte = ip[pos++];
U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
- if (windowLog > ZSTD_WINDOWLOG_MAX)
- return ERROR(frameParameter_windowTooLarge);
+ RETURN_ERROR_IF(windowLog > ZSTD_WINDOWLOG_MAX, frameParameter_windowTooLarge);
windowSize = (1ULL << windowLog);
windowSize += (windowSize >> 3) * (wlByte&7);
}
@@ -348,12 +355,11 @@ static size_t readSkippableFrameSize(void const* src, size_t srcSize)
size_t const skippableHeaderSize = ZSTD_SKIPPABLEHEADERSIZE;
U32 sizeU32;
- if (srcSize < ZSTD_SKIPPABLEHEADERSIZE)
- return ERROR(srcSize_wrong);
+ RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong);
sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE);
- if ((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32)
- return ERROR(frameParameter_unsupported);
+ RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32,
+ frameParameter_unsupported);
return skippableHeaderSize + sizeU32;
}
@@ -428,67 +434,124 @@ static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t he
{
size_t const result = ZSTD_getFrameHeader_advanced(&(dctx->fParams), src, headerSize, dctx->format);
if (ZSTD_isError(result)) return result; /* invalid header */
- if (result>0) return ERROR(srcSize_wrong); /* headerSize too small */
- if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID))
- return ERROR(dictionary_wrong);
+ RETURN_ERROR_IF(result>0, srcSize_wrong, "headerSize too small");
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ /* Skip the dictID check in fuzzing mode, because it makes the search
+ * harder.
+ */
+ RETURN_ERROR_IF(dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID),
+ dictionary_wrong);
+#endif
if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0);
return 0;
}
+static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(size_t ret)
+{
+ ZSTD_frameSizeInfo frameSizeInfo;
+ frameSizeInfo.compressedSize = ret;
+ frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR;
+ return frameSizeInfo;
+}
-/** ZSTD_findFrameCompressedSize() :
- * compatible with legacy mode
- * `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
- * `srcSize` must be at least as large as the frame contained
- * @return : the compressed size of the frame starting at `src` */
-size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
+static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize)
{
+ ZSTD_frameSizeInfo frameSizeInfo;
+ memset(&frameSizeInfo, 0, sizeof(ZSTD_frameSizeInfo));
+
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
if (ZSTD_isLegacy(src, srcSize))
- return ZSTD_findFrameCompressedSizeLegacy(src, srcSize);
+ return ZSTD_findFrameSizeInfoLegacy(src, srcSize);
#endif
- if ( (srcSize >= ZSTD_SKIPPABLEHEADERSIZE)
- && (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START ) {
- return readSkippableFrameSize(src, srcSize);
+
+ if ((srcSize >= ZSTD_SKIPPABLEHEADERSIZE)
+ && (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
+ frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize);
+ return frameSizeInfo;
} else {
const BYTE* ip = (const BYTE*)src;
const BYTE* const ipstart = ip;
size_t remainingSize = srcSize;
+ size_t nbBlocks = 0;
ZSTD_frameHeader zfh;
/* Extract Frame Header */
{ size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
- if (ZSTD_isError(ret)) return ret;
- if (ret > 0) return ERROR(srcSize_wrong);
+ if (ZSTD_isError(ret))
+ return ZSTD_errorFrameSizeInfo(ret);
+ if (ret > 0)
+ return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
}
ip += zfh.headerSize;
remainingSize -= zfh.headerSize;
- /* Loop on each block */
+ /* Iterate over each block */
while (1) {
blockProperties_t blockProperties;
size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
- if (ZSTD_isError(cBlockSize)) return cBlockSize;
+ if (ZSTD_isError(cBlockSize))
+ return ZSTD_errorFrameSizeInfo(cBlockSize);
if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
- return ERROR(srcSize_wrong);
+ return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
ip += ZSTD_blockHeaderSize + cBlockSize;
remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
+ nbBlocks++;
if (blockProperties.lastBlock) break;
}
- if (zfh.checksumFlag) { /* Final frame content checksum */
- if (remainingSize < 4) return ERROR(srcSize_wrong);
+ /* Final frame content checksum */
+ if (zfh.checksumFlag) {
+ if (remainingSize < 4)
+ return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
ip += 4;
}
- return ip - ipstart;
+ frameSizeInfo.compressedSize = ip - ipstart;
+ frameSizeInfo.decompressedBound = (zfh.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN)
+ ? zfh.frameContentSize
+ : nbBlocks * zfh.blockSizeMax;
+ return frameSizeInfo;
}
}
+/** ZSTD_findFrameCompressedSize() :
+ * compatible with legacy mode
+ * `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
+ * `srcSize` must be at least as large as the frame contained
+ * @return : the compressed size of the frame starting at `src` */
+size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
+{
+ ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
+ return frameSizeInfo.compressedSize;
+}
+
+
+/** ZSTD_decompressBound() :
+ * compatible with legacy mode
+ * `src` must point to the start of a ZSTD frame or a skippable frame
+ * `srcSize` must be at least as large as the frame contained
+ * @return : the maximum decompressed size of the compressed source
+ */
+unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize)
+{
+ unsigned long long bound = 0;
+ /* Iterate over each frame */
+ while (srcSize > 0) {
+ ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
+ size_t const compressedSize = frameSizeInfo.compressedSize;
+ unsigned long long const decompressedBound = frameSizeInfo.decompressedBound;
+ if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR)
+ return ZSTD_CONTENTSIZE_ERROR;
+ src = (const BYTE*)src + compressedSize;
+ srcSize -= compressedSize;
+ bound += decompressedBound;
+ }
+ return bound;
+}
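ZSTD_decompressBound() is new in this release: for frames that do not record their content size it sums a per-frame worst case of nbBlocks * blockSizeMax, so it is suitable for sizing an output buffer up front. A minimal usage sketch (in 1.4.0 the declaration sits in the static-linking-only section of zstd.h, hence the define; 32-bit overflow of the bound is not handled here):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <stdlib.h>
    #include <zstd.h>

    /* Decompress `src` when the content size is unknown, using the bound to
     * allocate the destination. Returns NULL on any failure. */
    static void* decompress_with_bound(const void* src, size_t srcSize, size_t* dSizePtr)
    {
        unsigned long long const bound = ZSTD_decompressBound(src, srcSize);
        void* dst;
        size_t dSize;
        if (bound == ZSTD_CONTENTSIZE_ERROR) return NULL;  /* corrupt or truncated input */
        dst = malloc((size_t)bound);
        if (dst == NULL) return NULL;
        dSize = ZSTD_decompress(dst, (size_t)bound, src, srcSize);
        if (ZSTD_isError(dSize)) { free(dst); return NULL; }
        *dSizePtr = dSize;
        return dst;
    }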
/*-*************************************************************
@@ -522,9 +585,9 @@ static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
DEBUGLOG(5, "ZSTD_copyRawBlock");
if (dst == NULL) {
if (srcSize == 0) return 0;
- return ERROR(dstBuffer_null);
+ RETURN_ERROR(dstBuffer_null);
}
- if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall);
+ RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall);
memcpy(dst, src, srcSize);
return srcSize;
}
@@ -535,9 +598,9 @@ static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
{
if (dst == NULL) {
if (regenSize == 0) return 0;
- return ERROR(dstBuffer_null);
+ RETURN_ERROR(dstBuffer_null);
}
- if (regenSize > dstCapacity) return ERROR(dstSize_tooSmall);
+ RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall);
memset(dst, b, regenSize);
return regenSize;
}
@@ -560,15 +623,16 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
DEBUGLOG(4, "ZSTD_decompressFrame (srcSize:%i)", (int)*srcSizePtr);
/* check */
- if (remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN+ZSTD_blockHeaderSize)
- return ERROR(srcSize_wrong);
+ RETURN_ERROR_IF(
+ remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN+ZSTD_blockHeaderSize,
+ srcSize_wrong);
/* Frame Header */
{ size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_FRAMEHEADERSIZE_PREFIX);
if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
- if (remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize)
- return ERROR(srcSize_wrong);
- CHECK_F( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) );
+ RETURN_ERROR_IF(remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize,
+ srcSize_wrong);
+ FORWARD_IF_ERROR( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) );
ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize;
}
@@ -581,7 +645,7 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
ip += ZSTD_blockHeaderSize;
remainingSrcSize -= ZSTD_blockHeaderSize;
- if (cBlockSize > remainingSrcSize) return ERROR(srcSize_wrong);
+ RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong);
switch(blockProperties.blockType)
{
@@ -596,7 +660,7 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
break;
case bt_reserved :
default:
- return ERROR(corruption_detected);
+ RETURN_ERROR(corruption_detected);
}
if (ZSTD_isError(decodedSize)) return decodedSize;
@@ -609,15 +673,15 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
}
if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
- if ((U64)(op-ostart) != dctx->fParams.frameContentSize) {
- return ERROR(corruption_detected);
- } }
+ RETURN_ERROR_IF((U64)(op-ostart) != dctx->fParams.frameContentSize,
+ corruption_detected);
+ }
if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState);
U32 checkRead;
- if (remainingSrcSize<4) return ERROR(checksum_wrong);
+ RETURN_ERROR_IF(remainingSrcSize<4, checksum_wrong);
checkRead = MEM_readLE32(ip);
- if (checkRead != checkCalc) return ERROR(checksum_wrong);
+ RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong);
ip += 4;
remainingSrcSize -= 4;
}
@@ -652,8 +716,8 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
size_t decodedSize;
size_t const frameSize = ZSTD_findFrameCompressedSizeLegacy(src, srcSize);
if (ZSTD_isError(frameSize)) return frameSize;
- /* legacy support is not compatible with static dctx */
- if (dctx->staticSize) return ERROR(memory_allocation);
+ RETURN_ERROR_IF(dctx->staticSize, memory_allocation,
+ "legacy support is not compatible with static dctx");
decodedSize = ZSTD_decompressLegacy(dst, dstCapacity, src, frameSize, dict, dictSize);
if (ZSTD_isError(decodedSize)) return decodedSize;
@@ -676,7 +740,7 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
size_t const skippableSize = readSkippableFrameSize(src, srcSize);
if (ZSTD_isError(skippableSize))
return skippableSize;
- if (srcSize < skippableSize) return ERROR(srcSize_wrong);
+ RETURN_ERROR_IF(srcSize < skippableSize, srcSize_wrong);
src = (const BYTE *)src + skippableSize;
srcSize -= skippableSize;
@@ -685,29 +749,29 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
if (ddict) {
/* we were called from ZSTD_decompress_usingDDict */
- CHECK_F(ZSTD_decompressBegin_usingDDict(dctx, ddict));
+ FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(dctx, ddict));
} else {
/* this will initialize correctly with no dict if dict == NULL, so
* use this in all cases but ddict */
- CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));
+ FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));
}
ZSTD_checkContinuity(dctx, dst);
{ const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity,
&src, &srcSize);
- if ( (ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown)
- && (moreThan1Frame==1) ) {
- /* at least one frame successfully completed,
- * but following bytes are garbage :
- * it's more likely to be a srcSize error,
- * specifying more bytes than compressed size of frame(s).
- * This error message replaces ERROR(prefix_unknown),
- * which would be confusing, as the first header is actually correct.
- * Note that one could be unlucky, it might be a corruption error instead,
- * happening right at the place where we expect zstd magic bytes.
- * But this is _much_ less likely than a srcSize field error. */
- return ERROR(srcSize_wrong);
- }
+ RETURN_ERROR_IF(
+ (ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown)
+ && (moreThan1Frame==1),
+ srcSize_wrong,
+ "at least one frame successfully completed, but following "
+ "bytes are garbage: it's more likely to be a srcSize error, "
+ "specifying more bytes than compressed size of frame(s). This "
+ "error message replaces ERROR(prefix_unknown), which would be "
+ "confusing, as the first header is actually correct. Note that "
+ "one could be unlucky, it might be a corruption error instead, "
+ "happening right at the place where we expect zstd magic "
+ "bytes. But this is _much_ less likely than a srcSize field "
+ "error.");
if (ZSTD_isError(res)) return res;
assert(res <= dstCapacity);
dst = (BYTE*)dst + res;
@@ -716,7 +780,7 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
moreThan1Frame = 1;
} /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
- if (srcSize) return ERROR(srcSize_wrong); /* input not entirely consumed */
+ RETURN_ERROR_IF(srcSize, srcSize_wrong, "input not entirely consumed");
return (BYTE*)dst - (BYTE*)dststart;
}
@@ -730,9 +794,26 @@ size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
}
+static ZSTD_DDict const* ZSTD_getDDict(ZSTD_DCtx* dctx)
+{
+ switch (dctx->dictUses) {
+ default:
+ assert(0 /* Impossible */);
+ /* fall-through */
+ case ZSTD_dont_use:
+ ZSTD_clearDict(dctx);
+ return NULL;
+ case ZSTD_use_indefinitely:
+ return dctx->ddict;
+ case ZSTD_use_once:
+ dctx->dictUses = ZSTD_dont_use;
+ return dctx->ddict;
+ }
+}
+
size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
- return ZSTD_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0);
+ return ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ZSTD_getDDict(dctx));
}
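ZSTD_getDDict() is what makes the new dictUses field visible to callers: a dictionary referenced with ZSTD_DCtx_refPrefix() is consumed by the next decompression (ZSTD_use_once), while one set with ZSTD_DCtx_loadDictionary() or ZSTD_DCtx_refDDict() persists (ZSTD_use_indefinitely). A small sketch of that difference from the caller's side (buffer names are placeholders):

    #include <zstd.h>

    /* frameA is decoded with `prefix`; frameB, decoded right after, is not,
     * because the one-shot reference was already consumed. */
    static void prefix_is_single_use(ZSTD_DCtx* dctx,
                                     void* dst, size_t dstCapacity,
                                     const void* prefix, size_t prefixSize,
                                     const void* frameA, size_t frameASize,
                                     const void* frameB, size_t frameBSize)
    {
        ZSTD_DCtx_refPrefix(dctx, prefix, prefixSize);
        (void)ZSTD_decompressDCtx(dctx, dst, dstCapacity, frameA, frameASize);
        (void)ZSTD_decompressDCtx(dctx, dst, dstCapacity, frameB, frameBSize);
    }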
@@ -741,7 +822,7 @@ size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t sr
#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1)
size_t regenSize;
ZSTD_DCtx* const dctx = ZSTD_createDCtx();
- if (dctx==NULL) return ERROR(memory_allocation);
+ RETURN_ERROR_IF(dctx==NULL, memory_allocation);
regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
ZSTD_freeDCtx(dctx);
return regenSize;
@@ -791,8 +872,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
{
DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (unsigned)srcSize);
/* Sanity check */
- if (srcSize != dctx->expected)
- return ERROR(srcSize_wrong); /* not allowed */
+ RETURN_ERROR_IF(srcSize != dctx->expected, srcSize_wrong, "not allowed");
if (dstCapacity) ZSTD_checkContinuity(dctx, dst);
switch (dctx->stage)
@@ -817,7 +897,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
case ZSTDds_decodeFrameHeader:
assert(src != NULL);
memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize);
- CHECK_F(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize));
+ FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize));
dctx->expected = ZSTD_blockHeaderSize;
dctx->stage = ZSTDds_decodeBlockHeader;
return 0;
@@ -867,7 +947,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
break;
case bt_reserved : /* should never happen */
default:
- return ERROR(corruption_detected);
+ RETURN_ERROR(corruption_detected);
}
if (ZSTD_isError(rSize)) return rSize;
DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (unsigned)rSize);
@@ -876,10 +956,10 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */
DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (unsigned)dctx->decodedSize);
- if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
- if (dctx->decodedSize != dctx->fParams.frameContentSize) {
- return ERROR(corruption_detected);
- } }
+ RETURN_ERROR_IF(
+ dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
+ && dctx->decodedSize != dctx->fParams.frameContentSize,
+ corruption_detected);
if (dctx->fParams.checksumFlag) { /* another round for frame checksum */
dctx->expected = 4;
dctx->stage = ZSTDds_checkChecksum;
@@ -900,7 +980,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
{ U32 const h32 = (U32)XXH64_digest(&dctx->xxhState);
U32 const check32 = MEM_readLE32(src);
DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32);
- if (check32 != h32) return ERROR(checksum_wrong);
+ RETURN_ERROR_IF(check32 != h32, checksum_wrong);
dctx->expected = 0;
dctx->stage = ZSTDds_getFrameHeaderSize;
return 0;
@@ -921,7 +1001,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
default:
assert(0); /* impossible */
- return ERROR(GENERIC); /* some compiler require default to do something */
+ RETURN_ERROR(GENERIC); /* some compilers require default to do something */
}
}
@@ -945,7 +1025,7 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
const BYTE* dictPtr = (const BYTE*)dict;
const BYTE* const dictEnd = dictPtr + dictSize;
- if (dictSize <= 8) return ERROR(dictionary_corrupted);
+ RETURN_ERROR_IF(dictSize <= 8, dictionary_corrupted);
assert(MEM_readLE32(dict) == ZSTD_MAGIC_DICTIONARY); /* dict must be valid */
dictPtr += 8; /* skip header = magic + dictID */
@@ -964,16 +1044,16 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
dictPtr, dictEnd - dictPtr,
workspace, workspaceSize);
#endif
- if (HUF_isError(hSize)) return ERROR(dictionary_corrupted);
+ RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted);
dictPtr += hSize;
}
{ short offcodeNCount[MaxOff+1];
unsigned offcodeMaxValue = MaxOff, offcodeLog;
size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
- if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
- if (offcodeMaxValue > MaxOff) return ERROR(dictionary_corrupted);
- if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
+ RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted);
+ RETURN_ERROR_IF(offcodeMaxValue > MaxOff, dictionary_corrupted);
+ RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted);
ZSTD_buildFSETable( entropy->OFTable,
offcodeNCount, offcodeMaxValue,
OF_base, OF_bits,
@@ -984,9 +1064,9 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
{ short matchlengthNCount[MaxML+1];
unsigned matchlengthMaxValue = MaxML, matchlengthLog;
size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
- if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
- if (matchlengthMaxValue > MaxML) return ERROR(dictionary_corrupted);
- if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
+ RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted);
+ RETURN_ERROR_IF(matchlengthMaxValue > MaxML, dictionary_corrupted);
+ RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted);
ZSTD_buildFSETable( entropy->MLTable,
matchlengthNCount, matchlengthMaxValue,
ML_base, ML_bits,
@@ -997,9 +1077,9 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
{ short litlengthNCount[MaxLL+1];
unsigned litlengthMaxValue = MaxLL, litlengthLog;
size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
- if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
- if (litlengthMaxValue > MaxLL) return ERROR(dictionary_corrupted);
- if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
+ RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted);
+ RETURN_ERROR_IF(litlengthMaxValue > MaxLL, dictionary_corrupted);
+ RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted);
ZSTD_buildFSETable( entropy->LLTable,
litlengthNCount, litlengthMaxValue,
LL_base, LL_bits,
@@ -1007,12 +1087,13 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
dictPtr += litlengthHeaderSize;
}
- if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
+ RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted);
{ int i;
size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12));
for (i=0; i<3; i++) {
U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4;
- if (rep==0 || rep >= dictContentSize) return ERROR(dictionary_corrupted);
+ RETURN_ERROR_IF(rep==0 || rep >= dictContentSize,
+ dictionary_corrupted);
entropy->rep[i] = rep;
} }
@@ -1030,7 +1111,7 @@ static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict
/* load entropy tables */
{ size_t const eSize = ZSTD_loadDEntropy(&dctx->entropy, dict, dictSize);
- if (ZSTD_isError(eSize)) return ERROR(dictionary_corrupted);
+ RETURN_ERROR_IF(ZSTD_isError(eSize), dictionary_corrupted);
dict = (const char*)dict + eSize;
dictSize -= eSize;
}
@@ -1064,9 +1145,11 @@ size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
{
- CHECK_F( ZSTD_decompressBegin(dctx) );
+ FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) );
if (dict && dictSize)
- CHECK_E(ZSTD_decompress_insertDictionary(dctx, dict, dictSize), dictionary_corrupted);
+ RETURN_ERROR_IF(
+ ZSTD_isError(ZSTD_decompress_insertDictionary(dctx, dict, dictSize)),
+ dictionary_corrupted);
return 0;
}
@@ -1085,7 +1168,7 @@ size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
DEBUGLOG(4, "DDict is %s",
dctx->ddictIsCold ? "~cold~" : "hot!");
}
- CHECK_F( ZSTD_decompressBegin(dctx) );
+ FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) );
if (ddict) { /* NULL ddict is equivalent to no dictionary */
ZSTD_copyDDictParameters(dctx, ddict);
}
@@ -1104,7 +1187,7 @@ unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
}
/*! ZSTD_getDictID_fromFrame() :
- * Provides the dictID required to decompresse frame stored within `src`.
+ * Provides the dictID required to decompress frame stored within `src`.
* If @return == 0, the dictID could not be decoded.
* This could for one of the following reasons :
* - The frame does not require a dictionary (most common case).
@@ -1176,15 +1259,14 @@ size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType)
{
- if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
- ZSTD_freeDDict(dctx->ddictLocal);
+ RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
+ ZSTD_clearDict(dctx);
if (dict && dictSize >= 8) {
dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem);
- if (dctx->ddictLocal == NULL) return ERROR(memory_allocation);
- } else {
- dctx->ddictLocal = NULL;
+ RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation);
+ dctx->ddict = dctx->ddictLocal;
+ dctx->dictUses = ZSTD_use_indefinitely;
}
- dctx->ddict = dctx->ddictLocal;
return 0;
}
@@ -1200,7 +1282,9 @@ size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSi
size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
{
- return ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType);
+ FORWARD_IF_ERROR(ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType));
+ dctx->dictUses = ZSTD_use_once;
+ return 0;
}
size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize)
@@ -1215,9 +1299,8 @@ size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSiz
size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize)
{
DEBUGLOG(4, "ZSTD_initDStream_usingDict");
- zds->streamStage = zdss_init;
- zds->noForwardProgress = 0;
- CHECK_F( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) );
+ FORWARD_IF_ERROR( ZSTD_DCtx_reset(zds, ZSTD_reset_session_only) );
+ FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) );
return ZSTD_FRAMEHEADERSIZE_PREFIX;
}
@@ -1225,7 +1308,7 @@ size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t di
size_t ZSTD_initDStream(ZSTD_DStream* zds)
{
DEBUGLOG(4, "ZSTD_initDStream");
- return ZSTD_initDStream_usingDict(zds, NULL, 0);
+ return ZSTD_initDStream_usingDDict(zds, NULL);
}
/* ZSTD_initDStream_usingDDict() :
@@ -1233,9 +1316,9 @@ size_t ZSTD_initDStream(ZSTD_DStream* zds)
* this function cannot fail */
size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
{
- size_t const initResult = ZSTD_initDStream(dctx);
- dctx->ddict = ddict;
- return initResult;
+ FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) );
+ FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) );
+ return ZSTD_FRAMEHEADERSIZE_PREFIX;
}
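With ZSTD_initDStream*() now expressed as ZSTD_DCtx_reset() plus a dictionary reference, the same setup can be written directly against the advanced API. A minimal streaming sketch under that assumption (the whole input is presented in one ZSTD_inBuffer for brevity):

    #include <zstd.h>

    /* `ddict` may be NULL for dictionary-less decompression. */
    static size_t stream_decompress(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict,
                                    void* dst, size_t dstCapacity,
                                    const void* src, size_t srcSize)
    {
        ZSTD_inBuffer  in  = { src, srcSize, 0 };
        ZSTD_outBuffer out = { dst, dstCapacity, 0 };
        size_t ret = 0;
        ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only);
        ZSTD_DCtx_refDDict(dctx, ddict);
        while (in.pos < in.size) {
            ret = ZSTD_decompressStream(dctx, &out, &in);
            if (ZSTD_isError(ret)) return ret;  /* ret == 0 once a frame is fully decoded */
        }
        return out.pos;   /* bytes written to dst */
    }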
/* ZSTD_resetDStream() :
@@ -1243,19 +1326,19 @@ size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
* this function cannot fail */
size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
{
- DEBUGLOG(4, "ZSTD_resetDStream");
- dctx->streamStage = zdss_loadHeader;
- dctx->lhSize = dctx->inPos = dctx->outStart = dctx->outEnd = 0;
- dctx->legacyVersion = 0;
- dctx->hostageByte = 0;
+ FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only));
return ZSTD_FRAMEHEADERSIZE_PREFIX;
}
size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
{
- if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
- dctx->ddict = ddict;
+ RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
+ ZSTD_clearDict(dctx);
+ if (ddict) {
+ dctx->ddict = ddict;
+ dctx->dictUses = ZSTD_use_indefinitely;
+ }
return 0;
}
@@ -1267,9 +1350,9 @@ size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize)
ZSTD_bounds const bounds = ZSTD_dParam_getBounds(ZSTD_d_windowLogMax);
size_t const min = (size_t)1 << bounds.lowerBound;
size_t const max = (size_t)1 << bounds.upperBound;
- if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
- if (maxWindowSize < min) return ERROR(parameter_outOfBound);
- if (maxWindowSize > max) return ERROR(parameter_outOfBound);
+ RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
+ RETURN_ERROR_IF(maxWindowSize < min, parameter_outOfBound);
+ RETURN_ERROR_IF(maxWindowSize > max, parameter_outOfBound);
dctx->maxWindowSize = maxWindowSize;
return 0;
}
@@ -1311,15 +1394,15 @@ static int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value)
}
#define CHECK_DBOUNDS(p,v) { \
- if (!ZSTD_dParam_withinBounds(p, v)) \
- return ERROR(parameter_outOfBound); \
+ RETURN_ERROR_IF(!ZSTD_dParam_withinBounds(p, v), parameter_outOfBound); \
}
size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value)
{
- if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
+ RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
switch(dParam) {
case ZSTD_d_windowLogMax:
+ if (value == 0) value = ZSTD_WINDOWLOG_LIMIT_DEFAULT;
CHECK_DBOUNDS(ZSTD_d_windowLogMax, value);
dctx->maxWindowSize = ((size_t)1) << value;
return 0;
@@ -1329,19 +1412,20 @@ size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value
return 0;
default:;
}
- return ERROR(parameter_unsupported);
+ RETURN_ERROR(parameter_unsupported);
}
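Note the new zero-means-default handling for ZSTD_d_windowLogMax. A short sketch of capping the window a decoder will accept (27, i.e. 128 MiB, is just an example value):

    #include <zstd.h>

    static size_t limit_window(ZSTD_DCtx* dctx)
    {
        /* Reject frames needing a window larger than 2^27 bytes;
         * passing 0 instead restores ZSTD_WINDOWLOG_LIMIT_DEFAULT. */
        return ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 27);
    }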
size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset)
{
if ( (reset == ZSTD_reset_session_only)
|| (reset == ZSTD_reset_session_and_parameters) ) {
- (void)ZSTD_initDStream(dctx);
+ dctx->streamStage = zdss_init;
+ dctx->noForwardProgress = 0;
}
if ( (reset == ZSTD_reset_parameters)
|| (reset == ZSTD_reset_session_and_parameters) ) {
- if (dctx->streamStage != zdss_init)
- return ERROR(stage_wrong);
+ RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong);
+ ZSTD_clearDict(dctx);
dctx->format = ZSTD_f_zstd1;
dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
}
@@ -1360,7 +1444,8 @@ size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long
unsigned long long const neededRBSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2);
unsigned long long const neededSize = MIN(frameContentSize, neededRBSize);
size_t const minRBSize = (size_t) neededSize;
- if ((unsigned long long)minRBSize != neededSize) return ERROR(frameParameter_windowTooLarge);
+ RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize,
+ frameParameter_windowTooLarge);
return minRBSize;
}
@@ -1378,9 +1463,9 @@ size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)
ZSTD_frameHeader zfh;
size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);
if (ZSTD_isError(err)) return err;
- if (err>0) return ERROR(srcSize_wrong);
- if (zfh.windowSize > windowSizeMax)
- return ERROR(frameParameter_windowTooLarge);
+ RETURN_ERROR_IF(err>0, srcSize_wrong);
+ RETURN_ERROR_IF(zfh.windowSize > windowSizeMax,
+ frameParameter_windowTooLarge);
return ZSTD_estimateDStreamSize((size_t)zfh.windowSize);
}
@@ -1406,16 +1491,16 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
U32 someMoreWork = 1;
DEBUGLOG(5, "ZSTD_decompressStream");
- if (input->pos > input->size) { /* forbidden */
- DEBUGLOG(5, "in: pos: %u vs size: %u",
- (U32)input->pos, (U32)input->size);
- return ERROR(srcSize_wrong);
- }
- if (output->pos > output->size) { /* forbidden */
- DEBUGLOG(5, "out: pos: %u vs size: %u",
- (U32)output->pos, (U32)output->size);
- return ERROR(dstSize_tooSmall);
- }
+ RETURN_ERROR_IF(
+ input->pos > input->size,
+ srcSize_wrong,
+ "forbidden. in: pos: %u vs size: %u",
+ (U32)input->pos, (U32)input->size);
+ RETURN_ERROR_IF(
+ output->pos > output->size,
+ dstSize_tooSmall,
+ "forbidden. out: pos: %u vs size: %u",
+ (U32)output->pos, (U32)output->size);
DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos));
while (someMoreWork) {
@@ -1423,15 +1508,18 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
{
case zdss_init :
DEBUGLOG(5, "stage zdss_init => transparent reset ");
- ZSTD_resetDStream(zds); /* transparent reset on starting decoding a new frame */
+ zds->streamStage = zdss_loadHeader;
+ zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
+ zds->legacyVersion = 0;
+ zds->hostageByte = 0;
/* fall-through */
case zdss_loadHeader :
DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip));
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
if (zds->legacyVersion) {
- /* legacy support is incompatible with static dctx */
- if (zds->staticSize) return ERROR(memory_allocation);
+ RETURN_ERROR_IF(zds->staticSize, memory_allocation,
+ "legacy support is incompatible with static dctx");
{ size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, zds->legacyVersion, output, input);
if (hint==0) zds->streamStage = zdss_init;
return hint;
@@ -1443,12 +1531,13 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart);
if (legacyVersion) {
- const void* const dict = zds->ddict ? ZSTD_DDict_dictContent(zds->ddict) : NULL;
- size_t const dictSize = zds->ddict ? ZSTD_DDict_dictSize(zds->ddict) : 0;
+ ZSTD_DDict const* const ddict = ZSTD_getDDict(zds);
+ const void* const dict = ddict ? ZSTD_DDict_dictContent(ddict) : NULL;
+ size_t const dictSize = ddict ? ZSTD_DDict_dictSize(ddict) : 0;
DEBUGLOG(5, "ZSTD_decompressStream: detected legacy version v0.%u", legacyVersion);
- /* legacy support is incompatible with static dctx */
- if (zds->staticSize) return ERROR(memory_allocation);
- CHECK_F(ZSTD_initLegacyStream(&zds->legacyContext,
+ RETURN_ERROR_IF(zds->staticSize, memory_allocation,
+ "legacy support is incompatible with static dctx");
+ FORWARD_IF_ERROR(ZSTD_initLegacyStream(&zds->legacyContext,
zds->previousLegacyVersion, legacyVersion,
dict, dictSize));
zds->legacyVersion = zds->previousLegacyVersion = legacyVersion;
@@ -1482,7 +1571,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend-istart);
if (cSize <= (size_t)(iend-istart)) {
/* shortcut : using single-pass mode */
- size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, oend-op, istart, cSize, zds->ddict);
+ size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, oend-op, istart, cSize, ZSTD_getDDict(zds));
if (ZSTD_isError(decompressedSize)) return decompressedSize;
DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()")
ip = istart + cSize;
@@ -1495,13 +1584,13 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
/* Consume header (see ZSTDds_decodeFrameHeader) */
DEBUGLOG(4, "Consume header");
- CHECK_F(ZSTD_decompressBegin_usingDDict(zds, zds->ddict));
+ FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)));
if ((MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE);
zds->stage = ZSTDds_skipFrame;
} else {
- CHECK_F(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize));
+ FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize));
zds->expected = ZSTD_blockHeaderSize;
zds->stage = ZSTDds_decodeBlockHeader;
}
@@ -1511,7 +1600,8 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
(U32)(zds->fParams.windowSize >>10),
(U32)(zds->maxWindowSize >> 10) );
zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
- if (zds->fParams.windowSize > zds->maxWindowSize) return ERROR(frameParameter_windowTooLarge);
+ RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize,
+ frameParameter_windowTooLarge);
/* Adapt buffer sizes to frame header instructions */
{ size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);
@@ -1525,14 +1615,15 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
if (zds->staticSize) { /* static DCtx */
DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize);
assert(zds->staticSize >= sizeof(ZSTD_DCtx)); /* controlled at init */
- if (bufferSize > zds->staticSize - sizeof(ZSTD_DCtx))
- return ERROR(memory_allocation);
+ RETURN_ERROR_IF(
+ bufferSize > zds->staticSize - sizeof(ZSTD_DCtx),
+ memory_allocation);
} else {
ZSTD_free(zds->inBuff, zds->customMem);
zds->inBuffSize = 0;
zds->outBuffSize = 0;
zds->inBuff = (char*)ZSTD_malloc(bufferSize, zds->customMem);
- if (zds->inBuff == NULL) return ERROR(memory_allocation);
+ RETURN_ERROR_IF(zds->inBuff == NULL, memory_allocation);
}
zds->inBuffSize = neededInBuffSize;
zds->outBuff = zds->inBuff + zds->inBuffSize;
@@ -1574,7 +1665,9 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
if (isSkipFrame) {
loadedSize = MIN(toLoad, (size_t)(iend-ip));
} else {
- if (toLoad > zds->inBuffSize - zds->inPos) return ERROR(corruption_detected); /* should never happen */
+ RETURN_ERROR_IF(toLoad > zds->inBuffSize - zds->inPos,
+ corruption_detected,
+ "should never happen");
loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend-ip);
}
ip += loadedSize;
@@ -1615,7 +1708,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
default:
assert(0); /* impossible */
- return ERROR(GENERIC); /* some compiler require default to do something */
+ RETURN_ERROR(GENERIC); /* some compilers require default to do something */
} }
/* result */
@@ -1624,8 +1717,8 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
if ((ip==istart) && (op==ostart)) { /* no forward progress */
zds->noForwardProgress ++;
if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) {
- if (op==oend) return ERROR(dstSize_tooSmall);
- if (ip==iend) return ERROR(srcSize_wrong);
+ RETURN_ERROR_IF(op==oend, dstSize_tooSmall);
+ RETURN_ERROR_IF(ip==iend, srcSize_wrong);
assert(0);
}
} else {
diff --git a/thirdparty/zstd/decompress/zstd_decompress_block.c b/thirdparty/zstd/decompress/zstd_decompress_block.c
index 32baad9fbb..a2a7eedcf2 100644
--- a/thirdparty/zstd/decompress/zstd_decompress_block.c
+++ b/thirdparty/zstd/decompress/zstd_decompress_block.c
@@ -56,14 +56,15 @@ static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
blockProperties_t* bpPtr)
{
- if (srcSize < ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
+ RETURN_ERROR_IF(srcSize < ZSTD_blockHeaderSize, srcSize_wrong);
+
{ U32 const cBlockHeader = MEM_readLE24(src);
U32 const cSize = cBlockHeader >> 3;
bpPtr->lastBlock = cBlockHeader & 1;
bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
bpPtr->origSize = cSize; /* only useful for RLE */
if (bpPtr->blockType == bt_rle) return 1;
- if (bpPtr->blockType == bt_reserved) return ERROR(corruption_detected);
+ RETURN_ERROR_IF(bpPtr->blockType == bt_reserved, corruption_detected);
return cSize;
}
}
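For reference, the block header that ZSTD_getcBlockSize() parses is 3 bytes, little-endian: bit 0 is the last-block flag, bits 1-2 the block type, bits 3-23 the size field. A standalone sketch of the same bit layout, independent of the zstd sources:

    #include <stdint.h>

    /* blockType: 0 = raw, 1 = RLE, 2 = compressed, 3 = reserved (invalid).
     * For RLE blocks the size field is the regenerated size, not the
     * compressed size (which is a single byte). */
    static void parse_block_header(const uint8_t hdr[3],
                                   unsigned* lastBlock, unsigned* blockType,
                                   uint32_t* sizeField)
    {
        uint32_t const h = (uint32_t)hdr[0]
                         | ((uint32_t)hdr[1] << 8)
                         | ((uint32_t)hdr[2] << 16);
        *lastBlock = h & 1;
        *blockType = (h >> 1) & 3;
        *sizeField = h >> 3;
    }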
@@ -78,7 +79,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
{
- if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
+ RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected);
{ const BYTE* const istart = (const BYTE*) src;
symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
@@ -86,11 +87,11 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
switch(litEncType)
{
case set_repeat:
- if (dctx->litEntropy==0) return ERROR(dictionary_corrupted);
+ RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted);
/* fall-through */
case set_compressed:
- if (srcSize < 5) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */
+ RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3");
{ size_t lhSize, litSize, litCSize;
U32 singleStream=0;
U32 const lhlCode = (istart[0] >> 2) & 3;
@@ -118,8 +119,8 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
litCSize = (lhc >> 22) + (istart[4] << 10);
break;
}
- if (litSize > ZSTD_BLOCKSIZE_MAX) return ERROR(corruption_detected);
- if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
+ RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected);
+ RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected);
/* prefetch huffman table if cold */
if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) {
@@ -157,7 +158,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
}
}
- if (HUF_isError(hufSuccess)) return ERROR(corruption_detected);
+ RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
@@ -187,7 +188,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
}
if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
- if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
+ RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected);
memcpy(dctx->litBuffer, istart+lhSize, litSize);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
@@ -216,17 +217,17 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
case 3:
lhSize = 3;
litSize = MEM_readLE24(istart) >> 4;
- if (srcSize<4) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
+ RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4");
break;
}
- if (litSize > ZSTD_BLOCKSIZE_MAX) return ERROR(corruption_detected);
+ RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected);
memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
return lhSize+1;
}
default:
- return ERROR(corruption_detected); /* impossible */
+ RETURN_ERROR(corruption_detected, "impossible");
}
}
}
@@ -436,8 +437,8 @@ static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymb
switch(type)
{
case set_rle :
- if (!srcSize) return ERROR(srcSize_wrong);
- if ( (*(const BYTE*)src) > max) return ERROR(corruption_detected);
+ RETURN_ERROR_IF(!srcSize, srcSize_wrong);
+ RETURN_ERROR_IF((*(const BYTE*)src) > max, corruption_detected);
{ U32 const symbol = *(const BYTE*)src;
U32 const baseline = baseValue[symbol];
U32 const nbBits = nbAdditionalBits[symbol];
@@ -449,7 +450,7 @@ static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymb
*DTablePtr = defaultTable;
return 0;
case set_repeat:
- if (!flagRepeatTable) return ERROR(corruption_detected);
+ RETURN_ERROR_IF(!flagRepeatTable, corruption_detected);
/* prefetch FSE table if used */
if (ddictIsCold && (nbSeq > 24 /* heuristic */)) {
const void* const pStart = *DTablePtr;
@@ -461,15 +462,15 @@ static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymb
{ unsigned tableLog;
S16 norm[MaxSeq+1];
size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
- if (FSE_isError(headerSize)) return ERROR(corruption_detected);
- if (tableLog > maxLog) return ERROR(corruption_detected);
+ RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected);
+ RETURN_ERROR_IF(tableLog > maxLog, corruption_detected);
ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog);
*DTablePtr = DTableSpace;
return headerSize;
}
- default : /* impossible */
+ default :
assert(0);
- return ERROR(GENERIC);
+ RETURN_ERROR(GENERIC, "impossible");
}
}
@@ -483,28 +484,28 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
DEBUGLOG(5, "ZSTD_decodeSeqHeaders");
/* check */
- if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong);
+ RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong);
/* SeqHead */
nbSeq = *ip++;
if (!nbSeq) {
*nbSeqPtr=0;
- if (srcSize != 1) return ERROR(srcSize_wrong);
+ RETURN_ERROR_IF(srcSize != 1, srcSize_wrong);
return 1;
}
if (nbSeq > 0x7F) {
if (nbSeq == 0xFF) {
- if (ip+2 > iend) return ERROR(srcSize_wrong);
+ RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong);
nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
} else {
- if (ip >= iend) return ERROR(srcSize_wrong);
+ RETURN_ERROR_IF(ip >= iend, srcSize_wrong);
nbSeq = ((nbSeq-0x80)<<8) + *ip++;
}
}
*nbSeqPtr = nbSeq;
/* FSE table descriptors */
- if (ip+4 > iend) return ERROR(srcSize_wrong); /* minimum possible size */
+ RETURN_ERROR_IF(ip+4 > iend, srcSize_wrong); /* minimum possible size */
{ symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
@@ -517,7 +518,7 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
LL_base, LL_bits,
LL_defaultDTable, dctx->fseEntropy,
dctx->ddictIsCold, nbSeq);
- if (ZSTD_isError(llhSize)) return ERROR(corruption_detected);
+ RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected);
ip += llhSize;
}
@@ -527,7 +528,7 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
OF_base, OF_bits,
OF_defaultDTable, dctx->fseEntropy,
dctx->ddictIsCold, nbSeq);
- if (ZSTD_isError(ofhSize)) return ERROR(corruption_detected);
+ RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected);
ip += ofhSize;
}
@@ -537,7 +538,7 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
ML_base, ML_bits,
ML_defaultDTable, dctx->fseEntropy,
dctx->ddictIsCold, nbSeq);
- if (ZSTD_isError(mlhSize)) return ERROR(corruption_detected);
+ RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected);
ip += mlhSize;
}
}
@@ -590,8 +591,8 @@ size_t ZSTD_execSequenceLast7(BYTE* op,
const BYTE* match = oLitEnd - sequence.offset;
/* check */
- if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must fit within dstBuffer */
- if (iLitEnd > litLimit) return ERROR(corruption_detected); /* try to read beyond literal buffer */
+ RETURN_ERROR_IF(oMatchEnd>oend, dstSize_tooSmall, "last match must fit within dstBuffer");
+ RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "try to read beyond literal buffer");
/* copy literals */
while (op < oLitEnd) *op++ = *(*litPtr)++;
@@ -599,7 +600,7 @@ size_t ZSTD_execSequenceLast7(BYTE* op,
/* copy Match */
if (sequence.offset > (size_t)(oLitEnd - base)) {
/* offset beyond prefix */
- if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
+ RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - vBase), corruption_detected);
match = dictEnd - (base-match);
if (match + sequence.matchLength <= dictEnd) {
memmove(oLitEnd, match, sequence.matchLength);
@@ -631,8 +632,8 @@ size_t ZSTD_execSequence(BYTE* op,
const BYTE* match = oLitEnd - sequence.offset;
/* check */
- if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
- if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */
+ RETURN_ERROR_IF(oMatchEnd>oend, dstSize_tooSmall, "last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend");
+ RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "over-read beyond lit buffer");
if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
/* copy Literals */
@@ -645,8 +646,7 @@ size_t ZSTD_execSequence(BYTE* op,
/* copy Match */
if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
/* offset beyond prefix -> go into extDict */
- if (sequence.offset > (size_t)(oLitEnd - virtualStart))
- return ERROR(corruption_detected);
+ RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected);
match = dictEnd + (match - prefixStart);
if (match + sequence.matchLength <= dictEnd) {
memmove(oLitEnd, match, sequence.matchLength);
@@ -712,8 +712,8 @@ size_t ZSTD_execSequenceLong(BYTE* op,
const BYTE* match = sequence.match;
/* check */
- if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
- if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */
+ RETURN_ERROR_IF(oMatchEnd > oend, dstSize_tooSmall, "last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend");
+ RETURN_ERROR_IF(iLitEnd > litLimit, corruption_detected, "over-read beyond lit buffer");
if (oLitEnd > oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, dictStart, dictEnd);
/* copy Literals */
@@ -726,7 +726,7 @@ size_t ZSTD_execSequenceLong(BYTE* op,
/* copy Match */
if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
/* offset beyond prefix */
- if (sequence.offset > (size_t)(oLitEnd - dictStart)) return ERROR(corruption_detected);
+ RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - dictStart), corruption_detected);
if (match + sequence.matchLength <= dictEnd) {
memmove(oLitEnd, match, sequence.matchLength);
return sequenceLength;
@@ -801,7 +801,7 @@ ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD)
/* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum
* offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1)
* bits before reloading. This value is the maximum number of bytes we read
- * after reloading when we are decoding long offets.
+ * after reloading when we are decoding long offsets.
*/
#define LONG_OFFSETS_MAX_EXTRA_BITS_32 \
(ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32 \
@@ -911,7 +911,9 @@ ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
seqState_t seqState;
dctx->fseEntropy = 1;
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
- CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
+ RETURN_ERROR_IF(
+ ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
+ corruption_detected);
ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
@@ -927,14 +929,14 @@ ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
/* check if reached exact end */
DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq);
- if (nbSeq) return ERROR(corruption_detected);
+ RETURN_ERROR_IF(nbSeq, corruption_detected);
/* save reps for next block */
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
}
/* last literal segment */
{ size_t const lastLLSize = litEnd - litPtr;
- if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
+ RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall);
memcpy(op, litPtr, lastLLSize);
op += lastLLSize;
}
@@ -1066,7 +1068,9 @@ ZSTD_decompressSequencesLong_body(
seqState.pos = (size_t)(op-prefixStart);
seqState.dictEnd = dictEnd;
assert(iend >= ip);
- CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
+ RETURN_ERROR_IF(
+ ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
+ corruption_detected);
ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
@@ -1076,7 +1080,7 @@ ZSTD_decompressSequencesLong_body(
sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
PREFETCH_L1(sequences[seqNb].match); PREFETCH_L1(sequences[seqNb].match + sequences[seqNb].matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
}
- if (seqNb<seqAdvance) return ERROR(corruption_detected);
+ RETURN_ERROR_IF(seqNb<seqAdvance, corruption_detected);
/* decode and decompress */
for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) {
@@ -1087,7 +1091,7 @@ ZSTD_decompressSequencesLong_body(
sequences[seqNb & STORED_SEQS_MASK] = sequence;
op += oneSeqSize;
}
- if (seqNb<nbSeq) return ERROR(corruption_detected);
+ RETURN_ERROR_IF(seqNb<nbSeq, corruption_detected);
/* finish queue */
seqNb -= seqAdvance;
@@ -1103,7 +1107,7 @@ ZSTD_decompressSequencesLong_body(
/* last literal segment */
{ size_t const lastLLSize = litEnd - litPtr;
- if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
+ RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall);
memcpy(op, litPtr, lastLLSize);
op += lastLLSize;
}
@@ -1176,7 +1180,7 @@ ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
/* ZSTD_decompressSequencesLong() :
* decompression function triggered when a minimum share of offsets is considered "long",
* aka out of cache.
- * note : "long" definition seems overloaded here, sometimes meaning "wider than bitstream register", and sometimes mearning "farther than memory cache distance".
+ * note : "long" definition seems overloaded here, sometimes meaning "wider than bitstream register", and sometimes meaning "farther than memory cache distance".
* This function will try to mitigate main memory latency through the use of prefetching */
static size_t
ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
@@ -1240,7 +1244,7 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN))));
DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
- if (srcSize >= ZSTD_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);
+ RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong);
/* Decode literals section */
{ size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
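The hunks above repeatedly replace the older `if (cond) return ERROR(code);` pattern with the `RETURN_ERROR_IF(cond, code, ...)` macro introduced upstream. A minimal sketch of the idea, assuming an `ERROR()` helper like the one zstd's internal headers provide (the real upstream macro additionally logs the failing condition and the optional message), not the exact upstream definition:

    /* Minimal sketch: bail out of the current function with a zstd error code
     * when `cond` holds. ERROR() is assumed to map an error name to its
     * size_t error code. */
    #define RETURN_ERROR_IF(cond, err, ...)   \
        if (cond) {                           \
            /* upstream also logs __FILE__, __LINE__, #cond and the message */ \
            return ERROR(err);                \
        }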
diff --git a/thirdparty/zstd/decompress/zstd_decompress_internal.h b/thirdparty/zstd/decompress/zstd_decompress_internal.h
index abd0030519..ccbdfa090f 100644
--- a/thirdparty/zstd/decompress/zstd_decompress_internal.h
+++ b/thirdparty/zstd/decompress/zstd_decompress_internal.h
@@ -89,6 +89,12 @@ typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
typedef enum { zdss_init=0, zdss_loadHeader,
zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;
+typedef enum {
+ ZSTD_use_indefinitely = -1, /* Use the dictionary indefinitely */
+ ZSTD_dont_use = 0, /* Do not use the dictionary (if one exists free it) */
+ ZSTD_use_once = 1 /* Use the dictionary once and set to ZSTD_dont_use */
+} ZSTD_dictUses_e;
+
struct ZSTD_DCtx_s
{
const ZSTD_seqSymbol* LLTptr;
@@ -123,6 +129,7 @@ struct ZSTD_DCtx_s
const ZSTD_DDict* ddict; /* set by ZSTD_initDStream_usingDDict(), or ZSTD_DCtx_refDDict() */
U32 dictID;
int ddictIsCold; /* if == 1 : dictionary is "new" for working context, and presumed "cold" (not in cpu cache) */
+ ZSTD_dictUses_e dictUses;
/* streaming */
ZSTD_dStreamStage streamStage;
diff --git a/thirdparty/zstd/zstd.h b/thirdparty/zstd/zstd.h
index b18fc8a44b..53470c18f3 100644
--- a/thirdparty/zstd/zstd.h
+++ b/thirdparty/zstd/zstd.h
@@ -70,8 +70,8 @@ extern "C" {
/*------ Version ------*/
#define ZSTD_VERSION_MAJOR 1
-#define ZSTD_VERSION_MINOR 3
-#define ZSTD_VERSION_RELEASE 8
+#define ZSTD_VERSION_MINOR 4
+#define ZSTD_VERSION_RELEASE 0
#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
ZSTDLIB_API unsigned ZSTD_versionNumber(void); /**< to check runtime library version */
@@ -90,6 +90,21 @@ ZSTDLIB_API const char* ZSTD_versionString(void); /* requires v1.3.0+ */
#endif
/***************************************
+* Constants
+***************************************/
+
+/* All magic numbers are supposed to be read/written to/from files/memory using little-endian convention */
+#define ZSTD_MAGICNUMBER 0xFD2FB528 /* valid since v0.8.0 */
+#define ZSTD_MAGIC_DICTIONARY 0xEC30A437 /* valid since v0.7.0 */
+#define ZSTD_MAGIC_SKIPPABLE_START 0x184D2A50 /* all 16 values, from 0x184D2A50 to 0x184D2A5F, signal the beginning of a skippable frame */
+#define ZSTD_MAGIC_SKIPPABLE_MASK 0xFFFFFFF0
+
+#define ZSTD_BLOCKSIZELOG_MAX 17
+#define ZSTD_BLOCKSIZE_MAX (1<<ZSTD_BLOCKSIZELOG_MAX)
+
+
+
+/***************************************
* Simple API
***************************************/
/*! ZSTD_compress() :
@@ -145,12 +160,21 @@ ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t
* @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */
ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
+/*! ZSTD_findFrameCompressedSize() :
+ * `src` should point to the start of a ZSTD frame or skippable frame.
+ * `srcSize` must be >= first frame size
+ * @return : the compressed size of the first frame starting at `src`,
+ * suitable to pass as `srcSize` to `ZSTD_decompress` or similar,
+ * or an error code if input is invalid */
+ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);
+
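As a usage sketch (names `walkFrames`, `buf`, `bufSize` are illustrative), ZSTD_findFrameCompressedSize() makes it possible to walk a buffer that holds several complete frames back to back:

    #include <zstd.h>

    /* Sketch: count the frames in a buffer of concatenated zstd and/or skippable frames. */
    static size_t walkFrames(const void* buf, size_t bufSize)
    {
        const char* const base = (const char*)buf;
        size_t pos = 0, nbFrames = 0;
        while (pos < bufSize) {
            size_t const frameSize = ZSTD_findFrameCompressedSize(base + pos, bufSize - pos);
            if (ZSTD_isError(frameSize)) break;   /* invalid or truncated input */
            /* frameSize is suitable as `srcSize` for ZSTD_decompress() on this frame */
            pos += frameSize;
            nbFrames++;
        }
        return nbFrames;
    }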
/*====== Helper functions ======*/
#define ZSTD_COMPRESSBOUND(srcSize) ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
ZSTDLIB_API unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */
ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */
+ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed */
ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */
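A minimal single-pass round trip using the simple API and ZSTD_compressBound() for worst-case sizing (function and buffer names are illustrative):

    #include <stdlib.h>
    #include <string.h>
    #include <zstd.h>

    /* Sketch: compress then decompress a small buffer, checking errors with ZSTD_isError(). */
    static int roundTrip(const void* src, size_t srcSize)
    {
        size_t const bound = ZSTD_compressBound(srcSize);        /* worst-case compressed size */
        void* const cBuf = malloc(bound);
        void* const rBuf = malloc(srcSize ? srcSize : 1);
        int ok = 0;
        if (cBuf && rBuf) {
            size_t const cSize = ZSTD_compress(cBuf, bound, src, srcSize, 3); /* level 3 == default */
            if (!ZSTD_isError(cSize)) {
                size_t const rSize = ZSTD_decompress(rBuf, srcSize, cBuf, cSize);
                ok = !ZSTD_isError(rSize) && rSize == srcSize
                     && memcmp(rBuf, src, srcSize) == 0;
            }
        }
        free(cBuf); free(rBuf);
        return ok;
    }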
@@ -195,279 +219,6 @@ ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx,
const void* src, size_t srcSize);
-/**************************
-* Simple dictionary API
-***************************/
-/*! ZSTD_compress_usingDict() :
- * Compression at an explicit compression level using a Dictionary.
- * A dictionary can be any arbitrary data segment (also called a prefix),
- * or a buffer with specified information (see dictBuilder/zdict.h).
- * Note : This function loads the dictionary, resulting in significant startup delay.
- * It's intended for a dictionary used only once.
- * Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */
-ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- const void* dict,size_t dictSize,
- int compressionLevel);
-
-/*! ZSTD_decompress_usingDict() :
- * Decompression using a known Dictionary.
- * Dictionary must be identical to the one used during compression.
- * Note : This function loads the dictionary, resulting in significant startup delay.
- * It's intended for a dictionary used only once.
- * Note : When `dict == NULL || dictSize < 8` no dictionary is used. */
-ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- const void* dict,size_t dictSize);
-
-
-/***********************************
- * Bulk processing dictionary API
- **********************************/
-typedef struct ZSTD_CDict_s ZSTD_CDict;
-
-/*! ZSTD_createCDict() :
- * When compressing multiple messages / blocks using the same dictionary, it's recommended to load it only once.
- * ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup cost.
- * ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
- * `dictBuffer` can be released after ZSTD_CDict creation, because its content is copied within CDict.
- * Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate `dictBuffer` content.
- * Note : A ZSTD_CDict can be created from an empty dictBuffer, but it is inefficient when used to compress small data. */
-ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
- int compressionLevel);
-
-/*! ZSTD_freeCDict() :
- * Function frees memory allocated by ZSTD_createCDict(). */
-ZSTDLIB_API size_t ZSTD_freeCDict(ZSTD_CDict* CDict);
-
-/*! ZSTD_compress_usingCDict() :
- * Compression using a digested Dictionary.
- * Recommended when same dictionary is used multiple times.
- * Note : compression level is _decided at dictionary creation time_,
- * and frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no) */
-ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- const ZSTD_CDict* cdict);
-
-
-typedef struct ZSTD_DDict_s ZSTD_DDict;
-
-/*! ZSTD_createDDict() :
- * Create a digested dictionary, ready to start decompression operation without startup delay.
- * dictBuffer can be released after DDict creation, as its content is copied inside DDict. */
-ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);
-
-/*! ZSTD_freeDDict() :
- * Function frees memory allocated with ZSTD_createDDict() */
-ZSTDLIB_API size_t ZSTD_freeDDict(ZSTD_DDict* ddict);
-
-/*! ZSTD_decompress_usingDDict() :
- * Decompression using a digested Dictionary.
- * Recommended when same dictionary is used multiple times. */
-ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- const ZSTD_DDict* ddict);
-
-
-/****************************
-* Streaming
-****************************/
-
-typedef struct ZSTD_inBuffer_s {
- const void* src; /**< start of input buffer */
- size_t size; /**< size of input buffer */
- size_t pos; /**< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size */
-} ZSTD_inBuffer;
-
-typedef struct ZSTD_outBuffer_s {
- void* dst; /**< start of output buffer */
- size_t size; /**< size of output buffer */
- size_t pos; /**< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size */
-} ZSTD_outBuffer;
-
-
-
-/*-***********************************************************************
-* Streaming compression - HowTo
-*
-* A ZSTD_CStream object is required to track streaming operation.
-* Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources.
-* ZSTD_CStream objects can be reused multiple times on consecutive compression operations.
-* It is recommended to re-use ZSTD_CStream since it will play nicer with system's memory, by re-using already allocated memory.
-*
-* For parallel execution, use one separate ZSTD_CStream per thread.
-*
-* note : since v1.3.0, ZSTD_CStream and ZSTD_CCtx are the same thing.
-*
-* Parameters are sticky : when starting a new compression on the same context,
-* it will re-use the same sticky parameters as previous compression session.
-* When in doubt, it's recommended to fully initialize the context before usage.
-* Use ZSTD_initCStream() to set the parameter to a selected compression level.
-* Use advanced API (ZSTD_CCtx_setParameter(), etc.) to set more specific parameters.
-*
-* Use ZSTD_compressStream() as many times as necessary to consume input stream.
-* The function will automatically update both `pos` fields within `input` and `output`.
-* Note that the function may not consume the entire input,
-* for example, because the output buffer is already full,
-* in which case `input.pos < input.size`.
-* The caller must check if input has been entirely consumed.
-* If not, the caller must make some room to receive more compressed data,
-* and then present again remaining input data.
-* @return : a size hint, preferred nb of bytes to use as input for next function call
-* or an error code, which can be tested using ZSTD_isError().
-* Note 1 : it's just a hint, to help latency a little, any value will work fine.
-* Note 2 : size hint is guaranteed to be <= ZSTD_CStreamInSize()
-*
-* At any moment, it's possible to flush whatever data might remain stuck within internal buffer,
-* using ZSTD_flushStream(). `output->pos` will be updated.
-* Note that, if `output->size` is too small, a single invocation of ZSTD_flushStream() might not be enough (return code > 0).
-* In which case, make some room to receive more compressed data, and call again ZSTD_flushStream().
-* @return : 0 if internal buffers are entirely flushed,
-* >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
-* or an error code, which can be tested using ZSTD_isError().
-*
-* ZSTD_endStream() instructs to finish a frame.
-* It will perform a flush and write frame epilogue.
-* The epilogue is required for decoders to consider a frame completed.
-* flush() operation is the same, and follows same rules as ZSTD_flushStream().
-* @return : 0 if frame fully completed and fully flushed,
-* >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
-* or an error code, which can be tested using ZSTD_isError().
-*
-* *******************************************************************/
-
-typedef ZSTD_CCtx ZSTD_CStream; /**< CCtx and CStream are now effectively same object (>= v1.3.0) */
- /* Continue to distinguish them for compatibility with older versions <= v1.2.0 */
-/*===== ZSTD_CStream management functions =====*/
-ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void);
-ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs);
-
-/*===== Streaming compression functions =====*/
-ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
-ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
-ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
-ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
-
-ZSTDLIB_API size_t ZSTD_CStreamInSize(void); /**< recommended size for input buffer */
-ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /**< recommended size for output buffer. Guarantee to successfully flush at least one complete compressed block in all circumstances. */
-
-
-
-/*-***************************************************************************
-* Streaming decompression - HowTo
-*
-* A ZSTD_DStream object is required to track streaming operations.
-* Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
-* ZSTD_DStream objects can be re-used multiple times.
-*
-* Use ZSTD_initDStream() to start a new decompression operation.
-* @return : recommended first input size
-* Alternatively, use advanced API to set specific properties.
-*
-* Use ZSTD_decompressStream() repetitively to consume your input.
-* The function will update both `pos` fields.
-* If `input.pos < input.size`, some input has not been consumed.
-* It's up to the caller to present again remaining data.
-* The function tries to flush all data decoded immediately, respecting output buffer size.
-* If `output.pos < output.size`, decoder has flushed everything it could.
-* But if `output.pos == output.size`, there might be some data left within internal buffers.,
-* In which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer.
-* Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX.
-* @return : 0 when a frame is completely decoded and fully flushed,
-* or an error code, which can be tested using ZSTD_isError(),
-* or any other value > 0, which means there is still some decoding or flushing to do to complete current frame :
-* the return value is a suggested next input size (just a hint for better latency)
-* that will never request more than the remaining frame size.
-* *******************************************************************************/
-
-typedef ZSTD_DCtx ZSTD_DStream; /**< DCtx and DStream are now effectively same object (>= v1.3.0) */
- /* For compatibility with versions <= v1.2.0, prefer differentiating them. */
-/*===== ZSTD_DStream management functions =====*/
-ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void);
-ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds);
-
-/*===== Streaming decompression functions =====*/
-ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds);
-ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
-
-ZSTDLIB_API size_t ZSTD_DStreamInSize(void); /*!< recommended size for input buffer */
-ZSTDLIB_API size_t ZSTD_DStreamOutSize(void); /*!< recommended size for output buffer. Guarantee to successfully flush at least one complete block in all circumstances. */
-
-#endif /* ZSTD_H_235446 */
-
-
-
-
-/****************************************************************************************
- * ADVANCED AND EXPERIMENTAL FUNCTIONS
- ****************************************************************************************
- * The definitions in the following section are considered experimental.
- * They are provided for advanced scenarios.
- * They should never be used with a dynamic library, as prototypes may change in the future.
- * Use them only in association with static linking.
- * ***************************************************************************************/
-
-#if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
-#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
-
-
-/****************************************************************************************
- * Candidate API for promotion to stable status
- ****************************************************************************************
- * The following symbols and constants form the "staging area" :
- * they are considered to join "stable API" by v1.4.0.
- * The proposal is written so that it can be made stable "as is",
- * though it's still possible to suggest improvements.
- * Staging is in fact last chance for changes,
- * the API is locked once reaching "stable" status.
- * ***************************************************************************************/
-
-
-/* === Constants === */
-
-/* all magic numbers are supposed read/written to/from files/memory using little-endian convention */
-#define ZSTD_MAGICNUMBER 0xFD2FB528 /* valid since v0.8.0 */
-#define ZSTD_MAGIC_DICTIONARY 0xEC30A437 /* valid since v0.7.0 */
-#define ZSTD_MAGIC_SKIPPABLE_START 0x184D2A50 /* all 16 values, from 0x184D2A50 to 0x184D2A5F, signal the beginning of a skippable frame */
-#define ZSTD_MAGIC_SKIPPABLE_MASK 0xFFFFFFF0
-
-#define ZSTD_BLOCKSIZELOG_MAX 17
-#define ZSTD_BLOCKSIZE_MAX (1<<ZSTD_BLOCKSIZELOG_MAX)
-
-
-/* === query limits === */
-
-ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed */
-
-
-/* === frame size === */
-
-/*! ZSTD_findFrameCompressedSize() :
- * `src` should point to the start of a ZSTD frame or skippable frame.
- * `srcSize` must be >= first frame size
- * @return : the compressed size of the first frame starting at `src`,
- * suitable to pass as `srcSize` to `ZSTD_decompress` or similar,
- * or an error code if input is invalid */
-ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);
-
-
-/* === Memory management === */
-
-/*! ZSTD_sizeof_*() :
- * These functions give the _current_ memory usage of selected object.
- * Note that object memory usage can evolve (increase or decrease) over time. */
-ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
-ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
-ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
-ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
-ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
-ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
-
-
/***************************************
* Advanced compression API
***************************************/
@@ -503,7 +254,10 @@ typedef enum { ZSTD_fast=1,
typedef enum {
- /* compression parameters */
+ /* compression parameters
+ * Note: When compressing with a ZSTD_CDict these parameters are superseded
+ * by the parameters used to construct the ZSTD_CDict. See ZSTD_CCtx_refCDict()
+ * for more info (superseded-by-cdict). */
ZSTD_c_compressionLevel=100, /* Update all compression parameters according to pre-defined cLevel table
* Default level is ZSTD_CLEVEL_DEFAULT==3.
* Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT.
@@ -625,6 +379,7 @@ typedef enum {
* ZSTD_c_format
* ZSTD_c_forceMaxWindow
* ZSTD_c_forceAttachDict
+ * ZSTD_c_literalCompressionMode
* Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
* note : never ever use experimentalParam? names directly;
* also, the enums values themselves are unstable and can still change.
@@ -632,10 +387,10 @@ typedef enum {
ZSTD_c_experimentalParam1=500,
ZSTD_c_experimentalParam2=10,
ZSTD_c_experimentalParam3=1000,
- ZSTD_c_experimentalParam4=1001
+ ZSTD_c_experimentalParam4=1001,
+ ZSTD_c_experimentalParam5=1002,
} ZSTD_cParameter;
-
typedef struct {
size_t error;
int lowerBound;
@@ -677,63 +432,10 @@ ZSTDLIB_API size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param
* Note 3 : Whenever all input data is provided and consumed in a single round,
* for example with ZSTD_compress2(),
* or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end),
- * this value is automatically overriden by srcSize instead.
+ * this value is automatically overridden by srcSize instead.
*/
ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize);
-/*! ZSTD_CCtx_loadDictionary() :
- * Create an internal CDict from `dict` buffer.
- * Decompression will have to use same dictionary.
- * @result : 0, or an error code (which can be tested with ZSTD_isError()).
- * Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary,
- * meaning "return to no-dictionary mode".
- * Note 1 : Dictionary is sticky, it will be used for all future compressed frames.
- * To return to "no-dictionary" situation, load a NULL dictionary (or reset parameters).
- * Note 2 : Loading a dictionary involves building tables.
- * It's also a CPU consuming operation, with non-negligible impact on latency.
- * Tables are dependent on compression parameters, and for this reason,
- * compression parameters can no longer be changed after loading a dictionary.
- * Note 3 :`dict` content will be copied internally.
- * Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead.
- * In such a case, dictionary buffer must outlive its users.
- * Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()
- * to precisely select how dictionary content must be interpreted. */
-ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
-
-/*! ZSTD_CCtx_refCDict() :
- * Reference a prepared dictionary, to be used for all next compressed frames.
- * Note that compression parameters are enforced from within CDict,
- * and supercede any compression parameter previously set within CCtx.
- * The dictionary will remain valid for future compressed frames using same CCtx.
- * @result : 0, or an error code (which can be tested with ZSTD_isError()).
- * Special : Referencing a NULL CDict means "return to no-dictionary mode".
- * Note 1 : Currently, only one dictionary can be managed.
- * Referencing a new dictionary effectively "discards" any previous one.
- * Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx. */
-ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
-
-/*! ZSTD_CCtx_refPrefix() :
- * Reference a prefix (single-usage dictionary) for next compressed frame.
- * A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end).
- * Decompression will need same prefix to properly regenerate data.
- * Compressing with a prefix is similar in outcome as performing a diff and compressing it,
- * but performs much faster, especially during decompression (compression speed is tunable with compression level).
- * @result : 0, or an error code (which can be tested with ZSTD_isError()).
- * Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary
- * Note 1 : Prefix buffer is referenced. It **must** outlive compression.
- * Its content must remain unmodified during compression.
- * Note 2 : If the intention is to diff some large src data blob with some prior version of itself,
- * ensure that the window size is large enough to contain the entire source.
- * See ZSTD_c_windowLog.
- * Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters.
- * It's a CPU consuming operation, with non-negligible impact on latency.
- * If there is a need to use the same prefix multiple times, consider loadDictionary instead.
- * Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dm_rawContent).
- * Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation. */
-ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx,
- const void* prefix, size_t prefixSize);
-
-
typedef enum {
ZSTD_reset_session_only = 1,
ZSTD_reset_parameters = 2,
@@ -756,8 +458,6 @@ typedef enum {
*/
ZSTDLIB_API size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset);
-
-
/*! ZSTD_compress2() :
* Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.
* ZSTD_compress2() always starts a new frame.
@@ -772,48 +472,10 @@ ZSTDLIB_API size_t ZSTD_compress2( ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize);
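A sketch of one-shot compression through the advanced API (function and buffer names are illustrative): parameters are pushed into the CCtx, then ZSTD_compress2() starts a new frame.

    #include <zstd.h>

    /* Sketch: set a few parameters on a CCtx, then compress in one call. */
    static size_t compressAdvanced(void* dst, size_t dstCapacity,
                                   const void* src, size_t srcSize)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        size_t cSize;
        if (cctx == NULL) return (size_t)-1;   /* allocation failure, reads as an error to ZSTD_isError() */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
        ZSTD_CCtx_setPledgedSrcSize(cctx, (unsigned long long)srcSize); /* written into frame header */
        cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
        ZSTD_freeCCtx(cctx);
        return cSize;   /* compressed size, or an error code (test with ZSTD_isError()) */
    }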
-typedef enum {
- ZSTD_e_continue=0, /* collect more data, encoder decides when to output compressed result, for optimal compression ratio */
- ZSTD_e_flush=1, /* flush any data provided so far,
- * it creates (at least) one new block, that can be decoded immediately on reception;
- * frame will continue: any future data can still reference previously compressed data, improving compression. */
- ZSTD_e_end=2 /* flush any remaining data _and_ close current frame.
- * note that frame is only closed after compressed data is fully flushed (return value == 0).
- * After that point, any additional data starts a new frame.
- * note : each frame is independent (does not reference any content from previous frame). */
-} ZSTD_EndDirective;
-
-/*! ZSTD_compressStream2() :
- * Behaves about the same as ZSTD_compressStream, with additional control on end directive.
- * - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
- * - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode)
- * - outpot->pos must be <= dstCapacity, input->pos must be <= srcSize
- * - outpot->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
- * - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.
- * - When nbWorkers>=1, function is non-blocking : it just acquires a copy of input, and distributes jobs to internal worker threads, flush whatever is available,
- * and then immediately returns, just indicating that there is some data remaining to be flushed.
- * The function nonetheless guarantees forward progress : it will return only after it reads or write at least 1+ byte.
- * - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking.
- * - @return provides a minimum amount of data remaining to be flushed from internal buffers
- * or an error code, which can be tested using ZSTD_isError().
- * if @return != 0, flush is not fully completed, there is still some data left within internal buffers.
- * This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.
- * For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.
- * - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),
- * only ZSTD_e_end or ZSTD_e_flush operations are allowed.
- * Before starting a new compression job, or changing compression parameters,
- * it is required to fully flush internal buffers.
- */
-ZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
- ZSTD_outBuffer* output,
- ZSTD_inBuffer* input,
- ZSTD_EndDirective endOp);
-
-
-/* ============================== */
-/* Advanced decompression API */
-/* ============================== */
+/***************************************
+* Advanced decompression API
+***************************************/
/* The advanced API pushes parameters one by one into an existing DCtx context.
* Parameters are sticky, and remain valid for all following frames
@@ -823,14 +485,14 @@ ZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
* Therefore, no new decompression function is necessary.
*/
-
typedef enum {
ZSTD_d_windowLogMax=100, /* Select a size limit (in power of 2) beyond which
* the streaming API will refuse to allocate memory buffer
* in order to protect the host from unreasonable memory requirements.
* This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.
- * By default, a decompression context accepts window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) */
+ * By default, a decompression context accepts window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT).
+ * Special: value 0 means "use default maximum windowLog". */
/* note : additional experimental parameters are also available
* within the experimental section of the API.
@@ -843,7 +505,6 @@ typedef enum {
} ZSTD_dParameter;
-
/*! ZSTD_dParam_getBounds() :
* All parameters must belong to an interval with lower and upper bounds,
* otherwise they will either trigger an error or be automatically clamped.
@@ -862,6 +523,389 @@ ZSTDLIB_API ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam);
*/
ZSTDLIB_API size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int value);
+/*! ZSTD_DCtx_reset() :
+ * Return a DCtx to clean state.
+ * Session and parameters can be reset jointly or separately.
+ * Parameters can only be reset when no active frame is being decompressed.
+ * @return : 0, or an error code, which can be tested with ZSTD_isError()
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset);
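A sketch of the decompression side (the helper name is illustrative): cap the window size a streaming decoder will accept, and reset the session between frames.

    #include <zstd.h>

    /* Sketch: create a DCtx that refuses frames needing more than a 2^27-byte window. */
    static ZSTD_DCtx* createLimitedDCtx(void)
    {
        ZSTD_DCtx* const dctx = ZSTD_createDCtx();
        if (dctx != NULL)
            ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 27);
        return dctx;
    }

    /* Later, between frames, the same context can be reused:
     *     ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only);   // keep parameters, drop session
     */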
+
+
+/****************************
+* Streaming
+****************************/
+
+typedef struct ZSTD_inBuffer_s {
+ const void* src; /**< start of input buffer */
+ size_t size; /**< size of input buffer */
+ size_t pos; /**< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size */
+} ZSTD_inBuffer;
+
+typedef struct ZSTD_outBuffer_s {
+ void* dst; /**< start of output buffer */
+ size_t size; /**< size of output buffer */
+ size_t pos; /**< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size */
+} ZSTD_outBuffer;
+
+
+
+/*-***********************************************************************
+* Streaming compression - HowTo
+*
+* A ZSTD_CStream object is required to track streaming operation.
+* Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources.
+* ZSTD_CStream objects can be reused multiple times on consecutive compression operations.
+* It is recommended to re-use ZSTD_CStream since it will play nicer with system's memory, by re-using already allocated memory.
+*
+* For parallel execution, use one separate ZSTD_CStream per thread.
+*
+* note : since v1.3.0, ZSTD_CStream and ZSTD_CCtx are the same thing.
+*
+* Parameters are sticky : when starting a new compression on the same context,
+* it will re-use the same sticky parameters as previous compression session.
+* When in doubt, it's recommended to fully initialize the context before usage.
+* Use ZSTD_CCtx_reset() to reset the context and ZSTD_CCtx_setParameter(),
+* ZSTD_CCtx_setPledgedSrcSize(), or ZSTD_CCtx_loadDictionary() and friends to
+* set more specific parameters, the pledged source size, or load a dictionary.
+*
+* Use ZSTD_compressStream2() with ZSTD_e_continue as many times as necessary to
+* consume input stream. The function will automatically update both `pos`
+* fields within `input` and `output`.
+* Note that the function may not consume the entire input, for example, because
+* the output buffer is already full, in which case `input.pos < input.size`.
+* The caller must check if input has been entirely consumed.
+* If not, the caller must make some room to receive more compressed data,
+* and then present again remaining input data.
+* note: ZSTD_e_continue is guaranteed to make some forward progress when called,
+* but doesn't guarantee maximal forward progress. This is especially relevant
+* when compressing with multiple threads. The call won't block if it can
+* consume some input, but if it can't it will wait for some, but not all,
+* output to be flushed.
+* @return : provides a minimum amount of data remaining to be flushed from internal buffers
+* or an error code, which can be tested using ZSTD_isError().
+*
+* At any moment, it's possible to flush whatever data might remain stuck within internal buffer,
+* using ZSTD_compressStream2() with ZSTD_e_flush. `output->pos` will be updated.
+* Note that, if `output->size` is too small, a single invocation with ZSTD_e_flush might not be enough (return code > 0).
+* In which case, make some room to receive more compressed data, and call again ZSTD_compressStream2() with ZSTD_e_flush.
+* You must continue calling ZSTD_compressStream2() with ZSTD_e_flush until it returns 0, at which point you can change the
+* operation.
+* note: ZSTD_e_flush will flush as much output as possible, meaning when compressing with multiple threads, it will
+* block until the flush is complete or the output buffer is full.
+* @return : 0 if internal buffers are entirely flushed,
+* >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
+* or an error code, which can be tested using ZSTD_isError().
+*
+* Calling ZSTD_compressStream2() with ZSTD_e_end instructs to finish a frame.
+* It will perform a flush and write frame epilogue.
+* The epilogue is required for decoders to consider a frame completed.
+* flush operation is the same, and follows same rules as calling ZSTD_compressStream2() with ZSTD_e_flush.
+* You must continue calling ZSTD_compressStream2() with ZSTD_e_end until it returns 0, at which point you are free to
+* start a new frame.
+* note: ZSTD_e_end will flush as much output as possible, meaning when compressing with multiple threads, it will
+* block until the flush is complete or the output buffer is full.
+* @return : 0 if frame fully completed and fully flushed,
+* >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
+* or an error code, which can be tested using ZSTD_isError().
+*
+* *******************************************************************/
+
+typedef ZSTD_CCtx ZSTD_CStream; /**< CCtx and CStream are now effectively same object (>= v1.3.0) */
+ /* Continue to distinguish them for compatibility with older versions <= v1.2.0 */
+/*===== ZSTD_CStream management functions =====*/
+ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void);
+ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs);
+
+/*===== Streaming compression functions =====*/
+typedef enum {
+ ZSTD_e_continue=0, /* collect more data, encoder decides when to output compressed result, for optimal compression ratio */
+ ZSTD_e_flush=1, /* flush any data provided so far,
+ * it creates (at least) one new block, that can be decoded immediately on reception;
+ * frame will continue: any future data can still reference previously compressed data, improving compression.
+ * note : multithreaded compression will block to flush as much output as possible. */
+ ZSTD_e_end=2 /* flush any remaining data _and_ close current frame.
+ * note that frame is only closed after compressed data is fully flushed (return value == 0).
+ * After that point, any additional data starts a new frame.
+ * note : each frame is independent (does not reference any content from previous frame).
+ * note : multithreaded compression will block to flush as much output as possible. */
+} ZSTD_EndDirective;
+
+/*! ZSTD_compressStream2() :
+ * Behaves about the same as ZSTD_compressStream, with additional control on end directive.
+ * - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
+ * - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode)
+ * - output->pos must be <= dstCapacity, input->pos must be <= srcSize
+ * - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
+ * - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.
+ * - When nbWorkers>=1, function is non-blocking : it just acquires a copy of input, distributes jobs to internal worker threads, flushes whatever is available,
+ *                                                 and then immediately returns, just indicating that there is some data remaining to be flushed.
+ *                                                 The function nonetheless guarantees forward progress : it will return only after it reads or writes at least 1 byte.
+ * - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking.
+ * - @return provides a minimum amount of data remaining to be flushed from internal buffers
+ * or an error code, which can be tested using ZSTD_isError().
+ * if @return != 0, flush is not fully completed, there is still some data left within internal buffers.
+ * This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.
+ * For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.
+ * - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),
+ * only ZSTD_e_end or ZSTD_e_flush operations are allowed.
+ * Before starting a new compression job, or changing compression parameters,
+ * it is required to fully flush internal buffers.
+ */
+ZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
+ ZSTD_outBuffer* output,
+ ZSTD_inBuffer* input,
+ ZSTD_EndDirective endOp);
+
+ZSTDLIB_API size_t ZSTD_CStreamInSize(void); /**< recommended size for input buffer */
+ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /**< recommended size for output buffer. Guarantee to successfully flush at least one complete compressed block in all circumstances. */
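A sketch of the streaming-compression loop described above, following the ZSTD_e_continue / ZSTD_e_end rules (file handling and error reporting kept minimal; names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <zstd.h>

    /* Sketch: compress fin to fout with ZSTD_compressStream2(). */
    static int compressFile(FILE* fin, FILE* fout, int level)
    {
        size_t const inSize  = ZSTD_CStreamInSize();
        size_t const outSize = ZSTD_CStreamOutSize();
        void* const inBuf  = malloc(inSize);
        void* const outBuf = malloc(outSize);
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        int result = 1;
        if (inBuf && outBuf && cctx) {
            ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);
            for (;;) {
                size_t const readBytes = fread(inBuf, 1, inSize, fin);
                int const lastChunk = (readBytes < inSize);
                ZSTD_EndDirective const mode = lastChunk ? ZSTD_e_end : ZSTD_e_continue;
                ZSTD_inBuffer input = { inBuf, readBytes, 0 };
                int finished;
                do {
                    ZSTD_outBuffer output = { outBuf, outSize, 0 };
                    size_t const remaining = ZSTD_compressStream2(cctx, &output, &input, mode);
                    if (ZSTD_isError(remaining)) goto done;
                    fwrite(outBuf, 1, output.pos, fout);
                    /* ZSTD_e_end is finished when it returns 0;
                     * ZSTD_e_continue is finished when all input has been consumed */
                    finished = lastChunk ? (remaining == 0) : (input.pos == input.size);
                } while (!finished);
                if (lastChunk) { result = 0; break; }
            }
        }
    done:
        ZSTD_freeCCtx(cctx);
        free(inBuf); free(outBuf);
        return result;
    }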
+
+/*******************************************************************************
+ * This is a legacy streaming API, and can be replaced by ZSTD_CCtx_reset() and
+ * ZSTD_compressStream2(). It is redundant, but is still fully supported.
+ * Advanced parameters and dictionary compression can only be used through the
+ * new API.
+ ******************************************************************************/
+
+/**
+ * Equivalent to:
+ *
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
+ * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
+ */
+ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
+/**
+ * Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue).
+ * NOTE: The return value is different. ZSTD_compressStream() returns a hint for
+ * the next read size (if non-zero and not an error). ZSTD_compressStream2()
+ * returns the number of bytes left to flush (if non-zero and not an error).
+ */
+ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+/** Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush). */
+ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
+/** Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end). */
+ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
+
+
+/*-***************************************************************************
+* Streaming decompression - HowTo
+*
+* A ZSTD_DStream object is required to track streaming operations.
+* Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
+* ZSTD_DStream objects can be re-used multiple times.
+*
+* Use ZSTD_initDStream() to start a new decompression operation.
+* @return : recommended first input size
+* Alternatively, use advanced API to set specific properties.
+*
+* Use ZSTD_decompressStream() repetitively to consume your input.
+* The function will update both `pos` fields.
+* If `input.pos < input.size`, some input has not been consumed.
+* It's up to the caller to present again remaining data.
+* The function tries to flush all data decoded immediately, respecting output buffer size.
+* If `output.pos < output.size`, decoder has flushed everything it could.
+* But if `output.pos == output.size`, there might be some data left within internal buffers.
+* In which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer.
+* Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX.
+* @return : 0 when a frame is completely decoded and fully flushed,
+* or an error code, which can be tested using ZSTD_isError(),
+* or any other value > 0, which means there is still some decoding or flushing to do to complete current frame :
+* the return value is a suggested next input size (just a hint for better latency)
+* that will never request more than the remaining frame size.
+* *******************************************************************************/
+
+typedef ZSTD_DCtx ZSTD_DStream; /**< DCtx and DStream are now effectively same object (>= v1.3.0) */
+ /* For compatibility with versions <= v1.2.0, prefer differentiating them. */
+/*===== ZSTD_DStream management functions =====*/
+ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void);
+ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds);
+
+/*===== Streaming decompression functions =====*/
+
+/* This function is redundant with the advanced API and equivalent to:
+ *
+ *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ * ZSTD_DCtx_refDDict(zds, NULL);
+ */
+ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds);
+
+ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+
+ZSTDLIB_API size_t ZSTD_DStreamInSize(void); /*!< recommended size for input buffer */
+ZSTDLIB_API size_t ZSTD_DStreamOutSize(void); /*!< recommended size for output buffer. Guarantee to successfully flush at least one complete block in all circumstances. */
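And the matching streaming-decompression loop (same caveats as the compression sketch above):

    #include <stdio.h>
    #include <stdlib.h>
    #include <zstd.h>

    /* Sketch: decompress fin to fout with ZSTD_decompressStream(). */
    static int decompressFile(FILE* fin, FILE* fout)
    {
        size_t const inSize  = ZSTD_DStreamInSize();
        size_t const outSize = ZSTD_DStreamOutSize();
        void* const inBuf  = malloc(inSize);
        void* const outBuf = malloc(outSize);
        ZSTD_DStream* const zds = ZSTD_createDStream();
        size_t readBytes, lastRet = 0;
        int result = 1;
        if (inBuf && outBuf && zds) {
            ZSTD_initDStream(zds);
            result = 0;
            while ((readBytes = fread(inBuf, 1, inSize, fin)) > 0) {
                ZSTD_inBuffer input = { inBuf, readBytes, 0 };
                while (input.pos < input.size) {
                    ZSTD_outBuffer output = { outBuf, outSize, 0 };
                    size_t const ret = ZSTD_decompressStream(zds, &output, &input);
                    if (ZSTD_isError(ret)) { result = 1; goto done; }
                    fwrite(outBuf, 1, output.pos, fout);
                    lastRet = ret;
                }
            }
            if (lastRet != 0) result = 1;   /* the last frame was truncated */
        }
    done:
        ZSTD_freeDStream(zds);
        free(inBuf); free(outBuf);
        return result;
    }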
+
+
+/**************************
+* Simple dictionary API
+***************************/
+/*! ZSTD_compress_usingDict() :
+ * Compression at an explicit compression level using a Dictionary.
+ * A dictionary can be any arbitrary data segment (also called a prefix),
+ * or a buffer with specified information (see dictBuilder/zdict.h).
+ * Note : This function loads the dictionary, resulting in significant startup delay.
+ * It's intended for a dictionary used only once.
+ * Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */
+ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ int compressionLevel);
+
+/*! ZSTD_decompress_usingDict() :
+ * Decompression using a known Dictionary.
+ * Dictionary must be identical to the one used during compression.
+ * Note : This function loads the dictionary, resulting in significant startup delay.
+ * It's intended for a dictionary used only once.
+ * Note : When `dict == NULL || dictSize < 8` no dictionary is used. */
+ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize);
+
+
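A sketch of a round trip through the simple dictionary API; the same `dict` buffer must be supplied on both sides (all names are illustrative):

    #include <zstd.h>

    /* Sketch: one-shot compression and decompression with a raw dictionary buffer. */
    static int dictRoundTrip(void* cBuf, size_t cCap, void* rBuf, size_t rCap,
                             const void* src, size_t srcSize,
                             const void* dict, size_t dictSize)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        ZSTD_DCtx* const dctx = ZSTD_createDCtx();
        size_t cSize = 0, rSize = 0;
        int ok = 0;
        if (cctx && dctx) {
            cSize = ZSTD_compress_usingDict(cctx, cBuf, cCap, src, srcSize,
                                            dict, dictSize, 3);
            if (!ZSTD_isError(cSize))
                rSize = ZSTD_decompress_usingDict(dctx, rBuf, rCap, cBuf, cSize,
                                                  dict, dictSize);
            ok = !ZSTD_isError(cSize) && !ZSTD_isError(rSize) && rSize == srcSize;
        }
        ZSTD_freeCCtx(cctx); ZSTD_freeDCtx(dctx);
        return ok;
    }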
+/***********************************
+ * Bulk processing dictionary API
+ **********************************/
+typedef struct ZSTD_CDict_s ZSTD_CDict;
+
+/*! ZSTD_createCDict() :
+ * When compressing multiple messages / blocks using the same dictionary, it's recommended to load it only once.
+ * ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup cost.
+ * ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
+ * `dictBuffer` can be released after ZSTD_CDict creation, because its content is copied within CDict.
+ * Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate `dictBuffer` content.
+ * Note : A ZSTD_CDict can be created from an empty dictBuffer, but it is inefficient when used to compress small data. */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
+ int compressionLevel);
+
+/*! ZSTD_freeCDict() :
+ * Function frees memory allocated by ZSTD_createCDict(). */
+ZSTDLIB_API size_t ZSTD_freeCDict(ZSTD_CDict* CDict);
+
+/*! ZSTD_compress_usingCDict() :
+ * Compression using a digested Dictionary.
+ * Recommended when same dictionary is used multiple times.
+ * Note : compression level is _decided at dictionary creation time_,
+ * and frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no) */
+ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict);
+
+
+typedef struct ZSTD_DDict_s ZSTD_DDict;
+
+/*! ZSTD_createDDict() :
+ * Create a digested dictionary, ready to start decompression operation without startup delay.
+ * dictBuffer can be released after DDict creation, as its content is copied inside DDict. */
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);
+
+/*! ZSTD_freeDDict() :
+ * Function frees memory allocated with ZSTD_createDDict() */
+ZSTDLIB_API size_t ZSTD_freeDDict(ZSTD_DDict* ddict);
+
+/*! ZSTD_decompress_usingDDict() :
+ * Decompression using a digested Dictionary.
+ * Recommended when same dictionary is used multiple times. */
+ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_DDict* ddict);
+
+
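A sketch of the bulk API: digest the dictionary once, then reuse it for many small inputs (container names are illustrative):

    #include <zstd.h>

    /* Sketch: compress nbMsgs messages with a single digested dictionary. */
    static void compressMany(const void* dict, size_t dictSize, int level,
                             const void* const* msgs, const size_t* msgSizes, size_t nbMsgs,
                             void* dst, size_t dstCapacity)
    {
        ZSTD_CDict* const cdict = ZSTD_createCDict(dict, dictSize, level); /* level fixed here */
        ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
        size_t i;
        if (cdict && cctx) {
            for (i = 0; i < nbMsgs; i++) {
                size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity,
                                                              msgs[i], msgSizes[i], cdict);
                (void)cSize;   /* check with ZSTD_isError() and hand off each frame */
            }
        }
        ZSTD_freeCCtx(cctx);
        ZSTD_freeCDict(cdict);
    }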
+/********************************
+ * Dictionary helper functions
+ *******************************/
+
+/*! ZSTD_getDictID_fromDict() :
+ * Provides the dictID stored within dictionary.
+ * if @return == 0, the dictionary is not conformant with Zstandard specification.
+ * It can still be loaded, but as a content-only dictionary. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
+
+/*! ZSTD_getDictID_fromDDict() :
+ * Provides the dictID of the dictionary loaded into `ddict`.
+ * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
+ * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
+
+/*! ZSTD_getDictID_fromFrame() :
+ * Provides the dictID required to decompress the frame stored within `src`.
+ * If @return == 0, the dictID could not be decoded.
+ * This could be for one of the following reasons :
+ * - The frame does not require a dictionary to be decoded (most common case).
+ * - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is hidden information.
+ * Note : this use case also happens when using a non-conformant dictionary.
+ * - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
+ * - This is not a Zstandard frame.
+ * When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
+
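As a small usage sketch, these helpers let a decoder check, before decompressing, whether a frame needs one of its known dictionaries (the mapping from dictID to a DDict is left to the caller):

    #include <zstd.h>

    /* Sketch: return the dictID a frame requires, or 0 if none can be determined. */
    static unsigned requiredDictID(const void* src, size_t srcSize)
    {
        unsigned const dictID = ZSTD_getDictID_fromFrame(src, srcSize);
        /* 0 can mean: no dictionary needed, dictID stripped, header too small,
         * or not a zstd frame -- see the notes above. */
        return dictID;
    }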
+
+/*******************************************************************************
+ * Advanced dictionary and prefix API
+ *
+ * This API allows dictionaries to be used with ZSTD_compress2(),
+ * ZSTD_compressStream2(), and ZSTD_decompress(). Dictionaries are sticky, and
+ * only reset when the context is reset with ZSTD_reset_parameters or
+ * ZSTD_reset_session_and_parameters. Prefixes are single-use.
+ ******************************************************************************/
+
+
+/*! ZSTD_CCtx_loadDictionary() :
+ * Create an internal CDict from `dict` buffer.
+ * Decompression will have to use same dictionary.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary,
+ * meaning "return to no-dictionary mode".
+ * Note 1 : Dictionary is sticky, it will be used for all future compressed frames.
+ * To return to "no-dictionary" situation, load a NULL dictionary (or reset parameters).
+ * Note 2 : Loading a dictionary involves building tables.
+ * It's also a CPU consuming operation, with non-negligible impact on latency.
+ * Tables are dependent on compression parameters, and for this reason,
+ * compression parameters can no longer be changed after loading a dictionary.
+ * Note 3 :`dict` content will be copied internally.
+ * Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead.
+ * In such a case, dictionary buffer must outlive its users.
+ * Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()
+ * to precisely select how dictionary content must be interpreted. */
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
+
+/*! ZSTD_CCtx_refCDict() :
+ * Reference a prepared dictionary, to be used for all next compressed frames.
+ * Note that compression parameters are enforced from within CDict,
+ * and supersede any compression parameter previously set within CCtx.
+ * The parameters ignored are labeled as "superseded-by-cdict" in the ZSTD_cParameter enum docs.
+ * The ignored parameters will be used again if the CCtx is returned to no-dictionary mode.
+ * The dictionary will remain valid for future compressed frames using same CCtx.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Special : Referencing a NULL CDict means "return to no-dictionary mode".
+ * Note 1 : Currently, only one dictionary can be managed.
+ * Referencing a new dictionary effectively "discards" any previous one.
+ * Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx. */
+ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
+
+/*! ZSTD_CCtx_refPrefix() :
+ * Reference a prefix (single-usage dictionary) for next compressed frame.
+ * A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end).
+ * Decompression will need same prefix to properly regenerate data.
+ * Compressing with a prefix is similar in outcome as performing a diff and compressing it,
+ * but performs much faster, especially during decompression (compression speed is tunable with compression level).
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ * Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary
+ * Note 1 : Prefix buffer is referenced. It **must** outlive compression.
+ * Its content must remain unmodified during compression.
+ * Note 2 : If the intention is to diff some large src data blob with some prior version of itself,
+ * ensure that the window size is large enough to contain the entire source.
+ * See ZSTD_c_windowLog.
+ * Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters.
+ * It's a CPU consuming operation, with non-negligible impact on latency.
+ * If there is a need to use the same prefix multiple times, consider loadDictionary instead.
+ * Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dm_rawContent).
+ * Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation. */
+ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx,
+ const void* prefix, size_t prefixSize);
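A sketch combining the advanced dictionary API with ZSTD_compress2(); the referenced CDict must outlive its use in the CCtx (names are illustrative):

    #include <zstd.h>

    /* Sketch: attach a digested dictionary to a CCtx, compress, then detach it. */
    static size_t compressWithCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict,
                                    void* dst, size_t dstCapacity,
                                    const void* src, size_t srcSize)
    {
        size_t cSize;
        ZSTD_CCtx_refCDict(cctx, cdict);          /* sticky: used for all following frames */
        cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
        ZSTD_CCtx_refCDict(cctx, NULL);           /* return to no-dictionary mode */
        return cSize;                             /* or an error code (ZSTD_isError()) */
    }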
/*! ZSTD_DCtx_loadDictionary() :
* Create an internal DDict from dict buffer,
@@ -910,15 +954,32 @@ ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx,
const void* prefix, size_t prefixSize);
-/*! ZSTD_DCtx_reset() :
- * Return a DCtx to clean state.
- * Session and parameters can be reset jointly or separately.
- * Parameters can only be reset when no active frame is being decompressed.
- * @return : 0, or an error code, which can be tested with ZSTD_isError()
- */
-ZSTDLIB_API size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset);
+/* === Memory management === */
+
+/*! ZSTD_sizeof_*() :
+ * These functions give the _current_ memory usage of selected object.
+ * Note that object memory usage can evolve (increase or decrease) over time. */
+ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
+ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
+ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
+ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
+ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
+ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
+
+#endif /* ZSTD_H_235446 */
+
+/****************************************************************************************
+ * ADVANCED AND EXPERIMENTAL FUNCTIONS
+ ****************************************************************************************
+ * The definitions in the following section are considered experimental.
+ * They are provided for advanced scenarios.
+ * They should never be used with a dynamic library, as prototypes may change in the future.
+ * Use them only in association with static linking.
+ * ***************************************************************************************/
+#if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
+#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
/****************************************************************************************
* experimental API (static linking only)
@@ -962,7 +1023,7 @@ ZSTDLIB_API size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset);
#define ZSTD_WINDOWLOG_LIMIT_DEFAULT 27 /* by default, the streaming decoder will refuse any frame
* requiring larger than (1<<ZSTD_WINDOWLOG_LIMIT_DEFAULT) window size,
* to preserve host's memory from unreasonable requirements.
- * This limit can be overriden using ZSTD_DCtx_setParameter(,ZSTD_d_windowLogMax,).
+ * This limit can be overridden using ZSTD_DCtx_setParameter(,ZSTD_d_windowLogMax,).
* The limit does not apply for one-pass decoders (such as ZSTD_decompress()), since no additional memory is allocated */
@@ -1064,15 +1125,24 @@ typedef enum {
ZSTD_dictForceCopy = 2, /* Always copy the dictionary. */
} ZSTD_dictAttachPref_e;
+typedef enum {
+ ZSTD_lcm_auto = 0, /**< Automatically determine the compression mode based on the compression level.
+                                 *   Literals are left uncompressed at negative compression levels,
+                                 *   and compressed at positive compression levels. */
+ ZSTD_lcm_huffman = 1, /**< Always attempt Huffman compression. Uncompressed literals will still be
+ * emitted if Huffman compression is not profitable. */
+ ZSTD_lcm_uncompressed = 2, /**< Always emit uncompressed literals. */
+} ZSTD_literalCompressionMode_e;
+
/***************************************
* Frame size functions
***************************************/
/*! ZSTD_findDecompressedSize() :
- * `src` should point the start of a series of ZSTD encoded and/or skippable frames
+ * `src` should point to the start of a series of ZSTD encoded and/or skippable frames
* `srcSize` must be the _exact_ size of this series
- * (i.e. there should be a frame boundary exactly at `srcSize` bytes after `src`)
+ * (i.e. there should be a frame boundary at `src + srcSize`)
* @return : - decompressed size of all data in all successive frames
* - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN
* - if an error occurred: ZSTD_CONTENTSIZE_ERROR
@@ -1092,6 +1162,21 @@ typedef enum {
* however it does mean that all frame data must be present and valid. */
ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
+/** ZSTD_decompressBound() :
+ * `src` should point to the start of a series of ZSTD encoded and/or skippable frames
+ * `srcSize` must be the _exact_ size of this series
+ * (i.e. there should be a frame boundary at `src + srcSize`)
+ * @return : - upper-bound for the decompressed size of all data in all successive frames
+ *           - if an error occurred: ZSTD_CONTENTSIZE_ERROR
+ *
+ * note 1 : an error can occur if `src` contains an invalid or incorrectly formatted frame.
+ * note 2 : the upper-bound is exact when the decompressed size field is available in every ZSTD encoded frame of `src`.
+ * in this case, `ZSTD_findDecompressedSize` and `ZSTD_decompressBound` return the same value.
+ * note 3 : when the decompressed size field isn't available, the upper-bound for that frame is calculated by:
+ * upper-bound = # blocks * min(128 KB, Window_Size)
+ */
+ZSTDLIB_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize);
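A sketch of the typical use: size a destination buffer from the bound, then decompress in one pass. Assumes <stdlib.h>, placeholder buffers `cSrc`/`cSrcSize`, and that the (possibly pessimistic) bound fits in memory.

    unsigned long long const bound = ZSTD_decompressBound(cSrc, cSrcSize);
    if (bound == ZSTD_CONTENTSIZE_ERROR) { /* invalid or truncated frame(s) */ }
    void* const dst = malloc((size_t)bound);
    size_t const dSize = ZSTD_decompress(dst, (size_t)bound, cSrc, cSrcSize);
    if (ZSTD_isError(dSize)) { /* handle error */ }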
+
/*! ZSTD_frameHeaderSize() :
* srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX.
* @return : size of the Frame Header,
@@ -1110,7 +1195,7 @@ ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
* It will also consider src size to be arbitrarily "large", which is worst case.
* If srcSize is known to always be small, ZSTD_estimateCCtxSize_usingCParams() can provide a tighter estimation.
* ZSTD_estimateCCtxSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
- * ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParam_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
+ * ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
* Note : CCtx size estimation is only correct for single-threaded compression. */
ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
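One way these estimates are commonly combined with ZSTD_getCParams() is to place a CCtx in caller-owned memory via the experimental ZSTD_initStaticCCtx(); a single-threaded sketch with placeholder buffers and error handling elided:

    ZSTD_compressionParameters const cParams = ZSTD_getCParams(3, srcSize, 0);
    size_t const ctxSize   = ZSTD_estimateCCtxSize_usingCParams(cParams);
    void*  const workspace = malloc(ctxSize);                /* suitably aligned by malloc */
    ZSTD_CCtx* const cctx  = ZSTD_initStaticCCtx(workspace, ctxSize);
    if (cctx != NULL) {
        ZSTD_compressCCtx(cctx, dst, dstCap, src, srcSize, 3);
    }
    free(workspace);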
@@ -1122,7 +1207,7 @@ ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void);
* It will also consider src size to be arbitrarily "large", which is worst case.
* If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.
* ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
- * ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParam_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
+ * ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
* Note : CStream size estimation is only correct for single-threaded compression.
* ZSTD_DStream memory budget depends on window Size.
* This information can be passed manually, using ZSTD_estimateDStreamSize,
@@ -1226,22 +1311,26 @@ ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictS
ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
/*! ZSTD_getCParams() :
-* @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
-* `estimatedSrcSize` value is optional, select 0 if not known */
+ * @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
+ * `estimatedSrcSize` value is optional, select 0 if not known */
ZSTDLIB_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
/*! ZSTD_getParams() :
-* same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`.
-* All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */
+ * same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`.
+ * All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */
ZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
/*! ZSTD_checkCParams() :
-* Ensure param values remain within authorized range */
+ * Ensure param values remain within authorized range.
+ * @return 0 on success, or an error code (can be checked with ZSTD_isError()) */
ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
/*! ZSTD_adjustCParams() :
* optimize params for a given `srcSize` and `dictSize`.
- * both values are optional, select `0` if unknown. */
+ * `srcSize` can be unknown, in which case use ZSTD_CONTENTSIZE_UNKNOWN.
+ * `dictSize` must be `0` when there is no dictionary.
+ * cPar can be invalid : all parameters will be clamped within valid range in the @return struct.
+ * This function never fails (wide contract) */
ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
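A short sketch tying these three helpers together: derive parameters for a level, tweak one field, validate, then let ZSTD_adjustCParams() clamp the result (the override value and `knownSrcSize` are placeholders):

    ZSTD_compressionParameters cPar = ZSTD_getCParams(19, 0 /* srcSize unknown */, 0 /* no dict */);
    cPar.windowLog = 25;                                   /* hypothetical manual override */
    if (ZSTD_isError(ZSTD_checkCParams(cPar))) { /* a field is out of range */ }
    cPar = ZSTD_adjustCParams(cPar, knownSrcSize, 0);      /* never fails; clamps as needed */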
/*! ZSTD_compress_advanced() :
@@ -1314,6 +1403,12 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* pre
* See the comments on that enum for an explanation of the feature. */
#define ZSTD_c_forceAttachDict ZSTD_c_experimentalParam4
+/* Controls how the literals are compressed (default is auto).
+ * The value must be of type ZSTD_literalCompressionMode_e.
+ * See the ZSTD_literalCompressionMode_e enum definition for details.
+ */
+#define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5
+
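Since this is an experimental parameter, setting it requires ZSTD_STATIC_LINKING_ONLY; a minimal sketch:

    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t const r = ZSTD_CCtx_setParameter(cctx, ZSTD_c_literalCompressionMode, ZSTD_lcm_uncompressed);
    if (ZSTD_isError(r)) { /* parameter rejected */ }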
/*! ZSTD_CCtx_getParameter() :
* Get the requested compression parameter value, selected by enum ZSTD_cParameter,
* and store it into int* value.
@@ -1325,10 +1420,10 @@ ZSTDLIB_API size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param
/*! ZSTD_CCtx_params :
* Quick howto :
* - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure
- * - ZSTD_CCtxParam_setParameter() : Push parameters one by one into
- * an existing ZSTD_CCtx_params structure.
- * This is similar to
- * ZSTD_CCtx_setParameter().
+ * - ZSTD_CCtxParams_setParameter() : Push parameters one by one into
+ * an existing ZSTD_CCtx_params structure.
+ * This is similar to
+ * ZSTD_CCtx_setParameter().
* - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to
* an existing CCtx.
* These parameters will be applied to
@@ -1359,20 +1454,20 @@ ZSTDLIB_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compre
*/
ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);
-/*! ZSTD_CCtxParam_setParameter() :
+/*! ZSTD_CCtxParams_setParameter() :
* Similar to ZSTD_CCtx_setParameter.
* Set one compression parameter, selected by enum ZSTD_cParameter.
* Parameters must be applied to a ZSTD_CCtx using ZSTD_CCtx_setParametersUsingCCtxParams().
* @result : 0, or an error code (which can be tested with ZSTD_isError()).
*/
-ZSTDLIB_API size_t ZSTD_CCtxParam_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value);
+ZSTDLIB_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value);
-/*! ZSTD_CCtxParam_getParameter() :
+/*! ZSTD_CCtxParams_getParameter() :
* Similar to ZSTD_CCtx_getParameter.
* Get the requested value of one compression parameter, selected by enum ZSTD_cParameter.
* @result : 0, or an error code (which can be tested with ZSTD_isError()).
*/
-ZSTDLIB_API size_t ZSTD_CCtxParam_getParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value);
+ZSTDLIB_API size_t ZSTD_CCtxParams_getParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value);
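A sketch of the workflow from the "Quick howto" above (error checks elided; a multithreaded build is assumed for ZSTD_c_nbWorkers; src/dst buffers are placeholders):

    ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
    ZSTD_CCtxParams_init(params, 3);                               /* start from level-3 defaults */
    ZSTD_CCtxParams_setParameter(params, ZSTD_c_checksumFlag, 1);
    ZSTD_CCtxParams_setParameter(params, ZSTD_c_nbWorkers, 2);     /* needs ZSTD_MULTITHREAD */

    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    ZSTD_CCtx_setParametersUsingCCtxParams(cctx, params);          /* applies to following frames */
    ZSTD_compress2(cctx, dst, dstCap, src, srcSize);
    ZSTD_freeCCtxParams(params);
    ZSTD_freeCCtx(cctx);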
/*! ZSTD_CCtx_setParametersUsingCCtxParams() :
* Apply a set of ZSTD_CCtx_params to the compression context.
@@ -1415,31 +1510,6 @@ ZSTDLIB_API unsigned ZSTD_isFrame(const void* buffer, size_t size);
* it must remain read accessible throughout the lifetime of DDict */
ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
-
-/*! ZSTD_getDictID_fromDict() :
- * Provides the dictID stored within dictionary.
- * if @return == 0, the dictionary is not conformant with Zstandard specification.
- * It can still be loaded, but as a content-only dictionary. */
-ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
-
-/*! ZSTD_getDictID_fromDDict() :
- * Provides the dictID of the dictionary loaded into `ddict`.
- * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
- * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
-ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
-
-/*! ZSTD_getDictID_fromFrame() :
- * Provides the dictID required to decompressed the frame stored within `src`.
- * If @return == 0, the dictID could not be decoded.
- * This could for one of the following reasons :
- * - The frame does not require a dictionary to be decoded (most common case).
- * - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden information.
- * Note : this use case also happens when using a non-conformant dictionary.
- * - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
- * - This is not a Zstandard frame.
- * When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code. */
-ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
-
/*! ZSTD_DCtx_loadDictionary_byReference() :
* Same as ZSTD_DCtx_loadDictionary(),
* but references `dict` content instead of copying it into `dctx`.
@@ -1501,14 +1571,68 @@ ZSTDLIB_API size_t ZSTD_decompressStream_simpleArgs (
********************************************************************/
/*===== Advanced Streaming compression functions =====*/
-ZSTDLIB_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize); /**< pledgedSrcSize must be correct. If it is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs, "0" also disables frame content size field. It may be enabled in the future. */
-ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); /**< creates of an internal CDict (incompatible with static CCtx), except if dict == NULL or dictSize < 8, in which case no dict is used. Note: dict is loaded with ZSTD_dm_auto (treated as a full zstd dictionary if it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.*/
+/**! ZSTD_initCStream_srcSize() :
+ * This function is deprecated, and equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
+ * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
+ * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ *
+ * pledgedSrcSize must be correct. If it is not known at init time, use
+ * ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs,
+ * "0" also disables frame content size field. It may be enabled in the future.
+ */
+ZSTDLIB_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize);
+/**! ZSTD_initCStream_usingDict() :
+ * This function is deprecated, and is equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
+ * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
+ *
+ * Creates an internal CDict (incompatible with static CCtx), except if
+ * dict == NULL or dictSize < 8, in which case no dict is used.
+ * Note: dict is loaded with ZSTD_dm_auto (treated as a full zstd dictionary if
+ * it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.
+ */
+ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel);
+/**! ZSTD_initCStream_advanced() :
+ * This function is deprecated, and is approximately equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_setZstdParams(zcs, params); // Set the zstd params and leave the rest as-is
+ * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
+ *
+ * pledgedSrcSize must be correct. If srcSize is not known at init time, use
+ * value ZSTD_CONTENTSIZE_UNKNOWN. dict is loaded with ZSTD_dm_auto and ZSTD_dlm_byCopy.
+ */
ZSTDLIB_API size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize,
- ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize must be correct. If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. dict is loaded with ZSTD_dm_auto and ZSTD_dlm_byCopy. */
-ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict); /**< note : cdict will just be referenced, and must outlive compression session */
-ZSTDLIB_API size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize); /**< same as ZSTD_initCStream_usingCDict(), with control over frame parameters. pledgedSrcSize must be correct. If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. */
+ ZSTD_parameters params, unsigned long long pledgedSrcSize);
+/**! ZSTD_initCStream_usingCDict() :
+ * This function is deprecated, and equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_refCDict(zcs, cdict);
+ *
+ * note : cdict will just be referenced, and must outlive the compression session
+ */
+ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
+/**! ZSTD_initCStream_usingCDict_advanced() :
+ * This function is deprecated, and is approximately equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_setZstdFrameParams(zcs, fParams); // Set the zstd frame params and leave the rest as-is
+ * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ * ZSTD_CCtx_refCDict(zcs, cdict);
+ *
+ * same as ZSTD_initCStream_usingCDict(), with control over frame parameters.
+ * pledgedSrcSize must be correct. If srcSize is not known at init time, use
+ * value ZSTD_CONTENTSIZE_UNKNOWN.
+ */
+ZSTDLIB_API size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize);
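For reference, a sketch of the non-deprecated pattern these notes point to: reset the session, set parameters, reference the dictionary, then stream with ZSTD_compressStream2(). Placeholders: `zcs`, `cdict`, `pledgedSrcSize`, and src/dst buffers; `dst` is assumed to hold at least ZSTD_compressBound(srcSize) bytes so the flush loop terminates.

    ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);        /* ZSTD_CStream is a ZSTD_CCtx */
    ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, 3);
    ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);     /* or ZSTD_CONTENTSIZE_UNKNOWN */
    ZSTD_CCtx_refCDict(zcs, cdict);                       /* optional */

    ZSTD_inBuffer  input  = { src, srcSize, 0 };
    ZSTD_outBuffer output = { dst, dstCap,  0 };
    size_t remaining;
    do {
        remaining = ZSTD_compressStream2(zcs, &output, &input, ZSTD_e_end);
    } while (remaining != 0 && !ZSTD_isError(remaining)); /* 0 == frame fully flushed */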
/*! ZSTD_resetCStream() :
+ * This function is deprecated, and is equivalent to:
+ * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
+ *
* start a new frame, using same parameters from previous frame.
* This is typically useful to skip dictionary loading stage, since it will re-use it in-place.
* Note that zcs must be init at least once before using ZSTD_resetCStream().
@@ -1555,9 +1679,32 @@ ZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
/*===== Advanced Streaming decompression functions =====*/
-ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); /**< note: no dictionary will be used if dict == NULL or dictSize < 8 */
-ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict); /**< note : ddict is referenced, it must outlive decompression session */
-ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); /**< re-use decompression parameters from previous init; saves dictionary loading */
+/**
+ * This function is deprecated, and is equivalent to:
+ *
+ * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ * ZSTD_DCtx_loadDictionary(zds, dict, dictSize);
+ *
+ * note: no dictionary will be used if dict == NULL or dictSize < 8
+ */
+ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
+/**
+ * This function is deprecated, and is equivalent to:
+ *
+ * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ * ZSTD_DCtx_refDDict(zds, ddict);
+ *
+ * note : ddict is referenced, it must outlive the decompression session
+ */
+ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
+/**
+ * This function is deprecated, and is equivalent to:
+ *
+ * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
+ *
+ * re-use decompression parameters from previous init; saves dictionary loading
+ */
+ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
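Likewise, a sketch of the replacement for the deprecated decoder inits above: reset the session, attach the dictionary, then call ZSTD_decompressStream() as usual. Placeholders: `zds`, `ddict`, `cSrc`/`cSrcSize`, and a `dst` buffer assumed large enough for the whole frame.

    ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);        /* ZSTD_DStream is a ZSTD_DCtx */
    ZSTD_DCtx_refDDict(zds, ddict);                       /* or ZSTD_DCtx_loadDictionary(zds, dict, dictSize) */

    ZSTD_inBuffer  input  = { cSrc, cSrcSize, 0 };
    ZSTD_outBuffer output = { dst,  dstCap,   0 };
    while (input.pos < input.size) {
        size_t const ret = ZSTD_decompressStream(zds, &output, &input);
        if (ZSTD_isError(ret)) { /* handle error */ break; }
        if (ret == 0) break;                              /* frame fully decoded and flushed */
    }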
/*********************************************************************