Diffstat (limited to 'thirdparty/zstd/compress/zstdmt_compress.c')
-rw-r--r--  thirdparty/zstd/compress/zstdmt_compress.c | 32 +++++++++++++++++++++-----------
1 file changed, 21 insertions(+), 11 deletions(-)
diff --git a/thirdparty/zstd/compress/zstdmt_compress.c b/thirdparty/zstd/compress/zstdmt_compress.c
index 50454a50b9..22aa3e1245 100644
--- a/thirdparty/zstd/compress/zstdmt_compress.c
+++ b/thirdparty/zstd/compress/zstdmt_compress.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -472,8 +472,6 @@ ZSTDMT_serialState_reset(serialState_t* serialState,
ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
assert(params.ldmParams.hashRateLog < 32);
- serialState->ldmState.hashPower =
- ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength);
} else {
ZSTD_memset(&params.ldmParams, 0, sizeof(params.ldmParams));
}
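
The two deleted lines precomputed the multiplier power used by the Rabin-Karp-style rolling hash that long-distance matching (LDM) previously relied on; the updated LDM no longer uses a rolling hash, so the precomputation is dead. A minimal sketch of what such a "prime power" is for, with a hypothetical constant and helper names (illustrative, not zstd's actual code):

    #include <stdint.h>

    #define PRIME 0x9E3779B185EBCA87ULL   /* hypothetical odd multiplier */

    /* PRIME^(len-1), computed mod 2^64: the coefficient of the byte
     * about to leave an len-byte window. */
    static uint64_t prime_power(uint32_t len)
    {
        uint64_t p = 1;
        uint32_t i;
        for (i = 1; i < len; i++) p *= PRIME;
        return p;
    }

    /* Slide the window one byte to the right: drop `out`, add `in`. */
    static uint64_t roll(uint64_t hash, uint8_t out, uint8_t in, uint64_t pow)
    {
        return (hash - out * pow) * PRIME + in;
    }

Precomputing the power once per reset is what made each per-byte update O(1); with the rolling hash gone, serialState no longer needs to carry it.
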
@@ -486,10 +484,10 @@ ZSTDMT_serialState_reset(serialState_t* serialState,
size_t const hashSize = ((size_t)1 << hashLog) * sizeof(ldmEntry_t);
unsigned const bucketLog =
params.ldmParams.hashLog - params.ldmParams.bucketSizeLog;
- size_t const bucketSize = (size_t)1 << bucketLog;
unsigned const prevBucketLog =
serialState->params.ldmParams.hashLog -
serialState->params.ldmParams.bucketSizeLog;
+ size_t const numBuckets = (size_t)1 << bucketLog;
/* Size the seq pool tables */
ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, jobSize));
/* Reset the window */
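
The rename is behavior-preserving but fixes a misleading name: the value is the number of buckets, and since `bucketOffsets` stores one BYTE per bucket it is also the allocation size in bytes. A worked fragment with parameter values assumed for illustration:

    /* Example values only. */
    unsigned const hashLog       = 20;  /* 2^20 ldmEntry_t hash entries */
    unsigned const bucketSizeLog = 3;   /* 8 entries per bucket         */
    unsigned const bucketLog     = hashLog - bucketSizeLog;  /* 17      */
    size_t   const numBuckets    = (size_t)1 << bucketLog;   /* 131072  */
    /* bucketOffsets keeps one BYTE per bucket, so the malloc and the
     * memset below are numBuckets bytes; the old name `bucketSize`
     * read as "size of one bucket", which it never was. */
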
@@ -501,20 +499,20 @@ ZSTDMT_serialState_reset(serialState_t* serialState,
}
if (serialState->ldmState.bucketOffsets == NULL || prevBucketLog < bucketLog) {
ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem);
- serialState->ldmState.bucketOffsets = (BYTE*)ZSTD_customMalloc(bucketSize, cMem);
+ serialState->ldmState.bucketOffsets = (BYTE*)ZSTD_customMalloc(numBuckets, cMem);
}
if (!serialState->ldmState.hashTable || !serialState->ldmState.bucketOffsets)
return 1;
/* Zero the tables */
ZSTD_memset(serialState->ldmState.hashTable, 0, hashSize);
- ZSTD_memset(serialState->ldmState.bucketOffsets, 0, bucketSize);
+ ZSTD_memset(serialState->ldmState.bucketOffsets, 0, numBuckets);
/* Update window state and fill hash table with dict */
serialState->ldmState.loadedDictEnd = 0;
if (dictSize > 0) {
if (dictContentType == ZSTD_dct_rawContent) {
BYTE const* const dictEnd = (const BYTE*)dict + dictSize;
- ZSTD_window_update(&serialState->ldmState.window, dict, dictSize);
+ ZSTD_window_update(&serialState->ldmState.window, dict, dictSize, /* forceNonContiguous */ 0);
ZSTD_ldm_fillHashTable(&serialState->ldmState, (const BYTE*)dict, dictEnd, &params.ldmParams);
serialState->ldmState.loadedDictEnd = params.forceWindow ? 0 : (U32)(dictEnd - serialState->ldmState.window.base);
} else {
@@ -571,7 +569,7 @@ static void ZSTDMT_serialState_update(serialState_t* serialState,
assert(seqStore.seq != NULL && seqStore.pos == 0 &&
seqStore.size == 0 && seqStore.capacity > 0);
assert(src.size <= serialState->params.jobSize);
- ZSTD_window_update(&serialState->ldmState.window, src.start, src.size);
+ ZSTD_window_update(&serialState->ldmState.window, src.start, src.size, /* forceNonContiguous */ 0);
error = ZSTD_ldm_generateSequences(
&serialState->ldmState, &seqStore,
&serialState->params.ldmParams, src.start, src.size);
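
Both hunks above adapt to a new `forceNonContiguous` argument on `ZSTD_window_update()`. The call shape implied by the two updated call sites, written out as a prototype (inferred from the calls, not quoted from zstd's headers):

    /* Inferred prototype; treat as an assumption. */
    U32 ZSTD_window_update(ZSTD_window_t* window,
                           const void* src, size_t srcSize,
                           int forceNonContiguous);

A nonzero argument presumably forces the new segment to be treated as discontiguous with the existing window. Both serial-state call sites pass 0: the LDM thread consumes the stream strictly in order, so contiguous extension of the window remains valid.
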
@@ -683,6 +681,8 @@ static void ZSTDMT_compressionJob(void* jobDescription)
if (job->jobID != 0) jobParams.fParams.checksumFlag = 0;
/* Don't run LDM for the chunks, since we handle it externally */
jobParams.ldmParams.enableLdm = 0;
+ /* Correct nbWorkers to 0. */
+ jobParams.nbWorkers = 0;
/* init */
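
For background, `nbWorkers` is the knob that routes a compression through this multi-threaded path in the first place, so a job already executing on a worker must not inherit a nonzero value. In application code the parameter is set through the standard public API:

    #include <zstd.h>

    /* Standard public API: how an application opts into the
     * multi-threaded path that dispatches these jobs. */
    static ZSTD_CCtx* make_mt_cctx(void)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        if (cctx != NULL) {
            /* Fails or is a no-op if libzstd was built without
             * ZSTD_MULTITHREAD. */
            size_t const err =
                ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);
            if (ZSTD_isError(err)) { ZSTD_freeCCtx(cctx); return NULL; }
        }
        return cctx;
    }

The hunk above zeroes that field in the per-job params so each job compresses single-threaded instead of recursing into the MT dispatcher.
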
@@ -695,6 +695,10 @@ static void ZSTDMT_compressionJob(void* jobDescription)
{ size_t const forceWindowError = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob);
if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError);
}
+ if (!job->firstJob) {
+ size_t const err = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_deterministicRefPrefix, 0);
+ if (ZSTD_isError(err)) JOB_ERROR(err);
+ }
{ size_t const initError = ZSTD_compressBegin_advanced_internal(cctx,
job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, /* load dictionary in "content-only" mode (no header analysis) */
ZSTD_dtlm_fast,
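
A plausible reading of the new block, consistent with the assert added later in this patch: `ZSTD_c_deterministicRefPrefix`, when enabled, forces a referenced prefix to be loaded as a discontiguous segment (creating an ext-dict), and every job after the first relies on its prefix staying contiguous for repcode handling, so the parameter is explicitly disabled. A sketch of setting it on a context, mirroring what the hunk does per job (the parameter lives in zstd's experimental API):

    #define ZSTD_STATIC_LINKING_ONLY   /* experimental-API parameter */
    #include <zstd.h>

    /* Disable deterministic prefix loading, as the hunk above does
     * for every non-first job. */
    static size_t disable_deterministic_ref_prefix(ZSTD_CCtx* cctx)
    {
        return ZSTD_CCtx_setParameter(cctx, ZSTD_c_deterministicRefPrefix, 0);
    }
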
@@ -750,6 +754,13 @@ static void ZSTDMT_compressionJob(void* jobDescription)
if (ZSTD_isError(cSize)) JOB_ERROR(cSize);
lastCBlockSize = cSize;
} }
+ if (!job->firstJob) {
+ /* Double check that we don't have an ext-dict, because then our
+ * repcode invalidation doesn't work.
+ */
+ assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
+ }
+ ZSTD_CCtx_trace(cctx, 0);
_endJob:
ZSTDMT_serialState_ensureFinished(job->serial, job->jobID, job->cSize);
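
The new `ZSTD_CCtx_trace()` call reports each finished job to zstd's tracing hooks, and the assert pins down the invariant that the `ZSTD_c_deterministicRefPrefix` tweak above preserves: a non-first job must never see an ext-dict, because its repcode invalidation assumes a contiguous window. The predicate being asserted paraphrases the helper in zstd_compress_internal.h (field names are from the zstd sources, but treat this as a paraphrase rather than a verbatim quote):

    /* An "ext-dict" exists when part of the valid window lies below
     * dictLimit, i.e. is not contiguous with the current segment. */
    MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window)
    {
        return window.lowLimit < window.dictLimit;
    }
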
@@ -1239,9 +1250,8 @@ size_t ZSTDMT_initCStream_internal(
if (params.rsyncable) {
/* Aim for the targetSectionSize as the average job size. */
- U32 const jobSizeMB = (U32)(mtctx->targetSectionSize >> 20);
- U32 const rsyncBits = ZSTD_highbit32(jobSizeMB) + 20;
- assert(jobSizeMB >= 1);
+ U32 const jobSizeKB = (U32)(mtctx->targetSectionSize >> 10);
+ U32 const rsyncBits = (assert(jobSizeKB >= 1), ZSTD_highbit32(jobSizeKB) + 10);
DEBUGLOG(4, "rsyncLog = %u", rsyncBits);
mtctx->rsync.hash = 0;
mtctx->rsync.hitMask = (1ULL << rsyncBits) - 1;
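
Two things change here. First, folding the assert into the initializer via the comma operator keeps the check ahead of `ZSTD_highbit32()` (which is undefined for 0) without placing a statement between declarations, which matters for C89 builds. Second, switching from MB to KB granularity lets rsyncable mode handle target section sizes below 1 MiB, where the old `jobSizeMB` would round to 0 and trip the assert. A worked example, assuming a targetSectionSize of 2 MiB:

    /* Example value only. */
    size_t const targetSectionSize = (size_t)2 << 20;       /* 2 MiB        */
    U32 const jobSizeKB = (U32)(targetSectionSize >> 10);   /* 2048         */
    U32 const rsyncBits = ZSTD_highbit32(jobSizeKB) + 10;   /* 11 + 10 = 21 */
    U64 const hitMask   = (1ULL << rsyncBits) - 1;
    /* A hit -- (hash & hitMask) == hitMask -- then lands on average
     * once every 2^21 bytes = 2 MiB, matching the target job size.
     * KB granularity keeps the assert satisfiable down to 1 KiB jobs. */
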