Diffstat (limited to 'thirdparty/libwebp/src/enc')
-rw-r--r--  thirdparty/libwebp/src/enc/alpha_enc.c                        2
-rw-r--r--  thirdparty/libwebp/src/enc/backward_references_cost_enc.c    75
-rw-r--r--  thirdparty/libwebp/src/enc/backward_references_enc.c         86
-rw-r--r--  thirdparty/libwebp/src/enc/backward_references_enc.h         12
-rw-r--r--  thirdparty/libwebp/src/enc/histogram_enc.c                   208
-rw-r--r--  thirdparty/libwebp/src/enc/histogram_enc.h                    24
-rw-r--r--  thirdparty/libwebp/src/enc/picture_csp_enc.c                 492
-rw-r--r--  thirdparty/libwebp/src/enc/picture_enc.c                      44
-rw-r--r--  thirdparty/libwebp/src/enc/picture_rescale_enc.c              72
-rw-r--r--  thirdparty/libwebp/src/enc/picture_tools_enc.c                45
-rw-r--r--  thirdparty/libwebp/src/enc/predictor_enc.c                    50
-rw-r--r--  thirdparty/libwebp/src/enc/quant_enc.c                        62
-rw-r--r--  thirdparty/libwebp/src/enc/vp8i_enc.h                         24
-rw-r--r--  thirdparty/libwebp/src/enc/vp8l_enc.c                        527
-rw-r--r--  thirdparty/libwebp/src/enc/vp8li_enc.h                        26
-rw-r--r--  thirdparty/libwebp/src/enc/webp_enc.c                          4
16 files changed, 767 insertions(+), 986 deletions(-)
diff --git a/thirdparty/libwebp/src/enc/alpha_enc.c b/thirdparty/libwebp/src/enc/alpha_enc.c
index 0b54f3e6ec..f7c02690e3 100644
--- a/thirdparty/libwebp/src/enc/alpha_enc.c
+++ b/thirdparty/libwebp/src/enc/alpha_enc.c
@@ -86,7 +86,7 @@ static int EncodeLossless(const uint8_t* const data, int width, int height,
// a decoder bug related to alpha with color cache.
// See: https://code.google.com/p/webp/issues/detail?id=239
// Need to re-enable this later.
- ok = (VP8LEncodeStream(&config, &picture, bw, 0 /*use_cache*/) == VP8_ENC_OK);
+ ok = VP8LEncodeStream(&config, &picture, bw, /*use_cache=*/0);
WebPPictureFree(&picture);
ok = ok && !bw->error_;
if (!ok) {
diff --git a/thirdparty/libwebp/src/enc/backward_references_cost_enc.c b/thirdparty/libwebp/src/enc/backward_references_cost_enc.c
index 516abd73eb..6968ef3c9f 100644
--- a/thirdparty/libwebp/src/enc/backward_references_cost_enc.c
+++ b/thirdparty/libwebp/src/enc/backward_references_cost_enc.c
@@ -15,10 +15,11 @@
//
#include <assert.h>
+#include <float.h>
+#include "src/dsp/lossless_common.h"
#include "src/enc/backward_references_enc.h"
#include "src/enc/histogram_enc.h"
-#include "src/dsp/lossless_common.h"
#include "src/utils/color_cache_utils.h"
#include "src/utils/utils.h"
@@ -30,15 +31,15 @@ extern void VP8LBackwardRefsCursorAdd(VP8LBackwardRefs* const refs,
const PixOrCopy v);
typedef struct {
- double alpha_[VALUES_IN_BYTE];
- double red_[VALUES_IN_BYTE];
- double blue_[VALUES_IN_BYTE];
- double distance_[NUM_DISTANCE_CODES];
- double* literal_;
+ float alpha_[VALUES_IN_BYTE];
+ float red_[VALUES_IN_BYTE];
+ float blue_[VALUES_IN_BYTE];
+ float distance_[NUM_DISTANCE_CODES];
+ float* literal_;
} CostModel;
static void ConvertPopulationCountTableToBitEstimates(
- int num_symbols, const uint32_t population_counts[], double output[]) {
+ int num_symbols, const uint32_t population_counts[], float output[]) {
uint32_t sum = 0;
int nonzeros = 0;
int i;
@@ -51,7 +52,7 @@ static void ConvertPopulationCountTableToBitEstimates(
if (nonzeros <= 1) {
memset(output, 0, num_symbols * sizeof(*output));
} else {
- const double logsum = VP8LFastLog2(sum);
+ const float logsum = VP8LFastLog2(sum);
for (i = 0; i < num_symbols; ++i) {
output[i] = logsum - VP8LFastLog2(population_counts[i]);
}
@@ -75,8 +76,8 @@ static int CostModelBuild(CostModel* const m, int xsize, int cache_bits,
}
ConvertPopulationCountTableToBitEstimates(
- VP8LHistogramNumCodes(histo->palette_code_bits_),
- histo->literal_, m->literal_);
+ VP8LHistogramNumCodes(histo->palette_code_bits_), histo->literal_,
+ m->literal_);
ConvertPopulationCountTableToBitEstimates(
VALUES_IN_BYTE, histo->red_, m->red_);
ConvertPopulationCountTableToBitEstimates(
@@ -92,27 +93,27 @@ static int CostModelBuild(CostModel* const m, int xsize, int cache_bits,
return ok;
}
-static WEBP_INLINE double GetLiteralCost(const CostModel* const m, uint32_t v) {
+static WEBP_INLINE float GetLiteralCost(const CostModel* const m, uint32_t v) {
return m->alpha_[v >> 24] +
m->red_[(v >> 16) & 0xff] +
m->literal_[(v >> 8) & 0xff] +
m->blue_[v & 0xff];
}
-static WEBP_INLINE double GetCacheCost(const CostModel* const m, uint32_t idx) {
+static WEBP_INLINE float GetCacheCost(const CostModel* const m, uint32_t idx) {
const int literal_idx = VALUES_IN_BYTE + NUM_LENGTH_CODES + idx;
return m->literal_[literal_idx];
}
-static WEBP_INLINE double GetLengthCost(const CostModel* const m,
- uint32_t length) {
+static WEBP_INLINE float GetLengthCost(const CostModel* const m,
+ uint32_t length) {
int code, extra_bits;
VP8LPrefixEncodeBits(length, &code, &extra_bits);
return m->literal_[VALUES_IN_BYTE + code] + extra_bits;
}
-static WEBP_INLINE double GetDistanceCost(const CostModel* const m,
- uint32_t distance) {
+static WEBP_INLINE float GetDistanceCost(const CostModel* const m,
+ uint32_t distance) {
int code, extra_bits;
VP8LPrefixEncodeBits(distance, &code, &extra_bits);
return m->distance_[code] + extra_bits;
@@ -122,20 +123,20 @@ static WEBP_INLINE void AddSingleLiteralWithCostModel(
const uint32_t* const argb, VP8LColorCache* const hashers,
const CostModel* const cost_model, int idx, int use_color_cache,
float prev_cost, float* const cost, uint16_t* const dist_array) {
- double cost_val = prev_cost;
+ float cost_val = prev_cost;
const uint32_t color = argb[idx];
const int ix = use_color_cache ? VP8LColorCacheContains(hashers, color) : -1;
if (ix >= 0) {
// use_color_cache is true and hashers contains color
- const double mul0 = 0.68;
+ const float mul0 = 0.68f;
cost_val += GetCacheCost(cost_model, ix) * mul0;
} else {
- const double mul1 = 0.82;
+ const float mul1 = 0.82f;
if (use_color_cache) VP8LColorCacheInsert(hashers, color);
cost_val += GetLiteralCost(cost_model, color) * mul1;
}
if (cost[idx] > cost_val) {
- cost[idx] = (float)cost_val;
+ cost[idx] = cost_val;
dist_array[idx] = 1; // only one is inserted.
}
}
@@ -172,7 +173,7 @@ struct CostInterval {
// The GetLengthCost(cost_model, k) are cached in a CostCacheInterval.
typedef struct {
- double cost_;
+ float cost_;
int start_;
int end_; // Exclusive.
} CostCacheInterval;
@@ -187,7 +188,7 @@ typedef struct {
int count_; // The number of stored intervals.
CostCacheInterval* cache_intervals_;
size_t cache_intervals_size_;
- double cost_cache_[MAX_LENGTH]; // Contains the GetLengthCost(cost_model, k).
+ float cost_cache_[MAX_LENGTH]; // Contains the GetLengthCost(cost_model, k).
float* costs_;
uint16_t* dist_array_;
// Most of the time, we only need few intervals -> use a free-list, to avoid
@@ -262,10 +263,13 @@ static int CostManagerInit(CostManager* const manager,
CostManagerInitFreeList(manager);
// Fill in the cost_cache_.
+ // Has to be done in two passes due to a GCC bug on i686
+ // related to https://gcc.gnu.org/bugzilla/show_bug.cgi?id=323
+ for (i = 0; i < cost_cache_size; ++i) {
+ manager->cost_cache_[i] = GetLengthCost(cost_model, i);
+ }
manager->cache_intervals_size_ = 1;
- manager->cost_cache_[0] = GetLengthCost(cost_model, 0);
for (i = 1; i < cost_cache_size; ++i) {
- manager->cost_cache_[i] = GetLengthCost(cost_model, i);
// Get the number of bound intervals.
if (manager->cost_cache_[i] != manager->cost_cache_[i - 1]) {
++manager->cache_intervals_size_;
@@ -294,7 +298,7 @@ static int CostManagerInit(CostManager* const manager,
cur->end_ = 1;
cur->cost_ = manager->cost_cache_[0];
for (i = 1; i < cost_cache_size; ++i) {
- const double cost_val = manager->cost_cache_[i];
+ const float cost_val = manager->cost_cache_[i];
if (cost_val != cur->cost_) {
++cur;
// Initialize an interval.
@@ -303,6 +307,8 @@ static int CostManagerInit(CostManager* const manager,
}
cur->end_ = i + 1;
}
+ assert((size_t)(cur - manager->cache_intervals_) + 1 ==
+ manager->cache_intervals_size_);
}
manager->costs_ = (float*)WebPSafeMalloc(pix_count, sizeof(*manager->costs_));
@@ -311,7 +317,7 @@ static int CostManagerInit(CostManager* const manager,
return 0;
}
// Set the initial costs_ high for every pixel as we will keep the minimum.
- for (i = 0; i < pix_count; ++i) manager->costs_[i] = 1e38f;
+ for (i = 0; i < pix_count; ++i) manager->costs_[i] = FLT_MAX;
return 1;
}
@@ -457,7 +463,7 @@ static WEBP_INLINE void InsertInterval(CostManager* const manager,
// If handling the interval or one of its subintervals becomes too heavy, its
// contribution is added to the costs right away.
static WEBP_INLINE void PushInterval(CostManager* const manager,
- double distance_cost, int position,
+ float distance_cost, int position,
int len) {
size_t i;
CostInterval* interval = manager->head_;
@@ -474,7 +480,7 @@ static WEBP_INLINE void PushInterval(CostManager* const manager,
const int k = j - position;
float cost_tmp;
assert(k >= 0 && k < MAX_LENGTH);
- cost_tmp = (float)(distance_cost + manager->cost_cache_[k]);
+ cost_tmp = distance_cost + manager->cost_cache_[k];
if (manager->costs_[j] > cost_tmp) {
manager->costs_[j] = cost_tmp;
@@ -492,7 +498,7 @@ static WEBP_INLINE void PushInterval(CostManager* const manager,
const int end = position + (cost_cache_intervals[i].end_ > len
? len
: cost_cache_intervals[i].end_);
- const float cost = (float)(distance_cost + cost_cache_intervals[i].cost_);
+ const float cost = distance_cost + cost_cache_intervals[i].cost_;
for (; interval != NULL && interval->start_ < end;
interval = interval_next) {
@@ -570,22 +576,21 @@ static int BackwardReferencesHashChainDistanceOnly(
const int pix_count = xsize * ysize;
const int use_color_cache = (cache_bits > 0);
const size_t literal_array_size =
- sizeof(double) * (NUM_LITERAL_CODES + NUM_LENGTH_CODES +
- ((cache_bits > 0) ? (1 << cache_bits) : 0));
+ sizeof(float) * (VP8LHistogramNumCodes(cache_bits));
const size_t cost_model_size = sizeof(CostModel) + literal_array_size;
CostModel* const cost_model =
(CostModel*)WebPSafeCalloc(1ULL, cost_model_size);
VP8LColorCache hashers;
CostManager* cost_manager =
- (CostManager*)WebPSafeMalloc(1ULL, sizeof(*cost_manager));
+ (CostManager*)WebPSafeCalloc(1ULL, sizeof(*cost_manager));
int offset_prev = -1, len_prev = -1;
- double offset_cost = -1;
+ float offset_cost = -1.f;
int first_offset_is_constant = -1; // initialized with 'impossible' value
int reach = 0;
if (cost_model == NULL || cost_manager == NULL) goto Error;
- cost_model->literal_ = (double*)(cost_model + 1);
+ cost_model->literal_ = (float*)(cost_model + 1);
if (use_color_cache) {
cc_init = VP8LColorCacheInit(&hashers, cache_bits);
if (!cc_init) goto Error;
@@ -675,7 +680,7 @@ static int BackwardReferencesHashChainDistanceOnly(
}
ok = !refs->error_;
-Error:
+ Error:
if (cc_init) VP8LColorCacheClear(&hashers);
CostManagerClear(cost_manager);
WebPSafeFree(cost_model);
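
The reordered cost_cache_ fill above (two passes instead of computing and comparing in one loop) works around the x87 excess-precision behaviour tracked as GCC bug 323: on i686 a float still held in an 80-bit register can compare unequal to the same value after it has been spilled to memory, which would miscount the intervals. A minimal sketch of the safe pattern, with a hypothetical ComputeCost() standing in for GetLengthCost(cost_model, i):

#include <stddef.h>

/* Hypothetical stand-in for GetLengthCost(cost_model, i). */
static float ComputeCost(int i) { return (float)(i / 4); }

/* Fill the whole table first, then compare adjacent entries, so that both
 * operands of the comparison have been rounded to 32-bit floats in memory. */
static size_t CountIntervals(float* const cache, int n) {
  size_t intervals = 1;
  int i;
  for (i = 0; i < n; ++i) cache[i] = ComputeCost(i);   /* pass 1: fill */
  for (i = 1; i < n; ++i) {                            /* pass 2: compare */
    if (cache[i] != cache[i - 1]) ++intervals;
  }
  return intervals;
}
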
diff --git a/thirdparty/libwebp/src/enc/backward_references_enc.c b/thirdparty/libwebp/src/enc/backward_references_enc.c
index 519b36a091..49a0fac034 100644
--- a/thirdparty/libwebp/src/enc/backward_references_enc.c
+++ b/thirdparty/libwebp/src/enc/backward_references_enc.c
@@ -10,6 +10,8 @@
// Author: Jyrki Alakuijala (jyrki@google.com)
//
+#include "src/enc/backward_references_enc.h"
+
#include <assert.h>
#include <float.h>
#include <math.h>
@@ -17,10 +19,11 @@
#include "src/dsp/dsp.h"
#include "src/dsp/lossless.h"
#include "src/dsp/lossless_common.h"
-#include "src/enc/backward_references_enc.h"
#include "src/enc/histogram_enc.h"
+#include "src/enc/vp8i_enc.h"
#include "src/utils/color_cache_utils.h"
#include "src/utils/utils.h"
+#include "src/webp/encode.h"
#define MIN_BLOCK_SIZE 256 // minimum block size for backward references
@@ -255,10 +258,13 @@ static WEBP_INLINE int MaxFindCopyLength(int len) {
int VP8LHashChainFill(VP8LHashChain* const p, int quality,
const uint32_t* const argb, int xsize, int ysize,
- int low_effort) {
+ int low_effort, const WebPPicture* const pic,
+ int percent_range, int* const percent) {
const int size = xsize * ysize;
const int iter_max = GetMaxItersForQuality(quality);
const uint32_t window_size = GetWindowSizeForHashChain(quality, xsize);
+ int remaining_percent = percent_range;
+ int percent_start = *percent;
int pos;
int argb_comp;
uint32_t base_position;
@@ -276,7 +282,13 @@ int VP8LHashChainFill(VP8LHashChain* const p, int quality,
hash_to_first_index =
(int32_t*)WebPSafeMalloc(HASH_SIZE, sizeof(*hash_to_first_index));
- if (hash_to_first_index == NULL) return 0;
+ if (hash_to_first_index == NULL) {
+ WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
+ return 0;
+ }
+
+ percent_range = remaining_percent / 2;
+ remaining_percent -= percent_range;
// Set the int32_t array to -1.
memset(hash_to_first_index, 0xff, HASH_SIZE * sizeof(*hash_to_first_index));
@@ -323,12 +335,22 @@ int VP8LHashChainFill(VP8LHashChain* const p, int quality,
hash_to_first_index[hash_code] = pos++;
argb_comp = argb_comp_next;
}
+
+ if (!WebPReportProgress(
+ pic, percent_start + percent_range * pos / (size - 2), percent)) {
+ WebPSafeFree(hash_to_first_index);
+ return 0;
+ }
}
// Process the penultimate pixel.
chain[pos] = hash_to_first_index[GetPixPairHash64(argb + pos)];
WebPSafeFree(hash_to_first_index);
+ percent_start += percent_range;
+ if (!WebPReportProgress(pic, percent_start, percent)) return 0;
+ percent_range = remaining_percent;
+
// Find the best match interval at each pixel, defined by an offset to the
// pixel and a length. The right-most pixel cannot match anything to the right
// (hence a best length of 0) and the left-most pixel nothing to the left
@@ -417,8 +439,17 @@ int VP8LHashChainFill(VP8LHashChain* const p, int quality,
max_base_position = base_position;
}
}
+
+ if (!WebPReportProgress(pic,
+ percent_start + percent_range *
+ (size - 2 - base_position) /
+ (size - 2),
+ percent)) {
+ return 0;
+ }
}
- return 1;
+
+ return WebPReportProgress(pic, percent_start + percent_range, percent);
}
static WEBP_INLINE void AddSingleLiteral(uint32_t pixel, int use_color_cache,
@@ -728,7 +759,7 @@ static int CalculateBestCacheSize(const uint32_t* argb, int quality,
int* const best_cache_bits) {
int i;
const int cache_bits_max = (quality <= 25) ? 0 : *best_cache_bits;
- double entropy_min = MAX_ENTROPY;
+ float entropy_min = MAX_ENTROPY;
int cc_init[MAX_COLOR_CACHE_BITS + 1] = { 0 };
VP8LColorCache hashers[MAX_COLOR_CACHE_BITS + 1];
VP8LRefsCursor c = VP8LRefsCursorInit(refs);
@@ -813,14 +844,14 @@ static int CalculateBestCacheSize(const uint32_t* argb, int quality,
}
for (i = 0; i <= cache_bits_max; ++i) {
- const double entropy = VP8LHistogramEstimateBits(histos[i]);
+ const float entropy = VP8LHistogramEstimateBits(histos[i]);
if (i == 0 || entropy < entropy_min) {
entropy_min = entropy;
*best_cache_bits = i;
}
}
ok = 1;
-Error:
+ Error:
for (i = 0; i <= cache_bits_max; ++i) {
if (cc_init[i]) VP8LColorCacheClear(&hashers[i]);
VP8LFreeHistogram(histos[i]);
@@ -890,7 +921,7 @@ static int GetBackwardReferences(int width, int height,
int i, lz77_type;
// Index 0 is for a color cache, index 1 for no cache (if needed).
int lz77_types_best[2] = {0, 0};
- double bit_costs_best[2] = {DBL_MAX, DBL_MAX};
+ float bit_costs_best[2] = {FLT_MAX, FLT_MAX};
VP8LHashChain hash_chain_box;
VP8LBackwardRefs* const refs_tmp = &refs[do_no_cache ? 2 : 1];
int status = 0;
@@ -902,7 +933,7 @@ static int GetBackwardReferences(int width, int height,
for (lz77_type = 1; lz77_types_to_try;
lz77_types_to_try &= ~lz77_type, lz77_type <<= 1) {
int res = 0;
- double bit_cost = 0.;
+ float bit_cost = 0.f;
if ((lz77_types_to_try & lz77_type) == 0) continue;
switch (lz77_type) {
case kLZ77RLE:
@@ -976,15 +1007,16 @@ static int GetBackwardReferences(int width, int height,
const VP8LHashChain* const hash_chain_tmp =
(lz77_types_best[i] == kLZ77Standard) ? hash_chain : &hash_chain_box;
const int cache_bits = (i == 1) ? 0 : *cache_bits_best;
- if (VP8LBackwardReferencesTraceBackwards(width, height, argb, cache_bits,
- hash_chain_tmp, &refs[i],
- refs_tmp)) {
- double bit_cost_trace;
- VP8LHistogramCreate(histo, refs_tmp, cache_bits);
- bit_cost_trace = VP8LHistogramEstimateBits(histo);
- if (bit_cost_trace < bit_costs_best[i]) {
- BackwardRefsSwap(refs_tmp, &refs[i]);
- }
+ float bit_cost_trace;
+ if (!VP8LBackwardReferencesTraceBackwards(width, height, argb, cache_bits,
+ hash_chain_tmp, &refs[i],
+ refs_tmp)) {
+ goto Error;
+ }
+ VP8LHistogramCreate(histo, refs_tmp, cache_bits);
+ bit_cost_trace = VP8LHistogramEstimateBits(histo);
+ if (bit_cost_trace < bit_costs_best[i]) {
+ BackwardRefsSwap(refs_tmp, &refs[i]);
}
}
@@ -1000,31 +1032,37 @@ static int GetBackwardReferences(int width, int height,
}
status = 1;
-Error:
+ Error:
VP8LHashChainClear(&hash_chain_box);
VP8LFreeHistogram(histo);
return status;
}
-WebPEncodingError VP8LGetBackwardReferences(
+int VP8LGetBackwardReferences(
int width, int height, const uint32_t* const argb, int quality,
int low_effort, int lz77_types_to_try, int cache_bits_max, int do_no_cache,
const VP8LHashChain* const hash_chain, VP8LBackwardRefs* const refs,
- int* const cache_bits_best) {
+ int* const cache_bits_best, const WebPPicture* const pic, int percent_range,
+ int* const percent) {
if (low_effort) {
VP8LBackwardRefs* refs_best;
*cache_bits_best = cache_bits_max;
refs_best = GetBackwardReferencesLowEffort(
width, height, argb, cache_bits_best, hash_chain, refs);
- if (refs_best == NULL) return VP8_ENC_ERROR_OUT_OF_MEMORY;
+ if (refs_best == NULL) {
+ WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
+ return 0;
+ }
// Set it in first position.
BackwardRefsSwap(refs_best, &refs[0]);
} else {
if (!GetBackwardReferences(width, height, argb, quality, lz77_types_to_try,
cache_bits_max, do_no_cache, hash_chain, refs,
cache_bits_best)) {
- return VP8_ENC_ERROR_OUT_OF_MEMORY;
+ WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
+ return 0;
}
}
- return VP8_ENC_OK;
+
+ return WebPReportProgress(pic, *percent + percent_range, percent);
}
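
All the progress plumbing added to this file follows the same pattern: the caller passes a percentage budget (percent_range) together with the running counter (*percent), the function splits the budget over its internal phases, and each phase periodically calls WebPReportProgress() with percent_start + range * done / total, returning 0 if the progress hook asked to abort. A rough sketch of that pattern, assuming a hypothetical two-pass worker:

#include "src/enc/vp8i_enc.h"   /* WebPReportProgress(), WebPPicture */

/* Sketch of the budget splitting used by VP8LHashChainFill() above. */
static int RunWithProgress(const WebPPicture* const pic, int total,
                           int percent_range, int* const percent) {
  const int percent_start = *percent;
  const int range1 = percent_range / 2;        /* budget for the first pass  */
  const int range2 = percent_range - range1;   /* remainder for the second   */
  int pos;
  for (pos = 0; pos < total; ++pos) {
    /* ... first pass work on element 'pos' ... */
    if (!WebPReportProgress(pic, percent_start + range1 * pos / total,
                            percent)) {
      return 0;   /* user abort; pic->error_code is already set */
    }
  }
  /* ... second pass work, reported against 'range2' ... */
  return WebPReportProgress(pic, percent_start + range1 + range2, percent);
}
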
diff --git a/thirdparty/libwebp/src/enc/backward_references_enc.h b/thirdparty/libwebp/src/enc/backward_references_enc.h
index 4c0267b41e..4dff1c27b5 100644
--- a/thirdparty/libwebp/src/enc/backward_references_enc.h
+++ b/thirdparty/libwebp/src/enc/backward_references_enc.h
@@ -134,10 +134,11 @@ struct VP8LHashChain {
// Must be called first, to set size.
int VP8LHashChainInit(VP8LHashChain* const p, int size);
-// Pre-compute the best matches for argb.
+// Pre-compute the best matches for argb. pic and percent are for progress.
int VP8LHashChainFill(VP8LHashChain* const p, int quality,
const uint32_t* const argb, int xsize, int ysize,
- int low_effort);
+ int low_effort, const WebPPicture* const pic,
+ int percent_range, int* const percent);
void VP8LHashChainClear(VP8LHashChain* const p); // release memory
static WEBP_INLINE int VP8LHashChainFindOffset(const VP8LHashChain* const p,
@@ -227,11 +228,14 @@ enum VP8LLZ77Type {
// VP8LBackwardRefs is put in the first element, the best value with no-cache in
// the second element.
// In both cases, the last element is used as temporary internally.
-WebPEncodingError VP8LGetBackwardReferences(
+// pic and percent are for progress.
+// Returns false in case of error (stored in pic->error_code).
+int VP8LGetBackwardReferences(
int width, int height, const uint32_t* const argb, int quality,
int low_effort, int lz77_types_to_try, int cache_bits_max, int do_no_cache,
const VP8LHashChain* const hash_chain, VP8LBackwardRefs* const refs,
- int* const cache_bits_best);
+ int* const cache_bits_best, const WebPPicture* const pic, int percent_range,
+ int* const percent);
#ifdef __cplusplus
}
diff --git a/thirdparty/libwebp/src/enc/histogram_enc.c b/thirdparty/libwebp/src/enc/histogram_enc.c
index 38a0cebcab..8418def2e1 100644
--- a/thirdparty/libwebp/src/enc/histogram_enc.c
+++ b/thirdparty/libwebp/src/enc/histogram_enc.c
@@ -13,15 +13,17 @@
#include "src/webp/config.h"
#endif
+#include <float.h>
#include <math.h>
-#include "src/enc/backward_references_enc.h"
-#include "src/enc/histogram_enc.h"
#include "src/dsp/lossless.h"
#include "src/dsp/lossless_common.h"
+#include "src/enc/backward_references_enc.h"
+#include "src/enc/histogram_enc.h"
+#include "src/enc/vp8i_enc.h"
#include "src/utils/utils.h"
-#define MAX_COST 1.e38
+#define MAX_BIT_COST FLT_MAX
// Number of partitions for the three dominant (literal, red and blue) symbol
// costs.
@@ -228,8 +230,8 @@ void VP8LHistogramAddSinglePixOrCopy(VP8LHistogram* const histo,
// -----------------------------------------------------------------------------
// Entropy-related functions.
-static WEBP_INLINE double BitsEntropyRefine(const VP8LBitEntropy* entropy) {
- double mix;
+static WEBP_INLINE float BitsEntropyRefine(const VP8LBitEntropy* entropy) {
+ float mix;
if (entropy->nonzeros < 5) {
if (entropy->nonzeros <= 1) {
return 0;
@@ -238,67 +240,67 @@ static WEBP_INLINE double BitsEntropyRefine(const VP8LBitEntropy* entropy) {
// Let's mix in a bit of entropy to favor good clustering when
// distributions of these are combined.
if (entropy->nonzeros == 2) {
- return 0.99 * entropy->sum + 0.01 * entropy->entropy;
+ return 0.99f * entropy->sum + 0.01f * entropy->entropy;
}
// No matter what the entropy says, we cannot be better than min_limit
// with Huffman coding. I am mixing a bit of entropy into the
// min_limit since it produces much better (~0.5 %) compression results
// perhaps because of better entropy clustering.
if (entropy->nonzeros == 3) {
- mix = 0.95;
+ mix = 0.95f;
} else {
- mix = 0.7; // nonzeros == 4.
+ mix = 0.7f; // nonzeros == 4.
}
} else {
- mix = 0.627;
+ mix = 0.627f;
}
{
- double min_limit = 2 * entropy->sum - entropy->max_val;
- min_limit = mix * min_limit + (1.0 - mix) * entropy->entropy;
+ float min_limit = 2.f * entropy->sum - entropy->max_val;
+ min_limit = mix * min_limit + (1.f - mix) * entropy->entropy;
return (entropy->entropy < min_limit) ? min_limit : entropy->entropy;
}
}
-double VP8LBitsEntropy(const uint32_t* const array, int n) {
+float VP8LBitsEntropy(const uint32_t* const array, int n) {
VP8LBitEntropy entropy;
VP8LBitsEntropyUnrefined(array, n, &entropy);
return BitsEntropyRefine(&entropy);
}
-static double InitialHuffmanCost(void) {
+static float InitialHuffmanCost(void) {
// Small bias because Huffman code length is typically not stored in
// full length.
static const int kHuffmanCodeOfHuffmanCodeSize = CODE_LENGTH_CODES * 3;
- static const double kSmallBias = 9.1;
+ static const float kSmallBias = 9.1f;
return kHuffmanCodeOfHuffmanCodeSize - kSmallBias;
}
// Finalize the Huffman cost based on streak numbers and length type (<3 or >=3)
-static double FinalHuffmanCost(const VP8LStreaks* const stats) {
+static float FinalHuffmanCost(const VP8LStreaks* const stats) {
// The constants in this function are experimental and got rounded from
// their original values in 1/8 when switched to 1/1024.
- double retval = InitialHuffmanCost();
+ float retval = InitialHuffmanCost();
// Second coefficient: Many zeros in the histogram are covered efficiently
// by a run-length encode. Originally 2/8.
- retval += stats->counts[0] * 1.5625 + 0.234375 * stats->streaks[0][1];
+ retval += stats->counts[0] * 1.5625f + 0.234375f * stats->streaks[0][1];
// Second coefficient: Constant values are encoded less efficiently, but still
// RLE'ed. Originally 6/8.
- retval += stats->counts[1] * 2.578125 + 0.703125 * stats->streaks[1][1];
+ retval += stats->counts[1] * 2.578125f + 0.703125f * stats->streaks[1][1];
// 0s are usually encoded more efficiently than non-0s.
// Originally 15/8.
- retval += 1.796875 * stats->streaks[0][0];
+ retval += 1.796875f * stats->streaks[0][0];
// Originally 26/8.
- retval += 3.28125 * stats->streaks[1][0];
+ retval += 3.28125f * stats->streaks[1][0];
return retval;
}
// Get the symbol entropy for the distribution 'population'.
// Set 'trivial_sym', if there's only one symbol present in the distribution.
-static double PopulationCost(const uint32_t* const population, int length,
- uint32_t* const trivial_sym,
- uint8_t* const is_used) {
+static float PopulationCost(const uint32_t* const population, int length,
+ uint32_t* const trivial_sym,
+ uint8_t* const is_used) {
VP8LBitEntropy bit_entropy;
VP8LStreaks stats;
VP8LGetEntropyUnrefined(population, length, &bit_entropy, &stats);
@@ -314,11 +316,10 @@ static double PopulationCost(const uint32_t* const population, int length,
// trivial_at_end is 1 if the two histograms only have one element that is
// non-zero: both the zero-th one, or both the last one.
-static WEBP_INLINE double GetCombinedEntropy(const uint32_t* const X,
- const uint32_t* const Y,
- int length, int is_X_used,
- int is_Y_used,
- int trivial_at_end) {
+static WEBP_INLINE float GetCombinedEntropy(const uint32_t* const X,
+ const uint32_t* const Y, int length,
+ int is_X_used, int is_Y_used,
+ int trivial_at_end) {
VP8LStreaks stats;
if (trivial_at_end) {
// This configuration is due to palettization that transforms an indexed
@@ -356,7 +357,7 @@ static WEBP_INLINE double GetCombinedEntropy(const uint32_t* const X,
}
// Estimates the Entropy + Huffman + other block overhead size cost.
-double VP8LHistogramEstimateBits(VP8LHistogram* const p) {
+float VP8LHistogramEstimateBits(VP8LHistogram* const p) {
return
PopulationCost(p->literal_, VP8LHistogramNumCodes(p->palette_code_bits_),
NULL, &p->is_used_[0])
@@ -373,8 +374,7 @@ double VP8LHistogramEstimateBits(VP8LHistogram* const p) {
static int GetCombinedHistogramEntropy(const VP8LHistogram* const a,
const VP8LHistogram* const b,
- double cost_threshold,
- double* cost) {
+ float cost_threshold, float* cost) {
const int palette_code_bits = a->palette_code_bits_;
int trivial_at_end = 0;
assert(a->palette_code_bits_ == b->palette_code_bits_);
@@ -439,12 +439,11 @@ static WEBP_INLINE void HistogramAdd(const VP8LHistogram* const a,
// Since the previous score passed is 'cost_threshold', we only need to compare
// the partial cost against 'cost_threshold + C(a) + C(b)' to possibly bail-out
// early.
-static double HistogramAddEval(const VP8LHistogram* const a,
- const VP8LHistogram* const b,
- VP8LHistogram* const out,
- double cost_threshold) {
- double cost = 0;
- const double sum_cost = a->bit_cost_ + b->bit_cost_;
+static float HistogramAddEval(const VP8LHistogram* const a,
+ const VP8LHistogram* const b,
+ VP8LHistogram* const out, float cost_threshold) {
+ float cost = 0;
+ const float sum_cost = a->bit_cost_ + b->bit_cost_;
cost_threshold += sum_cost;
if (GetCombinedHistogramEntropy(a, b, cost_threshold, &cost)) {
@@ -459,10 +458,10 @@ static double HistogramAddEval(const VP8LHistogram* const a,
// Same as HistogramAddEval(), except that the resulting histogram
// is not stored. Only the cost C(a+b) - C(a) is evaluated. We omit
// the term C(b) which is constant over all the evaluations.
-static double HistogramAddThresh(const VP8LHistogram* const a,
- const VP8LHistogram* const b,
- double cost_threshold) {
- double cost;
+static float HistogramAddThresh(const VP8LHistogram* const a,
+ const VP8LHistogram* const b,
+ float cost_threshold) {
+ float cost;
assert(a != NULL && b != NULL);
cost = -a->bit_cost_;
GetCombinedHistogramEntropy(a, b, cost_threshold, &cost);
@@ -473,24 +472,22 @@ static double HistogramAddThresh(const VP8LHistogram* const a,
// The structure to keep track of cost range for the three dominant entropy
// symbols.
-// TODO(skal): Evaluate if float can be used here instead of double for
-// representing the entropy costs.
typedef struct {
- double literal_max_;
- double literal_min_;
- double red_max_;
- double red_min_;
- double blue_max_;
- double blue_min_;
+ float literal_max_;
+ float literal_min_;
+ float red_max_;
+ float red_min_;
+ float blue_max_;
+ float blue_min_;
} DominantCostRange;
static void DominantCostRangeInit(DominantCostRange* const c) {
c->literal_max_ = 0.;
- c->literal_min_ = MAX_COST;
+ c->literal_min_ = MAX_BIT_COST;
c->red_max_ = 0.;
- c->red_min_ = MAX_COST;
+ c->red_min_ = MAX_BIT_COST;
c->blue_max_ = 0.;
- c->blue_min_ = MAX_COST;
+ c->blue_min_ = MAX_BIT_COST;
}
static void UpdateDominantCostRange(
@@ -505,10 +502,9 @@ static void UpdateDominantCostRange(
static void UpdateHistogramCost(VP8LHistogram* const h) {
uint32_t alpha_sym, red_sym, blue_sym;
- const double alpha_cost =
- PopulationCost(h->alpha_, NUM_LITERAL_CODES, &alpha_sym,
- &h->is_used_[3]);
- const double distance_cost =
+ const float alpha_cost =
+ PopulationCost(h->alpha_, NUM_LITERAL_CODES, &alpha_sym, &h->is_used_[3]);
+ const float distance_cost =
PopulationCost(h->distance_, NUM_DISTANCE_CODES, NULL, &h->is_used_[4]) +
VP8LExtraCost(h->distance_, NUM_DISTANCE_CODES);
const int num_codes = VP8LHistogramNumCodes(h->palette_code_bits_);
@@ -529,10 +525,10 @@ static void UpdateHistogramCost(VP8LHistogram* const h) {
}
}
-static int GetBinIdForEntropy(double min, double max, double val) {
- const double range = max - min;
+static int GetBinIdForEntropy(float min, float max, float val) {
+ const float range = max - min;
if (range > 0.) {
- const double delta = val - min;
+ const float delta = val - min;
return (int)((NUM_PARTITIONS - 1e-6) * delta / range);
} else {
return 0;
@@ -641,15 +637,11 @@ static void HistogramAnalyzeEntropyBin(VP8LHistogramSet* const image_histo,
// Merges some histograms with same bin_id together if it's advantageous.
// Sets the remaining histograms to NULL.
-static void HistogramCombineEntropyBin(VP8LHistogramSet* const image_histo,
- int* num_used,
- const uint16_t* const clusters,
- uint16_t* const cluster_mappings,
- VP8LHistogram* cur_combo,
- const uint16_t* const bin_map,
- int num_bins,
- double combine_cost_factor,
- int low_effort) {
+static void HistogramCombineEntropyBin(
+ VP8LHistogramSet* const image_histo, int* num_used,
+ const uint16_t* const clusters, uint16_t* const cluster_mappings,
+ VP8LHistogram* cur_combo, const uint16_t* const bin_map, int num_bins,
+ float combine_cost_factor, int low_effort) {
VP8LHistogram** const histograms = image_histo->histograms;
int idx;
struct {
@@ -679,11 +671,10 @@ static void HistogramCombineEntropyBin(VP8LHistogramSet* const image_histo,
cluster_mappings[clusters[idx]] = clusters[first];
} else {
// try to merge #idx into #first (both share the same bin_id)
- const double bit_cost = histograms[idx]->bit_cost_;
- const double bit_cost_thresh = -bit_cost * combine_cost_factor;
- const double curr_cost_diff =
- HistogramAddEval(histograms[first], histograms[idx],
- cur_combo, bit_cost_thresh);
+ const float bit_cost = histograms[idx]->bit_cost_;
+ const float bit_cost_thresh = -bit_cost * combine_cost_factor;
+ const float curr_cost_diff = HistogramAddEval(
+ histograms[first], histograms[idx], cur_combo, bit_cost_thresh);
if (curr_cost_diff < bit_cost_thresh) {
// Try to merge two histograms only if the combo is a trivial one or
// the two candidate histograms are already non-trivial.
@@ -731,8 +722,8 @@ static uint32_t MyRand(uint32_t* const seed) {
typedef struct {
int idx1;
int idx2;
- double cost_diff;
- double cost_combo;
+ float cost_diff;
+ float cost_combo;
} HistogramPair;
typedef struct {
@@ -787,10 +778,9 @@ static void HistoQueueUpdateHead(HistoQueue* const histo_queue,
// Update the cost diff and combo of a pair of histograms. This needs to be
// called when the histograms have been merged with a third one.
static void HistoQueueUpdatePair(const VP8LHistogram* const h1,
- const VP8LHistogram* const h2,
- double threshold,
+ const VP8LHistogram* const h2, float threshold,
HistogramPair* const pair) {
- const double sum_cost = h1->bit_cost_ + h2->bit_cost_;
+ const float sum_cost = h1->bit_cost_ + h2->bit_cost_;
pair->cost_combo = 0.;
GetCombinedHistogramEntropy(h1, h2, sum_cost + threshold, &pair->cost_combo);
pair->cost_diff = pair->cost_combo - sum_cost;
@@ -799,9 +789,9 @@ static void HistoQueueUpdatePair(const VP8LHistogram* const h1,
// Create a pair from indices "idx1" and "idx2" provided its cost
// is inferior to "threshold", a negative entropy.
// It returns the cost of the pair, or 0. if it is superior to the threshold.
-static double HistoQueuePush(HistoQueue* const histo_queue,
- VP8LHistogram** const histograms, int idx1,
- int idx2, double threshold) {
+static float HistoQueuePush(HistoQueue* const histo_queue,
+ VP8LHistogram** const histograms, int idx1,
+ int idx2, float threshold) {
const VP8LHistogram* h1;
const VP8LHistogram* h2;
HistogramPair pair;
@@ -945,8 +935,8 @@ static int HistogramCombineStochastic(VP8LHistogramSet* const image_histo,
++tries_with_no_success < num_tries_no_success;
++iter) {
int* mapping_index;
- double best_cost =
- (histo_queue.size == 0) ? 0. : histo_queue.queue[0].cost_diff;
+ float best_cost =
+ (histo_queue.size == 0) ? 0.f : histo_queue.queue[0].cost_diff;
int best_idx1 = -1, best_idx2 = 1;
const uint32_t rand_range = (*num_used - 1) * (*num_used);
// (*num_used) / 2 was chosen empirically. Less means faster but worse
@@ -955,7 +945,7 @@ static int HistogramCombineStochastic(VP8LHistogramSet* const image_histo,
// Pick random samples.
for (j = 0; *num_used >= 2 && j < num_tries; ++j) {
- double curr_cost;
+ float curr_cost;
// Choose two different histograms at random and try to combine them.
const uint32_t tmp = MyRand(&seed) % rand_range;
uint32_t idx1 = tmp / (*num_used - 1);
@@ -1034,7 +1024,7 @@ static int HistogramCombineStochastic(VP8LHistogramSet* const image_histo,
*do_greedy = (*num_used <= min_cluster_size);
ok = 1;
-End:
+ End:
HistoQueueClear(&histo_queue);
WebPSafeFree(mappings);
return ok;
@@ -1057,7 +1047,7 @@ static void HistogramRemap(const VP8LHistogramSet* const in,
if (out_size > 1) {
for (i = 0; i < in_size; ++i) {
int best_out = 0;
- double best_bits = MAX_COST;
+ float best_bits = MAX_BIT_COST;
int k;
if (in_histo[i] == NULL) {
// Arbitrarily set to the previous value if unused to help future LZ77.
@@ -1065,7 +1055,7 @@ static void HistogramRemap(const VP8LHistogramSet* const in,
continue;
}
for (k = 0; k < out_size; ++k) {
- double cur_bits;
+ float cur_bits;
cur_bits = HistogramAddThresh(out_histo[k], in_histo[i], best_bits);
if (k == 0 || cur_bits < best_bits) {
best_bits = cur_bits;
@@ -1093,13 +1083,13 @@ static void HistogramRemap(const VP8LHistogramSet* const in,
}
}
-static double GetCombineCostFactor(int histo_size, int quality) {
- double combine_cost_factor = 0.16;
+static float GetCombineCostFactor(int histo_size, int quality) {
+ float combine_cost_factor = 0.16f;
if (quality < 90) {
- if (histo_size > 256) combine_cost_factor /= 2.;
- if (histo_size > 512) combine_cost_factor /= 2.;
- if (histo_size > 1024) combine_cost_factor /= 2.;
- if (quality <= 50) combine_cost_factor /= 2.;
+ if (histo_size > 256) combine_cost_factor /= 2.f;
+ if (histo_size > 512) combine_cost_factor /= 2.f;
+ if (histo_size > 1024) combine_cost_factor /= 2.f;
+ if (quality <= 50) combine_cost_factor /= 2.f;
}
return combine_cost_factor;
}
@@ -1169,13 +1159,13 @@ static void RemoveEmptyHistograms(VP8LHistogramSet* const image_histo) {
}
int VP8LGetHistoImageSymbols(int xsize, int ysize,
- const VP8LBackwardRefs* const refs,
- int quality, int low_effort,
- int histogram_bits, int cache_bits,
+ const VP8LBackwardRefs* const refs, int quality,
+ int low_effort, int histogram_bits, int cache_bits,
VP8LHistogramSet* const image_histo,
VP8LHistogram* const tmp_histo,
- uint16_t* const histogram_symbols) {
- int ok = 0;
+ uint16_t* const histogram_symbols,
+ const WebPPicture* const pic, int percent_range,
+ int* const percent) {
const int histo_xsize =
histogram_bits ? VP8LSubSampleSize(xsize, histogram_bits) : 1;
const int histo_ysize =
@@ -1192,7 +1182,10 @@ int VP8LGetHistoImageSymbols(int xsize, int ysize,
WebPSafeMalloc(2 * image_histo_raw_size, sizeof(map_tmp));
uint16_t* const cluster_mappings = map_tmp + image_histo_raw_size;
int num_used = image_histo_raw_size;
- if (orig_histo == NULL || map_tmp == NULL) goto Error;
+ if (orig_histo == NULL || map_tmp == NULL) {
+ WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
+ goto Error;
+ }
// Construct the histograms from backward references.
HistogramBuild(xsize, histogram_bits, refs, orig_histo);
@@ -1206,16 +1199,15 @@ int VP8LGetHistoImageSymbols(int xsize, int ysize,
if (entropy_combine) {
uint16_t* const bin_map = map_tmp;
- const double combine_cost_factor =
+ const float combine_cost_factor =
GetCombineCostFactor(image_histo_raw_size, quality);
const uint32_t num_clusters = num_used;
HistogramAnalyzeEntropyBin(image_histo, bin_map, low_effort);
// Collapse histograms with similar entropy.
- HistogramCombineEntropyBin(image_histo, &num_used, histogram_symbols,
- cluster_mappings, tmp_histo, bin_map,
- entropy_combine_num_bins, combine_cost_factor,
- low_effort);
+ HistogramCombineEntropyBin(
+ image_histo, &num_used, histogram_symbols, cluster_mappings, tmp_histo,
+ bin_map, entropy_combine_num_bins, combine_cost_factor, low_effort);
OptimizeHistogramSymbols(image_histo, cluster_mappings, num_clusters,
map_tmp, histogram_symbols);
}
@@ -1229,11 +1221,13 @@ int VP8LGetHistoImageSymbols(int xsize, int ysize,
int do_greedy;
if (!HistogramCombineStochastic(image_histo, &num_used, threshold_size,
&do_greedy)) {
+ WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
goto Error;
}
if (do_greedy) {
RemoveEmptyHistograms(image_histo);
if (!HistogramCombineGreedy(image_histo, &num_used)) {
+ WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
goto Error;
}
}
@@ -1243,10 +1237,12 @@ int VP8LGetHistoImageSymbols(int xsize, int ysize,
RemoveEmptyHistograms(image_histo);
HistogramRemap(orig_histo, image_histo, histogram_symbols);
- ok = 1;
+ if (!WebPReportProgress(pic, *percent + percent_range, percent)) {
+ goto Error;
+ }
Error:
VP8LFreeHistogramSet(orig_histo);
WebPSafeFree(map_tmp);
- return ok;
+ return (pic->error_code == VP8_ENC_OK);
}
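
Most of the merging code above relies on the early-exit arithmetic described before HistogramAddEval(): a merge of histograms a and b is only worth keeping if C(a+b) - C(a) - C(b) stays below the caller's cost_threshold, so the combined entropy is evaluated against the absolute bound cost_threshold + C(a) + C(b) and abandoned as soon as the partial sum exceeds it. A minimal sketch of that caller-side arithmetic, with the per-symbol-group costs passed in as a plain array:

/* Sketch of the bail-out used by HistogramAddEval()/HistogramAddThresh(). */
static int CombinedCostIsBelow(const float group_costs[], int num_groups,
                               float cost_a, float cost_b,
                               float cost_threshold, float* const cost_out) {
  const float bound = cost_threshold + cost_a + cost_b;   /* absolute bound */
  float cost = 0.f;
  int i;
  for (i = 0; i < num_groups; ++i) {
    cost += group_costs[i];          /* partial cost of one symbol group   */
    if (cost > bound) return 0;      /* cannot beat the threshold anymore  */
  }
  *cost_out = cost;                  /* full C(a+b), only when it pays off */
  return 1;
}
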
diff --git a/thirdparty/libwebp/src/enc/histogram_enc.h b/thirdparty/libwebp/src/enc/histogram_enc.h
index c3428b5d55..4c0bb97464 100644
--- a/thirdparty/libwebp/src/enc/histogram_enc.h
+++ b/thirdparty/libwebp/src/enc/histogram_enc.h
@@ -40,10 +40,10 @@ typedef struct {
int palette_code_bits_;
uint32_t trivial_symbol_; // True, if histograms for Red, Blue & Alpha
// literal symbols are single valued.
- double bit_cost_; // cached value of bit cost.
- double literal_cost_; // Cached values of dominant entropy costs:
- double red_cost_; // literal, red & blue.
- double blue_cost_;
+ float bit_cost_; // cached value of bit cost.
+ float literal_cost_; // Cached values of dominant entropy costs:
+ float red_cost_; // literal, red & blue.
+ float blue_cost_;
uint8_t is_used_[5]; // 5 for literal, red, blue, alpha, distance
} VP8LHistogram;
@@ -105,21 +105,23 @@ static WEBP_INLINE int VP8LHistogramNumCodes(int palette_code_bits) {
((palette_code_bits > 0) ? (1 << palette_code_bits) : 0);
}
-// Builds the histogram image.
+// Builds the histogram image. pic and percent are for progress.
+// Returns false in case of error (stored in pic->error_code).
int VP8LGetHistoImageSymbols(int xsize, int ysize,
- const VP8LBackwardRefs* const refs,
- int quality, int low_effort,
- int histogram_bits, int cache_bits,
+ const VP8LBackwardRefs* const refs, int quality,
+ int low_effort, int histogram_bits, int cache_bits,
VP8LHistogramSet* const image_histo,
VP8LHistogram* const tmp_histo,
- uint16_t* const histogram_symbols);
+ uint16_t* const histogram_symbols,
+ const WebPPicture* const pic, int percent_range,
+ int* const percent);
// Returns the entropy for the symbols in the input array.
-double VP8LBitsEntropy(const uint32_t* const array, int n);
+float VP8LBitsEntropy(const uint32_t* const array, int n);
// Estimate how many bits the combined entropy of literals and distance
// approximately maps to.
-double VP8LHistogramEstimateBits(VP8LHistogram* const p);
+float VP8LHistogramEstimateBits(VP8LHistogram* const p);
#ifdef __cplusplus
}
diff --git a/thirdparty/libwebp/src/enc/picture_csp_enc.c b/thirdparty/libwebp/src/enc/picture_csp_enc.c
index 35eede9635..fabebcf202 100644
--- a/thirdparty/libwebp/src/enc/picture_csp_enc.c
+++ b/thirdparty/libwebp/src/enc/picture_csp_enc.c
@@ -15,12 +15,19 @@
#include <stdlib.h>
#include <math.h>
+#include "sharpyuv/sharpyuv.h"
+#include "sharpyuv/sharpyuv_csp.h"
#include "src/enc/vp8i_enc.h"
#include "src/utils/random_utils.h"
#include "src/utils/utils.h"
#include "src/dsp/dsp.h"
#include "src/dsp/lossless.h"
#include "src/dsp/yuv.h"
+#include "src/dsp/cpu.h"
+
+#if defined(WEBP_USE_THREAD) && !defined(_WIN32)
+#include <pthread.h>
+#endif
// Uncomment to disable gamma-compression during RGB->U/V averaging
#define USE_GAMMA_COMPRESSION
@@ -76,16 +83,16 @@ int WebPPictureHasTransparency(const WebPPicture* picture) {
#if defined(USE_GAMMA_COMPRESSION)
-// gamma-compensates loss of resolution during chroma subsampling
-#define kGamma 0.80 // for now we use a different gamma value than kGammaF
-#define kGammaFix 12 // fixed-point precision for linear values
-#define kGammaScale ((1 << kGammaFix) - 1)
-#define kGammaTabFix 7 // fixed-point fractional bits precision
-#define kGammaTabScale (1 << kGammaTabFix)
-#define kGammaTabRounder (kGammaTabScale >> 1)
-#define kGammaTabSize (1 << (kGammaFix - kGammaTabFix))
+// Gamma correction compensates loss of resolution during chroma subsampling.
+#define GAMMA_FIX 12 // fixed-point precision for linear values
+#define GAMMA_TAB_FIX 7 // fixed-point fractional bits precision
+#define GAMMA_TAB_SIZE (1 << (GAMMA_FIX - GAMMA_TAB_FIX))
+static const double kGamma = 0.80;
+static const int kGammaScale = ((1 << GAMMA_FIX) - 1);
+static const int kGammaTabScale = (1 << GAMMA_TAB_FIX);
+static const int kGammaTabRounder = (1 << GAMMA_TAB_FIX >> 1);
-static int kLinearToGammaTab[kGammaTabSize + 1];
+static int kLinearToGammaTab[GAMMA_TAB_SIZE + 1];
static uint16_t kGammaToLinearTab[256];
static volatile int kGammaTablesOk = 0;
static void InitGammaTables(void);
@@ -93,13 +100,13 @@ static void InitGammaTables(void);
WEBP_DSP_INIT_FUNC(InitGammaTables) {
if (!kGammaTablesOk) {
int v;
- const double scale = (double)(1 << kGammaTabFix) / kGammaScale;
+ const double scale = (double)(1 << GAMMA_TAB_FIX) / kGammaScale;
const double norm = 1. / 255.;
for (v = 0; v <= 255; ++v) {
kGammaToLinearTab[v] =
(uint16_t)(pow(norm * v, kGamma) * kGammaScale + .5);
}
- for (v = 0; v <= kGammaTabSize; ++v) {
+ for (v = 0; v <= GAMMA_TAB_SIZE; ++v) {
kLinearToGammaTab[v] = (int)(255. * pow(scale * v, 1. / kGamma) + .5);
}
kGammaTablesOk = 1;
@@ -111,12 +118,12 @@ static WEBP_INLINE uint32_t GammaToLinear(uint8_t v) {
}
static WEBP_INLINE int Interpolate(int v) {
- const int tab_pos = v >> (kGammaTabFix + 2); // integer part
+ const int tab_pos = v >> (GAMMA_TAB_FIX + 2); // integer part
const int x = v & ((kGammaTabScale << 2) - 1); // fractional part
const int v0 = kLinearToGammaTab[tab_pos];
const int v1 = kLinearToGammaTab[tab_pos + 1];
const int y = v1 * x + v0 * ((kGammaTabScale << 2) - x); // interpolate
- assert(tab_pos + 1 < kGammaTabSize + 1);
+ assert(tab_pos + 1 < GAMMA_TAB_SIZE + 1);
return y;
}
@@ -124,7 +131,7 @@ static WEBP_INLINE int Interpolate(int v) {
// U/V value, suitable for RGBToU/V calls.
static WEBP_INLINE int LinearToGamma(uint32_t base_value, int shift) {
const int y = Interpolate(base_value << shift); // final uplifted value
- return (y + kGammaTabRounder) >> kGammaTabFix; // descale
+ return (y + kGammaTabRounder) >> GAMMA_TAB_FIX; // descale
}
#else
@@ -158,415 +165,41 @@ static int RGBToV(int r, int g, int b, VP8Random* const rg) {
//------------------------------------------------------------------------------
// Sharp RGB->YUV conversion
-static const int kNumIterations = 4;
static const int kMinDimensionIterativeConversion = 4;
-// We could use SFIX=0 and only uint8_t for fixed_y_t, but it produces some
-// banding sometimes. Better use extra precision.
-#define SFIX 2 // fixed-point precision of RGB and Y/W
-typedef int16_t fixed_t; // signed type with extra SFIX precision for UV
-typedef uint16_t fixed_y_t; // unsigned type with extra SFIX precision for W
-
-#define SHALF (1 << SFIX >> 1)
-#define MAX_Y_T ((256 << SFIX) - 1)
-#define SROUNDER (1 << (YUV_FIX + SFIX - 1))
-
-#if defined(USE_GAMMA_COMPRESSION)
-
-// We use tables of different size and precision for the Rec709 / BT2020
-// transfer function.
-#define kGammaF (1./0.45)
-static uint32_t kLinearToGammaTabS[kGammaTabSize + 2];
-#define GAMMA_TO_LINEAR_BITS 14
-static uint32_t kGammaToLinearTabS[MAX_Y_T + 1]; // size scales with Y_FIX
-static volatile int kGammaTablesSOk = 0;
-static void InitGammaTablesS(void);
-
-WEBP_DSP_INIT_FUNC(InitGammaTablesS) {
- assert(2 * GAMMA_TO_LINEAR_BITS < 32); // we use uint32_t intermediate values
- if (!kGammaTablesSOk) {
- int v;
- const double norm = 1. / MAX_Y_T;
- const double scale = 1. / kGammaTabSize;
- const double a = 0.09929682680944;
- const double thresh = 0.018053968510807;
- const double final_scale = 1 << GAMMA_TO_LINEAR_BITS;
- for (v = 0; v <= MAX_Y_T; ++v) {
- const double g = norm * v;
- double value;
- if (g <= thresh * 4.5) {
- value = g / 4.5;
- } else {
- const double a_rec = 1. / (1. + a);
- value = pow(a_rec * (g + a), kGammaF);
- }
- kGammaToLinearTabS[v] = (uint32_t)(value * final_scale + .5);
- }
- for (v = 0; v <= kGammaTabSize; ++v) {
- const double g = scale * v;
- double value;
- if (g <= thresh) {
- value = 4.5 * g;
- } else {
- value = (1. + a) * pow(g, 1. / kGammaF) - a;
- }
- // we already incorporate the 1/2 rounding constant here
- kLinearToGammaTabS[v] =
- (uint32_t)(MAX_Y_T * value) + (1 << GAMMA_TO_LINEAR_BITS >> 1);
- }
- // to prevent small rounding errors to cause read-overflow:
- kLinearToGammaTabS[kGammaTabSize + 1] = kLinearToGammaTabS[kGammaTabSize];
- kGammaTablesSOk = 1;
- }
-}
-
-// return value has a fixed-point precision of GAMMA_TO_LINEAR_BITS
-static WEBP_INLINE uint32_t GammaToLinearS(int v) {
- return kGammaToLinearTabS[v];
-}
-
-static WEBP_INLINE uint32_t LinearToGammaS(uint32_t value) {
- // 'value' is in GAMMA_TO_LINEAR_BITS fractional precision
- const uint32_t v = value * kGammaTabSize;
- const uint32_t tab_pos = v >> GAMMA_TO_LINEAR_BITS;
- // fractional part, in GAMMA_TO_LINEAR_BITS fixed-point precision
- const uint32_t x = v - (tab_pos << GAMMA_TO_LINEAR_BITS); // fractional part
- // v0 / v1 are in GAMMA_TO_LINEAR_BITS fixed-point precision (range [0..1])
- const uint32_t v0 = kLinearToGammaTabS[tab_pos + 0];
- const uint32_t v1 = kLinearToGammaTabS[tab_pos + 1];
- // Final interpolation. Note that rounding is already included.
- const uint32_t v2 = (v1 - v0) * x; // note: v1 >= v0.
- const uint32_t result = v0 + (v2 >> GAMMA_TO_LINEAR_BITS);
- return result;
-}
-
-#else
-
-static void InitGammaTablesS(void) {}
-static WEBP_INLINE uint32_t GammaToLinearS(int v) {
- return (v << GAMMA_TO_LINEAR_BITS) / MAX_Y_T;
-}
-static WEBP_INLINE uint32_t LinearToGammaS(uint32_t value) {
- return (MAX_Y_T * value) >> GAMMA_TO_LINEAR_BITS;
-}
-
-#endif // USE_GAMMA_COMPRESSION
-
-//------------------------------------------------------------------------------
-
-static uint8_t clip_8b(fixed_t v) {
- return (!(v & ~0xff)) ? (uint8_t)v : (v < 0) ? 0u : 255u;
-}
-
-static fixed_y_t clip_y(int y) {
- return (!(y & ~MAX_Y_T)) ? (fixed_y_t)y : (y < 0) ? 0 : MAX_Y_T;
-}
-
-//------------------------------------------------------------------------------
-
-static int RGBToGray(int r, int g, int b) {
- const int luma = 13933 * r + 46871 * g + 4732 * b + YUV_HALF;
- return (luma >> YUV_FIX);
-}
-
-static uint32_t ScaleDown(int a, int b, int c, int d) {
- const uint32_t A = GammaToLinearS(a);
- const uint32_t B = GammaToLinearS(b);
- const uint32_t C = GammaToLinearS(c);
- const uint32_t D = GammaToLinearS(d);
- return LinearToGammaS((A + B + C + D + 2) >> 2);
-}
-
-static WEBP_INLINE void UpdateW(const fixed_y_t* src, fixed_y_t* dst, int w) {
- int i;
- for (i = 0; i < w; ++i) {
- const uint32_t R = GammaToLinearS(src[0 * w + i]);
- const uint32_t G = GammaToLinearS(src[1 * w + i]);
- const uint32_t B = GammaToLinearS(src[2 * w + i]);
- const uint32_t Y = RGBToGray(R, G, B);
- dst[i] = (fixed_y_t)LinearToGammaS(Y);
- }
-}
-
-static void UpdateChroma(const fixed_y_t* src1, const fixed_y_t* src2,
- fixed_t* dst, int uv_w) {
- int i;
- for (i = 0; i < uv_w; ++i) {
- const int r = ScaleDown(src1[0 * uv_w + 0], src1[0 * uv_w + 1],
- src2[0 * uv_w + 0], src2[0 * uv_w + 1]);
- const int g = ScaleDown(src1[2 * uv_w + 0], src1[2 * uv_w + 1],
- src2[2 * uv_w + 0], src2[2 * uv_w + 1]);
- const int b = ScaleDown(src1[4 * uv_w + 0], src1[4 * uv_w + 1],
- src2[4 * uv_w + 0], src2[4 * uv_w + 1]);
- const int W = RGBToGray(r, g, b);
- dst[0 * uv_w] = (fixed_t)(r - W);
- dst[1 * uv_w] = (fixed_t)(g - W);
- dst[2 * uv_w] = (fixed_t)(b - W);
- dst += 1;
- src1 += 2;
- src2 += 2;
- }
-}
-
-static void StoreGray(const fixed_y_t* rgb, fixed_y_t* y, int w) {
- int i;
- for (i = 0; i < w; ++i) {
- y[i] = RGBToGray(rgb[0 * w + i], rgb[1 * w + i], rgb[2 * w + i]);
- }
-}
-
-//------------------------------------------------------------------------------
-
-static WEBP_INLINE fixed_y_t Filter2(int A, int B, int W0) {
- const int v0 = (A * 3 + B + 2) >> 2;
- return clip_y(v0 + W0);
-}
-
//------------------------------------------------------------------------------
+// Main function
-static WEBP_INLINE fixed_y_t UpLift(uint8_t a) { // 8bit -> SFIX
- return ((fixed_y_t)a << SFIX) | SHALF;
-}
-
-static void ImportOneRow(const uint8_t* const r_ptr,
- const uint8_t* const g_ptr,
- const uint8_t* const b_ptr,
- int step,
- int pic_width,
- fixed_y_t* const dst) {
- int i;
- const int w = (pic_width + 1) & ~1;
- for (i = 0; i < pic_width; ++i) {
- const int off = i * step;
- dst[i + 0 * w] = UpLift(r_ptr[off]);
- dst[i + 1 * w] = UpLift(g_ptr[off]);
- dst[i + 2 * w] = UpLift(b_ptr[off]);
- }
- if (pic_width & 1) { // replicate rightmost pixel
- dst[pic_width + 0 * w] = dst[pic_width + 0 * w - 1];
- dst[pic_width + 1 * w] = dst[pic_width + 1 * w - 1];
- dst[pic_width + 2 * w] = dst[pic_width + 2 * w - 1];
- }
-}
-
-static void InterpolateTwoRows(const fixed_y_t* const best_y,
- const fixed_t* prev_uv,
- const fixed_t* cur_uv,
- const fixed_t* next_uv,
- int w,
- fixed_y_t* out1,
- fixed_y_t* out2) {
- const int uv_w = w >> 1;
- const int len = (w - 1) >> 1; // length to filter
- int k = 3;
- while (k-- > 0) { // process each R/G/B segments in turn
- // special boundary case for i==0
- out1[0] = Filter2(cur_uv[0], prev_uv[0], best_y[0]);
- out2[0] = Filter2(cur_uv[0], next_uv[0], best_y[w]);
-
- WebPSharpYUVFilterRow(cur_uv, prev_uv, len, best_y + 0 + 1, out1 + 1);
- WebPSharpYUVFilterRow(cur_uv, next_uv, len, best_y + w + 1, out2 + 1);
-
- // special boundary case for i == w - 1 when w is even
- if (!(w & 1)) {
- out1[w - 1] = Filter2(cur_uv[uv_w - 1], prev_uv[uv_w - 1],
- best_y[w - 1 + 0]);
- out2[w - 1] = Filter2(cur_uv[uv_w - 1], next_uv[uv_w - 1],
- best_y[w - 1 + w]);
- }
- out1 += w;
- out2 += w;
- prev_uv += uv_w;
- cur_uv += uv_w;
- next_uv += uv_w;
- }
-}
-
-static WEBP_INLINE uint8_t ConvertRGBToY(int r, int g, int b) {
- const int luma = 16839 * r + 33059 * g + 6420 * b + SROUNDER;
- return clip_8b(16 + (luma >> (YUV_FIX + SFIX)));
-}
+extern void SharpYuvInit(VP8CPUInfo cpu_info_func);
-static WEBP_INLINE uint8_t ConvertRGBToU(int r, int g, int b) {
- const int u = -9719 * r - 19081 * g + 28800 * b + SROUNDER;
- return clip_8b(128 + (u >> (YUV_FIX + SFIX)));
-}
+static void SafeInitSharpYuv(void) {
+#if defined(WEBP_USE_THREAD) && !defined(_WIN32)
+ static pthread_mutex_t initsharpyuv_lock = PTHREAD_MUTEX_INITIALIZER;
+ if (pthread_mutex_lock(&initsharpyuv_lock)) return;
+#endif
-static WEBP_INLINE uint8_t ConvertRGBToV(int r, int g, int b) {
- const int v = +28800 * r - 24116 * g - 4684 * b + SROUNDER;
- return clip_8b(128 + (v >> (YUV_FIX + SFIX)));
-}
+ SharpYuvInit(VP8GetCPUInfo);
-static int ConvertWRGBToYUV(const fixed_y_t* best_y, const fixed_t* best_uv,
- WebPPicture* const picture) {
- int i, j;
- uint8_t* dst_y = picture->y;
- uint8_t* dst_u = picture->u;
- uint8_t* dst_v = picture->v;
- const fixed_t* const best_uv_base = best_uv;
- const int w = (picture->width + 1) & ~1;
- const int h = (picture->height + 1) & ~1;
- const int uv_w = w >> 1;
- const int uv_h = h >> 1;
- for (best_uv = best_uv_base, j = 0; j < picture->height; ++j) {
- for (i = 0; i < picture->width; ++i) {
- const int off = (i >> 1);
- const int W = best_y[i];
- const int r = best_uv[off + 0 * uv_w] + W;
- const int g = best_uv[off + 1 * uv_w] + W;
- const int b = best_uv[off + 2 * uv_w] + W;
- dst_y[i] = ConvertRGBToY(r, g, b);
- }
- best_y += w;
- best_uv += (j & 1) * 3 * uv_w;
- dst_y += picture->y_stride;
- }
- for (best_uv = best_uv_base, j = 0; j < uv_h; ++j) {
- for (i = 0; i < uv_w; ++i) {
- const int off = i;
- const int r = best_uv[off + 0 * uv_w];
- const int g = best_uv[off + 1 * uv_w];
- const int b = best_uv[off + 2 * uv_w];
- dst_u[i] = ConvertRGBToU(r, g, b);
- dst_v[i] = ConvertRGBToV(r, g, b);
- }
- best_uv += 3 * uv_w;
- dst_u += picture->uv_stride;
- dst_v += picture->uv_stride;
- }
- return 1;
+#if defined(WEBP_USE_THREAD) && !defined(_WIN32)
+ (void)pthread_mutex_unlock(&initsharpyuv_lock);
+#endif
}
-//------------------------------------------------------------------------------
-// Main function
-
-#define SAFE_ALLOC(W, H, T) ((T*)WebPSafeMalloc((W) * (H), sizeof(T)))
-
static int PreprocessARGB(const uint8_t* r_ptr,
const uint8_t* g_ptr,
const uint8_t* b_ptr,
int step, int rgb_stride,
WebPPicture* const picture) {
- // we expand the right/bottom border if needed
- const int w = (picture->width + 1) & ~1;
- const int h = (picture->height + 1) & ~1;
- const int uv_w = w >> 1;
- const int uv_h = h >> 1;
- uint64_t prev_diff_y_sum = ~0;
- int j, iter;
-
- // TODO(skal): allocate one big memory chunk. But for now, it's easier
- // for valgrind debugging to have several chunks.
- fixed_y_t* const tmp_buffer = SAFE_ALLOC(w * 3, 2, fixed_y_t); // scratch
- fixed_y_t* const best_y_base = SAFE_ALLOC(w, h, fixed_y_t);
- fixed_y_t* const target_y_base = SAFE_ALLOC(w, h, fixed_y_t);
- fixed_y_t* const best_rgb_y = SAFE_ALLOC(w, 2, fixed_y_t);
- fixed_t* const best_uv_base = SAFE_ALLOC(uv_w * 3, uv_h, fixed_t);
- fixed_t* const target_uv_base = SAFE_ALLOC(uv_w * 3, uv_h, fixed_t);
- fixed_t* const best_rgb_uv = SAFE_ALLOC(uv_w * 3, 1, fixed_t);
- fixed_y_t* best_y = best_y_base;
- fixed_y_t* target_y = target_y_base;
- fixed_t* best_uv = best_uv_base;
- fixed_t* target_uv = target_uv_base;
- const uint64_t diff_y_threshold = (uint64_t)(3.0 * w * h);
- int ok;
-
- if (best_y_base == NULL || best_uv_base == NULL ||
- target_y_base == NULL || target_uv_base == NULL ||
- best_rgb_y == NULL || best_rgb_uv == NULL ||
- tmp_buffer == NULL) {
- ok = WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
- goto End;
- }
- assert(picture->width >= kMinDimensionIterativeConversion);
- assert(picture->height >= kMinDimensionIterativeConversion);
-
- WebPInitConvertARGBToYUV();
-
- // Import RGB samples to W/RGB representation.
- for (j = 0; j < picture->height; j += 2) {
- const int is_last_row = (j == picture->height - 1);
- fixed_y_t* const src1 = tmp_buffer + 0 * w;
- fixed_y_t* const src2 = tmp_buffer + 3 * w;
-
- // prepare two rows of input
- ImportOneRow(r_ptr, g_ptr, b_ptr, step, picture->width, src1);
- if (!is_last_row) {
- ImportOneRow(r_ptr + rgb_stride, g_ptr + rgb_stride, b_ptr + rgb_stride,
- step, picture->width, src2);
- } else {
- memcpy(src2, src1, 3 * w * sizeof(*src2));
- }
- StoreGray(src1, best_y + 0, w);
- StoreGray(src2, best_y + w, w);
-
- UpdateW(src1, target_y, w);
- UpdateW(src2, target_y + w, w);
- UpdateChroma(src1, src2, target_uv, uv_w);
- memcpy(best_uv, target_uv, 3 * uv_w * sizeof(*best_uv));
- best_y += 2 * w;
- best_uv += 3 * uv_w;
- target_y += 2 * w;
- target_uv += 3 * uv_w;
- r_ptr += 2 * rgb_stride;
- g_ptr += 2 * rgb_stride;
- b_ptr += 2 * rgb_stride;
- }
-
- // Iterate and resolve clipping conflicts.
- for (iter = 0; iter < kNumIterations; ++iter) {
- const fixed_t* cur_uv = best_uv_base;
- const fixed_t* prev_uv = best_uv_base;
- uint64_t diff_y_sum = 0;
-
- best_y = best_y_base;
- best_uv = best_uv_base;
- target_y = target_y_base;
- target_uv = target_uv_base;
- for (j = 0; j < h; j += 2) {
- fixed_y_t* const src1 = tmp_buffer + 0 * w;
- fixed_y_t* const src2 = tmp_buffer + 3 * w;
- {
- const fixed_t* const next_uv = cur_uv + ((j < h - 2) ? 3 * uv_w : 0);
- InterpolateTwoRows(best_y, prev_uv, cur_uv, next_uv, w, src1, src2);
- prev_uv = cur_uv;
- cur_uv = next_uv;
- }
-
- UpdateW(src1, best_rgb_y + 0 * w, w);
- UpdateW(src2, best_rgb_y + 1 * w, w);
- UpdateChroma(src1, src2, best_rgb_uv, uv_w);
-
- // update two rows of Y and one row of RGB
- diff_y_sum += WebPSharpYUVUpdateY(target_y, best_rgb_y, best_y, 2 * w);
- WebPSharpYUVUpdateRGB(target_uv, best_rgb_uv, best_uv, 3 * uv_w);
-
- best_y += 2 * w;
- best_uv += 3 * uv_w;
- target_y += 2 * w;
- target_uv += 3 * uv_w;
- }
- // test exit condition
- if (iter > 0) {
- if (diff_y_sum < diff_y_threshold) break;
- if (diff_y_sum > prev_diff_y_sum) break;
- }
- prev_diff_y_sum = diff_y_sum;
+ const int ok = SharpYuvConvert(
+ r_ptr, g_ptr, b_ptr, step, rgb_stride, /*rgb_bit_depth=*/8,
+ picture->y, picture->y_stride, picture->u, picture->uv_stride, picture->v,
+ picture->uv_stride, /*yuv_bit_depth=*/8, picture->width,
+ picture->height, SharpYuvGetConversionMatrix(kSharpYuvMatrixWebp));
+ if (!ok) {
+ return WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
}
- // final reconstruction
- ok = ConvertWRGBToYUV(best_y_base, best_uv_base, picture);
-
- End:
- WebPSafeFree(best_y_base);
- WebPSafeFree(best_uv_base);
- WebPSafeFree(target_y_base);
- WebPSafeFree(target_uv_base);
- WebPSafeFree(best_rgb_y);
- WebPSafeFree(best_rgb_uv);
- WebPSafeFree(tmp_buffer);
return ok;
}
-#undef SAFE_ALLOC
//------------------------------------------------------------------------------
// "Fast" regular RGB->YUV
@@ -591,8 +224,8 @@ static const int kAlphaFix = 19;
// and constant are adjusted very tightly to fit 32b arithmetic.
// In particular, they use the fact that the operands for 'v / a' are actually
// derived as v = (a0.p0 + a1.p1 + a2.p2 + a3.p3) and a = a0 + a1 + a2 + a3
-// with ai in [0..255] and pi in [0..1<<kGammaFix). The constraint to avoid
-// overflow is: kGammaFix + kAlphaFix <= 31.
+// with ai in [0..255] and pi in [0..1<<GAMMA_FIX). The constraint to avoid
+// overflow is: GAMMA_FIX + kAlphaFix <= 31.
static const uint32_t kInvAlpha[4 * 0xff + 1] = {
0, /* alpha = 0 */
524288, 262144, 174762, 131072, 104857, 87381, 74898, 65536,
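Worked bound for that constraint: the fixed-point division v / a is evaluated as (v * kInvAlpha[a]) >> kAlphaFix, where kInvAlpha[a] is roughly (1 << kAlphaFix) / a. Since v = a0.p0 + a1.p1 + a2.p2 + a3.p3 is at most (a0 + a1 + a2 + a3) * max(pi) < a * (1 << GAMMA_FIX), the intermediate product is bounded by about a * (1 << GAMMA_FIX) * (1 << kAlphaFix) / a = 1 << (GAMMA_FIX + kAlphaFix). Keeping GAMMA_FIX + kAlphaFix <= 31 therefore keeps that product inside 32-bit arithmetic, which is exactly what the assert(kAlphaFix + GAMMA_FIX <= 31) further down enforces.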
@@ -818,11 +451,20 @@ static WEBP_INLINE void AccumulateRGB(const uint8_t* const r_ptr,
dst[0] = SUM4(r_ptr + j, step);
dst[1] = SUM4(g_ptr + j, step);
dst[2] = SUM4(b_ptr + j, step);
+ // MemorySanitizer may raise false positives with data that passes through
+ // RGBA32PackedToPlanar_16b_SSE41() due to incorrect modeling of shuffles.
+ // See https://crbug.com/webp/573.
+#ifdef WEBP_MSAN
+ dst[3] = 0;
+#endif
}
if (width & 1) {
dst[0] = SUM2(r_ptr + j);
dst[1] = SUM2(g_ptr + j);
dst[2] = SUM2(b_ptr + j);
+#ifdef WEBP_MSAN
+ dst[3] = 0;
+#endif
}
}
@@ -863,18 +505,18 @@ static int ImportYUVAFromRGBA(const uint8_t* r_ptr,
use_iterative_conversion = 0;
}
- if (!WebPPictureAllocYUVA(picture, width, height)) {
+ if (!WebPPictureAllocYUVA(picture)) {
return 0;
}
if (has_alpha) {
assert(step == 4);
#if defined(USE_GAMMA_COMPRESSION) && defined(USE_INVERSE_ALPHA_TABLE)
- assert(kAlphaFix + kGammaFix <= 31);
+ assert(kAlphaFix + GAMMA_FIX <= 31);
#endif
}
if (use_iterative_conversion) {
- InitGammaTablesS();
+ SafeInitSharpYuv();
if (!PreprocessARGB(r_ptr, g_ptr, b_ptr, step, rgb_stride, picture)) {
return 0;
}
@@ -1044,7 +686,7 @@ int WebPPictureYUVAToARGB(WebPPicture* picture) {
return WebPEncodingSetError(picture, VP8_ENC_ERROR_INVALID_CONFIGURATION);
}
// Allocate a new argb buffer (discarding the previous one).
- if (!WebPPictureAllocARGB(picture, picture->width, picture->height)) return 0;
+ if (!WebPPictureAllocARGB(picture)) return 0;
picture->use_argb = 1;
// Convert
@@ -1106,6 +748,8 @@ static int Import(WebPPicture* const picture,
const int width = picture->width;
const int height = picture->height;
+ if (abs(rgb_stride) < (import_alpha ? 4 : 3) * width) return 0;
+
if (!picture->use_argb) {
const uint8_t* a_ptr = import_alpha ? rgb + 3 : NULL;
return ImportYUVAFromRGBA(r_ptr, g_ptr, b_ptr, a_ptr, step, rgb_stride,
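The new guard in Import() rejects buffers whose row stride is too small to hold one full row of samples (4 bytes per pixel with alpha, 3 without), instead of over-reading later. An illustration with made-up dimensions:

  /* Hypothetical 100x50 RGBA import: each row needs 4 * 100 = 400 bytes. */
  uint8_t rgba[400 * 50] = { 0 };
  WebPPicture pic;
  int ok;
  WebPPictureInit(&pic);
  pic.width = 100;
  pic.height = 50;
  ok = WebPPictureImportRGBA(&pic, rgba, /*rgba_stride=*/300);   /* fails: 300 < 400 */
  ok = WebPPictureImportRGBA(&pic, rgba, /*rgba_stride=*/400);   /* accepted */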
@@ -1163,24 +807,24 @@ static int Import(WebPPicture* const picture,
#if !defined(WEBP_REDUCE_CSP)
int WebPPictureImportBGR(WebPPicture* picture,
- const uint8_t* rgb, int rgb_stride) {
- return (picture != NULL && rgb != NULL)
- ? Import(picture, rgb, rgb_stride, 3, 1, 0)
+ const uint8_t* bgr, int bgr_stride) {
+ return (picture != NULL && bgr != NULL)
+ ? Import(picture, bgr, bgr_stride, 3, 1, 0)
: 0;
}
int WebPPictureImportBGRA(WebPPicture* picture,
- const uint8_t* rgba, int rgba_stride) {
- return (picture != NULL && rgba != NULL)
- ? Import(picture, rgba, rgba_stride, 4, 1, 1)
+ const uint8_t* bgra, int bgra_stride) {
+ return (picture != NULL && bgra != NULL)
+ ? Import(picture, bgra, bgra_stride, 4, 1, 1)
: 0;
}
int WebPPictureImportBGRX(WebPPicture* picture,
- const uint8_t* rgba, int rgba_stride) {
- return (picture != NULL && rgba != NULL)
- ? Import(picture, rgba, rgba_stride, 4, 1, 0)
+ const uint8_t* bgrx, int bgrx_stride) {
+ return (picture != NULL && bgrx != NULL)
+ ? Import(picture, bgrx, bgrx_stride, 4, 1, 0)
: 0;
}
@@ -1201,9 +845,9 @@ int WebPPictureImportRGBA(WebPPicture* picture,
}
int WebPPictureImportRGBX(WebPPicture* picture,
- const uint8_t* rgba, int rgba_stride) {
- return (picture != NULL && rgba != NULL)
- ? Import(picture, rgba, rgba_stride, 4, 0, 0)
+ const uint8_t* rgbx, int rgbx_stride) {
+ return (picture != NULL && rgbx != NULL)
+ ? Import(picture, rgbx, rgbx_stride, 4, 0, 0)
: 0;
}
diff --git a/thirdparty/libwebp/src/enc/picture_enc.c b/thirdparty/libwebp/src/enc/picture_enc.c
index c691622d03..3af6383d38 100644
--- a/thirdparty/libwebp/src/enc/picture_enc.c
+++ b/thirdparty/libwebp/src/enc/picture_enc.c
@@ -45,6 +45,22 @@ int WebPPictureInitInternal(WebPPicture* picture, int version) {
//------------------------------------------------------------------------------
+int WebPValidatePicture(const WebPPicture* const picture) {
+ if (picture == NULL) return 0;
+ if (picture->width <= 0 || picture->height <= 0) {
+ return WebPEncodingSetError(picture, VP8_ENC_ERROR_BAD_DIMENSION);
+ }
+ if (picture->width <= 0 || picture->width / 4 > INT_MAX / 4 ||
+ picture->height <= 0 || picture->height / 4 > INT_MAX / 4) {
+ return WebPEncodingSetError(picture, VP8_ENC_ERROR_BAD_DIMENSION);
+ }
+ if (picture->colorspace != WEBP_YUV420 &&
+ picture->colorspace != WEBP_YUV420A) {
+ return WebPEncodingSetError(picture, VP8_ENC_ERROR_INVALID_CONFIGURATION);
+ }
+ return 1;
+}
+
static void WebPPictureResetBufferARGB(WebPPicture* const picture) {
picture->memory_argb_ = NULL;
picture->argb = NULL;
@@ -63,18 +79,17 @@ void WebPPictureResetBuffers(WebPPicture* const picture) {
WebPPictureResetBufferYUVA(picture);
}
-int WebPPictureAllocARGB(WebPPicture* const picture, int width, int height) {
+int WebPPictureAllocARGB(WebPPicture* const picture) {
void* memory;
+ const int width = picture->width;
+ const int height = picture->height;
const uint64_t argb_size = (uint64_t)width * height;
- assert(picture != NULL);
+ if (!WebPValidatePicture(picture)) return 0;
WebPSafeFree(picture->memory_argb_);
WebPPictureResetBufferARGB(picture);
- if (width <= 0 || height <= 0) {
- return WebPEncodingSetError(picture, VP8_ENC_ERROR_BAD_DIMENSION);
- }
// allocate a new buffer.
memory = WebPSafeMalloc(argb_size + WEBP_ALIGN_CST, sizeof(*picture->argb));
if (memory == NULL) {
@@ -86,10 +101,10 @@ int WebPPictureAllocARGB(WebPPicture* const picture, int width, int height) {
return 1;
}
-int WebPPictureAllocYUVA(WebPPicture* const picture, int width, int height) {
- const WebPEncCSP uv_csp =
- (WebPEncCSP)((int)picture->colorspace & WEBP_CSP_UV_MASK);
+int WebPPictureAllocYUVA(WebPPicture* const picture) {
const int has_alpha = (int)picture->colorspace & WEBP_CSP_ALPHA_BIT;
+ const int width = picture->width;
+ const int height = picture->height;
const int y_stride = width;
const int uv_width = (int)(((int64_t)width + 1) >> 1);
const int uv_height = (int)(((int64_t)height + 1) >> 1);
@@ -98,15 +113,11 @@ int WebPPictureAllocYUVA(WebPPicture* const picture, int width, int height) {
uint64_t y_size, uv_size, a_size, total_size;
uint8_t* mem;
- assert(picture != NULL);
+ if (!WebPValidatePicture(picture)) return 0;
WebPSafeFree(picture->memory_);
WebPPictureResetBufferYUVA(picture);
- if (uv_csp != WEBP_YUV420) {
- return WebPEncodingSetError(picture, VP8_ENC_ERROR_INVALID_CONFIGURATION);
- }
-
// alpha
a_width = has_alpha ? width : 0;
a_stride = a_width;
@@ -152,15 +163,12 @@ int WebPPictureAllocYUVA(WebPPicture* const picture, int width, int height) {
int WebPPictureAlloc(WebPPicture* picture) {
if (picture != NULL) {
- const int width = picture->width;
- const int height = picture->height;
-
WebPPictureFree(picture); // erase previous buffer
if (!picture->use_argb) {
- return WebPPictureAllocYUVA(picture, width, height);
+ return WebPPictureAllocYUVA(picture);
} else {
- return WebPPictureAllocARGB(picture, width, height);
+ return WebPPictureAllocARGB(picture);
}
}
return 1;
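WebPValidatePicture() centralizes the checks that WebPPictureAllocARGB() and WebPPictureAllocYUVA() used to perform individually, and the allocators now read the dimensions straight from the picture. A minimal sketch of the failure mode, with made-up values:

  WebPPicture pic;
  WebPPictureInit(&pic);
  pic.width = 0;                     /* invalid dimension */
  pic.height = 240;
  if (!WebPPictureAlloc(&pic)) {
    /* pic.error_code is VP8_ENC_ERROR_BAD_DIMENSION, set via WebPValidatePicture(). */
  }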
diff --git a/thirdparty/libwebp/src/enc/picture_rescale_enc.c b/thirdparty/libwebp/src/enc/picture_rescale_enc.c
index a75f5d9c06..839f91cacc 100644
--- a/thirdparty/libwebp/src/enc/picture_rescale_enc.c
+++ b/thirdparty/libwebp/src/enc/picture_rescale_enc.c
@@ -13,14 +13,15 @@
#include "src/webp/encode.h"
-#if !defined(WEBP_REDUCE_SIZE)
-
#include <assert.h>
#include <stdlib.h>
#include "src/enc/vp8i_enc.h"
+
+#if !defined(WEBP_REDUCE_SIZE)
#include "src/utils/rescaler_utils.h"
#include "src/utils/utils.h"
+#endif // !defined(WEBP_REDUCE_SIZE)
#define HALVE(x) (((x) + 1) >> 1)
@@ -56,6 +57,7 @@ static int AdjustAndCheckRectangle(const WebPPicture* const pic,
return 1;
}
+#if !defined(WEBP_REDUCE_SIZE)
int WebPPictureCopy(const WebPPicture* src, WebPPicture* dst) {
if (src == NULL || dst == NULL) return 0;
if (src == dst) return 1;
@@ -81,6 +83,7 @@ int WebPPictureCopy(const WebPPicture* src, WebPPicture* dst) {
}
return 1;
}
+#endif // !defined(WEBP_REDUCE_SIZE)
int WebPPictureIsView(const WebPPicture* picture) {
if (picture == NULL) return 0;
@@ -120,6 +123,7 @@ int WebPPictureView(const WebPPicture* src,
return 1;
}
+#if !defined(WEBP_REDUCE_SIZE)
//------------------------------------------------------------------------------
// Picture cropping
@@ -198,34 +202,34 @@ static void AlphaMultiplyY(WebPPicture* const pic, int inverse) {
}
}
-int WebPPictureRescale(WebPPicture* pic, int width, int height) {
+int WebPPictureRescale(WebPPicture* picture, int width, int height) {
WebPPicture tmp;
int prev_width, prev_height;
rescaler_t* work;
- if (pic == NULL) return 0;
- prev_width = pic->width;
- prev_height = pic->height;
+ if (picture == NULL) return 0;
+ prev_width = picture->width;
+ prev_height = picture->height;
if (!WebPRescalerGetScaledDimensions(
prev_width, prev_height, &width, &height)) {
return 0;
}
- PictureGrabSpecs(pic, &tmp);
+ PictureGrabSpecs(picture, &tmp);
tmp.width = width;
tmp.height = height;
if (!WebPPictureAlloc(&tmp)) return 0;
- if (!pic->use_argb) {
+ if (!picture->use_argb) {
work = (rescaler_t*)WebPSafeMalloc(2ULL * width, sizeof(*work));
if (work == NULL) {
WebPPictureFree(&tmp);
return 0;
}
// If present, we need to rescale alpha first (for AlphaMultiplyY).
- if (pic->a != NULL) {
+ if (picture->a != NULL) {
WebPInitAlphaProcessing();
- if (!RescalePlane(pic->a, prev_width, prev_height, pic->a_stride,
+ if (!RescalePlane(picture->a, prev_width, prev_height, picture->a_stride,
tmp.a, width, height, tmp.a_stride, work, 1)) {
return 0;
}
@@ -233,17 +237,15 @@ int WebPPictureRescale(WebPPicture* pic, int width, int height) {
// We take transparency into account on the luma plane only. That's not
// totally exact blending, but still is a good approximation.
- AlphaMultiplyY(pic, 0);
- if (!RescalePlane(pic->y, prev_width, prev_height, pic->y_stride,
+ AlphaMultiplyY(picture, 0);
+ if (!RescalePlane(picture->y, prev_width, prev_height, picture->y_stride,
tmp.y, width, height, tmp.y_stride, work, 1) ||
- !RescalePlane(pic->u,
- HALVE(prev_width), HALVE(prev_height), pic->uv_stride,
- tmp.u,
- HALVE(width), HALVE(height), tmp.uv_stride, work, 1) ||
- !RescalePlane(pic->v,
- HALVE(prev_width), HALVE(prev_height), pic->uv_stride,
- tmp.v,
- HALVE(width), HALVE(height), tmp.uv_stride, work, 1)) {
+ !RescalePlane(picture->u, HALVE(prev_width), HALVE(prev_height),
+ picture->uv_stride, tmp.u, HALVE(width), HALVE(height),
+ tmp.uv_stride, work, 1) ||
+ !RescalePlane(picture->v, HALVE(prev_width), HALVE(prev_height),
+ picture->uv_stride, tmp.v, HALVE(width), HALVE(height),
+ tmp.uv_stride, work, 1)) {
return 0;
}
AlphaMultiplyY(&tmp, 1);
@@ -257,18 +259,17 @@ int WebPPictureRescale(WebPPicture* pic, int width, int height) {
// weighting first (black-matting), scale the RGB values, and remove
// the premultiplication afterward (while preserving the alpha channel).
WebPInitAlphaProcessing();
- AlphaMultiplyARGB(pic, 0);
- if (!RescalePlane((const uint8_t*)pic->argb, prev_width, prev_height,
- pic->argb_stride * 4,
- (uint8_t*)tmp.argb, width, height,
- tmp.argb_stride * 4, work, 4)) {
+ AlphaMultiplyARGB(picture, 0);
+ if (!RescalePlane((const uint8_t*)picture->argb, prev_width, prev_height,
+ picture->argb_stride * 4, (uint8_t*)tmp.argb, width,
+ height, tmp.argb_stride * 4, work, 4)) {
return 0;
}
AlphaMultiplyARGB(&tmp, 1);
}
- WebPPictureFree(pic);
+ WebPPictureFree(picture);
WebPSafeFree(work);
- *pic = tmp;
+ *picture = tmp;
return 1;
}
@@ -280,23 +281,6 @@ int WebPPictureCopy(const WebPPicture* src, WebPPicture* dst) {
return 0;
}
-int WebPPictureIsView(const WebPPicture* picture) {
- (void)picture;
- return 0;
-}
-
-int WebPPictureView(const WebPPicture* src,
- int left, int top, int width, int height,
- WebPPicture* dst) {
- (void)src;
- (void)left;
- (void)top;
- (void)width;
- (void)height;
- (void)dst;
- return 0;
-}
-
int WebPPictureCrop(WebPPicture* pic,
int left, int top, int width, int height) {
(void)pic;
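After this reshuffle, WebPPictureCopy/Crop/Rescale remain behind WEBP_REDUCE_SIZE (stubs returning 0 when the flag is defined), while WebPPictureIsView() and WebPPictureView() are compiled unconditionally, hence their stubs are dropped above. A small sketch of the view API, which borrows the parent's buffers rather than allocating ('src' is an assumed, already-allocated picture):

  WebPPicture view;
  WebPPictureInit(&view);
  if (WebPPictureView(&src, /*left=*/0, /*top=*/0, /*width=*/16, /*height=*/16,
                      &view)) {
    /* ... use 'view' ...; freeing it releases nothing owned by 'src'. */
    WebPPictureFree(&view);
  }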
diff --git a/thirdparty/libwebp/src/enc/picture_tools_enc.c b/thirdparty/libwebp/src/enc/picture_tools_enc.c
index 38cb01534a..147cc18608 100644
--- a/thirdparty/libwebp/src/enc/picture_tools_enc.c
+++ b/thirdparty/libwebp/src/enc/picture_tools_enc.c
@@ -190,27 +190,28 @@ static WEBP_INLINE uint32_t MakeARGB32(int r, int g, int b) {
return (0xff000000u | (r << 16) | (g << 8) | b);
}
-void WebPBlendAlpha(WebPPicture* pic, uint32_t background_rgb) {
+void WebPBlendAlpha(WebPPicture* picture, uint32_t background_rgb) {
const int red = (background_rgb >> 16) & 0xff;
const int green = (background_rgb >> 8) & 0xff;
const int blue = (background_rgb >> 0) & 0xff;
int x, y;
- if (pic == NULL) return;
- if (!pic->use_argb) {
- const int uv_width = (pic->width >> 1); // omit last pixel during u/v loop
+ if (picture == NULL) return;
+ if (!picture->use_argb) {
+ // omit last pixel during u/v loop
+ const int uv_width = (picture->width >> 1);
const int Y0 = VP8RGBToY(red, green, blue, YUV_HALF);
// VP8RGBToU/V expects the u/v values summed over four pixels
const int U0 = VP8RGBToU(4 * red, 4 * green, 4 * blue, 4 * YUV_HALF);
const int V0 = VP8RGBToV(4 * red, 4 * green, 4 * blue, 4 * YUV_HALF);
- const int has_alpha = pic->colorspace & WEBP_CSP_ALPHA_BIT;
- uint8_t* y_ptr = pic->y;
- uint8_t* u_ptr = pic->u;
- uint8_t* v_ptr = pic->v;
- uint8_t* a_ptr = pic->a;
+ const int has_alpha = picture->colorspace & WEBP_CSP_ALPHA_BIT;
+ uint8_t* y_ptr = picture->y;
+ uint8_t* u_ptr = picture->u;
+ uint8_t* v_ptr = picture->v;
+ uint8_t* a_ptr = picture->a;
if (!has_alpha || a_ptr == NULL) return; // nothing to do
- for (y = 0; y < pic->height; ++y) {
+ for (y = 0; y < picture->height; ++y) {
// Luma blending
- for (x = 0; x < pic->width; ++x) {
+ for (x = 0; x < picture->width; ++x) {
const uint8_t alpha = a_ptr[x];
if (alpha < 0xff) {
y_ptr[x] = BLEND(Y0, y_ptr[x], alpha);
@@ -219,7 +220,7 @@ void WebPBlendAlpha(WebPPicture* pic, uint32_t background_rgb) {
// Chroma blending every even line
if ((y & 1) == 0) {
uint8_t* const a_ptr2 =
- (y + 1 == pic->height) ? a_ptr : a_ptr + pic->a_stride;
+ (y + 1 == picture->height) ? a_ptr : a_ptr + picture->a_stride;
for (x = 0; x < uv_width; ++x) {
// Average four alpha values into a single blending weight.
// TODO(skal): might lead to visible contouring. Can we do better?
@@ -229,24 +230,24 @@ void WebPBlendAlpha(WebPPicture* pic, uint32_t background_rgb) {
u_ptr[x] = BLEND_10BIT(U0, u_ptr[x], alpha);
v_ptr[x] = BLEND_10BIT(V0, v_ptr[x], alpha);
}
- if (pic->width & 1) { // rightmost pixel
+ if (picture->width & 1) { // rightmost pixel
const uint32_t alpha = 2 * (a_ptr[2 * x + 0] + a_ptr2[2 * x + 0]);
u_ptr[x] = BLEND_10BIT(U0, u_ptr[x], alpha);
v_ptr[x] = BLEND_10BIT(V0, v_ptr[x], alpha);
}
} else {
- u_ptr += pic->uv_stride;
- v_ptr += pic->uv_stride;
+ u_ptr += picture->uv_stride;
+ v_ptr += picture->uv_stride;
}
- memset(a_ptr, 0xff, pic->width); // reset alpha value to opaque
- a_ptr += pic->a_stride;
- y_ptr += pic->y_stride;
+ memset(a_ptr, 0xff, picture->width); // reset alpha value to opaque
+ a_ptr += picture->a_stride;
+ y_ptr += picture->y_stride;
}
} else {
- uint32_t* argb = pic->argb;
+ uint32_t* argb = picture->argb;
const uint32_t background = MakeARGB32(red, green, blue);
- for (y = 0; y < pic->height; ++y) {
- for (x = 0; x < pic->width; ++x) {
+ for (y = 0; y < picture->height; ++y) {
+ for (x = 0; x < picture->width; ++x) {
const int alpha = (argb[x] >> 24) & 0xff;
if (alpha != 0xff) {
if (alpha > 0) {
@@ -262,7 +263,7 @@ void WebPBlendAlpha(WebPPicture* pic, uint32_t background_rgb) {
}
}
}
- argb += pic->argb_stride;
+ argb += picture->argb_stride;
}
}
}
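The rename does not change behavior: every partially transparent pixel is composited onto the given background color and the alpha plane/channel is then reset to opaque. Typical call, as a one-line sketch:

  WebPBlendAlpha(&pic, 0xffffffu);   /* 0xrrggbb background; alpha becomes 0xff */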
diff --git a/thirdparty/libwebp/src/enc/predictor_enc.c b/thirdparty/libwebp/src/enc/predictor_enc.c
index 2b5c767280..b3d44b59d5 100644
--- a/thirdparty/libwebp/src/enc/predictor_enc.c
+++ b/thirdparty/libwebp/src/enc/predictor_enc.c
@@ -16,6 +16,7 @@
#include "src/dsp/lossless.h"
#include "src/dsp/lossless_common.h"
+#include "src/enc/vp8i_enc.h"
#include "src/enc/vp8li_enc.h"
#define MAX_DIFF_COST (1e30f)
@@ -31,10 +32,10 @@ static WEBP_INLINE int GetMin(int a, int b) { return (a > b) ? b : a; }
// Methods to calculate Entropy (Shannon).
static float PredictionCostSpatial(const int counts[256], int weight_0,
- double exp_val) {
+ float exp_val) {
const int significant_symbols = 256 >> 4;
- const double exp_decay_factor = 0.6;
- double bits = weight_0 * counts[0];
+ const float exp_decay_factor = 0.6f;
+ float bits = (float)weight_0 * counts[0];
int i;
for (i = 1; i < significant_symbols; ++i) {
bits += exp_val * (counts[i] + counts[256 - i]);
@@ -46,9 +47,9 @@ static float PredictionCostSpatial(const int counts[256], int weight_0,
static float PredictionCostSpatialHistogram(const int accumulated[4][256],
const int tile[4][256]) {
int i;
- double retval = 0;
+ float retval = 0.f;
for (i = 0; i < 4; ++i) {
- const double kExpValue = 0.94;
+ const float kExpValue = 0.94f;
retval += PredictionCostSpatial(tile[i], 1, kExpValue);
retval += VP8LCombinedShannonEntropy(tile[i], accumulated[i]);
}
@@ -472,12 +473,15 @@ static void CopyImageWithPrediction(int width, int height,
// with respect to predictions. If near_lossless_quality < 100, applies
// near lossless processing, shaving off more bits of residuals for lower
// qualities.
-void VP8LResidualImage(int width, int height, int bits, int low_effort,
- uint32_t* const argb, uint32_t* const argb_scratch,
- uint32_t* const image, int near_lossless_quality,
- int exact, int used_subtract_green) {
+int VP8LResidualImage(int width, int height, int bits, int low_effort,
+ uint32_t* const argb, uint32_t* const argb_scratch,
+ uint32_t* const image, int near_lossless_quality,
+ int exact, int used_subtract_green,
+ const WebPPicture* const pic, int percent_range,
+ int* const percent) {
const int tiles_per_row = VP8LSubSampleSize(width, bits);
const int tiles_per_col = VP8LSubSampleSize(height, bits);
+ int percent_start = *percent;
int tile_y;
int histo[4][256];
const int max_quantization = 1 << VP8LNearLosslessBits(near_lossless_quality);
@@ -491,17 +495,24 @@ void VP8LResidualImage(int width, int height, int bits, int low_effort,
for (tile_y = 0; tile_y < tiles_per_col; ++tile_y) {
int tile_x;
for (tile_x = 0; tile_x < tiles_per_row; ++tile_x) {
- const int pred = GetBestPredictorForTile(width, height, tile_x, tile_y,
- bits, histo, argb_scratch, argb, max_quantization, exact,
- used_subtract_green, image);
+ const int pred = GetBestPredictorForTile(
+ width, height, tile_x, tile_y, bits, histo, argb_scratch, argb,
+ max_quantization, exact, used_subtract_green, image);
image[tile_y * tiles_per_row + tile_x] = ARGB_BLACK | (pred << 8);
}
+
+ if (!WebPReportProgress(
+ pic, percent_start + percent_range * tile_y / tiles_per_col,
+ percent)) {
+ return 0;
+ }
}
}
CopyImageWithPrediction(width, height, bits, image, argb_scratch, argb,
low_effort, max_quantization, exact,
used_subtract_green);
+ return WebPReportProgress(pic, percent_start + percent_range, percent);
}
//------------------------------------------------------------------------------
@@ -532,7 +543,7 @@ static float PredictionCostCrossColor(const int accumulated[256],
const int counts[256]) {
// Favor low entropy, locally and globally.
// Favor small absolute values for PredictionCostSpatial
- static const double kExpValue = 2.4;
+ static const float kExpValue = 2.4f;
return VP8LCombinedShannonEntropy(counts, accumulated) +
PredictionCostSpatial(counts, 3, kExpValue);
}
@@ -714,11 +725,14 @@ static void CopyTileWithColorTransform(int xsize, int ysize,
}
}
-void VP8LColorSpaceTransform(int width, int height, int bits, int quality,
- uint32_t* const argb, uint32_t* image) {
+int VP8LColorSpaceTransform(int width, int height, int bits, int quality,
+ uint32_t* const argb, uint32_t* image,
+ const WebPPicture* const pic, int percent_range,
+ int* const percent) {
const int max_tile_size = 1 << bits;
const int tile_xsize = VP8LSubSampleSize(width, bits);
const int tile_ysize = VP8LSubSampleSize(height, bits);
+ int percent_start = *percent;
int accumulated_red_histo[256] = { 0 };
int accumulated_blue_histo[256] = { 0 };
int tile_x, tile_y;
@@ -768,5 +782,11 @@ void VP8LColorSpaceTransform(int width, int height, int bits, int quality,
}
}
}
+ if (!WebPReportProgress(
+ pic, percent_start + percent_range * tile_y / tile_ysize,
+ percent)) {
+ return 0;
+ }
}
+ return 1;
}
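VP8LResidualImage() and VP8LColorSpaceTransform() now take pic/percent_range/percent so they can report per-tile-row progress (and honor user abort) through WebPReportProgress(). On the public side this surfaces via the picture's progress hook; a hedged sketch of a hypothetical callback:

  /* 'percent' runs from 0 to 100 over the whole encode.
     Returning 0 aborts with VP8_ENC_ERROR_USER_ABORT. */
  static int MyProgress(int percent, const WebPPicture* picture) {
    (void)picture;
    fprintf(stderr, "\rlossless encode: %d%%", percent);
    return 1;   /* keep going */
  }
  /* ... before WebPEncode(): */
  pic.progress_hook = MyProgress;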
diff --git a/thirdparty/libwebp/src/enc/quant_enc.c b/thirdparty/libwebp/src/enc/quant_enc.c
index 6cede28ab4..6d8202d277 100644
--- a/thirdparty/libwebp/src/enc/quant_enc.c
+++ b/thirdparty/libwebp/src/enc/quant_enc.c
@@ -533,7 +533,8 @@ static void InitScore(VP8ModeScore* const rd) {
rd->score = MAX_COST;
}
-static void CopyScore(VP8ModeScore* const dst, const VP8ModeScore* const src) {
+static void CopyScore(VP8ModeScore* WEBP_RESTRICT const dst,
+ const VP8ModeScore* WEBP_RESTRICT const src) {
dst->D = src->D;
dst->SD = src->SD;
dst->R = src->R;
@@ -542,7 +543,8 @@ static void CopyScore(VP8ModeScore* const dst, const VP8ModeScore* const src) {
dst->score = src->score;
}
-static void AddScore(VP8ModeScore* const dst, const VP8ModeScore* const src) {
+static void AddScore(VP8ModeScore* WEBP_RESTRICT const dst,
+ const VP8ModeScore* WEBP_RESTRICT const src) {
dst->D += src->D;
dst->SD += src->SD;
dst->R += src->R;
@@ -588,10 +590,10 @@ static WEBP_INLINE score_t RDScoreTrellis(int lambda, score_t rate,
// Coefficient type.
enum { TYPE_I16_AC = 0, TYPE_I16_DC = 1, TYPE_CHROMA_A = 2, TYPE_I4_AC = 3 };
-static int TrellisQuantizeBlock(const VP8Encoder* const enc,
+static int TrellisQuantizeBlock(const VP8Encoder* WEBP_RESTRICT const enc,
int16_t in[16], int16_t out[16],
int ctx0, int coeff_type,
- const VP8Matrix* const mtx,
+ const VP8Matrix* WEBP_RESTRICT const mtx,
int lambda) {
const ProbaArray* const probas = enc->proba_.coeffs_[coeff_type];
CostArrayPtr const costs =
@@ -767,9 +769,9 @@ static int TrellisQuantizeBlock(const VP8Encoder* const enc,
// all at once. Output is the reconstructed block in *yuv_out, and the
// quantized levels in *levels.
-static int ReconstructIntra16(VP8EncIterator* const it,
- VP8ModeScore* const rd,
- uint8_t* const yuv_out,
+static int ReconstructIntra16(VP8EncIterator* WEBP_RESTRICT const it,
+ VP8ModeScore* WEBP_RESTRICT const rd,
+ uint8_t* WEBP_RESTRICT const yuv_out,
int mode) {
const VP8Encoder* const enc = it->enc_;
const uint8_t* const ref = it->yuv_p_ + VP8I16ModeOffsets[mode];
@@ -819,10 +821,10 @@ static int ReconstructIntra16(VP8EncIterator* const it,
return nz;
}
-static int ReconstructIntra4(VP8EncIterator* const it,
+static int ReconstructIntra4(VP8EncIterator* WEBP_RESTRICT const it,
int16_t levels[16],
- const uint8_t* const src,
- uint8_t* const yuv_out,
+ const uint8_t* WEBP_RESTRICT const src,
+ uint8_t* WEBP_RESTRICT const yuv_out,
int mode) {
const VP8Encoder* const enc = it->enc_;
const uint8_t* const ref = it->yuv_p_ + VP8I4ModeOffsets[mode];
@@ -855,7 +857,8 @@ static int ReconstructIntra4(VP8EncIterator* const it,
// Quantize as usual, but also compute and return the quantization error.
// Error is already divided by DSHIFT.
-static int QuantizeSingle(int16_t* const v, const VP8Matrix* const mtx) {
+static int QuantizeSingle(int16_t* WEBP_RESTRICT const v,
+ const VP8Matrix* WEBP_RESTRICT const mtx) {
int V = *v;
const int sign = (V < 0);
if (sign) V = -V;
@@ -869,9 +872,10 @@ static int QuantizeSingle(int16_t* const v, const VP8Matrix* const mtx) {
return (sign ? -V : V) >> DSCALE;
}
-static void CorrectDCValues(const VP8EncIterator* const it,
- const VP8Matrix* const mtx,
- int16_t tmp[][16], VP8ModeScore* const rd) {
+static void CorrectDCValues(const VP8EncIterator* WEBP_RESTRICT const it,
+ const VP8Matrix* WEBP_RESTRICT const mtx,
+ int16_t tmp[][16],
+ VP8ModeScore* WEBP_RESTRICT const rd) {
// | top[0] | top[1]
// --------+--------+---------
// left[0] | tmp[0] tmp[1] <-> err0 err1
@@ -902,8 +906,8 @@ static void CorrectDCValues(const VP8EncIterator* const it,
}
}
-static void StoreDiffusionErrors(VP8EncIterator* const it,
- const VP8ModeScore* const rd) {
+static void StoreDiffusionErrors(VP8EncIterator* WEBP_RESTRICT const it,
+ const VP8ModeScore* WEBP_RESTRICT const rd) {
int ch;
for (ch = 0; ch <= 1; ++ch) {
int8_t* const top = it->top_derr_[it->x_][ch];
@@ -922,8 +926,9 @@ static void StoreDiffusionErrors(VP8EncIterator* const it,
//------------------------------------------------------------------------------
-static int ReconstructUV(VP8EncIterator* const it, VP8ModeScore* const rd,
- uint8_t* const yuv_out, int mode) {
+static int ReconstructUV(VP8EncIterator* WEBP_RESTRICT const it,
+ VP8ModeScore* WEBP_RESTRICT const rd,
+ uint8_t* WEBP_RESTRICT const yuv_out, int mode) {
const VP8Encoder* const enc = it->enc_;
const uint8_t* const ref = it->yuv_p_ + VP8UVModeOffsets[mode];
const uint8_t* const src = it->yuv_in_ + U_OFF_ENC;
@@ -994,7 +999,8 @@ static void SwapOut(VP8EncIterator* const it) {
SwapPtr(&it->yuv_out_, &it->yuv_out2_);
}
-static void PickBestIntra16(VP8EncIterator* const it, VP8ModeScore* rd) {
+static void PickBestIntra16(VP8EncIterator* WEBP_RESTRICT const it,
+ VP8ModeScore* WEBP_RESTRICT rd) {
const int kNumBlocks = 16;
VP8SegmentInfo* const dqm = &it->enc_->dqm_[it->mb_->segment_];
const int lambda = dqm->lambda_i16_;
@@ -1054,7 +1060,7 @@ static void PickBestIntra16(VP8EncIterator* const it, VP8ModeScore* rd) {
//------------------------------------------------------------------------------
// return the cost array corresponding to the surrounding prediction modes.
-static const uint16_t* GetCostModeI4(VP8EncIterator* const it,
+static const uint16_t* GetCostModeI4(VP8EncIterator* WEBP_RESTRICT const it,
const uint8_t modes[16]) {
const int preds_w = it->enc_->preds_w_;
const int x = (it->i4_ & 3), y = it->i4_ >> 2;
@@ -1063,7 +1069,8 @@ static const uint16_t* GetCostModeI4(VP8EncIterator* const it,
return VP8FixedCostsI4[top][left];
}
-static int PickBestIntra4(VP8EncIterator* const it, VP8ModeScore* const rd) {
+static int PickBestIntra4(VP8EncIterator* WEBP_RESTRICT const it,
+ VP8ModeScore* WEBP_RESTRICT const rd) {
const VP8Encoder* const enc = it->enc_;
const VP8SegmentInfo* const dqm = &enc->dqm_[it->mb_->segment_];
const int lambda = dqm->lambda_i4_;
@@ -1159,7 +1166,8 @@ static int PickBestIntra4(VP8EncIterator* const it, VP8ModeScore* const rd) {
//------------------------------------------------------------------------------
-static void PickBestUV(VP8EncIterator* const it, VP8ModeScore* const rd) {
+static void PickBestUV(VP8EncIterator* WEBP_RESTRICT const it,
+ VP8ModeScore* WEBP_RESTRICT const rd) {
const int kNumBlocks = 8;
const VP8SegmentInfo* const dqm = &it->enc_->dqm_[it->mb_->segment_];
const int lambda = dqm->lambda_uv_;
@@ -1211,7 +1219,8 @@ static void PickBestUV(VP8EncIterator* const it, VP8ModeScore* const rd) {
//------------------------------------------------------------------------------
// Final reconstruction and quantization.
-static void SimpleQuantize(VP8EncIterator* const it, VP8ModeScore* const rd) {
+static void SimpleQuantize(VP8EncIterator* WEBP_RESTRICT const it,
+ VP8ModeScore* WEBP_RESTRICT const rd) {
const VP8Encoder* const enc = it->enc_;
const int is_i16 = (it->mb_->type_ == 1);
int nz = 0;
@@ -1236,9 +1245,9 @@ static void SimpleQuantize(VP8EncIterator* const it, VP8ModeScore* const rd) {
}
// Refine intra16/intra4 sub-modes based on distortion only (not rate).
-static void RefineUsingDistortion(VP8EncIterator* const it,
+static void RefineUsingDistortion(VP8EncIterator* WEBP_RESTRICT const it,
int try_both_modes, int refine_uv_mode,
- VP8ModeScore* const rd) {
+ VP8ModeScore* WEBP_RESTRICT const rd) {
score_t best_score = MAX_COST;
int nz = 0;
int mode;
@@ -1352,7 +1361,8 @@ static void RefineUsingDistortion(VP8EncIterator* const it,
//------------------------------------------------------------------------------
// Entry point
-int VP8Decimate(VP8EncIterator* const it, VP8ModeScore* const rd,
+int VP8Decimate(VP8EncIterator* WEBP_RESTRICT const it,
+ VP8ModeScore* WEBP_RESTRICT const rd,
VP8RDLevel rd_opt) {
int is_skipped;
const int method = it->enc_->method_;
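The quantizer entry points gain WEBP_RESTRICT on pointer arguments that never alias (score structs, matrices, in/out blocks), letting the compiler cache loads and vectorize more aggressively. To my understanding WEBP_RESTRICT is libwebp's portability wrapper around the C99 restrict qualifier; a small illustrative sketch of the contract, not taken from the patch:

  /* With 'restrict', the compiler may assume dst[] and src[] never overlap,
     so src values can stay in registers across the loop. */
  static void AddScores(int* restrict dst, const int* restrict src, int n) {
    int i;
    for (i = 0; i < n; ++i) dst[i] += src[i];
  }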
diff --git a/thirdparty/libwebp/src/enc/vp8i_enc.h b/thirdparty/libwebp/src/enc/vp8i_enc.h
index b4bba08f27..71f76702ae 100644
--- a/thirdparty/libwebp/src/enc/vp8i_enc.h
+++ b/thirdparty/libwebp/src/enc/vp8i_enc.h
@@ -32,7 +32,7 @@ extern "C" {
// version numbers
#define ENC_MAJ_VERSION 1
#define ENC_MIN_VERSION 2
-#define ENC_REV_VERSION 2
+#define ENC_REV_VERSION 4
enum { MAX_LF_LEVELS = 64, // Maximum loop filter level
MAX_VARIABLE_LEVEL = 67, // last (inclusive) level with variable cost
@@ -470,7 +470,8 @@ int VP8EncAnalyze(VP8Encoder* const enc);
// Sets up segment's quantization values, base_quant_ and filter strengths.
void VP8SetSegmentParams(VP8Encoder* const enc, float quality);
// Pick best modes and fills the levels. Returns true if skipped.
-int VP8Decimate(VP8EncIterator* const it, VP8ModeScore* const rd,
+int VP8Decimate(VP8EncIterator* WEBP_RESTRICT const it,
+ VP8ModeScore* WEBP_RESTRICT const rd,
VP8RDLevel rd_opt);
// in alpha.c
@@ -490,19 +491,24 @@ int VP8FilterStrengthFromDelta(int sharpness, int delta);
// misc utils for picture_*.c:
+// Returns true if 'picture' is non-NULL and dimensions/colorspace are within
+// their valid ranges. If returning false, the 'error_code' in 'picture' is
+// updated.
+int WebPValidatePicture(const WebPPicture* const picture);
+
// Remove reference to the ARGB/YUVA buffer (doesn't free anything).
void WebPPictureResetBuffers(WebPPicture* const picture);
-// Allocates ARGB buffer of given dimension (previous one is always free'd).
-// Preserves the YUV(A) buffer. Returns false in case of error (invalid param,
-// out-of-memory).
-int WebPPictureAllocARGB(WebPPicture* const picture, int width, int height);
+// Allocates ARGB buffer according to set width/height (previous one is
+// always free'd). Preserves the YUV(A) buffer. Returns false in case of error
+// (invalid param, out-of-memory).
+int WebPPictureAllocARGB(WebPPicture* const picture);
-// Allocates YUVA buffer of given dimension (previous one is always free'd).
-// Uses picture->csp to determine whether an alpha buffer is needed.
+// Allocates YUVA buffer according to set width/height (previous one is always
+// free'd). Uses picture->csp to determine whether an alpha buffer is needed.
// Preserves the ARGB buffer.
// Returns false in case of error (invalid param, out-of-memory).
-int WebPPictureAllocYUVA(WebPPicture* const picture, int width, int height);
+int WebPPictureAllocYUVA(WebPPicture* const picture);
// Replace samples that are fully transparent by 'color' to help compressibility
// (no guarantee, though). Assumes pic->use_argb is true.
diff --git a/thirdparty/libwebp/src/enc/vp8l_enc.c b/thirdparty/libwebp/src/enc/vp8l_enc.c
index e330e716f1..2b345df610 100644
--- a/thirdparty/libwebp/src/enc/vp8l_enc.c
+++ b/thirdparty/libwebp/src/enc/vp8l_enc.c
@@ -15,15 +15,16 @@
#include <assert.h>
#include <stdlib.h>
+#include "src/dsp/lossless.h"
+#include "src/dsp/lossless_common.h"
#include "src/enc/backward_references_enc.h"
#include "src/enc/histogram_enc.h"
#include "src/enc/vp8i_enc.h"
#include "src/enc/vp8li_enc.h"
-#include "src/dsp/lossless.h"
-#include "src/dsp/lossless_common.h"
#include "src/utils/bit_writer_utils.h"
#include "src/utils/huffman_encode_utils.h"
#include "src/utils/utils.h"
+#include "src/webp/encode.h"
#include "src/webp/format_constants.h"
// Maximum number of histogram images (sub-blocks).
@@ -183,10 +184,9 @@ static void CoOccurrenceFindMax(const uint32_t* const cooccurrence,
}
// Builds the cooccurrence matrix
-static WebPEncodingError CoOccurrenceBuild(const WebPPicture* const pic,
- const uint32_t* const palette,
- uint32_t num_colors,
- uint32_t* cooccurrence) {
+static int CoOccurrenceBuild(const WebPPicture* const pic,
+ const uint32_t* const palette, uint32_t num_colors,
+ uint32_t* cooccurrence) {
uint32_t *lines, *line_top, *line_current, *line_tmp;
int x, y;
const uint32_t* src = pic->argb;
@@ -195,7 +195,10 @@ static WebPEncodingError CoOccurrenceBuild(const WebPPicture* const pic,
uint32_t idx_map[MAX_PALETTE_SIZE] = {0};
uint32_t palette_sorted[MAX_PALETTE_SIZE];
lines = (uint32_t*)WebPSafeMalloc(2 * pic->width, sizeof(*lines));
- if (lines == NULL) return VP8_ENC_ERROR_OUT_OF_MEMORY;
+ if (lines == NULL) {
+ WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
+ return 0;
+ }
line_top = &lines[0];
line_current = &lines[pic->width];
PrepareMapToPalette(palette, num_colors, palette_sorted, idx_map);
@@ -226,7 +229,7 @@ static WebPEncodingError CoOccurrenceBuild(const WebPPicture* const pic,
src += pic->argb_stride;
}
WebPSafeFree(lines);
- return VP8_ENC_OK;
+ return 1;
}
struct Sum {
@@ -237,7 +240,7 @@ struct Sum {
// Implements the modified Zeng method from "A Survey on Palette Reordering
// Methods for Improving the Compression of Color-Indexed Images" by Armando J.
// Pinho and Antonio J. R. Neves.
-static WebPEncodingError PaletteSortModifiedZeng(
+static int PaletteSortModifiedZeng(
const WebPPicture* const pic, const uint32_t* const palette_sorted,
uint32_t num_colors, uint32_t* const palette) {
uint32_t i, j, ind;
@@ -247,15 +250,16 @@ static WebPEncodingError PaletteSortModifiedZeng(
uint32_t first, last;
uint32_t num_sums;
// TODO(vrabaud) check whether one color images should use palette or not.
- if (num_colors <= 1) return VP8_ENC_OK;
+ if (num_colors <= 1) return 1;
// Build the co-occurrence matrix.
cooccurrence =
(uint32_t*)WebPSafeCalloc(num_colors * num_colors, sizeof(*cooccurrence));
- if (cooccurrence == NULL) return VP8_ENC_ERROR_OUT_OF_MEMORY;
- if (CoOccurrenceBuild(pic, palette_sorted, num_colors, cooccurrence) !=
- VP8_ENC_OK) {
- WebPSafeFree(cooccurrence);
- return VP8_ENC_ERROR_OUT_OF_MEMORY;
+ if (cooccurrence == NULL) {
+ WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
+ return 0;
+ }
+ if (!CoOccurrenceBuild(pic, palette_sorted, num_colors, cooccurrence)) {
+ return 0;
}
// Initialize the mapping list with the two best indices.
@@ -316,7 +320,7 @@ static WebPEncodingError PaletteSortModifiedZeng(
for (i = 0; i < num_colors; ++i) {
palette[i] = palette_sorted[remapping[(first + i) % num_colors]];
}
- return VP8_ENC_OK;
+ return 1;
}
// -----------------------------------------------------------------------------
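From here on, the lossless helpers drop the WebPEncodingError return type: they return 1/0 and record the precise error on the WebPPicture via WebPEncodingSetError(), so callers only test the boolean and the error code travels with the picture. A minimal sketch of the convention (hypothetical helper, not part of the patch):

  static int SomeHelper(const WebPPicture* const pic) {
    uint32_t* const buf = (uint32_t*)WebPSafeMalloc(16ULL, sizeof(*buf));
    if (buf == NULL) {
      WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
      return 0;                      /* failure; pic->error_code already set */
    }
    /* ... work ... */
    WebPSafeFree(buf);
    return 1;                        /* success */
  }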
@@ -434,8 +438,8 @@ static int AnalyzeEntropy(const uint32_t* argb,
curr_row += argb_stride;
}
{
- double entropy_comp[kHistoTotal];
- double entropy[kNumEntropyIx];
+ float entropy_comp[kHistoTotal];
+ float entropy[kNumEntropyIx];
int k;
int last_mode_to_analyze = use_palette ? kPalette : kSpatialSubGreen;
int j;
@@ -949,11 +953,11 @@ static WEBP_INLINE void WriteHuffmanCodeWithExtraBits(
VP8LPutBits(bw, (bits << depth) | symbol, depth + n_bits);
}
-static WebPEncodingError StoreImageToBitMask(
+static int StoreImageToBitMask(
VP8LBitWriter* const bw, int width, int histo_bits,
const VP8LBackwardRefs* const refs,
const uint16_t* histogram_symbols,
- const HuffmanTreeCode* const huffman_codes) {
+ const HuffmanTreeCode* const huffman_codes, const WebPPicture* const pic) {
const int histo_xsize = histo_bits ? VP8LSubSampleSize(width, histo_bits) : 1;
const int tile_mask = (histo_bits == 0) ? 0 : -(1 << histo_bits);
// x and y trace the position in the image.
@@ -1006,44 +1010,53 @@ static WebPEncodingError StoreImageToBitMask(
}
VP8LRefsCursorNext(&c);
}
- return bw->error_ ? VP8_ENC_ERROR_OUT_OF_MEMORY : VP8_ENC_OK;
+ if (bw->error_) {
+ WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
+ return 0;
+ }
+ return 1;
}
-// Special case of EncodeImageInternal() for cache-bits=0, histo_bits=31
-static WebPEncodingError EncodeImageNoHuffman(
- VP8LBitWriter* const bw, const uint32_t* const argb,
- VP8LHashChain* const hash_chain, VP8LBackwardRefs* const refs_array,
- int width, int height, int quality, int low_effort) {
+// Special case of EncodeImageInternal() for cache-bits=0, histo_bits=31.
+// pic and percent are for progress.
+static int EncodeImageNoHuffman(VP8LBitWriter* const bw,
+ const uint32_t* const argb,
+ VP8LHashChain* const hash_chain,
+ VP8LBackwardRefs* const refs_array, int width,
+ int height, int quality, int low_effort,
+ const WebPPicture* const pic, int percent_range,
+ int* const percent) {
int i;
int max_tokens = 0;
- WebPEncodingError err = VP8_ENC_OK;
VP8LBackwardRefs* refs;
HuffmanTreeToken* tokens = NULL;
- HuffmanTreeCode huffman_codes[5] = { { 0, NULL, NULL } };
- const uint16_t histogram_symbols[1] = { 0 }; // only one tree, one symbol
+ HuffmanTreeCode huffman_codes[5] = {{0, NULL, NULL}};
+ const uint16_t histogram_symbols[1] = {0}; // only one tree, one symbol
int cache_bits = 0;
VP8LHistogramSet* histogram_image = NULL;
HuffmanTree* const huff_tree = (HuffmanTree*)WebPSafeMalloc(
- 3ULL * CODE_LENGTH_CODES, sizeof(*huff_tree));
+ 3ULL * CODE_LENGTH_CODES, sizeof(*huff_tree));
if (huff_tree == NULL) {
- err = VP8_ENC_ERROR_OUT_OF_MEMORY;
+ WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
goto Error;
}
// Calculate backward references from ARGB image.
- if (!VP8LHashChainFill(hash_chain, quality, argb, width, height,
- low_effort)) {
- err = VP8_ENC_ERROR_OUT_OF_MEMORY;
+ if (!VP8LHashChainFill(hash_chain, quality, argb, width, height, low_effort,
+ pic, percent_range / 2, percent)) {
+ goto Error;
+ }
+ if (!VP8LGetBackwardReferences(width, height, argb, quality, /*low_effort=*/0,
+ kLZ77Standard | kLZ77RLE, cache_bits,
+ /*do_no_cache=*/0, hash_chain, refs_array,
+ &cache_bits, pic,
+ percent_range - percent_range / 2, percent)) {
goto Error;
}
- err = VP8LGetBackwardReferences(
- width, height, argb, quality, /*low_effort=*/0, kLZ77Standard | kLZ77RLE,
- cache_bits, /*do_no_cache=*/0, hash_chain, refs_array, &cache_bits);
- if (err != VP8_ENC_OK) goto Error;
refs = &refs_array[0];
histogram_image = VP8LAllocateHistogramSet(1, cache_bits);
if (histogram_image == NULL) {
- err = VP8_ENC_ERROR_OUT_OF_MEMORY;
+ WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
goto Error;
}
VP8LHistogramSetClear(histogram_image);
@@ -1054,7 +1067,7 @@ static WebPEncodingError EncodeImageNoHuffman(
// Create Huffman bit lengths and codes for each histogram image.
assert(histogram_image->size == 1);
if (!GetHuffBitLengthsAndCodes(histogram_image, huffman_codes)) {
- err = VP8_ENC_ERROR_OUT_OF_MEMORY;
+ WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
goto Error;
}
@@ -1071,7 +1084,7 @@ static WebPEncodingError EncodeImageNoHuffman(
tokens = (HuffmanTreeToken*)WebPSafeMalloc(max_tokens, sizeof(*tokens));
if (tokens == NULL) {
- err = VP8_ENC_ERROR_OUT_OF_MEMORY;
+ WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
goto Error;
}
@@ -1083,27 +1096,32 @@ static WebPEncodingError EncodeImageNoHuffman(
}
// Store actual literals.
- err = StoreImageToBitMask(bw, width, 0, refs, histogram_symbols,
- huffman_codes);
+ if (!StoreImageToBitMask(bw, width, 0, refs, histogram_symbols, huffman_codes,
+ pic)) {
+ goto Error;
+ }
Error:
WebPSafeFree(tokens);
WebPSafeFree(huff_tree);
VP8LFreeHistogramSet(histogram_image);
WebPSafeFree(huffman_codes[0].codes);
- return err;
+ return (pic->error_code == VP8_ENC_OK);
}
-static WebPEncodingError EncodeImageInternal(
+// pic and percent are for progress.
+static int EncodeImageInternal(
VP8LBitWriter* const bw, const uint32_t* const argb,
VP8LHashChain* const hash_chain, VP8LBackwardRefs refs_array[4], int width,
int height, int quality, int low_effort, int use_cache,
const CrunchConfig* const config, int* cache_bits, int histogram_bits,
- size_t init_byte_position, int* const hdr_size, int* const data_size) {
- WebPEncodingError err = VP8_ENC_ERROR_OUT_OF_MEMORY;
+ size_t init_byte_position, int* const hdr_size, int* const data_size,
+ const WebPPicture* const pic, int percent_range, int* const percent) {
const uint32_t histogram_image_xysize =
VP8LSubSampleSize(width, histogram_bits) *
VP8LSubSampleSize(height, histogram_bits);
+ int remaining_percent = percent_range;
+ int percent_start = *percent;
VP8LHistogramSet* histogram_image = NULL;
VP8LHistogram* tmp_histo = NULL;
int histogram_image_size = 0;
@@ -1112,9 +1130,8 @@ static WebPEncodingError EncodeImageInternal(
3ULL * CODE_LENGTH_CODES, sizeof(*huff_tree));
HuffmanTreeToken* tokens = NULL;
HuffmanTreeCode* huffman_codes = NULL;
- uint16_t* const histogram_symbols =
- (uint16_t*)WebPSafeMalloc(histogram_image_xysize,
- sizeof(*histogram_symbols));
+ uint16_t* const histogram_symbols = (uint16_t*)WebPSafeMalloc(
+ histogram_image_xysize, sizeof(*histogram_symbols));
int sub_configs_idx;
int cache_bits_init, write_histogram_image;
VP8LBitWriter bw_init = *bw, bw_best;
@@ -1126,14 +1143,27 @@ static WebPEncodingError EncodeImageInternal(
assert(hdr_size != NULL);
assert(data_size != NULL);
- // Make sure we can allocate the different objects.
memset(&hash_chain_histogram, 0, sizeof(hash_chain_histogram));
+ if (!VP8LBitWriterInit(&bw_best, 0)) {
+ WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
+ goto Error;
+ }
+
+ // Make sure we can allocate the different objects.
if (huff_tree == NULL || histogram_symbols == NULL ||
- !VP8LHashChainInit(&hash_chain_histogram, histogram_image_xysize) ||
- !VP8LHashChainFill(hash_chain, quality, argb, width, height,
- low_effort)) {
+ !VP8LHashChainInit(&hash_chain_histogram, histogram_image_xysize)) {
+ WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
+ goto Error;
+ }
+
+ percent_range = remaining_percent / 5;
+ if (!VP8LHashChainFill(hash_chain, quality, argb, width, height,
+ low_effort, pic, percent_range, percent)) {
goto Error;
}
+ percent_start += percent_range;
+ remaining_percent -= percent_range;
+
if (use_cache) {
// If the value is different from zero, it has been set during the
// palette analysis.
@@ -1142,22 +1172,27 @@ static WebPEncodingError EncodeImageInternal(
cache_bits_init = 0;
}
// If several iterations will happen, clone into bw_best.
- if (!VP8LBitWriterInit(&bw_best, 0) ||
- ((config->sub_configs_size_ > 1 ||
- config->sub_configs_[0].do_no_cache_) &&
- !VP8LBitWriterClone(bw, &bw_best))) {
+ if ((config->sub_configs_size_ > 1 || config->sub_configs_[0].do_no_cache_) &&
+ !VP8LBitWriterClone(bw, &bw_best)) {
+ WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
goto Error;
}
+
for (sub_configs_idx = 0; sub_configs_idx < config->sub_configs_size_;
++sub_configs_idx) {
const CrunchSubConfig* const sub_config =
&config->sub_configs_[sub_configs_idx];
int cache_bits_best, i_cache;
- err = VP8LGetBackwardReferences(width, height, argb, quality, low_effort,
- sub_config->lz77_, cache_bits_init,
- sub_config->do_no_cache_, hash_chain,
- &refs_array[0], &cache_bits_best);
- if (err != VP8_ENC_OK) goto Error;
+ int i_remaining_percent = remaining_percent / config->sub_configs_size_;
+ int i_percent_range = i_remaining_percent / 4;
+ i_remaining_percent -= i_percent_range;
+
+ if (!VP8LGetBackwardReferences(
+ width, height, argb, quality, low_effort, sub_config->lz77_,
+ cache_bits_init, sub_config->do_no_cache_, hash_chain,
+ &refs_array[0], &cache_bits_best, pic, i_percent_range, percent)) {
+ goto Error;
+ }
for (i_cache = 0; i_cache < (sub_config->do_no_cache_ ? 2 : 1); ++i_cache) {
const int cache_bits_tmp = (i_cache == 0) ? cache_bits_best : 0;
@@ -1172,11 +1207,17 @@ static WebPEncodingError EncodeImageInternal(
histogram_image =
VP8LAllocateHistogramSet(histogram_image_xysize, cache_bits_tmp);
tmp_histo = VP8LAllocateHistogram(cache_bits_tmp);
- if (histogram_image == NULL || tmp_histo == NULL ||
- !VP8LGetHistoImageSymbols(width, height, &refs_array[i_cache],
- quality, low_effort, histogram_bits,
- cache_bits_tmp, histogram_image, tmp_histo,
- histogram_symbols)) {
+ if (histogram_image == NULL || tmp_histo == NULL) {
+ WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
+ goto Error;
+ }
+
+ i_percent_range = i_remaining_percent / 3;
+ i_remaining_percent -= i_percent_range;
+ if (!VP8LGetHistoImageSymbols(
+ width, height, &refs_array[i_cache], quality, low_effort,
+ histogram_bits, cache_bits_tmp, histogram_image, tmp_histo,
+ histogram_symbols, pic, i_percent_range, percent)) {
goto Error;
}
// Create Huffman bit lengths and codes for each histogram image.
@@ -1189,6 +1230,7 @@ static WebPEncodingError EncodeImageInternal(
// GetHuffBitLengthsAndCodes().
if (huffman_codes == NULL ||
!GetHuffBitLengthsAndCodes(histogram_image, huffman_codes)) {
+ WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
goto Error;
}
// Free combined histograms.
@@ -1211,12 +1253,14 @@ static WebPEncodingError EncodeImageInternal(
write_histogram_image = (histogram_image_size > 1);
VP8LPutBits(bw, write_histogram_image, 1);
if (write_histogram_image) {
- uint32_t* const histogram_argb =
- (uint32_t*)WebPSafeMalloc(histogram_image_xysize,
- sizeof(*histogram_argb));
+ uint32_t* const histogram_argb = (uint32_t*)WebPSafeMalloc(
+ histogram_image_xysize, sizeof(*histogram_argb));
int max_index = 0;
uint32_t i;
- if (histogram_argb == NULL) goto Error;
+ if (histogram_argb == NULL) {
+ WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
+ goto Error;
+ }
for (i = 0; i < histogram_image_xysize; ++i) {
const int symbol_index = histogram_symbols[i] & 0xffff;
histogram_argb[i] = (symbol_index << 8);
@@ -1227,12 +1271,17 @@ static WebPEncodingError EncodeImageInternal(
histogram_image_size = max_index;
VP8LPutBits(bw, histogram_bits - 2, 3);
- err = EncodeImageNoHuffman(
- bw, histogram_argb, &hash_chain_histogram, &refs_array[2],
- VP8LSubSampleSize(width, histogram_bits),
- VP8LSubSampleSize(height, histogram_bits), quality, low_effort);
+ i_percent_range = i_remaining_percent / 2;
+ i_remaining_percent -= i_percent_range;
+ if (!EncodeImageNoHuffman(
+ bw, histogram_argb, &hash_chain_histogram, &refs_array[2],
+ VP8LSubSampleSize(width, histogram_bits),
+ VP8LSubSampleSize(height, histogram_bits), quality, low_effort,
+ pic, i_percent_range, percent)) {
+ WebPSafeFree(histogram_argb);
+ goto Error;
+ }
WebPSafeFree(histogram_argb);
- if (err != VP8_ENC_OK) goto Error;
}
// Store Huffman codes.
@@ -1256,9 +1305,10 @@ static WebPEncodingError EncodeImageInternal(
}
// Store actual literals.
hdr_size_tmp = (int)(VP8LBitWriterNumBytes(bw) - init_byte_position);
- err = StoreImageToBitMask(bw, width, histogram_bits, &refs_array[i_cache],
- histogram_symbols, huffman_codes);
- if (err != VP8_ENC_OK) goto Error;
+ if (!StoreImageToBitMask(bw, width, histogram_bits, &refs_array[i_cache],
+ histogram_symbols, huffman_codes, pic)) {
+ goto Error;
+ }
// Keep track of the smallest image so far.
if (VP8LBitWriterNumBytes(bw) < bw_size_best) {
bw_size_best = VP8LBitWriterNumBytes(bw);
@@ -1278,7 +1328,10 @@ static WebPEncodingError EncodeImageInternal(
}
}
VP8LBitWriterSwap(bw, &bw_best);
- err = VP8_ENC_OK;
+
+ if (!WebPReportProgress(pic, percent_start + remaining_percent, percent)) {
+ goto Error;
+ }
Error:
WebPSafeFree(tokens);
@@ -1292,7 +1345,7 @@ static WebPEncodingError EncodeImageInternal(
}
WebPSafeFree(histogram_symbols);
VP8LBitWriterWipeOut(&bw_best);
- return err;
+ return (pic->error_code == VP8_ENC_OK);
}
// -----------------------------------------------------------------------------
@@ -1305,22 +1358,23 @@ static void ApplySubtractGreen(VP8LEncoder* const enc, int width, int height,
VP8LSubtractGreenFromBlueAndRed(enc->argb_, width * height);
}
-static WebPEncodingError ApplyPredictFilter(const VP8LEncoder* const enc,
- int width, int height,
- int quality, int low_effort,
- int used_subtract_green,
- VP8LBitWriter* const bw) {
+static int ApplyPredictFilter(const VP8LEncoder* const enc, int width,
+ int height, int quality, int low_effort,
+ int used_subtract_green, VP8LBitWriter* const bw,
+ int percent_range, int* const percent) {
const int pred_bits = enc->transform_bits_;
const int transform_width = VP8LSubSampleSize(width, pred_bits);
const int transform_height = VP8LSubSampleSize(height, pred_bits);
// we disable near-lossless quantization if palette is used.
- const int near_lossless_strength = enc->use_palette_ ? 100
- : enc->config_->near_lossless;
+ const int near_lossless_strength =
+ enc->use_palette_ ? 100 : enc->config_->near_lossless;
- VP8LResidualImage(width, height, pred_bits, low_effort, enc->argb_,
- enc->argb_scratch_, enc->transform_data_,
- near_lossless_strength, enc->config_->exact,
- used_subtract_green);
+ if (!VP8LResidualImage(
+ width, height, pred_bits, low_effort, enc->argb_, enc->argb_scratch_,
+ enc->transform_data_, near_lossless_strength, enc->config_->exact,
+ used_subtract_green, enc->pic_, percent_range / 2, percent)) {
+ return 0;
+ }
VP8LPutBits(bw, TRANSFORM_PRESENT, 1);
VP8LPutBits(bw, PREDICTOR_TRANSFORM, 2);
assert(pred_bits >= 2);
@@ -1328,19 +1382,23 @@ static WebPEncodingError ApplyPredictFilter(const VP8LEncoder* const enc,
return EncodeImageNoHuffman(
bw, enc->transform_data_, (VP8LHashChain*)&enc->hash_chain_,
(VP8LBackwardRefs*)&enc->refs_[0], transform_width, transform_height,
- quality, low_effort);
+ quality, low_effort, enc->pic_, percent_range - percent_range / 2,
+ percent);
}
-static WebPEncodingError ApplyCrossColorFilter(const VP8LEncoder* const enc,
- int width, int height,
- int quality, int low_effort,
- VP8LBitWriter* const bw) {
+static int ApplyCrossColorFilter(const VP8LEncoder* const enc, int width,
+ int height, int quality, int low_effort,
+ VP8LBitWriter* const bw, int percent_range,
+ int* const percent) {
const int ccolor_transform_bits = enc->transform_bits_;
const int transform_width = VP8LSubSampleSize(width, ccolor_transform_bits);
const int transform_height = VP8LSubSampleSize(height, ccolor_transform_bits);
- VP8LColorSpaceTransform(width, height, ccolor_transform_bits, quality,
- enc->argb_, enc->transform_data_);
+ if (!VP8LColorSpaceTransform(width, height, ccolor_transform_bits, quality,
+ enc->argb_, enc->transform_data_, enc->pic_,
+ percent_range / 2, percent)) {
+ return 0;
+ }
VP8LPutBits(bw, TRANSFORM_PRESENT, 1);
VP8LPutBits(bw, CROSS_COLOR_TRANSFORM, 2);
assert(ccolor_transform_bits >= 2);
@@ -1348,23 +1406,21 @@ static WebPEncodingError ApplyCrossColorFilter(const VP8LEncoder* const enc,
return EncodeImageNoHuffman(
bw, enc->transform_data_, (VP8LHashChain*)&enc->hash_chain_,
(VP8LBackwardRefs*)&enc->refs_[0], transform_width, transform_height,
- quality, low_effort);
+ quality, low_effort, enc->pic_, percent_range - percent_range / 2,
+ percent);
}
// -----------------------------------------------------------------------------
-static WebPEncodingError WriteRiffHeader(const WebPPicture* const pic,
- size_t riff_size, size_t vp8l_size) {
+static int WriteRiffHeader(const WebPPicture* const pic, size_t riff_size,
+ size_t vp8l_size) {
uint8_t riff[RIFF_HEADER_SIZE + CHUNK_HEADER_SIZE + VP8L_SIGNATURE_SIZE] = {
'R', 'I', 'F', 'F', 0, 0, 0, 0, 'W', 'E', 'B', 'P',
'V', 'P', '8', 'L', 0, 0, 0, 0, VP8L_MAGIC_BYTE,
};
PutLE32(riff + TAG_SIZE, (uint32_t)riff_size);
PutLE32(riff + RIFF_HEADER_SIZE + TAG_SIZE, (uint32_t)vp8l_size);
- if (!pic->writer(riff, sizeof(riff), pic)) {
- return VP8_ENC_ERROR_BAD_WRITE;
- }
- return VP8_ENC_OK;
+ return pic->writer(riff, sizeof(riff), pic);
}
static int WriteImageSize(const WebPPicture* const pic,
@@ -1384,36 +1440,29 @@ static int WriteRealAlphaAndVersion(VP8LBitWriter* const bw, int has_alpha) {
return !bw->error_;
}
-static WebPEncodingError WriteImage(const WebPPicture* const pic,
- VP8LBitWriter* const bw,
- size_t* const coded_size) {
- WebPEncodingError err = VP8_ENC_OK;
+static int WriteImage(const WebPPicture* const pic, VP8LBitWriter* const bw,
+ size_t* const coded_size) {
const uint8_t* const webpll_data = VP8LBitWriterFinish(bw);
const size_t webpll_size = VP8LBitWriterNumBytes(bw);
const size_t vp8l_size = VP8L_SIGNATURE_SIZE + webpll_size;
const size_t pad = vp8l_size & 1;
const size_t riff_size = TAG_SIZE + CHUNK_HEADER_SIZE + vp8l_size + pad;
- err = WriteRiffHeader(pic, riff_size, vp8l_size);
- if (err != VP8_ENC_OK) goto Error;
-
- if (!pic->writer(webpll_data, webpll_size, pic)) {
- err = VP8_ENC_ERROR_BAD_WRITE;
- goto Error;
+ if (!WriteRiffHeader(pic, riff_size, vp8l_size) ||
+ !pic->writer(webpll_data, webpll_size, pic)) {
+ WebPEncodingSetError(pic, VP8_ENC_ERROR_BAD_WRITE);
+ return 0;
}
if (pad) {
const uint8_t pad_byte[1] = { 0 };
if (!pic->writer(pad_byte, 1, pic)) {
- err = VP8_ENC_ERROR_BAD_WRITE;
- goto Error;
+ WebPEncodingSetError(pic, VP8_ENC_ERROR_BAD_WRITE);
+ return 0;
}
}
*coded_size = CHUNK_HEADER_SIZE + riff_size;
- return VP8_ENC_OK;
-
- Error:
- return err;
+ return 1;
}
// -----------------------------------------------------------------------------
@@ -1429,18 +1478,16 @@ static void ClearTransformBuffer(VP8LEncoder* const enc) {
// Flags influencing the memory allocated:
// enc->transform_bits_
// enc->use_predict_, enc->use_cross_color_
-static WebPEncodingError AllocateTransformBuffer(VP8LEncoder* const enc,
- int width, int height) {
- WebPEncodingError err = VP8_ENC_OK;
+static int AllocateTransformBuffer(VP8LEncoder* const enc, int width,
+ int height) {
const uint64_t image_size = width * height;
// VP8LResidualImage needs room for 2 scanlines of uint32 pixels with an extra
// pixel in each, plus 2 regular scanlines of bytes.
// TODO(skal): Clean up by using arithmetic in bytes instead of words.
const uint64_t argb_scratch_size =
- enc->use_predict_
- ? (width + 1) * 2 +
- (width * 2 + sizeof(uint32_t) - 1) / sizeof(uint32_t)
- : 0;
+ enc->use_predict_ ? (width + 1) * 2 + (width * 2 + sizeof(uint32_t) - 1) /
+ sizeof(uint32_t)
+ : 0;
const uint64_t transform_data_size =
(enc->use_predict_ || enc->use_cross_color_)
? VP8LSubSampleSize(width, enc->transform_bits_) *
@@ -1448,17 +1495,16 @@ static WebPEncodingError AllocateTransformBuffer(VP8LEncoder* const enc,
: 0;
const uint64_t max_alignment_in_words =
(WEBP_ALIGN_CST + sizeof(uint32_t) - 1) / sizeof(uint32_t);
- const uint64_t mem_size =
- image_size + max_alignment_in_words +
- argb_scratch_size + max_alignment_in_words +
- transform_data_size;
+ const uint64_t mem_size = image_size + max_alignment_in_words +
+ argb_scratch_size + max_alignment_in_words +
+ transform_data_size;
uint32_t* mem = enc->transform_mem_;
if (mem == NULL || mem_size > enc->transform_mem_size_) {
ClearTransformBuffer(enc);
mem = (uint32_t*)WebPSafeMalloc(mem_size, sizeof(*mem));
if (mem == NULL) {
- err = VP8_ENC_ERROR_OUT_OF_MEMORY;
- goto Error;
+ WebPEncodingSetError(enc->pic_, VP8_ENC_ERROR_OUT_OF_MEMORY);
+ return 0;
}
enc->transform_mem_ = mem;
enc->transform_mem_size_ = (size_t)mem_size;
@@ -1471,19 +1517,16 @@ static WebPEncodingError AllocateTransformBuffer(VP8LEncoder* const enc,
enc->transform_data_ = mem;
enc->current_width_ = width;
- Error:
- return err;
+ return 1;
}
-static WebPEncodingError MakeInputImageCopy(VP8LEncoder* const enc) {
- WebPEncodingError err = VP8_ENC_OK;
+static int MakeInputImageCopy(VP8LEncoder* const enc) {
const WebPPicture* const picture = enc->pic_;
const int width = picture->width;
const int height = picture->height;
- err = AllocateTransformBuffer(enc, width, height);
- if (err != VP8_ENC_OK) return err;
- if (enc->argb_content_ == kEncoderARGB) return VP8_ENC_OK;
+ if (!AllocateTransformBuffer(enc, width, height)) return 0;
+ if (enc->argb_content_ == kEncoderARGB) return 1;
{
uint32_t* dst = enc->argb_;
@@ -1497,7 +1540,7 @@ static WebPEncodingError MakeInputImageCopy(VP8LEncoder* const enc) {
}
enc->argb_content_ = kEncoderARGB;
assert(enc->current_width_ == width);
- return VP8_ENC_OK;
+ return 1;
}
// -----------------------------------------------------------------------------
@@ -1559,16 +1602,19 @@ static WEBP_INLINE uint32_t ApplyPaletteHash2(uint32_t color) {
// using 'row' as a temporary buffer of size 'width'.
// We assume that all src[] values have a corresponding entry in the palette.
// Note: src[] can be the same as dst[]
-static WebPEncodingError ApplyPalette(const uint32_t* src, uint32_t src_stride,
- uint32_t* dst, uint32_t dst_stride,
- const uint32_t* palette, int palette_size,
- int width, int height, int xbits) {
+static int ApplyPalette(const uint32_t* src, uint32_t src_stride, uint32_t* dst,
+ uint32_t dst_stride, const uint32_t* palette,
+ int palette_size, int width, int height, int xbits,
+ const WebPPicture* const pic) {
// TODO(skal): this tmp buffer is not needed if VP8LBundleColorMap() can be
// made to work in-place.
uint8_t* const tmp_row = (uint8_t*)WebPSafeMalloc(width, sizeof(*tmp_row));
int x, y;
- if (tmp_row == NULL) return VP8_ENC_ERROR_OUT_OF_MEMORY;
+ if (tmp_row == NULL) {
+ WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
+ return 0;
+ }
if (palette_size < APPLY_PALETTE_GREEDY_MAX) {
APPLY_PALETTE_FOR(SearchColorGreedy(palette, palette_size, pix));
@@ -1613,7 +1659,7 @@ static WebPEncodingError ApplyPalette(const uint32_t* src, uint32_t src_stride,
}
}
WebPSafeFree(tmp_row);
- return VP8_ENC_OK;
+ return 1;
}
#undef APPLY_PALETTE_FOR
#undef PALETTE_INV_SIZE_BITS
@@ -1621,9 +1667,7 @@ static WebPEncodingError ApplyPalette(const uint32_t* src, uint32_t src_stride,
#undef APPLY_PALETTE_GREEDY_MAX
// Note: Expects "enc->palette_" to be set properly.
-static WebPEncodingError MapImageFromPalette(VP8LEncoder* const enc,
- int in_place) {
- WebPEncodingError err = VP8_ENC_OK;
+static int MapImageFromPalette(VP8LEncoder* const enc, int in_place) {
const WebPPicture* const pic = enc->pic_;
const int width = pic->width;
const int height = pic->height;
@@ -1641,19 +1685,22 @@ static WebPEncodingError MapImageFromPalette(VP8LEncoder* const enc,
xbits = (palette_size <= 16) ? 1 : 0;
}
- err = AllocateTransformBuffer(enc, VP8LSubSampleSize(width, xbits), height);
- if (err != VP8_ENC_OK) return err;
-
- err = ApplyPalette(src, src_stride,
+ if (!AllocateTransformBuffer(enc, VP8LSubSampleSize(width, xbits), height)) {
+ return 0;
+ }
+ if (!ApplyPalette(src, src_stride,
enc->argb_, enc->current_width_,
- palette, palette_size, width, height, xbits);
+ palette, palette_size, width, height, xbits, pic)) {
+ return 0;
+ }
enc->argb_content_ = kEncoderPalette;
- return err;
+ return 1;
}
// Save palette_[] to bitstream.
static WebPEncodingError EncodePalette(VP8LBitWriter* const bw, int low_effort,
- VP8LEncoder* const enc) {
+ VP8LEncoder* const enc,
+ int percent_range, int* const percent) {
int i;
uint32_t tmp_palette[MAX_PALETTE_SIZE];
const int palette_size = enc->palette_size_;
@@ -1668,7 +1715,7 @@ static WebPEncodingError EncodePalette(VP8LBitWriter* const bw, int low_effort,
tmp_palette[0] = palette[0];
return EncodeImageNoHuffman(bw, tmp_palette, &enc->hash_chain_,
&enc->refs_[0], palette_size, 1, /*quality=*/20,
- low_effort);
+ low_effort, enc->pic_, percent_range, percent);
}
// -----------------------------------------------------------------------------
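Several helpers below also gain a percent_range/percent pair so that progress can be reported from inside the lossless encoder. A small sketch of that convention, again with a hypothetical step function:

// Sketch only: 'ExampleStepWithProgress' is hypothetical. 'percent_range' is
// the share of the overall progress this step may consume and '*percent' is
// the running total handed to WebPReportProgress().
static int ExampleStepWithProgress(const WebPPicture* const pic,
                                   int percent_range, int* const percent) {
  int i;
  for (i = 0; i < 4; ++i) {
    // ... do a quarter of the work here ...
    if (!WebPReportProgress(pic, *percent + percent_range / 4, percent)) {
      return 0;  // user abort requested through the progress hook
    }
  }
  return 1;
}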
@@ -1712,7 +1759,6 @@ typedef struct {
CrunchConfig crunch_configs_[CRUNCH_CONFIGS_MAX];
int num_crunch_configs_;
int red_and_blue_always_zero_;
- WebPEncodingError err_;
WebPAuxStats* stats_;
} StreamEncodeContext;
@@ -1729,7 +1775,6 @@ static int EncodeStreamHook(void* input, void* data2) {
#if !defined(WEBP_DISABLE_STATS)
WebPAuxStats* const stats = params->stats_;
#endif
- WebPEncodingError err = VP8_ENC_OK;
const int quality = (int)config->quality;
const int low_effort = (config->method == 0);
#if (WEBP_NEAR_LOSSLESS == 1)
@@ -1737,6 +1782,7 @@ static int EncodeStreamHook(void* input, void* data2) {
#endif
const int height = picture->height;
const size_t byte_position = VP8LBitWriterNumBytes(bw);
+ int percent = 2; // for WebPProgressHook
#if (WEBP_NEAR_LOSSLESS == 1)
int use_near_lossless = 0;
#endif
@@ -1750,12 +1796,13 @@ static int EncodeStreamHook(void* input, void* data2) {
if (!VP8LBitWriterInit(&bw_best, 0) ||
(num_crunch_configs > 1 && !VP8LBitWriterClone(bw, &bw_best))) {
- err = VP8_ENC_ERROR_OUT_OF_MEMORY;
+ WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
goto Error;
}
for (idx = 0; idx < num_crunch_configs; ++idx) {
const int entropy_idx = crunch_configs[idx].entropy_idx_;
+ int remaining_percent = 97 / num_crunch_configs, percent_range;
enc->use_palette_ =
(entropy_idx == kPalette) || (entropy_idx == kPaletteAndSpatial);
enc->use_subtract_green_ =
@@ -1779,11 +1826,10 @@ static int EncodeStreamHook(void* input, void* data2) {
use_near_lossless = (config->near_lossless < 100) && !enc->use_palette_ &&
!enc->use_predict_;
if (use_near_lossless) {
- err = AllocateTransformBuffer(enc, width, height);
- if (err != VP8_ENC_OK) goto Error;
+ if (!AllocateTransformBuffer(enc, width, height)) goto Error;
if ((enc->argb_content_ != kEncoderNearLossless) &&
!VP8ApplyNearLossless(picture, config->near_lossless, enc->argb_)) {
- err = VP8_ENC_ERROR_OUT_OF_MEMORY;
+ WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
goto Error;
}
enc->argb_content_ = kEncoderNearLossless;
@@ -1805,14 +1851,17 @@ static int EncodeStreamHook(void* input, void* data2) {
enc->palette_);
} else {
assert(crunch_configs[idx].palette_sorting_type_ == kModifiedZeng);
- err = PaletteSortModifiedZeng(enc->pic_, enc->palette_sorted_,
- enc->palette_size_, enc->palette_);
- if (err != VP8_ENC_OK) goto Error;
+ if (!PaletteSortModifiedZeng(enc->pic_, enc->palette_sorted_,
+ enc->palette_size_, enc->palette_)) {
+ goto Error;
+ }
}
- err = EncodePalette(bw, low_effort, enc);
- if (err != VP8_ENC_OK) goto Error;
- err = MapImageFromPalette(enc, use_delta_palette);
- if (err != VP8_ENC_OK) goto Error;
+ percent_range = remaining_percent / 4;
+ if (!EncodePalette(bw, low_effort, enc, percent_range, &percent)) {
+ goto Error;
+ }
+ remaining_percent -= percent_range;
+ if (!MapImageFromPalette(enc, use_delta_palette)) goto Error;
// If using a color cache, do not have it bigger than the number of
// colors.
if (use_cache && enc->palette_size_ < (1 << MAX_COLOR_CACHE_BITS)) {
@@ -1823,8 +1872,7 @@ static int EncodeStreamHook(void* input, void* data2) {
// In case image is not packed.
if (enc->argb_content_ != kEncoderNearLossless &&
enc->argb_content_ != kEncoderPalette) {
- err = MakeInputImageCopy(enc);
- if (err != VP8_ENC_OK) goto Error;
+ if (!MakeInputImageCopy(enc)) goto Error;
}
// -----------------------------------------------------------------------
@@ -1835,15 +1883,22 @@ static int EncodeStreamHook(void* input, void* data2) {
}
if (enc->use_predict_) {
- err = ApplyPredictFilter(enc, enc->current_width_, height, quality,
- low_effort, enc->use_subtract_green_, bw);
- if (err != VP8_ENC_OK) goto Error;
+ percent_range = remaining_percent / 3;
+ if (!ApplyPredictFilter(enc, enc->current_width_, height, quality,
+ low_effort, enc->use_subtract_green_, bw,
+ percent_range, &percent)) {
+ goto Error;
+ }
+ remaining_percent -= percent_range;
}
if (enc->use_cross_color_) {
- err = ApplyCrossColorFilter(enc, enc->current_width_, height, quality,
- low_effort, bw);
- if (err != VP8_ENC_OK) goto Error;
+ percent_range = remaining_percent / 2;
+ if (!ApplyCrossColorFilter(enc, enc->current_width_, height, quality,
+ low_effort, bw, percent_range, &percent)) {
+ goto Error;
+ }
+ remaining_percent -= percent_range;
}
}
@@ -1851,12 +1906,13 @@ static int EncodeStreamHook(void* input, void* data2) {
// -------------------------------------------------------------------------
// Encode and write the transformed image.
- err = EncodeImageInternal(bw, enc->argb_, &enc->hash_chain_, enc->refs_,
- enc->current_width_, height, quality, low_effort,
- use_cache, &crunch_configs[idx],
- &enc->cache_bits_, enc->histo_bits_,
- byte_position, &hdr_size, &data_size);
- if (err != VP8_ENC_OK) goto Error;
+ if (!EncodeImageInternal(
+ bw, enc->argb_, &enc->hash_chain_, enc->refs_, enc->current_width_,
+ height, quality, low_effort, use_cache, &crunch_configs[idx],
+ &enc->cache_bits_, enc->histo_bits_, byte_position, &hdr_size,
+ &data_size, picture, remaining_percent, &percent)) {
+ goto Error;
+ }
// If we are better than what we already have.
if (VP8LBitWriterNumBytes(bw) < best_size) {
@@ -1886,18 +1942,15 @@ static int EncodeStreamHook(void* input, void* data2) {
}
VP8LBitWriterSwap(&bw_best, bw);
-Error:
+ Error:
VP8LBitWriterWipeOut(&bw_best);
- params->err_ = err;
// The hook should return false in case of error.
- return (err == VP8_ENC_OK);
+ return (params->picture_->error_code == VP8_ENC_OK);
}
-WebPEncodingError VP8LEncodeStream(const WebPConfig* const config,
- const WebPPicture* const picture,
- VP8LBitWriter* const bw_main,
- int use_cache) {
- WebPEncodingError err = VP8_ENC_OK;
+int VP8LEncodeStream(const WebPConfig* const config,
+ const WebPPicture* const picture,
+ VP8LBitWriter* const bw_main, int use_cache) {
VP8LEncoder* const enc_main = VP8LEncoderNew(config, picture);
VP8LEncoder* enc_side = NULL;
CrunchConfig crunch_configs[CRUNCH_CONFIGS_MAX];
@@ -1909,15 +1962,24 @@ WebPEncodingError VP8LEncodeStream(const WebPConfig* const config,
// The main thread uses picture->stats, the side thread uses stats_side.
WebPAuxStats stats_side;
VP8LBitWriter bw_side;
+ WebPPicture picture_side;
const WebPWorkerInterface* const worker_interface = WebPGetWorkerInterface();
int ok_main;
+ if (enc_main == NULL || !VP8LBitWriterInit(&bw_side, 0)) {
+ WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
+ VP8LEncoderDelete(enc_main);
+ return 0;
+ }
+
+ // Avoid "garbage value" error from Clang's static analysis tool.
+ WebPPictureInit(&picture_side);
+
// Analyze image (entropy, num_palettes etc)
- if (enc_main == NULL ||
- !EncoderAnalyze(enc_main, crunch_configs, &num_crunch_configs_main,
+ if (!EncoderAnalyze(enc_main, crunch_configs, &num_crunch_configs_main,
&red_and_blue_always_zero) ||
- !EncoderInit(enc_main) || !VP8LBitWriterInit(&bw_side, 0)) {
- err = VP8_ENC_ERROR_OUT_OF_MEMORY;
+ !EncoderInit(enc_main)) {
+ WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
goto Error;
}
@@ -1946,25 +2008,32 @@ WebPEncodingError VP8LEncodeStream(const WebPConfig* const config,
StreamEncodeContext* const param =
(idx == 0) ? &params_main : &params_side;
param->config_ = config;
- param->picture_ = picture;
param->use_cache_ = use_cache;
param->red_and_blue_always_zero_ = red_and_blue_always_zero;
if (idx == 0) {
+ param->picture_ = picture;
param->stats_ = picture->stats;
param->bw_ = bw_main;
param->enc_ = enc_main;
} else {
+ // Create a side picture (error_code is not thread-safe).
+ if (!WebPPictureView(picture, /*left=*/0, /*top=*/0, picture->width,
+ picture->height, &picture_side)) {
+ assert(0);
+ }
+ picture_side.progress_hook = NULL; // Progress hook is not thread-safe.
+ param->picture_ = &picture_side; // No need to free a view afterwards.
param->stats_ = (picture->stats == NULL) ? NULL : &stats_side;
// Create a side bit writer.
if (!VP8LBitWriterClone(bw_main, &bw_side)) {
- err = VP8_ENC_ERROR_OUT_OF_MEMORY;
+ WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
goto Error;
}
param->bw_ = &bw_side;
// Create a side encoder.
- enc_side = VP8LEncoderNew(config, picture);
+ enc_side = VP8LEncoderNew(config, &picture_side);
if (enc_side == NULL || !EncoderInit(enc_side)) {
- err = VP8_ENC_ERROR_OUT_OF_MEMORY;
+ WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
goto Error;
}
// Copy the values that were computed for the main encoder.
@@ -1988,7 +2057,7 @@ WebPEncodingError VP8LEncodeStream(const WebPConfig* const config,
// Start the second thread if needed.
if (num_crunch_configs_side != 0) {
if (!worker_interface->Reset(&worker_side)) {
- err = VP8_ENC_ERROR_OUT_OF_MEMORY;
+ WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
goto Error;
}
#if !defined(WEBP_DISABLE_STATS)
@@ -1998,8 +2067,6 @@ WebPEncodingError VP8LEncodeStream(const WebPConfig* const config,
memcpy(&stats_side, picture->stats, sizeof(stats_side));
}
#endif
- // This line is only useful to remove a Clang static analyzer warning.
- params_side.err_ = VP8_ENC_OK;
worker_interface->Launch(&worker_side);
}
// Execute the main thread.
@@ -2011,7 +2078,10 @@ WebPEncodingError VP8LEncodeStream(const WebPConfig* const config,
const int ok_side = worker_interface->Sync(&worker_side);
worker_interface->End(&worker_side);
if (!ok_main || !ok_side) {
- err = ok_main ? params_side.err_ : params_main.err_;
+ if (picture->error_code == VP8_ENC_OK) {
+ assert(picture_side.error_code != VP8_ENC_OK);
+ WebPEncodingSetError(picture, picture_side.error_code);
+ }
goto Error;
}
if (VP8LBitWriterNumBytes(&bw_side) < VP8LBitWriterNumBytes(bw_main)) {
@@ -2022,18 +2092,13 @@ WebPEncodingError VP8LEncodeStream(const WebPConfig* const config,
}
#endif
}
- } else {
- if (!ok_main) {
- err = params_main.err_;
- goto Error;
- }
}
-Error:
+ Error:
VP8LBitWriterWipeOut(&bw_side);
VP8LEncoderDelete(enc_main);
VP8LEncoderDelete(enc_side);
- return err;
+ return (picture->error_code == VP8_ENC_OK);
}
#undef CRUNCH_CONFIGS_MAX
@@ -2046,14 +2111,12 @@ int VP8LEncodeImage(const WebPConfig* const config,
size_t coded_size;
int percent = 0;
int initial_size;
- WebPEncodingError err = VP8_ENC_OK;
VP8LBitWriter bw;
if (picture == NULL) return 0;
if (config == NULL || picture->argb == NULL) {
- err = VP8_ENC_ERROR_NULL_PARAMETER;
- WebPEncodingSetError(picture, err);
+ WebPEncodingSetError(picture, VP8_ENC_ERROR_NULL_PARAMETER);
return 0;
}
@@ -2064,13 +2127,13 @@ int VP8LEncodeImage(const WebPConfig* const config,
initial_size = (config->image_hint == WEBP_HINT_GRAPH) ?
width * height : width * height * 2;
if (!VP8LBitWriterInit(&bw, initial_size)) {
- err = VP8_ENC_ERROR_OUT_OF_MEMORY;
+ WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
goto Error;
}
if (!WebPReportProgress(picture, 1, &percent)) {
UserAbort:
- err = VP8_ENC_ERROR_USER_ABORT;
+ WebPEncodingSetError(picture, VP8_ENC_ERROR_USER_ABORT);
goto Error;
}
// Reset stats (for pure lossless coding)
@@ -2086,28 +2149,26 @@ int VP8LEncodeImage(const WebPConfig* const config,
// Write image size.
if (!WriteImageSize(picture, &bw)) {
- err = VP8_ENC_ERROR_OUT_OF_MEMORY;
+ WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
goto Error;
}
has_alpha = WebPPictureHasTransparency(picture);
// Write the non-trivial Alpha flag and lossless version.
if (!WriteRealAlphaAndVersion(&bw, has_alpha)) {
- err = VP8_ENC_ERROR_OUT_OF_MEMORY;
+ WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
goto Error;
}
- if (!WebPReportProgress(picture, 5, &percent)) goto UserAbort;
+ if (!WebPReportProgress(picture, 2, &percent)) goto UserAbort;
// Encode main image stream.
- err = VP8LEncodeStream(config, picture, &bw, 1 /*use_cache*/);
- if (err != VP8_ENC_OK) goto Error;
+ if (!VP8LEncodeStream(config, picture, &bw, 1 /*use_cache*/)) goto Error;
- if (!WebPReportProgress(picture, 90, &percent)) goto UserAbort;
+ if (!WebPReportProgress(picture, 99, &percent)) goto UserAbort;
// Finish the RIFF chunk.
- err = WriteImage(picture, &bw, &coded_size);
- if (err != VP8_ENC_OK) goto Error;
+ if (!WriteImage(picture, &bw, &coded_size)) goto Error;
if (!WebPReportProgress(picture, 100, &percent)) goto UserAbort;
@@ -2126,13 +2187,11 @@ int VP8LEncodeImage(const WebPConfig* const config,
}
Error:
- if (bw.error_) err = VP8_ENC_ERROR_OUT_OF_MEMORY;
- VP8LBitWriterWipeOut(&bw);
- if (err != VP8_ENC_OK) {
- WebPEncodingSetError(picture, err);
- return 0;
+ if (bw.error_) {
+ WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
}
- return 1;
+ VP8LBitWriterWipeOut(&bw);
+ return (picture->error_code == VP8_ENC_OK);
}
//------------------------------------------------------------------------------
diff --git a/thirdparty/libwebp/src/enc/vp8li_enc.h b/thirdparty/libwebp/src/enc/vp8li_enc.h
index 00de48946c..3d35e1612d 100644
--- a/thirdparty/libwebp/src/enc/vp8li_enc.h
+++ b/thirdparty/libwebp/src/enc/vp8li_enc.h
@@ -89,9 +89,10 @@ int VP8LEncodeImage(const WebPConfig* const config,
// Encodes the main image stream using the supplied bit writer.
// If 'use_cache' is false, disables the use of color cache.
-WebPEncodingError VP8LEncodeStream(const WebPConfig* const config,
- const WebPPicture* const picture,
- VP8LBitWriter* const bw, int use_cache);
+// Returns false in case of error (stored in picture->error_code).
+int VP8LEncodeStream(const WebPConfig* const config,
+ const WebPPicture* const picture, VP8LBitWriter* const bw,
+ int use_cache);
#if (WEBP_NEAR_LOSSLESS == 1)
// in near_lossless.c
@@ -103,13 +104,18 @@ int VP8ApplyNearLossless(const WebPPicture* const picture, int quality,
//------------------------------------------------------------------------------
// Image transforms in predictor.c.
-void VP8LResidualImage(int width, int height, int bits, int low_effort,
- uint32_t* const argb, uint32_t* const argb_scratch,
- uint32_t* const image, int near_lossless, int exact,
- int used_subtract_green);
-
-void VP8LColorSpaceTransform(int width, int height, int bits, int quality,
- uint32_t* const argb, uint32_t* image);
+// pic and percent are for progress.
+// Returns false in case of error (stored in pic->error_code).
+int VP8LResidualImage(int width, int height, int bits, int low_effort,
+ uint32_t* const argb, uint32_t* const argb_scratch,
+ uint32_t* const image, int near_lossless, int exact,
+ int used_subtract_green, const WebPPicture* const pic,
+ int percent_range, int* const percent);
+
+int VP8LColorSpaceTransform(int width, int height, int bits, int quality,
+ uint32_t* const argb, uint32_t* image,
+ const WebPPicture* const pic, int percent_range,
+ int* const percent);
//------------------------------------------------------------------------------
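With the declaration above, a caller of VP8LEncodeStream() checks the boolean result and, on failure, reads the reason from picture->error_code rather than from the return value. A hedged usage sketch (the surrounding setup is illustrative only):

// Sketch: caller side of the new VP8LEncodeStream() contract.
VP8LBitWriter bw;
if (!VP8LBitWriterInit(&bw, 0) ||
    !VP8LEncodeStream(config, picture, &bw, /*use_cache=*/1)) {
  // The failure reason is now stored on the picture itself.
  const WebPEncodingError reason = picture->error_code;
  (void)reason;  // e.g. VP8_ENC_ERROR_OUT_OF_MEMORY or VP8_ENC_ERROR_USER_ABORT
}
VP8LBitWriterWipeOut(&bw);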
diff --git a/thirdparty/libwebp/src/enc/webp_enc.c b/thirdparty/libwebp/src/enc/webp_enc.c
index ce2db2e94b..9620e05070 100644
--- a/thirdparty/libwebp/src/enc/webp_enc.c
+++ b/thirdparty/libwebp/src/enc/webp_enc.c
@@ -336,9 +336,7 @@ int WebPEncode(const WebPConfig* config, WebPPicture* pic) {
if (!WebPValidateConfig(config)) {
return WebPEncodingSetError(pic, VP8_ENC_ERROR_INVALID_CONFIGURATION);
}
- if (pic->width <= 0 || pic->height <= 0) {
- return WebPEncodingSetError(pic, VP8_ENC_ERROR_BAD_DIMENSION);
- }
+ if (!WebPValidatePicture(pic)) return 0;
if (pic->width > WEBP_MAX_DIMENSION || pic->height > WEBP_MAX_DIMENSION) {
return WebPEncodingSetError(pic, VP8_ENC_ERROR_BAD_DIMENSION);
}
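The last hunk delegates the basic picture checks to WebPValidatePicture(), presumably added elsewhere in this patch. A rough sketch of what such a validator could look like under the patch's error convention; the body below is an assumption for illustration, not copied from the patch:

// Assumed shape only: the real WebPValidatePicture() may perform
// additional checks beyond the degenerate-dimension case.
int WebPValidatePicture(const WebPPicture* const pic) {
  if (pic == NULL) return 0;
  if (pic->width <= 0 || pic->height <= 0) {
    return WebPEncodingSetError(pic, VP8_ENC_ERROR_BAD_DIMENSION);
  }
  return 1;
}

Note that WebPEncode() still keeps its own WEBP_MAX_DIMENSION check after the call, as the hunk above shows.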