Diffstat (limited to 'thirdparty')
-rw-r--r--  thirdparty/README.md                   |   2
-rw-r--r--  thirdparty/libwebp/dec/alpha.c         |   2
-rw-r--r--  thirdparty/libwebp/dec/vp8i.h          |   2
-rw-r--r--  thirdparty/libwebp/demux/anim_decode.c |  48
-rw-r--r--  thirdparty/libwebp/demux/demux.c       |   2
-rw-r--r--  thirdparty/libwebp/dsp/dec.c           |   2
-rw-r--r--  thirdparty/libwebp/dsp/enc.c           |   2
-rw-r--r--  thirdparty/libwebp/dsp/rescaler.c      |   4
-rw-r--r--  thirdparty/libwebp/enc/analysis.c      |   5
-rw-r--r--  thirdparty/libwebp/enc/cost.c          |  42
-rw-r--r--  thirdparty/libwebp/enc/cost.h          |  14
-rw-r--r--  thirdparty/libwebp/enc/frame.c         |  22
-rw-r--r--  thirdparty/libwebp/enc/histogram.c     |   7
-rw-r--r--  thirdparty/libwebp/enc/picture.c       |   5
-rw-r--r--  thirdparty/libwebp/enc/picture_csp.c   | 200
-rw-r--r--  thirdparty/libwebp/enc/picture_psnr.c  |   2
-rw-r--r--  thirdparty/libwebp/enc/quant.c         |  13
-rw-r--r--  thirdparty/libwebp/enc/token.c         |  54
-rw-r--r--  thirdparty/libwebp/enc/vp8enci.h       |   6
-rw-r--r--  thirdparty/libwebp/enc/vp8l.c          |  11
-rw-r--r--  thirdparty/libwebp/mux/anim_encode.c   |  40
-rw-r--r--  thirdparty/libwebp/mux/muxi.h          |   2
-rw-r--r--  thirdparty/libwebp/mux/muxinternal.c   |   4
-rw-r--r--  thirdparty/libwebp/utils/rescaler.c    |   8
-rw-r--r--  thirdparty/libwebp/utils/utils.c       |   8
-rw-r--r--  thirdparty/libwebp/utils/utils.h       |  10
-rw-r--r--  thirdparty/libwebp/webp/decode.h       |  16
-rw-r--r--  thirdparty/libwebp/webp/encode.h       |   8
28 files changed, 311 insertions, 230 deletions
diff --git a/thirdparty/README.md b/thirdparty/README.md
index 4b8e6b01ad..7ed9a11c75 100644
--- a/thirdparty/README.md
+++ b/thirdparty/README.md
@@ -125,7 +125,7 @@ Files extracted from upstream source:
## libwebp
- Upstream: https://chromium.googlesource.com/webm/libwebp/
-- Version: 0.5.1
+- Version: 0.5.2
- License: BSD-3-Clause
Files extracted from upstream source:
diff --git a/thirdparty/libwebp/dec/alpha.c b/thirdparty/libwebp/dec/alpha.c
index 028eb3d50b..d88f01d8de 100644
--- a/thirdparty/libwebp/dec/alpha.c
+++ b/thirdparty/libwebp/dec/alpha.c
@@ -67,7 +67,7 @@ static int ALPHInit(ALPHDecoder* const dec, const uint8_t* data,
}
dec->method_ = (data[0] >> 0) & 0x03;
- dec->filter_ = (data[0] >> 2) & 0x03;
+ dec->filter_ = (WEBP_FILTER_TYPE)((data[0] >> 2) & 0x03);
dec->pre_processing_ = (data[0] >> 4) & 0x03;
rsrv = (data[0] >> 6) & 0x03;
if (dec->method_ < ALPHA_NO_COMPRESSION ||
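Note: the first byte of the alpha chunk packs four 2-bit fields; the added cast above only gives the filter field its WEBP_FILTER_TYPE enum type. A standalone sketch of the unpacking (illustrative names, not library code):

    #include <stdint.h>

    /* Bits 0-1: compression method, 2-3: filter, 4-5: pre-processing,
       6-7: reserved. */
    static void unpack_alpha_header(uint8_t b, int* method, int* filter,
                                    int* pre_processing, int* reserved) {
      *method         = (b >> 0) & 0x03;
      *filter         = (b >> 2) & 0x03;
      *pre_processing = (b >> 4) & 0x03;
      *reserved       = (b >> 6) & 0x03;
    }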
diff --git a/thirdparty/libwebp/dec/vp8i.h b/thirdparty/libwebp/dec/vp8i.h
index 00da02badc..313d8a7b94 100644
--- a/thirdparty/libwebp/dec/vp8i.h
+++ b/thirdparty/libwebp/dec/vp8i.h
@@ -32,7 +32,7 @@ extern "C" {
// version numbers
#define DEC_MAJ_VERSION 0
#define DEC_MIN_VERSION 5
-#define DEC_REV_VERSION 1
+#define DEC_REV_VERSION 2
// YUV-cache parameters. Cache is 32-bytes wide (= one cacheline).
// Constraints are: We need to store one 16x16 block of luma samples (y),
diff --git a/thirdparty/libwebp/demux/anim_decode.c b/thirdparty/libwebp/demux/anim_decode.c
index 1989eb4ab4..f1cf176e72 100644
--- a/thirdparty/libwebp/demux/anim_decode.c
+++ b/thirdparty/libwebp/demux/anim_decode.c
@@ -112,18 +112,15 @@ WebPAnimDecoder* WebPAnimDecoderNewInternal(
dec->info_.bgcolor = WebPDemuxGetI(dec->demux_, WEBP_FF_BACKGROUND_COLOR);
dec->info_.frame_count = WebPDemuxGetI(dec->demux_, WEBP_FF_FRAME_COUNT);
- {
- const int canvas_bytes =
- dec->info_.canvas_width * NUM_CHANNELS * dec->info_.canvas_height;
- // Note: calloc() because we fill frame with zeroes as well.
- dec->curr_frame_ = WebPSafeCalloc(1ULL, canvas_bytes);
- if (dec->curr_frame_ == NULL) goto Error;
- dec->prev_frame_disposed_ = WebPSafeCalloc(1ULL, canvas_bytes);
- if (dec->prev_frame_disposed_ == NULL) goto Error;
- }
+ // Note: calloc() because we fill frame with zeroes as well.
+ dec->curr_frame_ = (uint8_t*)WebPSafeCalloc(
+ dec->info_.canvas_width * NUM_CHANNELS, dec->info_.canvas_height);
+ if (dec->curr_frame_ == NULL) goto Error;
+ dec->prev_frame_disposed_ = (uint8_t*)WebPSafeCalloc(
+ dec->info_.canvas_width * NUM_CHANNELS, dec->info_.canvas_height);
+ if (dec->prev_frame_disposed_ == NULL) goto Error;
WebPAnimDecoderReset(dec);
-
return dec;
Error:
@@ -144,9 +141,13 @@ static int IsFullFrame(int width, int height, int canvas_width,
}
// Clear the canvas to transparent.
-static void ZeroFillCanvas(uint8_t* buf, uint32_t canvas_width,
- uint32_t canvas_height) {
- memset(buf, 0, canvas_width * NUM_CHANNELS * canvas_height);
+static int ZeroFillCanvas(uint8_t* buf, uint32_t canvas_width,
+ uint32_t canvas_height) {
+ const uint64_t size =
+ (uint64_t)canvas_width * canvas_height * NUM_CHANNELS * sizeof(*buf);
+ if (size != (size_t)size) return 0;
+ memset(buf, 0, (size_t)size);
+ return 1;
}
// Clear given frame rectangle to transparent.
@@ -162,10 +163,13 @@ static void ZeroFillFrameRect(uint8_t* buf, int buf_stride, int x_offset,
}
// Copy width * height pixels from 'src' to 'dst'.
-static void CopyCanvas(const uint8_t* src, uint8_t* dst,
- uint32_t width, uint32_t height) {
+static int CopyCanvas(const uint8_t* src, uint8_t* dst,
+ uint32_t width, uint32_t height) {
+ const uint64_t size = (uint64_t)width * height * NUM_CHANNELS;
+ if (size != (size_t)size) return 0;
assert(src != NULL && dst != NULL);
- memcpy(dst, src, width * NUM_CHANNELS * height);
+ memcpy(dst, src, (size_t)size);
+ return 1;
}
// Returns true if the current frame is a key-frame.
@@ -328,9 +332,14 @@ int WebPAnimDecoderGetNext(WebPAnimDecoder* dec,
is_key_frame = IsKeyFrame(&iter, &dec->prev_iter_,
dec->prev_frame_was_keyframe_, width, height);
if (is_key_frame) {
- ZeroFillCanvas(dec->curr_frame_, width, height);
+ if (!ZeroFillCanvas(dec->curr_frame_, width, height)) {
+ goto Error;
+ }
} else {
- CopyCanvas(dec->prev_frame_disposed_, dec->curr_frame_, width, height);
+ if (!CopyCanvas(dec->prev_frame_disposed_, dec->curr_frame_,
+ width, height)) {
+ goto Error;
+ }
}
// Decode.
@@ -393,6 +402,7 @@ int WebPAnimDecoderGetNext(WebPAnimDecoder* dec,
// Update info of the previous frame and dispose it for the next iteration.
dec->prev_frame_timestamp_ = timestamp;
+ WebPDemuxReleaseIterator(&dec->prev_iter_);
dec->prev_iter_ = iter;
dec->prev_frame_was_keyframe_ = is_key_frame;
CopyCanvas(dec->curr_frame_, dec->prev_frame_disposed_, width, height);
@@ -421,6 +431,7 @@ int WebPAnimDecoderHasMoreFrames(const WebPAnimDecoder* dec) {
void WebPAnimDecoderReset(WebPAnimDecoder* dec) {
if (dec != NULL) {
dec->prev_frame_timestamp_ = 0;
+ WebPDemuxReleaseIterator(&dec->prev_iter_);
memset(&dec->prev_iter_, 0, sizeof(dec->prev_iter_));
dec->prev_frame_was_keyframe_ = 0;
dec->next_frame_ = 1;
@@ -434,6 +445,7 @@ const WebPDemuxer* WebPAnimDecoderGetDemuxer(const WebPAnimDecoder* dec) {
void WebPAnimDecoderDelete(WebPAnimDecoder* dec) {
if (dec != NULL) {
+ WebPDemuxReleaseIterator(&dec->prev_iter_);
WebPDemuxDelete(dec->demux_);
WebPSafeFree(dec->curr_frame_);
WebPSafeFree(dec->prev_frame_disposed_);
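Note: the anim_decode.c changes above drop the plain 'int' byte-count multiply and instead compute canvas sizes in 64 bits, failing cleanly when the result does not fit in size_t. A minimal standalone sketch of that guard (illustrative names, not the patch itself):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Returns 0 instead of touching memory when width * height * channels
       does not fit in size_t (the check used by ZeroFillCanvas/CopyCanvas). */
    static int checked_fill(uint8_t* buf, uint32_t width, uint32_t height) {
      const uint64_t size = (uint64_t)width * height * 4 /* NUM_CHANNELS */;
      if (size != (size_t)size) return 0;  /* would overflow on 32-bit targets */
      memset(buf, 0, (size_t)size);
      return 1;
    }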
diff --git a/thirdparty/libwebp/demux/demux.c b/thirdparty/libwebp/demux/demux.c
index 0d2989f6f4..1cb9bd5780 100644
--- a/thirdparty/libwebp/demux/demux.c
+++ b/thirdparty/libwebp/demux/demux.c
@@ -25,7 +25,7 @@
#define DMUX_MAJ_VERSION 0
#define DMUX_MIN_VERSION 3
-#define DMUX_REV_VERSION 0
+#define DMUX_REV_VERSION 1
typedef struct {
size_t start_; // start location of the data
diff --git a/thirdparty/libwebp/dsp/dec.c b/thirdparty/libwebp/dsp/dec.c
index e92d693362..49bd16d976 100644
--- a/thirdparty/libwebp/dsp/dec.c
+++ b/thirdparty/libwebp/dsp/dec.c
@@ -239,7 +239,7 @@ VP8PredFunc VP8PredLuma16[NUM_B_DC_MODES];
//------------------------------------------------------------------------------
// 4x4
-#define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2)
+#define AVG3(a, b, c) ((uint8_t)(((a) + 2 * (b) + (c) + 2) >> 2))
#define AVG2(a, b) (((a) + (b) + 1) >> 1)
static void VE4(uint8_t* dst) { // vertical
diff --git a/thirdparty/libwebp/dsp/enc.c b/thirdparty/libwebp/dsp/enc.c
index f639f5570c..db0e9e70ae 100644
--- a/thirdparty/libwebp/dsp/enc.c
+++ b/thirdparty/libwebp/dsp/enc.c
@@ -335,7 +335,7 @@ static void Intra16Preds(uint8_t* dst,
// luma 4x4 prediction
#define DST(x, y) dst[(x) + (y) * BPS]
-#define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2)
+#define AVG3(a, b, c) ((uint8_t)(((a) + 2 * (b) + (c) + 2) >> 2))
#define AVG2(a, b) (((a) + (b) + 1) >> 1)
static void VE4(uint8_t* dst, const uint8_t* top) { // vertical
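Note: the AVG3 change in dec.c and enc.c only adds a cast so the rounded three-tap average already has pixel type when stored. A tiny standalone check (illustrative, not from the patch):

    #include <stdint.h>
    #include <stdio.h>

    #define AVG3(a, b, c) ((uint8_t)(((a) + 2 * (b) + (c) + 2) >> 2))

    int main(void) {
      const uint8_t p = AVG3(250, 253, 255);  /* (250 + 506 + 255 + 2) >> 2 == 253 */
      printf("%u\n", p);
      return 0;
    }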
diff --git a/thirdparty/libwebp/dsp/rescaler.c b/thirdparty/libwebp/dsp/rescaler.c
index bc743d5dc5..f5b07756cf 100644
--- a/thirdparty/libwebp/dsp/rescaler.c
+++ b/thirdparty/libwebp/dsp/rescaler.c
@@ -173,10 +173,10 @@ void WebPRescalerExportRow(WebPRescaler* const wrk) {
WebPRescalerExportRowExpand(wrk);
} else if (wrk->fxy_scale) {
WebPRescalerExportRowShrink(wrk);
- } else { // very special case for src = dst = 1x1
+ } else { // special case
int i;
+ assert(wrk->src_height == wrk->dst_height && wrk->x_add == 1);
assert(wrk->src_width == 1 && wrk->dst_width <= 2);
- assert(wrk->src_height == 1 && wrk->dst_height == 1);
for (i = 0; i < wrk->num_channels * wrk->dst_width; ++i) {
wrk->dst[i] = wrk->irow[i];
wrk->irow[i] = 0;
diff --git a/thirdparty/libwebp/enc/analysis.c b/thirdparty/libwebp/enc/analysis.c
index b55128fd48..136c331289 100644
--- a/thirdparty/libwebp/enc/analysis.c
+++ b/thirdparty/libwebp/enc/analysis.c
@@ -307,6 +307,7 @@ static int MBAnalyzeBestIntra4Mode(VP8EncIterator* const it,
static int MBAnalyzeBestUVMode(VP8EncIterator* const it) {
int best_alpha = DEFAULT_ALPHA;
+ int smallest_alpha = 0;
int best_mode = 0;
const int max_mode = MAX_UV_MODE;
int mode;
@@ -322,6 +323,10 @@ static int MBAnalyzeBestUVMode(VP8EncIterator* const it) {
alpha = GetAlpha(&histo);
if (IS_BETTER_ALPHA(alpha, best_alpha)) {
best_alpha = alpha;
+ }
+ // The best prediction mode tends to be the one with the smallest alpha.
+ if (mode == 0 || alpha < smallest_alpha) {
+ smallest_alpha = alpha;
best_mode = mode;
}
}
diff --git a/thirdparty/libwebp/enc/cost.c b/thirdparty/libwebp/enc/cost.c
index ae7fe01388..87f89378a7 100644
--- a/thirdparty/libwebp/enc/cost.c
+++ b/thirdparty/libwebp/enc/cost.c
@@ -281,18 +281,6 @@ int VP8GetCostUV(VP8EncIterator* const it, const VP8ModeScore* const rd) {
//------------------------------------------------------------------------------
// Recording of token probabilities.
-// Record proba context used
-static int Record(int bit, proba_t* const stats) {
- proba_t p = *stats;
- if (p >= 0xffff0000u) { // an overflow is inbound.
- p = ((p + 1u) >> 1) & 0x7fff7fffu; // -> divide the stats by 2.
- }
- // record bit count (lower 16 bits) and increment total count (upper 16 bits).
- p += 0x00010000u + bit;
- *stats = p;
- return bit;
-}
-
// We keep the table-free variant around for reference, in case.
#define USE_LEVEL_CODE_TABLE
@@ -303,31 +291,31 @@ int VP8RecordCoeffs(int ctx, const VP8Residual* const res) {
// should be stats[VP8EncBands[n]], but it's equivalent for n=0 or 1
proba_t* s = res->stats[n][ctx];
if (res->last < 0) {
- Record(0, s + 0);
+ VP8RecordStats(0, s + 0);
return 0;
}
while (n <= res->last) {
int v;
- Record(1, s + 0); // order of record doesn't matter
+ VP8RecordStats(1, s + 0); // order of record doesn't matter
while ((v = res->coeffs[n++]) == 0) {
- Record(0, s + 1);
+ VP8RecordStats(0, s + 1);
s = res->stats[VP8EncBands[n]][0];
}
- Record(1, s + 1);
- if (!Record(2u < (unsigned int)(v + 1), s + 2)) { // v = -1 or 1
+ VP8RecordStats(1, s + 1);
+ if (!VP8RecordStats(2u < (unsigned int)(v + 1), s + 2)) { // v = -1 or 1
s = res->stats[VP8EncBands[n]][1];
} else {
v = abs(v);
#if !defined(USE_LEVEL_CODE_TABLE)
- if (!Record(v > 4, s + 3)) {
- if (Record(v != 2, s + 4))
- Record(v == 4, s + 5);
- } else if (!Record(v > 10, s + 6)) {
- Record(v > 6, s + 7);
- } else if (!Record((v >= 3 + (8 << 2)), s + 8)) {
- Record((v >= 3 + (8 << 1)), s + 9);
+ if (!VP8RecordStats(v > 4, s + 3)) {
+ if (VP8RecordStats(v != 2, s + 4))
+ VP8RecordStats(v == 4, s + 5);
+ } else if (!VP8RecordStats(v > 10, s + 6)) {
+ VP8RecordStats(v > 6, s + 7);
+ } else if (!VP8RecordStats((v >= 3 + (8 << 2)), s + 8)) {
+ VP8RecordStats((v >= 3 + (8 << 1)), s + 9);
} else {
- Record((v >= 3 + (8 << 3)), s + 10);
+ VP8RecordStats((v >= 3 + (8 << 3)), s + 10);
}
#else
if (v > MAX_VARIABLE_LEVEL) {
@@ -340,14 +328,14 @@ int VP8RecordCoeffs(int ctx, const VP8Residual* const res) {
int i;
for (i = 0; (pattern >>= 1) != 0; ++i) {
const int mask = 2 << i;
- if (pattern & 1) Record(!!(bits & mask), s + 3 + i);
+ if (pattern & 1) VP8RecordStats(!!(bits & mask), s + 3 + i);
}
}
#endif
s = res->stats[VP8EncBands[n]][2];
}
}
- if (n < 16) Record(0, s + 0);
+ if (n < 16) VP8RecordStats(0, s + 0);
return 1;
}
diff --git a/thirdparty/libwebp/enc/cost.h b/thirdparty/libwebp/enc/cost.h
index 20960d6d74..ad7959feb4 100644
--- a/thirdparty/libwebp/enc/cost.h
+++ b/thirdparty/libwebp/enc/cost.h
@@ -41,6 +41,20 @@ void VP8InitResidual(int first, int coeff_type,
int VP8RecordCoeffs(int ctx, const VP8Residual* const res);
+// Record proba context used.
+static WEBP_INLINE int VP8RecordStats(int bit, proba_t* const stats) {
+ proba_t p = *stats;
+ // An overflow is inbound. Note we handle this at 0xfffe0000u instead of
+ // 0xffff0000u to make sure p + 1u does not overflow.
+ if (p >= 0xfffe0000u) {
+ p = ((p + 1u) >> 1) & 0x7fff7fffu; // -> divide the stats by 2.
+ }
+ // record bit count (lower 16 bits) and increment total count (upper 16 bits).
+ p += 0x00010000u + bit;
+ *stats = p;
+ return bit;
+}
+
// Cost of coding one event with probability 'proba'.
static WEBP_INLINE int VP8BitCost(int bit, uint8_t proba) {
return !bit ? VP8EntropyCost[proba] : VP8EntropyCost[255 - proba];
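Note: a proba_t as used by VP8RecordStats() packs two 16-bit counters into one 32-bit word: the upper half is the total number of recorded bits, the lower half the number of '1' bits; halving both halves once p reaches 0xfffe0000u keeps the word from wrapping when 0x00010000u + bit is added. A sketch of reading the halves back (illustrative only; the typedef is assumed here):

    #include <stdint.h>

    typedef uint32_t proba_t;  /* assumed for this sketch */

    static int recorded_ones(proba_t p)  { return (int)(p & 0xffff); }  /* lower 16 */
    static int recorded_total(proba_t p) { return (int)(p >> 16); }     /* upper 16 */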
diff --git a/thirdparty/libwebp/enc/frame.c b/thirdparty/libwebp/enc/frame.c
index 5b7a40b9ad..57fc471d17 100644
--- a/thirdparty/libwebp/enc/frame.c
+++ b/thirdparty/libwebp/enc/frame.c
@@ -185,6 +185,13 @@ static int GetProba(int a, int b) {
: (255 * a + total / 2) / total; // rounded proba
}
+static void ResetSegments(VP8Encoder* const enc) {
+ int n;
+ for (n = 0; n < enc->mb_w_ * enc->mb_h_; ++n) {
+ enc->mb_info_[n].segment_ = 0;
+ }
+}
+
static void SetSegmentProbas(VP8Encoder* const enc) {
int p[NUM_MB_SEGMENTS] = { 0 };
int n;
@@ -206,6 +213,7 @@ static void SetSegmentProbas(VP8Encoder* const enc) {
enc->segment_hdr_.update_map_ =
(probas[0] != 255) || (probas[1] != 255) || (probas[2] != 255);
+ if (!enc->segment_hdr_.update_map_) ResetSegments(enc);
enc->segment_hdr_.size_ =
p[0] * (VP8BitCost(0, probas[0]) + VP8BitCost(0, probas[1])) +
p[1] * (VP8BitCost(0, probas[0]) + VP8BitCost(1, probas[1])) +
@@ -406,9 +414,7 @@ static int RecordTokens(VP8EncIterator* const it, const VP8ModeScore* const rd,
VP8InitResidual(0, 1, enc, &res);
VP8SetResidualCoeffs(rd->y_dc_levels, &res);
it->top_nz_[8] = it->left_nz_[8] =
- VP8RecordCoeffTokens(ctx, 1,
- res.first, res.last, res.coeffs, tokens);
- VP8RecordCoeffs(ctx, &res);
+ VP8RecordCoeffTokens(ctx, &res, tokens);
VP8InitResidual(1, 0, enc, &res);
} else {
VP8InitResidual(0, 3, enc, &res);
@@ -420,9 +426,7 @@ static int RecordTokens(VP8EncIterator* const it, const VP8ModeScore* const rd,
const int ctx = it->top_nz_[x] + it->left_nz_[y];
VP8SetResidualCoeffs(rd->y_ac_levels[x + y * 4], &res);
it->top_nz_[x] = it->left_nz_[y] =
- VP8RecordCoeffTokens(ctx, res.coeff_type,
- res.first, res.last, res.coeffs, tokens);
- VP8RecordCoeffs(ctx, &res);
+ VP8RecordCoeffTokens(ctx, &res, tokens);
}
}
@@ -434,9 +438,7 @@ static int RecordTokens(VP8EncIterator* const it, const VP8ModeScore* const rd,
const int ctx = it->top_nz_[4 + ch + x] + it->left_nz_[4 + ch + y];
VP8SetResidualCoeffs(rd->uv_levels[ch * 2 + x + y * 2], &res);
it->top_nz_[4 + ch + x] = it->left_nz_[4 + ch + y] =
- VP8RecordCoeffTokens(ctx, 2,
- res.first, res.last, res.coeffs, tokens);
- VP8RecordCoeffs(ctx, &res);
+ VP8RecordCoeffTokens(ctx, &res, tokens);
}
}
}
@@ -814,7 +816,7 @@ int VP8EncTokenLoop(VP8Encoder* const enc) {
num_pass_left, stats.last_value, stats.value,
stats.last_q, stats.q, stats.dq);
#endif
- if (size_p0 > PARTITION0_SIZE_LIMIT) {
+ if (enc->max_i4_header_bits_ > 0 && size_p0 > PARTITION0_SIZE_LIMIT) {
++num_pass_left;
enc->max_i4_header_bits_ >>= 1; // strengthen header bit limitation...
continue; // ...and start over
diff --git a/thirdparty/libwebp/enc/histogram.c b/thirdparty/libwebp/enc/histogram.c
index 395372b245..36b7f22625 100644
--- a/thirdparty/libwebp/enc/histogram.c
+++ b/thirdparty/libwebp/enc/histogram.c
@@ -592,8 +592,8 @@ static int HistoQueueInit(HistoQueue* const histo_queue, const int max_index) {
histo_queue->max_size = max_index * max_index;
// We allocate max_size + 1 because the last element at index "size" is
// used as temporary data (and it could be up to max_size).
- histo_queue->queue = WebPSafeMalloc(histo_queue->max_size + 1,
- sizeof(*histo_queue->queue));
+ histo_queue->queue = (HistogramPair*)WebPSafeMalloc(
+ histo_queue->max_size + 1, sizeof(*histo_queue->queue));
return histo_queue->queue != NULL;
}
@@ -659,7 +659,8 @@ static int HistogramCombineGreedy(VP8LHistogramSet* const image_histo) {
int i, j;
VP8LHistogram** const histograms = image_histo->histograms;
// Indexes of remaining histograms.
- int* const clusters = WebPSafeMalloc(image_histo_size, sizeof(*clusters));
+ int* const clusters =
+ (int*)WebPSafeMalloc(image_histo_size, sizeof(*clusters));
// Priority queue of histogram pairs.
HistoQueue histo_queue;
diff --git a/thirdparty/libwebp/enc/picture.c b/thirdparty/libwebp/enc/picture.c
index d9befbc47d..28c56cd6e5 100644
--- a/thirdparty/libwebp/enc/picture.c
+++ b/thirdparty/libwebp/enc/picture.c
@@ -88,8 +88,9 @@ int WebPPictureAllocARGB(WebPPicture* const picture, int width, int height) {
}
int WebPPictureAllocYUVA(WebPPicture* const picture, int width, int height) {
- const WebPEncCSP uv_csp = picture->colorspace & WEBP_CSP_UV_MASK;
- const int has_alpha = picture->colorspace & WEBP_CSP_ALPHA_BIT;
+ const WebPEncCSP uv_csp =
+ (WebPEncCSP)((int)picture->colorspace & WEBP_CSP_UV_MASK);
+ const int has_alpha = (int)picture->colorspace & WEBP_CSP_ALPHA_BIT;
const int y_stride = width;
const int uv_width = (width + 1) >> 1;
const int uv_height = (height + 1) >> 1;
diff --git a/thirdparty/libwebp/enc/picture_csp.c b/thirdparty/libwebp/enc/picture_csp.c
index 607a6240b0..188a3ca55b 100644
--- a/thirdparty/libwebp/enc/picture_csp.c
+++ b/thirdparty/libwebp/enc/picture_csp.c
@@ -381,36 +381,42 @@ static WEBP_INLINE uint8_t ConvertRGBToV(int r, int g, int b) {
return clip_8b(128 + (v >> (YUV_FIX + SFIX)));
}
-static int ConvertWRGBToYUV(const fixed_y_t* const best_y,
- const fixed_t* const best_uv,
+static int ConvertWRGBToYUV(const fixed_y_t* best_y, const fixed_t* best_uv,
WebPPicture* const picture) {
int i, j;
+ uint8_t* dst_y = picture->y;
+ uint8_t* dst_u = picture->u;
+ uint8_t* dst_v = picture->v;
+ const fixed_t* const best_uv_base = best_uv;
const int w = (picture->width + 1) & ~1;
const int h = (picture->height + 1) & ~1;
const int uv_w = w >> 1;
const int uv_h = h >> 1;
- for (j = 0; j < picture->height; ++j) {
+ for (best_uv = best_uv_base, j = 0; j < picture->height; ++j) {
for (i = 0; i < picture->width; ++i) {
- const int off = 3 * ((i >> 1) + (j >> 1) * uv_w);
- const int off2 = i + j * picture->y_stride;
- const int W = best_y[i + j * w];
+ const int off = 3 * (i >> 1);
+ const int W = best_y[i];
const int r = best_uv[off + 0] + W;
const int g = best_uv[off + 1] + W;
const int b = best_uv[off + 2] + W;
- picture->y[off2] = ConvertRGBToY(r, g, b);
+ dst_y[i] = ConvertRGBToY(r, g, b);
}
+ best_y += w;
+ best_uv += (j & 1) * 3 * uv_w;
+ dst_y += picture->y_stride;
}
- for (j = 0; j < uv_h; ++j) {
- uint8_t* const dst_u = picture->u + j * picture->uv_stride;
- uint8_t* const dst_v = picture->v + j * picture->uv_stride;
+ for (best_uv = best_uv_base, j = 0; j < uv_h; ++j) {
for (i = 0; i < uv_w; ++i) {
- const int off = 3 * (i + j * uv_w);
+ const int off = 3 * i;
const int r = best_uv[off + 0];
const int g = best_uv[off + 1];
const int b = best_uv[off + 2];
dst_u[i] = ConvertRGBToU(r, g, b);
dst_v[i] = ConvertRGBToV(r, g, b);
}
+ best_uv += 3 * uv_w;
+ dst_u += picture->uv_stride;
+ dst_v += picture->uv_stride;
}
return 1;
}
@@ -420,9 +426,9 @@ static int ConvertWRGBToYUV(const fixed_y_t* const best_y,
#define SAFE_ALLOC(W, H, T) ((T*)WebPSafeMalloc((W) * (H), sizeof(T)))
-static int PreprocessARGB(const uint8_t* const r_ptr,
- const uint8_t* const g_ptr,
- const uint8_t* const b_ptr,
+static int PreprocessARGB(const uint8_t* r_ptr,
+ const uint8_t* g_ptr,
+ const uint8_t* b_ptr,
int step, int rgb_stride,
WebPPicture* const picture) {
// we expand the right/bottom border if needed
@@ -435,20 +441,24 @@ static int PreprocessARGB(const uint8_t* const r_ptr,
// TODO(skal): allocate one big memory chunk. But for now, it's easier
// for valgrind debugging to have several chunks.
fixed_y_t* const tmp_buffer = SAFE_ALLOC(w * 3, 2, fixed_y_t); // scratch
- fixed_y_t* const best_y = SAFE_ALLOC(w, h, fixed_y_t);
- fixed_y_t* const target_y = SAFE_ALLOC(w, h, fixed_y_t);
+ fixed_y_t* const best_y_base = SAFE_ALLOC(w, h, fixed_y_t);
+ fixed_y_t* const target_y_base = SAFE_ALLOC(w, h, fixed_y_t);
fixed_y_t* const best_rgb_y = SAFE_ALLOC(w, 2, fixed_y_t);
- fixed_t* const best_uv = SAFE_ALLOC(uv_w * 3, uv_h, fixed_t);
- fixed_t* const target_uv = SAFE_ALLOC(uv_w * 3, uv_h, fixed_t);
+ fixed_t* const best_uv_base = SAFE_ALLOC(uv_w * 3, uv_h, fixed_t);
+ fixed_t* const target_uv_base = SAFE_ALLOC(uv_w * 3, uv_h, fixed_t);
fixed_t* const best_rgb_uv = SAFE_ALLOC(uv_w * 3, 1, fixed_t);
+ fixed_y_t* best_y = best_y_base;
+ fixed_y_t* target_y = target_y_base;
+ fixed_t* best_uv = best_uv_base;
+ fixed_t* target_uv = target_uv_base;
int ok;
int diff_sum = 0;
const int first_diff_threshold = (int)(2.5 * w * h);
const int min_improvement = 5; // stop if improvement is below this %
const int min_first_improvement = 80;
- if (best_y == NULL || best_uv == NULL ||
- target_y == NULL || target_uv == NULL ||
+ if (best_y_base == NULL || best_uv_base == NULL ||
+ target_y_base == NULL || target_uv_base == NULL ||
best_rgb_y == NULL || best_rgb_uv == NULL ||
tmp_buffer == NULL) {
ok = WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
@@ -462,41 +472,47 @@ static int PreprocessARGB(const uint8_t* const r_ptr,
const int is_last_row = (j == picture->height - 1);
fixed_y_t* const src1 = tmp_buffer;
fixed_y_t* const src2 = tmp_buffer + 3 * w;
- const int off1 = j * rgb_stride;
- const int off2 = off1 + rgb_stride;
- const int uv_off = (j >> 1) * 3 * uv_w;
- fixed_y_t* const dst_y = best_y + j * w;
// prepare two rows of input
- ImportOneRow(r_ptr + off1, g_ptr + off1, b_ptr + off1,
- step, picture->width, src1);
+ ImportOneRow(r_ptr, g_ptr, b_ptr, step, picture->width, src1);
if (!is_last_row) {
- ImportOneRow(r_ptr + off2, g_ptr + off2, b_ptr + off2,
+ ImportOneRow(r_ptr + rgb_stride, g_ptr + rgb_stride, b_ptr + rgb_stride,
step, picture->width, src2);
} else {
memcpy(src2, src1, 3 * w * sizeof(*src2));
}
- UpdateW(src1, target_y + (j + 0) * w, w);
- UpdateW(src2, target_y + (j + 1) * w, w);
- diff_sum += UpdateChroma(src1, src2, target_uv + uv_off, dst_y, uv_w);
- memcpy(best_uv + uv_off, target_uv + uv_off, 3 * uv_w * sizeof(*best_uv));
- memcpy(dst_y + w, dst_y, w * sizeof(*dst_y));
+ UpdateW(src1, target_y, w);
+ UpdateW(src2, target_y + w, w);
+ diff_sum += UpdateChroma(src1, src2, target_uv, best_y, uv_w);
+ memcpy(best_uv, target_uv, 3 * uv_w * sizeof(*best_uv));
+ memcpy(best_y + w, best_y, w * sizeof(*best_y));
+ best_y += 2 * w;
+ best_uv += 3 * uv_w;
+ target_y += 2 * w;
+ target_uv += 3 * uv_w;
+ r_ptr += 2 * rgb_stride;
+ g_ptr += 2 * rgb_stride;
+ b_ptr += 2 * rgb_stride;
}
// Iterate and resolve clipping conflicts.
for (iter = 0; iter < kNumIterations; ++iter) {
int k;
- const fixed_t* cur_uv = best_uv;
- const fixed_t* prev_uv = best_uv;
+ const fixed_t* cur_uv = best_uv_base;
+ const fixed_t* prev_uv = best_uv_base;
const int old_diff_sum = diff_sum;
diff_sum = 0;
+
+ best_y = best_y_base;
+ best_uv = best_uv_base;
+ target_y = target_y_base;
+ target_uv = target_uv_base;
for (j = 0; j < h; j += 2) {
fixed_y_t* const src1 = tmp_buffer;
fixed_y_t* const src2 = tmp_buffer + 3 * w;
{
const fixed_t* const next_uv = cur_uv + ((j < h - 2) ? 3 * uv_w : 0);
- InterpolateTwoRows(best_y + j * w, prev_uv, cur_uv, next_uv,
- w, src1, src2);
+ InterpolateTwoRows(best_y, prev_uv, cur_uv, next_uv, w, src1, src2);
prev_uv = cur_uv;
cur_uv = next_uv;
}
@@ -507,16 +523,15 @@ static int PreprocessARGB(const uint8_t* const r_ptr,
// update two rows of Y and one row of RGB
for (i = 0; i < 2 * w; ++i) {
- const int off = i + j * w;
- const int diff_y = target_y[off] - best_rgb_y[i];
- const int new_y = (int)best_y[off] + diff_y;
- best_y[off] = clip_y(new_y);
+ const int diff_y = target_y[i] - best_rgb_y[i];
+ const int new_y = (int)best_y[i] + diff_y;
+ best_y[i] = clip_y(new_y);
}
for (i = 0; i < uv_w; ++i) {
- const int off = 3 * (i + (j >> 1) * uv_w);
+ const int off = 3 * i;
int W;
for (k = 0; k <= 2; ++k) {
- const int diff_uv = (int)target_uv[off + k] - best_rgb_uv[3 * i + k];
+ const int diff_uv = (int)target_uv[off + k] - best_rgb_uv[off + k];
best_uv[off + k] += diff_uv;
}
W = RGBToGray(best_uv[off + 0], best_uv[off + 1], best_uv[off + 2]);
@@ -524,6 +539,10 @@ static int PreprocessARGB(const uint8_t* const r_ptr,
best_uv[off + k] -= W;
}
}
+ best_y += 2 * w;
+ best_uv += 3 * uv_w;
+ target_y += 2 * w;
+ target_uv += 3 * uv_w;
}
// test exit condition
if (diff_sum > 0) {
@@ -545,13 +564,13 @@ static int PreprocessARGB(const uint8_t* const r_ptr,
}
// final reconstruction
- ok = ConvertWRGBToYUV(best_y, best_uv, picture);
+ ok = ConvertWRGBToYUV(best_y_base, best_uv_base, picture);
End:
- WebPSafeFree(best_y);
- WebPSafeFree(best_uv);
- WebPSafeFree(target_y);
- WebPSafeFree(target_uv);
+ WebPSafeFree(best_y_base);
+ WebPSafeFree(best_uv_base);
+ WebPSafeFree(target_y_base);
+ WebPSafeFree(target_uv_base);
WebPSafeFree(best_rgb_y);
WebPSafeFree(best_rgb_uv);
WebPSafeFree(tmp_buffer);
@@ -830,10 +849,10 @@ static WEBP_INLINE void ConvertRowsToUV(const uint16_t* rgb,
}
}
-static int ImportYUVAFromRGBA(const uint8_t* const r_ptr,
- const uint8_t* const g_ptr,
- const uint8_t* const b_ptr,
- const uint8_t* const a_ptr,
+static int ImportYUVAFromRGBA(const uint8_t* r_ptr,
+ const uint8_t* g_ptr,
+ const uint8_t* b_ptr,
+ const uint8_t* a_ptr,
int step, // bytes per pixel
int rgb_stride, // bytes per scanline
float dithering,
@@ -900,36 +919,34 @@ static int ImportYUVAFromRGBA(const uint8_t* const r_ptr,
// Downsample Y/U/V planes, two rows at a time
for (y = 0; y < (height >> 1); ++y) {
int rows_have_alpha = has_alpha;
- const int off1 = (2 * y + 0) * rgb_stride;
- const int off2 = (2 * y + 1) * rgb_stride;
if (use_dsp) {
if (is_rgb) {
- WebPConvertRGB24ToY(r_ptr + off1, dst_y, width);
- WebPConvertRGB24ToY(r_ptr + off2, dst_y + picture->y_stride, width);
+ WebPConvertRGB24ToY(r_ptr, dst_y, width);
+ WebPConvertRGB24ToY(r_ptr + rgb_stride,
+ dst_y + picture->y_stride, width);
} else {
- WebPConvertBGR24ToY(b_ptr + off1, dst_y, width);
- WebPConvertBGR24ToY(b_ptr + off2, dst_y + picture->y_stride, width);
+ WebPConvertBGR24ToY(b_ptr, dst_y, width);
+ WebPConvertBGR24ToY(b_ptr + rgb_stride,
+ dst_y + picture->y_stride, width);
}
} else {
- ConvertRowToY(r_ptr + off1, g_ptr + off1, b_ptr + off1, step,
- dst_y, width, rg);
- ConvertRowToY(r_ptr + off2, g_ptr + off2, b_ptr + off2, step,
+ ConvertRowToY(r_ptr, g_ptr, b_ptr, step, dst_y, width, rg);
+ ConvertRowToY(r_ptr + rgb_stride,
+ g_ptr + rgb_stride,
+ b_ptr + rgb_stride, step,
dst_y + picture->y_stride, width, rg);
}
dst_y += 2 * picture->y_stride;
if (has_alpha) {
- rows_have_alpha &= !WebPExtractAlpha(a_ptr + off1, rgb_stride,
- width, 2,
+ rows_have_alpha &= !WebPExtractAlpha(a_ptr, rgb_stride, width, 2,
dst_a, picture->a_stride);
dst_a += 2 * picture->a_stride;
}
// Collect averaged R/G/B(/A)
if (!rows_have_alpha) {
- AccumulateRGB(r_ptr + off1, g_ptr + off1, b_ptr + off1,
- step, rgb_stride, tmp_rgb, width);
+ AccumulateRGB(r_ptr, g_ptr, b_ptr, step, rgb_stride, tmp_rgb, width);
} else {
- AccumulateRGBA(r_ptr + off1, g_ptr + off1, b_ptr + off1, a_ptr + off1,
- rgb_stride, tmp_rgb, width);
+ AccumulateRGBA(r_ptr, g_ptr, b_ptr, a_ptr, rgb_stride, tmp_rgb, width);
}
// Convert to U/V
if (rg == NULL) {
@@ -939,31 +956,33 @@ static int ImportYUVAFromRGBA(const uint8_t* const r_ptr,
}
dst_u += picture->uv_stride;
dst_v += picture->uv_stride;
+ r_ptr += 2 * rgb_stride;
+ b_ptr += 2 * rgb_stride;
+ g_ptr += 2 * rgb_stride;
+ if (has_alpha) a_ptr += 2 * rgb_stride;
}
if (height & 1) { // extra last row
- const int off = 2 * y * rgb_stride;
int row_has_alpha = has_alpha;
if (use_dsp) {
if (r_ptr < b_ptr) {
- WebPConvertRGB24ToY(r_ptr + off, dst_y, width);
+ WebPConvertRGB24ToY(r_ptr, dst_y, width);
} else {
- WebPConvertBGR24ToY(b_ptr + off, dst_y, width);
+ WebPConvertBGR24ToY(b_ptr, dst_y, width);
}
} else {
- ConvertRowToY(r_ptr + off, g_ptr + off, b_ptr + off, step,
- dst_y, width, rg);
+ ConvertRowToY(r_ptr, g_ptr, b_ptr, step, dst_y, width, rg);
}
if (row_has_alpha) {
- row_has_alpha &= !WebPExtractAlpha(a_ptr + off, 0, width, 1, dst_a, 0);
+ row_has_alpha &= !WebPExtractAlpha(a_ptr, 0, width, 1, dst_a, 0);
}
// Collect averaged R/G/B(/A)
if (!row_has_alpha) {
// Collect averaged R/G/B
- AccumulateRGB(r_ptr + off, g_ptr + off, b_ptr + off,
- step, /* rgb_stride = */ 0, tmp_rgb, width);
+ AccumulateRGB(r_ptr, g_ptr, b_ptr, step, /* rgb_stride = */ 0,
+ tmp_rgb, width);
} else {
- AccumulateRGBA(r_ptr + off, g_ptr + off, b_ptr + off, a_ptr + off,
- /* rgb_stride = */ 0, tmp_rgb, width);
+ AccumulateRGBA(r_ptr, g_ptr, b_ptr, a_ptr, /* rgb_stride = */ 0,
+ tmp_rgb, width);
}
if (rg == NULL) {
WebPConvertRGBA32ToUV(tmp_rgb, dst_u, dst_v, uv_width);
@@ -1086,10 +1105,10 @@ static int Import(WebPPicture* const picture,
const uint8_t* const rgb, int rgb_stride,
int step, int swap_rb, int import_alpha) {
int y;
- const uint8_t* const r_ptr = rgb + (swap_rb ? 2 : 0);
- const uint8_t* const g_ptr = rgb + 1;
- const uint8_t* const b_ptr = rgb + (swap_rb ? 0 : 2);
- const uint8_t* const a_ptr = import_alpha ? rgb + 3 : NULL;
+ const uint8_t* r_ptr = rgb + (swap_rb ? 2 : 0);
+ const uint8_t* g_ptr = rgb + 1;
+ const uint8_t* b_ptr = rgb + (swap_rb ? 0 : 2);
+ const uint8_t* a_ptr = import_alpha ? rgb + 3 : NULL;
const int width = picture->width;
const int height = picture->height;
@@ -1102,20 +1121,25 @@ static int Import(WebPPicture* const picture,
VP8EncDspARGBInit();
if (import_alpha) {
+ uint32_t* dst = picture->argb;
assert(step == 4);
for (y = 0; y < height; ++y) {
- uint32_t* const dst = &picture->argb[y * picture->argb_stride];
- const int offset = y * rgb_stride;
- VP8PackARGB(a_ptr + offset, r_ptr + offset, g_ptr + offset,
- b_ptr + offset, width, dst);
+ VP8PackARGB(a_ptr, r_ptr, g_ptr, b_ptr, width, dst);
+ a_ptr += rgb_stride;
+ r_ptr += rgb_stride;
+ g_ptr += rgb_stride;
+ b_ptr += rgb_stride;
+ dst += picture->argb_stride;
}
} else {
+ uint32_t* dst = picture->argb;
assert(step >= 3);
for (y = 0; y < height; ++y) {
- uint32_t* const dst = &picture->argb[y * picture->argb_stride];
- const int offset = y * rgb_stride;
- VP8PackRGB(r_ptr + offset, g_ptr + offset, b_ptr + offset,
- width, step, dst);
+ VP8PackRGB(r_ptr, g_ptr, b_ptr, width, step, dst);
+ r_ptr += rgb_stride;
+ g_ptr += rgb_stride;
+ b_ptr += rgb_stride;
+ dst += picture->argb_stride;
}
}
return 1;
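Note: the picture_csp.c rewrite above replaces per-pixel offset arithmetic of the form buf[i + j * stride] with pointers that advance by one stride per row, keeping the indexing local to the current row. A minimal sketch of that pattern under illustrative names:

    #include <stdint.h>

    static void fill_plane(uint8_t* dst, int width, int height, int stride) {
      int i, j;
      for (j = 0; j < height; ++j) {
        for (i = 0; i < width; ++i) dst[i] = (uint8_t)i;  /* row-local index only */
        dst += stride;  /* advance to the next row instead of computing j * stride */
      }
    }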
diff --git a/thirdparty/libwebp/enc/picture_psnr.c b/thirdparty/libwebp/enc/picture_psnr.c
index 81ab1b5ca1..329757deb1 100644
--- a/thirdparty/libwebp/enc/picture_psnr.c
+++ b/thirdparty/libwebp/enc/picture_psnr.c
@@ -110,7 +110,7 @@ int WebPPictureDistortion(const WebPPicture* src, const WebPPicture* ref,
VP8SSIMAccumulatePlane(tmp1, w, tmp2, w, w, h, &stats[c]);
}
}
- free(tmp_plane);
+ WebPSafeFree(tmp_plane);
}
} else {
int has_alpha, uv_w, uv_h;
diff --git a/thirdparty/libwebp/enc/quant.c b/thirdparty/libwebp/enc/quant.c
index 549ad26f93..07ffaf0aeb 100644
--- a/thirdparty/libwebp/enc/quant.c
+++ b/thirdparty/libwebp/enc/quant.c
@@ -278,7 +278,7 @@ static void SetupMatrices(VP8Encoder* enc) {
CheckLambdaValue(&m->lambda_trellis_uv_);
CheckLambdaValue(&m->tlambda_);
- m->min_disto_ = 10 * m->y1_.q_[0]; // quantization-aware min disto
+ m->min_disto_ = 20 * m->y1_.q_[0]; // quantization-aware min disto
m->max_edge_ = 0;
m->i4_penalty_ = 1000 * q_i4 * q_i4;
@@ -874,9 +874,9 @@ static void StoreMaxDelta(VP8SegmentInfo* const dqm, const int16_t DCs[16]) {
// We look at the first three AC coefficients to determine what is the average
// delta between each sub-4x4 block.
const int v0 = abs(DCs[1]);
- const int v1 = abs(DCs[4]);
- const int v2 = abs(DCs[5]);
- int max_v = (v0 > v1) ? v1 : v0;
+ const int v1 = abs(DCs[2]);
+ const int v2 = abs(DCs[4]);
+ int max_v = (v1 > v0) ? v1 : v0;
max_v = (v2 > max_v) ? v2 : max_v;
if (max_v > dqm->max_edge_) dqm->max_edge_ = max_v;
}
@@ -957,7 +957,7 @@ static void PickBestIntra16(VP8EncIterator* const it, VP8ModeScore* rd) {
// we have a blocky macroblock (only DCs are non-zero) with fairly high
// distortion, record max delta so we can later adjust the minimal filtering
// strength needed to smooth these blocks out.
- if ((rd->nz & 0xffff) == 0 && rd->D > dqm->min_disto_) {
+ if ((rd->nz & 0x100ffff) == 0x1000000 && rd->D > dqm->min_disto_) {
StoreMaxDelta(dqm, rd->y_dc_levels);
}
}
@@ -1155,7 +1155,8 @@ static void RefineUsingDistortion(VP8EncIterator* const it,
const int lambda_d_uv = 120;
score_t score_i4 = dqm->i4_penalty_;
score_t i4_bit_sum = 0;
- const score_t bit_limit = it->enc_->mb_header_limit_;
+ const score_t bit_limit = try_both_modes ? it->enc_->mb_header_limit_
+ : MAX_COST; // no early-out allowed
if (is_i16) { // First, evaluate Intra16 distortion
int best_mode = -1;
diff --git a/thirdparty/libwebp/enc/token.c b/thirdparty/libwebp/enc/token.c
index e73256b37e..087940e5ff 100644
--- a/thirdparty/libwebp/enc/token.c
+++ b/thirdparty/libwebp/enc/token.c
@@ -87,14 +87,16 @@ static int TBufferNewPage(VP8TBuffer* const b) {
#define TOKEN_ID(t, b, ctx) \
(NUM_PROBAS * ((ctx) + NUM_CTX * ((b) + NUM_BANDS * (t))))
-static WEBP_INLINE uint32_t AddToken(VP8TBuffer* const b,
- uint32_t bit, uint32_t proba_idx) {
+static WEBP_INLINE uint32_t AddToken(VP8TBuffer* const b, uint32_t bit,
+ uint32_t proba_idx,
+ proba_t* const stats) {
assert(proba_idx < FIXED_PROBA_BIT);
assert(bit <= 1);
if (b->left_ > 0 || TBufferNewPage(b)) {
const int slot = --b->left_;
b->tokens_[slot] = (bit << 15) | proba_idx;
}
+ VP8RecordStats(bit, stats);
return bit;
}
@@ -108,13 +110,16 @@ static WEBP_INLINE void AddConstantToken(VP8TBuffer* const b,
}
}
-int VP8RecordCoeffTokens(const int ctx, const int coeff_type,
- int first, int last,
- const int16_t* const coeffs,
+int VP8RecordCoeffTokens(int ctx, const struct VP8Residual* const res,
VP8TBuffer* const tokens) {
- int n = first;
+ const int16_t* const coeffs = res->coeffs;
+ const int coeff_type = res->coeff_type;
+ const int last = res->last;
+ int n = res->first;
uint32_t base_id = TOKEN_ID(coeff_type, n, ctx);
- if (!AddToken(tokens, last >= 0, base_id + 0)) {
+ // should be stats[VP8EncBands[n]], but it's equivalent for n=0 or 1
+ proba_t* s = res->stats[n][ctx];
+ if (!AddToken(tokens, last >= 0, base_id + 0, s + 0)) {
return 0;
}
@@ -122,18 +127,20 @@ int VP8RecordCoeffTokens(const int ctx, const int coeff_type,
const int c = coeffs[n++];
const int sign = c < 0;
const uint32_t v = sign ? -c : c;
- if (!AddToken(tokens, v != 0, base_id + 1)) {
+ if (!AddToken(tokens, v != 0, base_id + 1, s + 1)) {
base_id = TOKEN_ID(coeff_type, VP8EncBands[n], 0); // ctx=0
+ s = res->stats[VP8EncBands[n]][0];
continue;
}
- if (!AddToken(tokens, v > 1, base_id + 2)) {
+ if (!AddToken(tokens, v > 1, base_id + 2, s + 2)) {
base_id = TOKEN_ID(coeff_type, VP8EncBands[n], 1); // ctx=1
+ s = res->stats[VP8EncBands[n]][1];
} else {
- if (!AddToken(tokens, v > 4, base_id + 3)) {
- if (AddToken(tokens, v != 2, base_id + 4))
- AddToken(tokens, v == 4, base_id + 5);
- } else if (!AddToken(tokens, v > 10, base_id + 6)) {
- if (!AddToken(tokens, v > 6, base_id + 7)) {
+ if (!AddToken(tokens, v > 4, base_id + 3, s + 3)) {
+ if (AddToken(tokens, v != 2, base_id + 4, s + 4))
+ AddToken(tokens, v == 4, base_id + 5, s + 5);
+ } else if (!AddToken(tokens, v > 10, base_id + 6, s + 6)) {
+ if (!AddToken(tokens, v > 6, base_id + 7, s + 7)) {
AddConstantToken(tokens, v == 6, 159);
} else {
AddConstantToken(tokens, v >= 9, 165);
@@ -144,26 +151,26 @@ int VP8RecordCoeffTokens(const int ctx, const int coeff_type,
const uint8_t* tab;
uint32_t residue = v - 3;
if (residue < (8 << 1)) { // VP8Cat3 (3b)
- AddToken(tokens, 0, base_id + 8);
- AddToken(tokens, 0, base_id + 9);
+ AddToken(tokens, 0, base_id + 8, s + 8);
+ AddToken(tokens, 0, base_id + 9, s + 9);
residue -= (8 << 0);
mask = 1 << 2;
tab = VP8Cat3;
} else if (residue < (8 << 2)) { // VP8Cat4 (4b)
- AddToken(tokens, 0, base_id + 8);
- AddToken(tokens, 1, base_id + 9);
+ AddToken(tokens, 0, base_id + 8, s + 8);
+ AddToken(tokens, 1, base_id + 9, s + 9);
residue -= (8 << 1);
mask = 1 << 3;
tab = VP8Cat4;
} else if (residue < (8 << 3)) { // VP8Cat5 (5b)
- AddToken(tokens, 1, base_id + 8);
- AddToken(tokens, 0, base_id + 10);
+ AddToken(tokens, 1, base_id + 8, s + 8);
+ AddToken(tokens, 0, base_id + 10, s + 9);
residue -= (8 << 2);
mask = 1 << 4;
tab = VP8Cat5;
} else { // VP8Cat6 (11b)
- AddToken(tokens, 1, base_id + 8);
- AddToken(tokens, 1, base_id + 10);
+ AddToken(tokens, 1, base_id + 8, s + 8);
+ AddToken(tokens, 1, base_id + 10, s + 9);
residue -= (8 << 3);
mask = 1 << 10;
tab = VP8Cat6;
@@ -174,9 +181,10 @@ int VP8RecordCoeffTokens(const int ctx, const int coeff_type,
}
}
base_id = TOKEN_ID(coeff_type, VP8EncBands[n], 2); // ctx=2
+ s = res->stats[VP8EncBands[n]][2];
}
AddConstantToken(tokens, sign, 128);
- if (n == 16 || !AddToken(tokens, n <= last, base_id + 0)) {
+ if (n == 16 || !AddToken(tokens, n <= last, base_id + 0, s + 0)) {
return 1; // EOB
}
}
diff --git a/thirdparty/libwebp/enc/vp8enci.h b/thirdparty/libwebp/enc/vp8enci.h
index c1fbd7644e..5b4e162a58 100644
--- a/thirdparty/libwebp/enc/vp8enci.h
+++ b/thirdparty/libwebp/enc/vp8enci.h
@@ -32,7 +32,7 @@ extern "C" {
// version numbers
#define ENC_MAJ_VERSION 0
#define ENC_MIN_VERSION 5
-#define ENC_REV_VERSION 1
+#define ENC_REV_VERSION 2
enum { MAX_LF_LEVELS = 64, // Maximum loop filter level
MAX_VARIABLE_LEVEL = 67, // last (inclusive) level with variable cost
@@ -325,9 +325,7 @@ int VP8EmitTokens(VP8TBuffer* const b, VP8BitWriter* const bw,
const uint8_t* const probas, int final_pass);
// record the coding of coefficients without knowing the probabilities yet
-int VP8RecordCoeffTokens(const int ctx, const int coeff_type,
- int first, int last,
- const int16_t* const coeffs,
+int VP8RecordCoeffTokens(int ctx, const struct VP8Residual* const res,
VP8TBuffer* const tokens);
// Estimate the final coded size given a set of 'probas'.
diff --git a/thirdparty/libwebp/enc/vp8l.c b/thirdparty/libwebp/enc/vp8l.c
index c16e2560ec..e4ad2959b8 100644
--- a/thirdparty/libwebp/enc/vp8l.c
+++ b/thirdparty/libwebp/enc/vp8l.c
@@ -34,8 +34,8 @@
// Palette reordering for smaller sum of deltas (and for smaller storage).
static int PaletteCompareColorsForQsort(const void* p1, const void* p2) {
- const uint32_t a = WebPMemToUint32(p1);
- const uint32_t b = WebPMemToUint32(p2);
+ const uint32_t a = WebPMemToUint32((uint8_t*)p1);
+ const uint32_t b = WebPMemToUint32((uint8_t*)p2);
assert(a != b);
return (a < b) ? -1 : 1;
}
@@ -224,9 +224,8 @@ static int AnalyzeEntropy(const uint32_t* argb,
{
double entropy_comp[kHistoTotal];
double entropy[kNumEntropyIx];
- EntropyIx k;
- EntropyIx last_mode_to_analyze =
- use_palette ? kPalette : kSpatialSubGreen;
+ int k;
+ int last_mode_to_analyze = use_palette ? kPalette : kSpatialSubGreen;
int j;
// Let's add one zero to the predicted histograms. The zeros are removed
// too efficiently by the pix_diff == 0 comparison, at least one of the
@@ -263,7 +262,7 @@ static int AnalyzeEntropy(const uint32_t* argb,
*min_entropy_ix = kDirect;
for (k = kDirect + 1; k <= last_mode_to_analyze; ++k) {
if (entropy[*min_entropy_ix] > entropy[k]) {
- *min_entropy_ix = k;
+ *min_entropy_ix = (EntropyIx)k;
}
}
*red_and_blue_always_zero = 1;
diff --git a/thirdparty/libwebp/mux/anim_encode.c b/thirdparty/libwebp/mux/anim_encode.c
index 53e2906a82..398ba8d850 100644
--- a/thirdparty/libwebp/mux/anim_encode.c
+++ b/thirdparty/libwebp/mux/anim_encode.c
@@ -829,8 +829,8 @@ static WebPEncodingError GenerateCandidates(
WebPPicture* const curr_canvas = &enc->curr_canvas_copy_;
const WebPPicture* const prev_canvas =
is_dispose_none ? &enc->prev_canvas_ : &enc->prev_canvas_disposed_;
- int use_blending_ll;
- int use_blending_lossy;
+ int use_blending_ll, use_blending_lossy;
+ int evaluate_ll, evaluate_lossy;
CopyCurrentCanvas(enc);
use_blending_ll =
@@ -843,16 +843,19 @@ static WebPEncodingError GenerateCandidates(
// Pick candidates to be tried.
if (!enc->options_.allow_mixed) {
- candidate_ll->evaluate_ = is_lossless;
- candidate_lossy->evaluate_ = !is_lossless;
+ evaluate_ll = is_lossless;
+ evaluate_lossy = !is_lossless;
+ } else if (enc->options_.minimize_size) {
+ evaluate_ll = 1;
+ evaluate_lossy = 1;
} else { // Use a heuristic for trying lossless and/or lossy compression.
const int num_colors = WebPGetColorPalette(&params->sub_frame_ll_, NULL);
- candidate_ll->evaluate_ = (num_colors < MAX_COLORS_LOSSLESS);
- candidate_lossy->evaluate_ = (num_colors >= MIN_COLORS_LOSSY);
+ evaluate_ll = (num_colors < MAX_COLORS_LOSSLESS);
+ evaluate_lossy = (num_colors >= MIN_COLORS_LOSSY);
}
// Generate candidates.
- if (candidate_ll->evaluate_) {
+ if (evaluate_ll) {
CopyCurrentCanvas(enc);
if (use_blending_ll) {
enc->curr_canvas_copy_modified_ =
@@ -862,7 +865,7 @@ static WebPEncodingError GenerateCandidates(
config_ll, use_blending_ll, candidate_ll);
if (error_code != VP8_ENC_OK) return error_code;
}
- if (candidate_lossy->evaluate_) {
+ if (evaluate_lossy) {
CopyCurrentCanvas(enc);
if (use_blending_lossy) {
enc->curr_canvas_copy_modified_ =
@@ -1029,6 +1032,8 @@ static WebPEncodingError SetFrame(WebPAnimEncoder* const enc,
const WebPPicture* const prev_canvas = &enc->prev_canvas_;
Candidate candidates[CANDIDATE_COUNT];
const int is_lossless = config->lossless;
+ const int consider_lossless = is_lossless || enc->options_.allow_mixed;
+ const int consider_lossy = !is_lossless || enc->options_.allow_mixed;
const int is_first_frame = enc->is_first_frame_;
// First frame cannot be skipped as there is no 'previous frame' to merge it
@@ -1066,9 +1071,7 @@ static WebPEncodingError SetFrame(WebPAnimEncoder* const enc,
return VP8_ENC_ERROR_INVALID_CONFIGURATION;
}
- for (i = 0; i < CANDIDATE_COUNT; ++i) {
- candidates[i].evaluate_ = 0;
- }
+ memset(candidates, 0, sizeof(candidates));
// Change-rectangle assuming previous frame was DISPOSE_NONE.
if (!GetSubRects(prev_canvas, curr_canvas, is_key_frame, is_first_frame,
@@ -1077,8 +1080,8 @@ static WebPEncodingError SetFrame(WebPAnimEncoder* const enc,
goto Err;
}
- if ((is_lossless && IsEmptyRect(&dispose_none_params.rect_ll_)) ||
- (!is_lossless && IsEmptyRect(&dispose_none_params.rect_lossy_))) {
+ if ((consider_lossless && IsEmptyRect(&dispose_none_params.rect_ll_)) ||
+ (consider_lossy && IsEmptyRect(&dispose_none_params.rect_lossy_))) {
// Don't encode the frame at all. Instead, the duration of the previous
// frame will be increased later.
assert(empty_rect_allowed_none);
@@ -1187,16 +1190,20 @@ static int CacheFrame(WebPAnimEncoder* const enc,
enc->prev_candidate_undecided_ = 0;
} else {
int64_t curr_delta;
+ FrameRect prev_rect_key, prev_rect_sub;
// Add this as a frame rectangle to enc.
error_code = SetFrame(enc, config, 0, encoded_frame, &frame_skipped);
if (error_code != VP8_ENC_OK) goto End;
if (frame_skipped) goto Skip;
+ prev_rect_sub = enc->prev_rect_;
+
// Add this as a key-frame to enc, too.
error_code = SetFrame(enc, config, 1, encoded_frame, &frame_skipped);
if (error_code != VP8_ENC_OK) goto End;
assert(frame_skipped == 0); // Key-frame cannot be an empty rectangle.
+ prev_rect_key = enc->prev_rect_;
// Analyze size difference of the two variants.
curr_delta = KeyFramePenalty(encoded_frame);
@@ -1207,11 +1214,13 @@ static int CacheFrame(WebPAnimEncoder* const enc,
old_keyframe->is_key_frame_ = 0;
}
encoded_frame->is_key_frame_ = 1;
+ enc->prev_candidate_undecided_ = 1;
enc->keyframe_ = (int)position;
enc->best_delta_ = curr_delta;
enc->flush_count_ = enc->count_ - 1; // We can flush previous frames.
} else {
encoded_frame->is_key_frame_ = 0;
+ enc->prev_candidate_undecided_ = 0;
}
// Note: We need '>=' below because when kmin and kmax are both zero,
// count_since_key_frame will always be > kmax.
@@ -1221,7 +1230,10 @@ static int CacheFrame(WebPAnimEncoder* const enc,
enc->keyframe_ = KEYFRAME_NONE;
enc->best_delta_ = DELTA_INFINITY;
}
- enc->prev_candidate_undecided_ = 1;
+ if (!enc->prev_candidate_undecided_) {
+ enc->prev_rect_ =
+ encoded_frame->is_key_frame_ ? prev_rect_key : prev_rect_sub;
+ }
}
}
diff --git a/thirdparty/libwebp/mux/muxi.h b/thirdparty/libwebp/mux/muxi.h
index d4d5cbad91..b4865fe36f 100644
--- a/thirdparty/libwebp/mux/muxi.h
+++ b/thirdparty/libwebp/mux/muxi.h
@@ -28,7 +28,7 @@ extern "C" {
#define MUX_MAJ_VERSION 0
#define MUX_MIN_VERSION 3
-#define MUX_REV_VERSION 1
+#define MUX_REV_VERSION 2
// Chunk object.
typedef struct WebPChunk WebPChunk;
diff --git a/thirdparty/libwebp/mux/muxinternal.c b/thirdparty/libwebp/mux/muxinternal.c
index 4babbe82fc..372c6a9674 100644
--- a/thirdparty/libwebp/mux/muxinternal.c
+++ b/thirdparty/libwebp/mux/muxinternal.c
@@ -16,7 +16,7 @@
#include "./muxi.h"
#include "../utils/utils.h"
-#define UNDEFINED_CHUNK_SIZE (-1)
+#define UNDEFINED_CHUNK_SIZE ((uint32_t)(-1))
const ChunkInfo kChunks[] = {
{ MKFOURCC('V', 'P', '8', 'X'), WEBP_CHUNK_VP8X, VP8X_CHUNK_SIZE },
@@ -439,7 +439,7 @@ static int IsNotCompatible(int feature, int num_items) {
return (feature != 0) != (num_items > 0);
}
-#define NO_FLAG 0
+#define NO_FLAG ((WebPFeatureFlags)0)
// Test basic constraints:
// retrieval, maximum number of chunks by index (use -1 to skip)
diff --git a/thirdparty/libwebp/utils/rescaler.c b/thirdparty/libwebp/utils/rescaler.c
index 00c9300bfb..d2278a52ff 100644
--- a/thirdparty/libwebp/utils/rescaler.c
+++ b/thirdparty/libwebp/utils/rescaler.c
@@ -48,11 +48,15 @@ void WebPRescalerInit(WebPRescaler* const wrk, int src_width, int src_height,
wrk->y_sub = wrk->y_expand ? y_sub - 1 : y_sub;
wrk->y_accum = wrk->y_expand ? wrk->y_sub : wrk->y_add;
if (!wrk->y_expand) {
- // this is WEBP_RESCALER_FRAC(dst_height, x_add * y_add) without the cast.
+ // This is WEBP_RESCALER_FRAC(dst_height, x_add * y_add) without the cast.
+ // Its value is <= WEBP_RESCALER_ONE, because dst_height <= wrk->y_add, and
+ // wrk->x_add >= 1;
const uint64_t ratio =
(uint64_t)dst_height * WEBP_RESCALER_ONE / (wrk->x_add * wrk->y_add);
if (ratio != (uint32_t)ratio) {
- // We can't represent the ratio with the current fixed-point precision.
+ // When ratio == WEBP_RESCALER_ONE, we can't represent the ratio with the
+ // current fixed-point precision. This happens when src_height ==
+ // wrk->y_add (which == src_height), and wrk->x_add == 1.
// => We special-case fxy_scale = 0, in WebPRescalerExportRow().
wrk->fxy_scale = 0;
} else {
diff --git a/thirdparty/libwebp/utils/utils.c b/thirdparty/libwebp/utils/utils.c
index 2602ca3c9f..82dbf8d5e5 100644
--- a/thirdparty/libwebp/utils/utils.c
+++ b/thirdparty/libwebp/utils/utils.c
@@ -175,8 +175,12 @@ static int CheckSizeArgumentsOverflow(uint64_t nmemb, size_t size) {
}
#endif
#if defined(MALLOC_LIMIT)
- if (mem_limit > 0 && total_mem + total_size >= mem_limit) {
- return 0; // fake fail!
+ if (mem_limit > 0) {
+ const uint64_t new_total_mem = (uint64_t)total_mem + total_size;
+ if (new_total_mem != (size_t)new_total_mem ||
+ new_total_mem > mem_limit) {
+ return 0; // fake fail!
+ }
}
#endif
diff --git a/thirdparty/libwebp/utils/utils.h b/thirdparty/libwebp/utils/utils.h
index e0a81126df..3a5d4e6a78 100644
--- a/thirdparty/libwebp/utils/utils.h
+++ b/thirdparty/libwebp/utils/utils.h
@@ -20,6 +20,7 @@
#endif
#include <assert.h>
+#include <limits.h>
#include "../dsp/dsp.h"
#include "../webp/types.h"
@@ -32,7 +33,14 @@ extern "C" {
// Memory allocation
// This is the maximum memory amount that libwebp will ever try to allocate.
-#define WEBP_MAX_ALLOCABLE_MEMORY (1ULL << 40)
+#ifndef WEBP_MAX_ALLOCABLE_MEMORY
+#if SIZE_MAX > (1ULL << 34)
+#define WEBP_MAX_ALLOCABLE_MEMORY (1ULL << 34)
+#else
+// For 32-bit targets keep this below INT_MAX to avoid valgrind warnings.
+#define WEBP_MAX_ALLOCABLE_MEMORY ((1ULL << 31) - (1 << 16))
+#endif
+#endif // WEBP_MAX_ALLOCABLE_MEMORY
// size-checking safe malloc/calloc: verify that the requested size is not too
// large, or return NULL. You don't need to call these for constructs like
diff --git a/thirdparty/libwebp/webp/decode.h b/thirdparty/libwebp/webp/decode.h
index 7a3bed93a4..4c5e74ac36 100644
--- a/thirdparty/libwebp/webp/decode.h
+++ b/thirdparty/libwebp/webp/decode.h
@@ -248,19 +248,19 @@ typedef enum VP8StatusCode {
// picture is only partially decoded, pending additional input.
// Code example:
//
-// WebPInitDecBuffer(&buffer);
-// buffer.colorspace = mode;
+// WebPInitDecBuffer(&output_buffer);
+// output_buffer.colorspace = mode;
// ...
-// WebPIDecoder* idec = WebPINewDecoder(&buffer);
-// while (has_more_data) {
-// // ... (get additional data)
+// WebPIDecoder* idec = WebPINewDecoder(&output_buffer);
+// while (additional_data_is_available) {
+// // ... (get additional data in some new_data[] buffer)
// status = WebPIAppend(idec, new_data, new_data_size);
-// if (status != VP8_STATUS_SUSPENDED ||
-// break;
+// if (status != VP8_STATUS_OK && status != VP8_STATUS_SUSPENDED) {
+// break; // an error occurred.
// }
//
// // The above call decodes the current available buffer.
-// // Part of the image can now be refreshed by calling to
+// // Part of the image can now be refreshed by calling
// // WebPIDecGetRGB()/WebPIDecGetYUVA() etc.
// }
// WebPIDelete(idec);
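Note: a compilable variant of the corrected header comment above; get_more_data() is hypothetical and only stands in for whatever feeds bytes to the decoder:

    #include <stddef.h>
    #include <stdint.h>
    #include "webp/decode.h"

    extern int get_more_data(const uint8_t** data, size_t* size);  /* hypothetical */

    int DecodeIncrementally(void) {
      WebPDecBuffer output_buffer;
      WebPIDecoder* idec = NULL;
      int ok = 0;
      if (!WebPInitDecBuffer(&output_buffer)) return 0;
      output_buffer.colorspace = MODE_RGBA;
      idec = WebPINewDecoder(&output_buffer);
      if (idec == NULL) goto End;
      for (;;) {
        const uint8_t* new_data;
        size_t new_data_size;
        VP8StatusCode status;
        if (!get_more_data(&new_data, &new_data_size)) break;
        status = WebPIAppend(idec, new_data, new_data_size);
        if (status == VP8_STATUS_OK) { ok = 1; break; }  /* fully decoded */
        if (status != VP8_STATUS_SUSPENDED) break;       /* an error occurred */
        /* VP8_STATUS_SUSPENDED: the partial image can be read back with
           WebPIDecGetRGB()/WebPIDecGetYUVA() at this point. */
      }
     End:
      WebPIDelete(idec);
      WebPFreeDecBuffer(&output_buffer);
      return ok;
    }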
diff --git a/thirdparty/libwebp/webp/encode.h b/thirdparty/libwebp/webp/encode.h
index 9291b7195c..b65e27e7fd 100644
--- a/thirdparty/libwebp/webp/encode.h
+++ b/thirdparty/libwebp/webp/encode.h
@@ -481,10 +481,10 @@ WEBP_EXTERN(int) WebPPictureARGBToYUVADithered(
WEBP_EXTERN(int) WebPPictureSmartARGBToYUVA(WebPPicture* picture);
// Converts picture->yuv to picture->argb and sets picture->use_argb to true.
-// The input format must be YUV_420 or YUV_420A.
-// Note that the use of this method is discouraged if one has access to the
-// raw ARGB samples, since using YUV420 is comparatively lossy. Also, the
-// conversion from YUV420 to ARGB incurs a small loss too.
+// The input format must be YUV_420 or YUV_420A. The conversion from YUV420 to
+// ARGB incurs a small loss too.
+// Note that the use of this colorspace is discouraged if one has access to the
+// raw ARGB samples, since using YUV420 is comparatively lossy.
// Returns false in case of error.
WEBP_EXTERN(int) WebPPictureYUVAToARGB(WebPPicture* picture);