Diffstat (limited to 'thirdparty/libwebp/src/enc')
-rw-r--r--  thirdparty/libwebp/src/enc/analysis_enc.c                 |   2
-rw-r--r--  thirdparty/libwebp/src/enc/backward_references_cost_enc.c |   2
-rw-r--r--  thirdparty/libwebp/src/enc/backward_references_enc.c      |   1
-rw-r--r--  thirdparty/libwebp/src/enc/cost_enc.h                     |   2
-rw-r--r--  thirdparty/libwebp/src/enc/delta_palettization_enc.c      | 455
-rw-r--r--  thirdparty/libwebp/src/enc/delta_palettization_enc.h      |  25
-rw-r--r--  thirdparty/libwebp/src/enc/histogram_enc.c                | 196
-rw-r--r--  thirdparty/libwebp/src/enc/histogram_enc.h                |  10
-rw-r--r--  thirdparty/libwebp/src/enc/iterator_enc.c                 |   2
-rw-r--r--  thirdparty/libwebp/src/enc/picture_tools_enc.c            |  41
-rw-r--r--  thirdparty/libwebp/src/enc/vp8i_enc.h                     |   6
-rw-r--r--  thirdparty/libwebp/src/enc/vp8l_enc.c                     |  17
-rw-r--r--  thirdparty/libwebp/src/enc/vp8li_enc.h                    |   2
13 files changed, 195 insertions, 566 deletions
diff --git a/thirdparty/libwebp/src/enc/analysis_enc.c b/thirdparty/libwebp/src/enc/analysis_enc.c
index a47ff7d4e8..687757ae03 100644
--- a/thirdparty/libwebp/src/enc/analysis_enc.c
+++ b/thirdparty/libwebp/src/enc/analysis_enc.c
@@ -458,7 +458,7 @@ static void MergeJobs(const SegmentJob* const src, SegmentJob* const dst) {
dst->uv_alpha += src->uv_alpha;
}
-// initialize the job struct with some TODOs
+// initialize the job struct with some tasks to perform
static void InitSegmentJob(VP8Encoder* const enc, SegmentJob* const job,
int start_row, int end_row) {
WebPGetWorkerInterface()->Init(&job->worker);
diff --git a/thirdparty/libwebp/src/enc/backward_references_cost_enc.c b/thirdparty/libwebp/src/enc/backward_references_cost_enc.c
index 7175496c7f..516abd73eb 100644
--- a/thirdparty/libwebp/src/enc/backward_references_cost_enc.c
+++ b/thirdparty/libwebp/src/enc/backward_references_cost_enc.c
@@ -67,7 +67,7 @@ static int CostModelBuild(CostModel* const m, int xsize, int cache_bits,
// The following code is similar to VP8LHistogramCreate but converts the
// distance to plane code.
- VP8LHistogramInit(histo, cache_bits);
+ VP8LHistogramInit(histo, cache_bits, /*init_arrays=*/ 1);
while (VP8LRefsCursorOk(&c)) {
VP8LHistogramAddSinglePixOrCopy(histo, c.cur_pos, VP8LDistanceToPlaneCode,
xsize);
diff --git a/thirdparty/libwebp/src/enc/backward_references_enc.c b/thirdparty/libwebp/src/enc/backward_references_enc.c
index 39230188b9..3ab7b0ac7d 100644
--- a/thirdparty/libwebp/src/enc/backward_references_enc.c
+++ b/thirdparty/libwebp/src/enc/backward_references_enc.c
@@ -715,6 +715,7 @@ static int CalculateBestCacheSize(const uint32_t* argb, int quality,
for (i = 0; i <= cache_bits_max; ++i) {
histos[i] = VP8LAllocateHistogram(i);
if (histos[i] == NULL) goto Error;
+ VP8LHistogramInit(histos[i], i, /*init_arrays=*/ 1);
if (i == 0) continue;
cc_init[i] = VP8LColorCacheInit(&hashers[i], i);
if (!cc_init[i]) goto Error;
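The two hunks above show the caller-side pattern for the new three-argument VP8LHistogramInit() (introduced further down, in histogram_enc.c/h): allocation no longer zero-fills the counter arrays, so code that starts accumulating counts right away passes init_arrays == 1. A minimal sketch of that pattern follows; it is illustrative only — the wrapper function and error handling are invented here, and VP8LFreeHistogram() is assumed to be the matching deallocator.

static int BuildOneHistogram(int cache_bits) {
  VP8LHistogram* const histo = VP8LAllocateHistogram(cache_bits);
  if (histo == NULL) return 0;   // out of memory
  // The allocator leaves literal_/red_/blue_/alpha_/distance_ uninitialized,
  // so clear them explicitly before accumulating symbol counts.
  VP8LHistogramInit(histo, cache_bits, /*init_arrays=*/ 1);
  // ... accumulate symbols into 'histo' here ...
  VP8LFreeHistogram(histo);      // assumed counterpart to VP8LAllocateHistogram()
  return 1;
}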
diff --git a/thirdparty/libwebp/src/enc/cost_enc.h b/thirdparty/libwebp/src/enc/cost_enc.h
index bdce1e6a3b..a4b177b342 100644
--- a/thirdparty/libwebp/src/enc/cost_enc.h
+++ b/thirdparty/libwebp/src/enc/cost_enc.h
@@ -79,4 +79,4 @@ extern const uint16_t VP8FixedCostsI4[NUM_BMODES][NUM_BMODES][NUM_BMODES];
} // extern "C"
#endif
-#endif /* WEBP_ENC_COST_ENC_H_ */
+#endif // WEBP_ENC_COST_ENC_H_
diff --git a/thirdparty/libwebp/src/enc/delta_palettization_enc.c b/thirdparty/libwebp/src/enc/delta_palettization_enc.c
deleted file mode 100644
index a61c8e6c93..0000000000
--- a/thirdparty/libwebp/src/enc/delta_palettization_enc.c
+++ /dev/null
@@ -1,455 +0,0 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
-//
-// Use of this source code is governed by a BSD-style license
-// that can be found in the COPYING file in the root of the source
-// tree. An additional intellectual property rights grant can be found
-// in the file PATENTS. All contributing project authors may
-// be found in the AUTHORS file in the root of the source tree.
-// -----------------------------------------------------------------------------
-//
-// Author: Mislav Bradac (mislavm@google.com)
-//
-
-#include "src/enc/delta_palettization_enc.h"
-
-#ifdef WEBP_EXPERIMENTAL_FEATURES
-#include "src/webp/types.h"
-#include "src/dsp/lossless.h"
-
-#define MK_COL(r, g, b) (((r) << 16) + ((g) << 8) + (b))
-
-// Format allows palette up to 256 entries, but more palette entries produce
-// bigger entropy. In the future it will probably be useful to add more entries
-// that are far from the origin of the palette or choose remaining entries
-// dynamically.
-#define DELTA_PALETTE_SIZE 226
-
-// Palette used for delta_palettization. Entries are roughly sorted by distance
-// of their signed equivalents from the origin.
-static const uint32_t kDeltaPalette[DELTA_PALETTE_SIZE] = {
- MK_COL(0u, 0u, 0u),
- MK_COL(255u, 255u, 255u),
- MK_COL(1u, 1u, 1u),
- MK_COL(254u, 254u, 254u),
- MK_COL(2u, 2u, 2u),
- MK_COL(4u, 4u, 4u),
- MK_COL(252u, 252u, 252u),
- MK_COL(250u, 0u, 0u),
- MK_COL(0u, 250u, 0u),
- MK_COL(0u, 0u, 250u),
- MK_COL(6u, 0u, 0u),
- MK_COL(0u, 6u, 0u),
- MK_COL(0u, 0u, 6u),
- MK_COL(0u, 0u, 248u),
- MK_COL(0u, 0u, 8u),
- MK_COL(0u, 248u, 0u),
- MK_COL(0u, 248u, 248u),
- MK_COL(0u, 248u, 8u),
- MK_COL(0u, 8u, 0u),
- MK_COL(0u, 8u, 248u),
- MK_COL(0u, 8u, 8u),
- MK_COL(8u, 8u, 8u),
- MK_COL(248u, 0u, 0u),
- MK_COL(248u, 0u, 248u),
- MK_COL(248u, 0u, 8u),
- MK_COL(248u, 248u, 0u),
- MK_COL(248u, 8u, 0u),
- MK_COL(8u, 0u, 0u),
- MK_COL(8u, 0u, 248u),
- MK_COL(8u, 0u, 8u),
- MK_COL(8u, 248u, 0u),
- MK_COL(8u, 8u, 0u),
- MK_COL(23u, 23u, 23u),
- MK_COL(13u, 13u, 13u),
- MK_COL(232u, 232u, 232u),
- MK_COL(244u, 244u, 244u),
- MK_COL(245u, 245u, 250u),
- MK_COL(50u, 50u, 50u),
- MK_COL(204u, 204u, 204u),
- MK_COL(236u, 236u, 236u),
- MK_COL(16u, 16u, 16u),
- MK_COL(240u, 16u, 16u),
- MK_COL(16u, 240u, 16u),
- MK_COL(240u, 240u, 16u),
- MK_COL(16u, 16u, 240u),
- MK_COL(240u, 16u, 240u),
- MK_COL(16u, 240u, 240u),
- MK_COL(240u, 240u, 240u),
- MK_COL(0u, 0u, 232u),
- MK_COL(0u, 232u, 0u),
- MK_COL(232u, 0u, 0u),
- MK_COL(0u, 0u, 24u),
- MK_COL(0u, 24u, 0u),
- MK_COL(24u, 0u, 0u),
- MK_COL(32u, 32u, 32u),
- MK_COL(224u, 32u, 32u),
- MK_COL(32u, 224u, 32u),
- MK_COL(224u, 224u, 32u),
- MK_COL(32u, 32u, 224u),
- MK_COL(224u, 32u, 224u),
- MK_COL(32u, 224u, 224u),
- MK_COL(224u, 224u, 224u),
- MK_COL(0u, 0u, 176u),
- MK_COL(0u, 0u, 80u),
- MK_COL(0u, 176u, 0u),
- MK_COL(0u, 176u, 176u),
- MK_COL(0u, 176u, 80u),
- MK_COL(0u, 80u, 0u),
- MK_COL(0u, 80u, 176u),
- MK_COL(0u, 80u, 80u),
- MK_COL(176u, 0u, 0u),
- MK_COL(176u, 0u, 176u),
- MK_COL(176u, 0u, 80u),
- MK_COL(176u, 176u, 0u),
- MK_COL(176u, 80u, 0u),
- MK_COL(80u, 0u, 0u),
- MK_COL(80u, 0u, 176u),
- MK_COL(80u, 0u, 80u),
- MK_COL(80u, 176u, 0u),
- MK_COL(80u, 80u, 0u),
- MK_COL(0u, 0u, 152u),
- MK_COL(0u, 0u, 104u),
- MK_COL(0u, 152u, 0u),
- MK_COL(0u, 152u, 152u),
- MK_COL(0u, 152u, 104u),
- MK_COL(0u, 104u, 0u),
- MK_COL(0u, 104u, 152u),
- MK_COL(0u, 104u, 104u),
- MK_COL(152u, 0u, 0u),
- MK_COL(152u, 0u, 152u),
- MK_COL(152u, 0u, 104u),
- MK_COL(152u, 152u, 0u),
- MK_COL(152u, 104u, 0u),
- MK_COL(104u, 0u, 0u),
- MK_COL(104u, 0u, 152u),
- MK_COL(104u, 0u, 104u),
- MK_COL(104u, 152u, 0u),
- MK_COL(104u, 104u, 0u),
- MK_COL(216u, 216u, 216u),
- MK_COL(216u, 216u, 40u),
- MK_COL(216u, 216u, 176u),
- MK_COL(216u, 216u, 80u),
- MK_COL(216u, 40u, 216u),
- MK_COL(216u, 40u, 40u),
- MK_COL(216u, 40u, 176u),
- MK_COL(216u, 40u, 80u),
- MK_COL(216u, 176u, 216u),
- MK_COL(216u, 176u, 40u),
- MK_COL(216u, 176u, 176u),
- MK_COL(216u, 176u, 80u),
- MK_COL(216u, 80u, 216u),
- MK_COL(216u, 80u, 40u),
- MK_COL(216u, 80u, 176u),
- MK_COL(216u, 80u, 80u),
- MK_COL(40u, 216u, 216u),
- MK_COL(40u, 216u, 40u),
- MK_COL(40u, 216u, 176u),
- MK_COL(40u, 216u, 80u),
- MK_COL(40u, 40u, 216u),
- MK_COL(40u, 40u, 40u),
- MK_COL(40u, 40u, 176u),
- MK_COL(40u, 40u, 80u),
- MK_COL(40u, 176u, 216u),
- MK_COL(40u, 176u, 40u),
- MK_COL(40u, 176u, 176u),
- MK_COL(40u, 176u, 80u),
- MK_COL(40u, 80u, 216u),
- MK_COL(40u, 80u, 40u),
- MK_COL(40u, 80u, 176u),
- MK_COL(40u, 80u, 80u),
- MK_COL(80u, 216u, 216u),
- MK_COL(80u, 216u, 40u),
- MK_COL(80u, 216u, 176u),
- MK_COL(80u, 216u, 80u),
- MK_COL(80u, 40u, 216u),
- MK_COL(80u, 40u, 40u),
- MK_COL(80u, 40u, 176u),
- MK_COL(80u, 40u, 80u),
- MK_COL(80u, 176u, 216u),
- MK_COL(80u, 176u, 40u),
- MK_COL(80u, 176u, 176u),
- MK_COL(80u, 176u, 80u),
- MK_COL(80u, 80u, 216u),
- MK_COL(80u, 80u, 40u),
- MK_COL(80u, 80u, 176u),
- MK_COL(80u, 80u, 80u),
- MK_COL(0u, 0u, 192u),
- MK_COL(0u, 0u, 64u),
- MK_COL(0u, 0u, 128u),
- MK_COL(0u, 192u, 0u),
- MK_COL(0u, 192u, 192u),
- MK_COL(0u, 192u, 64u),
- MK_COL(0u, 192u, 128u),
- MK_COL(0u, 64u, 0u),
- MK_COL(0u, 64u, 192u),
- MK_COL(0u, 64u, 64u),
- MK_COL(0u, 64u, 128u),
- MK_COL(0u, 128u, 0u),
- MK_COL(0u, 128u, 192u),
- MK_COL(0u, 128u, 64u),
- MK_COL(0u, 128u, 128u),
- MK_COL(176u, 216u, 216u),
- MK_COL(176u, 216u, 40u),
- MK_COL(176u, 216u, 176u),
- MK_COL(176u, 216u, 80u),
- MK_COL(176u, 40u, 216u),
- MK_COL(176u, 40u, 40u),
- MK_COL(176u, 40u, 176u),
- MK_COL(176u, 40u, 80u),
- MK_COL(176u, 176u, 216u),
- MK_COL(176u, 176u, 40u),
- MK_COL(176u, 176u, 176u),
- MK_COL(176u, 176u, 80u),
- MK_COL(176u, 80u, 216u),
- MK_COL(176u, 80u, 40u),
- MK_COL(176u, 80u, 176u),
- MK_COL(176u, 80u, 80u),
- MK_COL(192u, 0u, 0u),
- MK_COL(192u, 0u, 192u),
- MK_COL(192u, 0u, 64u),
- MK_COL(192u, 0u, 128u),
- MK_COL(192u, 192u, 0u),
- MK_COL(192u, 192u, 192u),
- MK_COL(192u, 192u, 64u),
- MK_COL(192u, 192u, 128u),
- MK_COL(192u, 64u, 0u),
- MK_COL(192u, 64u, 192u),
- MK_COL(192u, 64u, 64u),
- MK_COL(192u, 64u, 128u),
- MK_COL(192u, 128u, 0u),
- MK_COL(192u, 128u, 192u),
- MK_COL(192u, 128u, 64u),
- MK_COL(192u, 128u, 128u),
- MK_COL(64u, 0u, 0u),
- MK_COL(64u, 0u, 192u),
- MK_COL(64u, 0u, 64u),
- MK_COL(64u, 0u, 128u),
- MK_COL(64u, 192u, 0u),
- MK_COL(64u, 192u, 192u),
- MK_COL(64u, 192u, 64u),
- MK_COL(64u, 192u, 128u),
- MK_COL(64u, 64u, 0u),
- MK_COL(64u, 64u, 192u),
- MK_COL(64u, 64u, 64u),
- MK_COL(64u, 64u, 128u),
- MK_COL(64u, 128u, 0u),
- MK_COL(64u, 128u, 192u),
- MK_COL(64u, 128u, 64u),
- MK_COL(64u, 128u, 128u),
- MK_COL(128u, 0u, 0u),
- MK_COL(128u, 0u, 192u),
- MK_COL(128u, 0u, 64u),
- MK_COL(128u, 0u, 128u),
- MK_COL(128u, 192u, 0u),
- MK_COL(128u, 192u, 192u),
- MK_COL(128u, 192u, 64u),
- MK_COL(128u, 192u, 128u),
- MK_COL(128u, 64u, 0u),
- MK_COL(128u, 64u, 192u),
- MK_COL(128u, 64u, 64u),
- MK_COL(128u, 64u, 128u),
- MK_COL(128u, 128u, 0u),
- MK_COL(128u, 128u, 192u),
- MK_COL(128u, 128u, 64u),
- MK_COL(128u, 128u, 128u),
-};
-
-#undef MK_COL
-
-//------------------------------------------------------------------------------
-// TODO(skal): move the functions to dsp/lossless.c when the correct
-// granularity is found. For now, we'll just copy-paste some useful bits
-// here instead.
-
-// In-place sum of each component with mod 256.
-static WEBP_INLINE void AddPixelsEq(uint32_t* a, uint32_t b) {
- const uint32_t alpha_and_green = (*a & 0xff00ff00u) + (b & 0xff00ff00u);
- const uint32_t red_and_blue = (*a & 0x00ff00ffu) + (b & 0x00ff00ffu);
- *a = (alpha_and_green & 0xff00ff00u) | (red_and_blue & 0x00ff00ffu);
-}
-
-static WEBP_INLINE uint32_t Clip255(uint32_t a) {
- if (a < 256) {
- return a;
- }
- // return 0, when a is a negative integer.
- // return 255, when a is positive.
- return ~a >> 24;
-}
-
-// Delta palettization functions.
-static WEBP_INLINE int Square(int x) {
- return x * x;
-}
-
-static WEBP_INLINE uint32_t Intensity(uint32_t a) {
- return
- 30 * ((a >> 16) & 0xff) +
- 59 * ((a >> 8) & 0xff) +
- 11 * ((a >> 0) & 0xff);
-}
-
-static uint32_t CalcDist(uint32_t predicted_value, uint32_t actual_value,
- uint32_t palette_entry) {
- int i;
- uint32_t distance = 0;
- AddPixelsEq(&predicted_value, palette_entry);
- for (i = 0; i < 32; i += 8) {
- const int32_t av = (actual_value >> i) & 0xff;
- const int32_t pv = (predicted_value >> i) & 0xff;
- distance += Square(pv - av);
- }
- // We sum square of intensity difference with factor 10, but because Intensity
- // returns 100 times real intensity we need to multiply differences of colors
- // by 1000.
- distance *= 1000u;
- distance += Square(Intensity(predicted_value)
- - Intensity(actual_value));
- return distance;
-}
-
-static uint32_t Predict(int x, int y, uint32_t* image) {
- const uint32_t t = (y == 0) ? ARGB_BLACK : image[x];
- const uint32_t l = (x == 0) ? ARGB_BLACK : image[x - 1];
- const uint32_t p =
- (((((t >> 24) & 0xff) + ((l >> 24) & 0xff)) / 2) << 24) +
- (((((t >> 16) & 0xff) + ((l >> 16) & 0xff)) / 2) << 16) +
- (((((t >> 8) & 0xff) + ((l >> 8) & 0xff)) / 2) << 8) +
- (((((t >> 0) & 0xff) + ((l >> 0) & 0xff)) / 2) << 0);
- if (x == 0 && y == 0) return ARGB_BLACK;
- if (x == 0) return t;
- if (y == 0) return l;
- return p;
-}
-
-static WEBP_INLINE int AddSubtractComponentFullWithCoefficient(
- int a, int b, int c) {
- return Clip255(a + ((b - c) >> 2));
-}
-
-static WEBP_INLINE uint32_t ClampedAddSubtractFullWithCoefficient(
- uint32_t c0, uint32_t c1, uint32_t c2) {
- const int a = AddSubtractComponentFullWithCoefficient(
- c0 >> 24, c1 >> 24, c2 >> 24);
- const int r = AddSubtractComponentFullWithCoefficient((c0 >> 16) & 0xff,
- (c1 >> 16) & 0xff,
- (c2 >> 16) & 0xff);
- const int g = AddSubtractComponentFullWithCoefficient((c0 >> 8) & 0xff,
- (c1 >> 8) & 0xff,
- (c2 >> 8) & 0xff);
- const int b = AddSubtractComponentFullWithCoefficient(
- c0 & 0xff, c1 & 0xff, c2 & 0xff);
- return ((uint32_t)a << 24) | (r << 16) | (g << 8) | b;
-}
-
-//------------------------------------------------------------------------------
-
-// Find palette entry with minimum error from difference of actual pixel value
-// and predicted pixel value. Propagate error of pixel to its top and left pixel
-// in src array. Write predicted_value + palette_entry to new_image. Return
-// index of best palette entry.
-static int FindBestPaletteEntry(uint32_t src, uint32_t predicted_value,
- const uint32_t palette[], int palette_size) {
- int i;
- int idx = 0;
- uint32_t best_distance = CalcDist(predicted_value, src, palette[0]);
- for (i = 1; i < palette_size; ++i) {
- const uint32_t distance = CalcDist(predicted_value, src, palette[i]);
- if (distance < best_distance) {
- best_distance = distance;
- idx = i;
- }
- }
- return idx;
-}
-
-static void ApplyBestPaletteEntry(int x, int y,
- uint32_t new_value, uint32_t palette_value,
- uint32_t* src, int src_stride,
- uint32_t* new_image) {
- AddPixelsEq(&new_value, palette_value);
- if (x > 0) {
- src[x - 1] = ClampedAddSubtractFullWithCoefficient(src[x - 1],
- new_value, src[x]);
- }
- if (y > 0) {
- src[x - src_stride] =
- ClampedAddSubtractFullWithCoefficient(src[x - src_stride],
- new_value, src[x]);
- }
- new_image[x] = new_value;
-}
-
-//------------------------------------------------------------------------------
-// Main entry point
-
-static WebPEncodingError ApplyDeltaPalette(uint32_t* src, uint32_t* dst,
- uint32_t src_stride,
- uint32_t dst_stride,
- const uint32_t* palette,
- int palette_size,
- int width, int height,
- int num_passes) {
- int x, y;
- WebPEncodingError err = VP8_ENC_OK;
- uint32_t* new_image = (uint32_t*)WebPSafeMalloc(width, sizeof(*new_image));
- uint8_t* const tmp_row = (uint8_t*)WebPSafeMalloc(width, sizeof(*tmp_row));
- if (new_image == NULL || tmp_row == NULL) {
- err = VP8_ENC_ERROR_OUT_OF_MEMORY;
- goto Error;
- }
-
- while (num_passes--) {
- uint32_t* cur_src = src;
- uint32_t* cur_dst = dst;
- for (y = 0; y < height; ++y) {
- for (x = 0; x < width; ++x) {
- const uint32_t predicted_value = Predict(x, y, new_image);
- tmp_row[x] = FindBestPaletteEntry(cur_src[x], predicted_value,
- palette, palette_size);
- ApplyBestPaletteEntry(x, y, predicted_value, palette[tmp_row[x]],
- cur_src, src_stride, new_image);
- }
- for (x = 0; x < width; ++x) {
- cur_dst[x] = palette[tmp_row[x]];
- }
- cur_src += src_stride;
- cur_dst += dst_stride;
- }
- }
- Error:
- WebPSafeFree(new_image);
- WebPSafeFree(tmp_row);
- return err;
-}
-
-// replaces enc->argb_ by a palettizable approximation of it,
-// and generates optimal enc->palette_[]
-WebPEncodingError WebPSearchOptimalDeltaPalette(VP8LEncoder* const enc) {
- const WebPPicture* const pic = enc->pic_;
- uint32_t* src = pic->argb;
- uint32_t* dst = enc->argb_;
- const int width = pic->width;
- const int height = pic->height;
-
- WebPEncodingError err = VP8_ENC_OK;
- memcpy(enc->palette_, kDeltaPalette, sizeof(kDeltaPalette));
- enc->palette_[DELTA_PALETTE_SIZE - 1] = src[0] - 0xff000000u;
- enc->palette_size_ = DELTA_PALETTE_SIZE;
- err = ApplyDeltaPalette(src, dst, pic->argb_stride, enc->current_width_,
- enc->palette_, enc->palette_size_,
- width, height, 2);
- if (err != VP8_ENC_OK) goto Error;
-
- Error:
- return err;
-}
-
-#else // !WEBP_EXPERIMENTAL_FEATURES
-
-WebPEncodingError WebPSearchOptimalDeltaPalette(VP8LEncoder* const enc) {
- (void)enc;
- return VP8_ENC_ERROR_INVALID_CONFIGURATION;
-}
-
-#endif // WEBP_EXPERIMENTAL_FEATURES
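The file deleted above was the experimental delta-palettization pass. Two of its packed-pixel helpers capture tricks that recur elsewhere in the lossless encoder and are worth restating. The sketch below is a standalone paraphrase (pure functions instead of the original in-place pointer form), assuming only <stdint.h>.

#include <stdint.h>

// Per-channel addition mod 256 on a packed ARGB word: alpha/green and
// red/blue are summed in separate lanes so a carry out of one byte cannot
// spill into the next kept channel, then the stray carry bits are masked off.
static uint32_t AddPixelsMod256(uint32_t a, uint32_t b) {
  const uint32_t alpha_and_green = (a & 0xff00ff00u) + (b & 0xff00ff00u);
  const uint32_t red_and_blue = (a & 0x00ff00ffu) + (b & 0x00ff00ffu);
  return (alpha_and_green & 0xff00ff00u) | (red_and_blue & 0x00ff00ffu);
}

// Branch-light clamp to [0, 255]: a value that wrapped below zero has its top
// bits set, so ~a >> 24 yields 0; a (small) positive overflow yields 255.
static uint32_t Clip255(uint32_t a) {
  return (a < 256) ? a : (~a >> 24);
}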
diff --git a/thirdparty/libwebp/src/enc/delta_palettization_enc.h b/thirdparty/libwebp/src/enc/delta_palettization_enc.h
deleted file mode 100644
index b15e2cd487..0000000000
--- a/thirdparty/libwebp/src/enc/delta_palettization_enc.h
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
-//
-// Use of this source code is governed by a BSD-style license
-// that can be found in the COPYING file in the root of the source
-// tree. An additional intellectual property rights grant can be found
-// in the file PATENTS. All contributing project authors may
-// be found in the AUTHORS file in the root of the source tree.
-// -----------------------------------------------------------------------------
-//
-// Author: Mislav Bradac (mislavm@google.com)
-//
-
-#ifndef WEBP_ENC_DELTA_PALETTIZATION_ENC_H_
-#define WEBP_ENC_DELTA_PALETTIZATION_ENC_H_
-
-#include "src/webp/encode.h"
-#include "src/enc/vp8li_enc.h"
-
-// Replaces enc->argb_[] input by a palettizable approximation of it,
-// and generates optimal enc->palette_[].
-// This function can revert enc->use_palette_ / enc->use_predict_ flag
-// if delta-palettization is not producing expected saving.
-WebPEncodingError WebPSearchOptimalDeltaPalette(VP8LEncoder* const enc);
-
-#endif // WEBP_ENC_DELTA_PALETTIZATION_ENC_H_
diff --git a/thirdparty/libwebp/src/enc/histogram_enc.c b/thirdparty/libwebp/src/enc/histogram_enc.c
index 9fdbc627a1..4e49e0a201 100644
--- a/thirdparty/libwebp/src/enc/histogram_enc.c
+++ b/thirdparty/libwebp/src/enc/histogram_enc.c
@@ -51,10 +51,12 @@ static void HistogramCopy(const VP8LHistogram* const src,
VP8LHistogram* const dst) {
uint32_t* const dst_literal = dst->literal_;
const int dst_cache_bits = dst->palette_code_bits_;
+ const int literal_size = VP8LHistogramNumCodes(dst_cache_bits);
const int histo_size = VP8LGetHistogramSize(dst_cache_bits);
assert(src->palette_code_bits_ == dst_cache_bits);
memcpy(dst, src, histo_size);
dst->literal_ = dst_literal;
+ memcpy(dst->literal_, src->literal_, literal_size * sizeof(*dst->literal_));
}
int VP8LGetHistogramSize(int cache_bits) {
@@ -91,9 +93,19 @@ void VP8LHistogramCreate(VP8LHistogram* const p,
VP8LHistogramStoreRefs(refs, p);
}
-void VP8LHistogramInit(VP8LHistogram* const p, int palette_code_bits) {
+void VP8LHistogramInit(VP8LHistogram* const p, int palette_code_bits,
+ int init_arrays) {
p->palette_code_bits_ = palette_code_bits;
- HistogramClear(p);
+ if (init_arrays) {
+ HistogramClear(p);
+ } else {
+ p->trivial_symbol_ = 0;
+ p->bit_cost_ = 0.;
+ p->literal_cost_ = 0.;
+ p->red_cost_ = 0.;
+ p->blue_cost_ = 0.;
+ memset(p->is_used_, 0, sizeof(p->is_used_));
+ }
}
VP8LHistogram* VP8LAllocateHistogram(int cache_bits) {
@@ -104,37 +116,70 @@ VP8LHistogram* VP8LAllocateHistogram(int cache_bits) {
histo = (VP8LHistogram*)memory;
// literal_ won't necessary be aligned.
histo->literal_ = (uint32_t*)(memory + sizeof(VP8LHistogram));
- VP8LHistogramInit(histo, cache_bits);
+ VP8LHistogramInit(histo, cache_bits, /*init_arrays=*/ 0);
return histo;
}
+// Resets the pointers of the histograms to point to the bit buffer in the set.
+static void HistogramSetResetPointers(VP8LHistogramSet* const set,
+ int cache_bits) {
+ int i;
+ const int histo_size = VP8LGetHistogramSize(cache_bits);
+ uint8_t* memory = (uint8_t*) (set->histograms);
+ memory += set->max_size * sizeof(*set->histograms);
+ for (i = 0; i < set->max_size; ++i) {
+ memory = (uint8_t*) WEBP_ALIGN(memory);
+ set->histograms[i] = (VP8LHistogram*) memory;
+ // literal_ won't necessary be aligned.
+ set->histograms[i]->literal_ = (uint32_t*)(memory + sizeof(VP8LHistogram));
+ memory += histo_size;
+ }
+}
+
+// Returns the total size of the VP8LHistogramSet.
+static size_t HistogramSetTotalSize(int size, int cache_bits) {
+ const int histo_size = VP8LGetHistogramSize(cache_bits);
+ return (sizeof(VP8LHistogramSet) + size * (sizeof(VP8LHistogram*) +
+ histo_size + WEBP_ALIGN_CST));
+}
+
VP8LHistogramSet* VP8LAllocateHistogramSet(int size, int cache_bits) {
int i;
VP8LHistogramSet* set;
- const int histo_size = VP8LGetHistogramSize(cache_bits);
- const size_t total_size =
- sizeof(*set) + size * (sizeof(*set->histograms) +
- histo_size + WEBP_ALIGN_CST);
+ const size_t total_size = HistogramSetTotalSize(size, cache_bits);
uint8_t* memory = (uint8_t*)WebPSafeMalloc(total_size, sizeof(*memory));
if (memory == NULL) return NULL;
set = (VP8LHistogramSet*)memory;
memory += sizeof(*set);
set->histograms = (VP8LHistogram**)memory;
- memory += size * sizeof(*set->histograms);
set->max_size = size;
set->size = size;
+ HistogramSetResetPointers(set, cache_bits);
for (i = 0; i < size; ++i) {
- memory = (uint8_t*)WEBP_ALIGN(memory);
- set->histograms[i] = (VP8LHistogram*)memory;
- // literal_ won't necessary be aligned.
- set->histograms[i]->literal_ = (uint32_t*)(memory + sizeof(VP8LHistogram));
- VP8LHistogramInit(set->histograms[i], cache_bits);
- memory += histo_size;
+ VP8LHistogramInit(set->histograms[i], cache_bits, /*init_arrays=*/ 0);
}
return set;
}
+void VP8LHistogramSetClear(VP8LHistogramSet* const set) {
+ int i;
+ const int cache_bits = set->histograms[0]->palette_code_bits_;
+ const int size = set->size;
+ const size_t total_size = HistogramSetTotalSize(size, cache_bits);
+ uint8_t* memory = (uint8_t*)set;
+
+ memset(memory, 0, total_size);
+ memory += sizeof(*set);
+ set->histograms = (VP8LHistogram**)memory;
+ set->max_size = size;
+ set->size = size;
+ HistogramSetResetPointers(set, cache_bits);
+ for (i = 0; i < size; ++i) {
+ set->histograms[i]->palette_code_bits_ = cache_bits;
+ }
+}
+
// -----------------------------------------------------------------------------
void VP8LHistogramAddSinglePixOrCopy(VP8LHistogram* const histo,
@@ -237,7 +282,8 @@ static double FinalHuffmanCost(const VP8LStreaks* const stats) {
// Get the symbol entropy for the distribution 'population'.
// Set 'trivial_sym', if there's only one symbol present in the distribution.
static double PopulationCost(const uint32_t* const population, int length,
- uint32_t* const trivial_sym) {
+ uint32_t* const trivial_sym,
+ uint8_t* const is_used) {
VP8LBitEntropy bit_entropy;
VP8LStreaks stats;
VP8LGetEntropyUnrefined(population, length, &bit_entropy, &stats);
@@ -245,6 +291,8 @@ static double PopulationCost(const uint32_t* const population, int length,
*trivial_sym = (bit_entropy.nonzeros == 1) ? bit_entropy.nonzero_code
: VP8L_NON_TRIVIAL_SYM;
}
+ // The histogram is used if there is at least one non-zero streak.
+ *is_used = (stats.streaks[1][0] != 0 || stats.streaks[1][1] != 0);
return BitsEntropyRefine(&bit_entropy) + FinalHuffmanCost(&stats);
}
@@ -253,7 +301,9 @@ static double PopulationCost(const uint32_t* const population, int length,
// non-zero: both the zero-th one, or both the last one.
static WEBP_INLINE double GetCombinedEntropy(const uint32_t* const X,
const uint32_t* const Y,
- int length, int trivial_at_end) {
+ int length, int is_X_used,
+ int is_Y_used,
+ int trivial_at_end) {
VP8LStreaks stats;
if (trivial_at_end) {
// This configuration is due to palettization that transforms an indexed
@@ -262,28 +312,43 @@ static WEBP_INLINE double GetCombinedEntropy(const uint32_t* const X,
// Only FinalHuffmanCost needs to be evaluated.
memset(&stats, 0, sizeof(stats));
// Deal with the non-zero value at index 0 or length-1.
- stats.streaks[1][0] += 1;
+ stats.streaks[1][0] = 1;
// Deal with the following/previous zero streak.
- stats.counts[0] += 1;
- stats.streaks[0][1] += length - 1;
+ stats.counts[0] = 1;
+ stats.streaks[0][1] = length - 1;
return FinalHuffmanCost(&stats);
} else {
VP8LBitEntropy bit_entropy;
- VP8LGetCombinedEntropyUnrefined(X, Y, length, &bit_entropy, &stats);
+ if (is_X_used) {
+ if (is_Y_used) {
+ VP8LGetCombinedEntropyUnrefined(X, Y, length, &bit_entropy, &stats);
+ } else {
+ VP8LGetEntropyUnrefined(X, length, &bit_entropy, &stats);
+ }
+ } else {
+ if (is_Y_used) {
+ VP8LGetEntropyUnrefined(Y, length, &bit_entropy, &stats);
+ } else {
+ memset(&stats, 0, sizeof(stats));
+ stats.counts[0] = 1;
+ stats.streaks[0][length > 3] = length;
+ VP8LBitEntropyInit(&bit_entropy);
+ }
+ }
return BitsEntropyRefine(&bit_entropy) + FinalHuffmanCost(&stats);
}
}
// Estimates the Entropy + Huffman + other block overhead size cost.
-double VP8LHistogramEstimateBits(const VP8LHistogram* const p) {
+double VP8LHistogramEstimateBits(VP8LHistogram* const p) {
return
- PopulationCost(
- p->literal_, VP8LHistogramNumCodes(p->palette_code_bits_), NULL)
- + PopulationCost(p->red_, NUM_LITERAL_CODES, NULL)
- + PopulationCost(p->blue_, NUM_LITERAL_CODES, NULL)
- + PopulationCost(p->alpha_, NUM_LITERAL_CODES, NULL)
- + PopulationCost(p->distance_, NUM_DISTANCE_CODES, NULL)
+ PopulationCost(p->literal_, VP8LHistogramNumCodes(p->palette_code_bits_),
+ NULL, &p->is_used_[0])
+ + PopulationCost(p->red_, NUM_LITERAL_CODES, NULL, &p->is_used_[1])
+ + PopulationCost(p->blue_, NUM_LITERAL_CODES, NULL, &p->is_used_[2])
+ + PopulationCost(p->alpha_, NUM_LITERAL_CODES, NULL, &p->is_used_[3])
+ + PopulationCost(p->distance_, NUM_DISTANCE_CODES, NULL, &p->is_used_[4])
+ VP8LExtraCost(p->literal_ + NUM_LITERAL_CODES, NUM_LENGTH_CODES)
+ VP8LExtraCost(p->distance_, NUM_DISTANCE_CODES);
}
@@ -299,7 +364,8 @@ static int GetCombinedHistogramEntropy(const VP8LHistogram* const a,
int trivial_at_end = 0;
assert(a->palette_code_bits_ == b->palette_code_bits_);
*cost += GetCombinedEntropy(a->literal_, b->literal_,
- VP8LHistogramNumCodes(palette_code_bits), 0);
+ VP8LHistogramNumCodes(palette_code_bits),
+ a->is_used_[0], b->is_used_[0], 0);
*cost += VP8LExtraCostCombined(a->literal_ + NUM_LITERAL_CODES,
b->literal_ + NUM_LITERAL_CODES,
NUM_LENGTH_CODES);
@@ -319,19 +385,23 @@ static int GetCombinedHistogramEntropy(const VP8LHistogram* const a,
}
*cost +=
- GetCombinedEntropy(a->red_, b->red_, NUM_LITERAL_CODES, trivial_at_end);
+ GetCombinedEntropy(a->red_, b->red_, NUM_LITERAL_CODES, a->is_used_[1],
+ b->is_used_[1], trivial_at_end);
if (*cost > cost_threshold) return 0;
*cost +=
- GetCombinedEntropy(a->blue_, b->blue_, NUM_LITERAL_CODES, trivial_at_end);
+ GetCombinedEntropy(a->blue_, b->blue_, NUM_LITERAL_CODES, a->is_used_[2],
+ b->is_used_[2], trivial_at_end);
if (*cost > cost_threshold) return 0;
- *cost += GetCombinedEntropy(a->alpha_, b->alpha_, NUM_LITERAL_CODES,
- trivial_at_end);
+ *cost +=
+ GetCombinedEntropy(a->alpha_, b->alpha_, NUM_LITERAL_CODES,
+ a->is_used_[3], b->is_used_[3], trivial_at_end);
if (*cost > cost_threshold) return 0;
*cost +=
- GetCombinedEntropy(a->distance_, b->distance_, NUM_DISTANCE_CODES, 0);
+ GetCombinedEntropy(a->distance_, b->distance_, NUM_DISTANCE_CODES,
+ a->is_used_[4], b->is_used_[4], 0);
*cost +=
VP8LExtraCostCombined(a->distance_, b->distance_, NUM_DISTANCE_CODES);
if (*cost > cost_threshold) return 0;
@@ -419,16 +489,19 @@ static void UpdateDominantCostRange(
static void UpdateHistogramCost(VP8LHistogram* const h) {
uint32_t alpha_sym, red_sym, blue_sym;
const double alpha_cost =
- PopulationCost(h->alpha_, NUM_LITERAL_CODES, &alpha_sym);
+ PopulationCost(h->alpha_, NUM_LITERAL_CODES, &alpha_sym,
+ &h->is_used_[3]);
const double distance_cost =
- PopulationCost(h->distance_, NUM_DISTANCE_CODES, NULL) +
+ PopulationCost(h->distance_, NUM_DISTANCE_CODES, NULL, &h->is_used_[4]) +
VP8LExtraCost(h->distance_, NUM_DISTANCE_CODES);
const int num_codes = VP8LHistogramNumCodes(h->palette_code_bits_);
- h->literal_cost_ = PopulationCost(h->literal_, num_codes, NULL) +
- VP8LExtraCost(h->literal_ + NUM_LITERAL_CODES,
- NUM_LENGTH_CODES);
- h->red_cost_ = PopulationCost(h->red_, NUM_LITERAL_CODES, &red_sym);
- h->blue_cost_ = PopulationCost(h->blue_, NUM_LITERAL_CODES, &blue_sym);
+ h->literal_cost_ =
+ PopulationCost(h->literal_, num_codes, NULL, &h->is_used_[0]) +
+ VP8LExtraCost(h->literal_ + NUM_LITERAL_CODES, NUM_LENGTH_CODES);
+ h->red_cost_ =
+ PopulationCost(h->red_, NUM_LITERAL_CODES, &red_sym, &h->is_used_[1]);
+ h->blue_cost_ =
+ PopulationCost(h->blue_, NUM_LITERAL_CODES, &blue_sym, &h->is_used_[2]);
h->bit_cost_ = h->literal_cost_ + h->red_cost_ + h->blue_cost_ +
alpha_cost + distance_cost;
if ((alpha_sym | red_sym | blue_sym) == VP8L_NON_TRIVIAL_SYM) {
@@ -473,6 +546,7 @@ static void HistogramBuild(
VP8LHistogram** const histograms = image_histo->histograms;
VP8LRefsCursor c = VP8LRefsCursorInit(backward_refs);
assert(histo_bits > 0);
+ VP8LHistogramSetClear(image_histo);
while (VP8LRefsCursorOk(&c)) {
const PixOrCopy* const v = c.cur_pos;
const int ix = (y >> histo_bits) * histo_xsize + (x >> histo_bits);
@@ -493,11 +567,19 @@ static void HistogramCopyAndAnalyze(
const int histo_size = orig_histo->size;
VP8LHistogram** const orig_histograms = orig_histo->histograms;
VP8LHistogram** const histograms = image_histo->histograms;
+ image_histo->size = 0;
for (i = 0; i < histo_size; ++i) {
VP8LHistogram* const histo = orig_histograms[i];
UpdateHistogramCost(histo);
+
+ // Skip the histogram if it is completely empty, which can happen for tiles
+ // with no information (when they are skipped because of LZ77).
+ if (!histo->is_used_[0] && !histo->is_used_[1] && !histo->is_used_[2]
+ && !histo->is_used_[3] && !histo->is_used_[4]) {
+ continue;
+ }
// Copy histograms from orig_histo[] to image_histo[].
- HistogramCopy(histo, histograms[i]);
+ HistogramCopy(histo, histograms[image_histo->size++]);
}
}
@@ -674,6 +756,18 @@ static void HistoQueueUpdateHead(HistoQueue* const histo_queue,
}
}
+// Update the cost diff and combo of a pair of histograms. This needs to be
+// called when the histograms have been merged with a third one.
+static void HistoQueueUpdatePair(const VP8LHistogram* const h1,
+ const VP8LHistogram* const h2,
+ double threshold,
+ HistogramPair* const pair) {
+ const double sum_cost = h1->bit_cost_ + h2->bit_cost_;
+ pair->cost_combo = 0.;
+ GetCombinedHistogramEntropy(h1, h2, sum_cost + threshold, &pair->cost_combo);
+ pair->cost_diff = pair->cost_combo - sum_cost;
+}
+
// Create a pair from indices "idx1" and "idx2" provided its cost
// is inferior to "threshold", a negative entropy.
// It returns the cost of the pair, or 0. if it superior to threshold.
@@ -683,7 +777,6 @@ static double HistoQueuePush(HistoQueue* const histo_queue,
const VP8LHistogram* h1;
const VP8LHistogram* h2;
HistogramPair pair;
- double sum_cost;
assert(threshold <= 0.);
if (idx1 > idx2) {
@@ -695,10 +788,8 @@ static double HistoQueuePush(HistoQueue* const histo_queue,
pair.idx2 = idx2;
h1 = histograms[idx1];
h2 = histograms[idx2];
- sum_cost = h1->bit_cost_ + h2->bit_cost_;
- pair.cost_combo = 0.;
- GetCombinedHistogramEntropy(h1, h2, sum_cost + threshold, &pair.cost_combo);
- pair.cost_diff = pair.cost_combo - sum_cost;
+
+ HistoQueueUpdatePair(h1, h2, threshold, &pair);
// Do not even consider the pair if it does not improve the entropy.
if (pair.cost_diff >= threshold) return 0.;
@@ -891,8 +982,7 @@ static int HistogramCombineStochastic(VP8LHistogramSet* const image_histo,
}
if (do_eval) {
// Re-evaluate the cost of an updated pair.
- GetCombinedHistogramEntropy(histograms[p->idx1], histograms[p->idx2], 0,
- &p->cost_diff);
+ HistoQueueUpdatePair(histograms[p->idx1], histograms[p->idx2], 0., p);
if (p->cost_diff >= 0.) {
HistoQueuePopPair(&histo_queue, p);
continue;
@@ -987,8 +1077,7 @@ int VP8LGetHistoImageSymbols(int xsize, int ysize,
// histograms of small sizes (as bin_map will be very sparse) and
// maximum quality q==100 (to preserve the compression gains at that level).
const int entropy_combine_num_bins = low_effort ? NUM_PARTITIONS : BIN_SIZE;
- const int entropy_combine =
- (orig_histo->size > entropy_combine_num_bins * 2) && (quality < 100);
+ int entropy_combine;
if (orig_histo == NULL) goto Error;
@@ -996,15 +1085,16 @@ int VP8LGetHistoImageSymbols(int xsize, int ysize,
HistogramBuild(xsize, histo_bits, refs, orig_histo);
// Copies the histograms and computes its bit_cost.
HistogramCopyAndAnalyze(orig_histo, image_histo);
-
+ entropy_combine =
+ (image_histo->size > entropy_combine_num_bins * 2) && (quality < 100);
if (entropy_combine) {
- const int bin_map_size = orig_histo->size;
+ const int bin_map_size = image_histo->size;
// Reuse histogram_symbols storage. By definition, it's guaranteed to be ok.
uint16_t* const bin_map = histogram_symbols;
const double combine_cost_factor =
GetCombineCostFactor(image_histo_raw_size, quality);
- HistogramAnalyzeEntropyBin(orig_histo, bin_map, low_effort);
+ HistogramAnalyzeEntropyBin(image_histo, bin_map, low_effort);
// Collapse histograms with similar entropy.
HistogramCombineEntropyBin(image_histo, tmp_histo, bin_map, bin_map_size,
entropy_combine_num_bins, combine_cost_factor,
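The recurring theme of the histogram_enc.c changes above is the new is_used_[] bookkeeping: PopulationCost() records whether each symbol stream (literal, red, blue, alpha, distance) contains any non-zero count, and GetCombinedEntropy() and HistogramCopyAndAnalyze() then skip entropy work for streams and tiles that are entirely empty. The streak-based test added to PopulationCost() is equivalent to the straightforward check below, restated only for clarity.

#include <stdint.h>

// Sketch: stats.streaks[1][0] != 0 || stats.streaks[1][1] != 0 holds exactly
// when the population contains at least one non-zero count, since the [1][*]
// buckets only ever count runs of non-zero values.
static int PopulationIsUsed(const uint32_t* const population, int length) {
  int i;
  for (i = 0; i < length; ++i) {
    if (population[i] != 0) return 1;
  }
  return 0;
}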
diff --git a/thirdparty/libwebp/src/enc/histogram_enc.h b/thirdparty/libwebp/src/enc/histogram_enc.h
index e8c4c83f6f..54c2d21783 100644
--- a/thirdparty/libwebp/src/enc/histogram_enc.h
+++ b/thirdparty/libwebp/src/enc/histogram_enc.h
@@ -44,6 +44,7 @@ typedef struct {
double literal_cost_; // Cached values of dominant entropy costs:
double red_cost_; // literal, red & blue.
double blue_cost_;
+ uint8_t is_used_[5]; // 5 for literal, red, blue, alpha, distance
} VP8LHistogram;
// Collection of histograms with fixed capacity, allocated as one
@@ -67,7 +68,9 @@ void VP8LHistogramCreate(VP8LHistogram* const p,
int VP8LGetHistogramSize(int palette_code_bits);
// Set the palette_code_bits and reset the stats.
-void VP8LHistogramInit(VP8LHistogram* const p, int palette_code_bits);
+// If init_arrays is true, the arrays are also filled with 0's.
+void VP8LHistogramInit(VP8LHistogram* const p, int palette_code_bits,
+ int init_arrays);
// Collect all the references into a histogram (without reset)
void VP8LHistogramStoreRefs(const VP8LBackwardRefs* const refs,
@@ -83,6 +86,9 @@ void VP8LFreeHistogramSet(VP8LHistogramSet* const histo);
// using 'cache_bits'. Return NULL in case of memory error.
VP8LHistogramSet* VP8LAllocateHistogramSet(int size, int cache_bits);
+// Set the histograms in set to 0.
+void VP8LHistogramSetClear(VP8LHistogramSet* const set);
+
// Allocate and initialize histogram object with specified 'cache_bits'.
// Returns NULL in case of memory error.
// Special case of VP8LAllocateHistogramSet, with size equals 1.
@@ -113,7 +119,7 @@ double VP8LBitsEntropy(const uint32_t* const array, int n);
// Estimate how many bits the combined entropy of literals and distance
// approximately maps to.
-double VP8LHistogramEstimateBits(const VP8LHistogram* const p);
+double VP8LHistogramEstimateBits(VP8LHistogram* const p);
#ifdef __cplusplus
}
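Taken together, the header changes imply a slightly different call order than before: a freshly allocated set has valid pointers but uninitialized counters, so callers clear it before storing references into it (EncodeImageNoHuffman, further down in this diff, gains exactly such a call). Below is a hedged usage sketch built only from the declarations above; the wrapper function itself is illustrative.

static double EstimateRefsBits(const VP8LBackwardRefs* const refs,
                               int cache_bits) {
  double bits = 0.;
  VP8LHistogramSet* const set = VP8LAllocateHistogramSet(1, cache_bits);
  if (set == NULL) return bits;
  VP8LHistogramSetClear(set);   // counters are no longer zeroed by the allocator
  VP8LHistogramStoreRefs(refs, set->histograms[0]);
  bits = VP8LHistogramEstimateBits(set->histograms[0]);
  VP8LFreeHistogramSet(set);
  return bits;
}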
diff --git a/thirdparty/libwebp/src/enc/iterator_enc.c b/thirdparty/libwebp/src/enc/iterator_enc.c
index 7c47d51272..29f91d8315 100644
--- a/thirdparty/libwebp/src/enc/iterator_enc.c
+++ b/thirdparty/libwebp/src/enc/iterator_enc.c
@@ -128,7 +128,7 @@ static void ImportLine(const uint8_t* src, int src_stride,
for (; i < total_len; ++i) dst[i] = dst[len - 1];
}
-void VP8IteratorImport(VP8EncIterator* const it, uint8_t* tmp_32) {
+void VP8IteratorImport(VP8EncIterator* const it, uint8_t* const tmp_32) {
const VP8Encoder* const enc = it->enc_;
const int x = it->x_, y = it->y_;
const WebPPicture* const pic = enc->pic_;
diff --git a/thirdparty/libwebp/src/enc/picture_tools_enc.c b/thirdparty/libwebp/src/enc/picture_tools_enc.c
index be292d4391..d0e8a495da 100644
--- a/thirdparty/libwebp/src/enc/picture_tools_enc.c
+++ b/thirdparty/libwebp/src/enc/picture_tools_enc.c
@@ -16,10 +16,6 @@
#include "src/enc/vp8i_enc.h"
#include "src/dsp/yuv.h"
-static WEBP_INLINE uint32_t MakeARGB32(int r, int g, int b) {
- return (0xff000000u | (r << 16) | (g << 8) | b);
-}
-
//------------------------------------------------------------------------------
// Helper: clean up fully transparent area to help compressibility.
@@ -195,6 +191,10 @@ void WebPCleanupTransparentAreaLossless(WebPPicture* const pic) {
#define BLEND_10BIT(V0, V1, ALPHA) \
((((V0) * (1020 - (ALPHA)) + (V1) * (ALPHA)) * 0x101 + 1024) >> 18)
+static WEBP_INLINE uint32_t MakeARGB32(int r, int g, int b) {
+ return (0xff000000u | (r << 16) | (g << 8) | b);
+}
+
void WebPBlendAlpha(WebPPicture* pic, uint32_t background_rgb) {
const int red = (background_rgb >> 16) & 0xff;
const int green = (background_rgb >> 8) & 0xff;
@@ -208,39 +208,44 @@ void WebPBlendAlpha(WebPPicture* pic, uint32_t background_rgb) {
const int U0 = VP8RGBToU(4 * red, 4 * green, 4 * blue, 4 * YUV_HALF);
const int V0 = VP8RGBToV(4 * red, 4 * green, 4 * blue, 4 * YUV_HALF);
const int has_alpha = pic->colorspace & WEBP_CSP_ALPHA_BIT;
- if (!has_alpha || pic->a == NULL) return; // nothing to do
+ uint8_t* y_ptr = pic->y;
+ uint8_t* u_ptr = pic->u;
+ uint8_t* v_ptr = pic->v;
+ uint8_t* a_ptr = pic->a;
+ if (!has_alpha || a_ptr == NULL) return; // nothing to do
for (y = 0; y < pic->height; ++y) {
// Luma blending
- uint8_t* const y_ptr = pic->y + y * pic->y_stride;
- uint8_t* const a_ptr = pic->a + y * pic->a_stride;
for (x = 0; x < pic->width; ++x) {
- const int alpha = a_ptr[x];
+ const uint8_t alpha = a_ptr[x];
if (alpha < 0xff) {
- y_ptr[x] = BLEND(Y0, y_ptr[x], a_ptr[x]);
+ y_ptr[x] = BLEND(Y0, y_ptr[x], alpha);
}
}
// Chroma blending every even line
if ((y & 1) == 0) {
- uint8_t* const u = pic->u + (y >> 1) * pic->uv_stride;
- uint8_t* const v = pic->v + (y >> 1) * pic->uv_stride;
uint8_t* const a_ptr2 =
(y + 1 == pic->height) ? a_ptr : a_ptr + pic->a_stride;
for (x = 0; x < uv_width; ++x) {
// Average four alpha values into a single blending weight.
// TODO(skal): might lead to visible contouring. Can we do better?
- const int alpha =
+ const uint32_t alpha =
a_ptr[2 * x + 0] + a_ptr[2 * x + 1] +
a_ptr2[2 * x + 0] + a_ptr2[2 * x + 1];
- u[x] = BLEND_10BIT(U0, u[x], alpha);
- v[x] = BLEND_10BIT(V0, v[x], alpha);
+ u_ptr[x] = BLEND_10BIT(U0, u_ptr[x], alpha);
+ v_ptr[x] = BLEND_10BIT(V0, v_ptr[x], alpha);
}
if (pic->width & 1) { // rightmost pixel
- const int alpha = 2 * (a_ptr[2 * x + 0] + a_ptr2[2 * x + 0]);
- u[x] = BLEND_10BIT(U0, u[x], alpha);
- v[x] = BLEND_10BIT(V0, v[x], alpha);
+ const uint32_t alpha = 2 * (a_ptr[2 * x + 0] + a_ptr2[2 * x + 0]);
+ u_ptr[x] = BLEND_10BIT(U0, u_ptr[x], alpha);
+ v_ptr[x] = BLEND_10BIT(V0, v_ptr[x], alpha);
}
+ } else {
+ u_ptr += pic->uv_stride;
+ v_ptr += pic->uv_stride;
}
- memset(a_ptr, 0xff, pic->width);
+ memset(a_ptr, 0xff, pic->width); // reset alpha value to opaque
+ a_ptr += pic->a_stride;
+ y_ptr += pic->y_stride;
}
} else {
uint32_t* argb = pic->argb;
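For reference, the BLEND_10BIT macro shown unchanged in the hunk above blends a background component V0 with an image component V1 using a 10-bit alpha (the sum of four 8-bit alpha samples, 0..1020). The multiply by 0x101 followed by a shift of 18 stands in for a division by 1020, since 0x101 * 1020 = 262140 is almost exactly 1 << 18. A small sketch of the same arithmetic, with one value worked out:

#include <stdint.h>

// Equivalent of BLEND_10BIT(V0, V1, ALPHA): approximately
// (v0 * (1020 - alpha) + v1 * alpha) / 1020, computed without a division.
static uint8_t Blend10Bit(int v0, int v1, uint32_t alpha) {
  return (uint8_t)(((v0 * (1020 - alpha) + v1 * alpha) * 0x101 + 1024) >> 18);
}

// Example: Blend10Bit(100, 200, 510) == 150, the exact midpoint, since
// alpha == 510 is half of the 10-bit range.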
diff --git a/thirdparty/libwebp/src/enc/vp8i_enc.h b/thirdparty/libwebp/src/enc/vp8i_enc.h
index 624e8f8e66..92439febb8 100644
--- a/thirdparty/libwebp/src/enc/vp8i_enc.h
+++ b/thirdparty/libwebp/src/enc/vp8i_enc.h
@@ -32,7 +32,7 @@ extern "C" {
// version numbers
#define ENC_MAJ_VERSION 1
#define ENC_MIN_VERSION 0
-#define ENC_REV_VERSION 0
+#define ENC_REV_VERSION 1
enum { MAX_LF_LEVELS = 64, // Maximum loop filter level
MAX_VARIABLE_LEVEL = 67, // last (inclusive) level with variable cost
@@ -278,7 +278,7 @@ int VP8IteratorIsDone(const VP8EncIterator* const it);
// Import uncompressed samples from source.
// If tmp_32 is not NULL, import boundary samples too.
// tmp_32 is a 32-bytes scratch buffer that must be aligned in memory.
-void VP8IteratorImport(VP8EncIterator* const it, uint8_t* tmp_32);
+void VP8IteratorImport(VP8EncIterator* const it, uint8_t* const tmp_32);
// export decimated samples
void VP8IteratorExport(const VP8EncIterator* const it);
// go to next macroblock. Returns false if not finished.
@@ -515,4 +515,4 @@ void WebPCleanupTransparentAreaLossless(WebPPicture* const pic);
} // extern "C"
#endif
-#endif /* WEBP_ENC_VP8I_ENC_H_ */
+#endif // WEBP_ENC_VP8I_ENC_H_
diff --git a/thirdparty/libwebp/src/enc/vp8l_enc.c b/thirdparty/libwebp/src/enc/vp8l_enc.c
index a89184eb08..2713edcd95 100644
--- a/thirdparty/libwebp/src/enc/vp8l_enc.c
+++ b/thirdparty/libwebp/src/enc/vp8l_enc.c
@@ -809,6 +809,7 @@ static WebPEncodingError EncodeImageNoHuffman(VP8LBitWriter* const bw,
err = VP8_ENC_ERROR_OUT_OF_MEMORY;
goto Error;
}
+ VP8LHistogramSetClear(histogram_image);
// Build histogram image and symbols from backward references.
VP8LHistogramStoreRefs(refs, histogram_image->histograms[0]);
@@ -1248,14 +1249,20 @@ static WebPEncodingError MakeInputImageCopy(VP8LEncoder* const enc) {
const WebPPicture* const picture = enc->pic_;
const int width = picture->width;
const int height = picture->height;
- int y;
+
err = AllocateTransformBuffer(enc, width, height);
if (err != VP8_ENC_OK) return err;
if (enc->argb_content_ == kEncoderARGB) return VP8_ENC_OK;
- for (y = 0; y < height; ++y) {
- memcpy(enc->argb_ + y * width,
- picture->argb + y * picture->argb_stride,
- width * sizeof(*enc->argb_));
+
+ {
+ uint32_t* dst = enc->argb_;
+ const uint32_t* src = picture->argb;
+ int y;
+ for (y = 0; y < height; ++y) {
+ memcpy(dst, src, width * sizeof(*dst));
+ dst += width;
+ src += picture->argb_stride;
+ }
}
enc->argb_content_ = kEncoderARGB;
assert(enc->current_width_ == width);
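The MakeInputImageCopy() hunk above replaces per-row index arithmetic (y * width, y * argb_stride) with a plain pointer walk. A generic sketch of that strided-copy pattern, with invented names and no dependency on the encoder types:

#include <stdint.h>
#include <string.h>

// Copy a width x height block of 32-bit pixels from a strided source into a
// tightly packed destination, advancing both pointers once per row.
static void CopyPlane32(uint32_t* dst, const uint32_t* src,
                        int width, int height, int src_stride) {
  int y;
  for (y = 0; y < height; ++y) {
    memcpy(dst, src, (size_t)width * sizeof(*dst));
    dst += width;        // destination rows are packed back to back
    src += src_stride;   // source rows are src_stride pixels apart
  }
}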
diff --git a/thirdparty/libwebp/src/enc/vp8li_enc.h b/thirdparty/libwebp/src/enc/vp8li_enc.h
index 298a4a0014..d2d0fc509c 100644
--- a/thirdparty/libwebp/src/enc/vp8li_enc.h
+++ b/thirdparty/libwebp/src/enc/vp8li_enc.h
@@ -115,4 +115,4 @@ void VP8LColorSpaceTransform(int width, int height, int bits, int quality,
} // extern "C"
#endif
-#endif /* WEBP_ENC_VP8LI_ENC_H_ */
+#endif // WEBP_ENC_VP8LI_ENC_H_