Diffstat (limited to 'drivers/webp/dsp')
-rw-r--r--  drivers/webp/dsp/cpu.c             |  15
-rw-r--r--  drivers/webp/dsp/dec.c             |  54
-rw-r--r--  drivers/webp/dsp/dec_neon.c        | 160
-rw-r--r--  drivers/webp/dsp/dec_sse2.c        | 129
-rw-r--r--  drivers/webp/dsp/dsp.h             |  42
-rw-r--r--  drivers/webp/dsp/enc.c             | 188
-rw-r--r--  drivers/webp/dsp/enc_neon.c        | 632
-rw-r--r--  drivers/webp/dsp/enc_sse2.c        | 396
-rw-r--r--  drivers/webp/dsp/lossless.c        | 654
-rw-r--r--  drivers/webp/dsp/lossless.h        | 158
-rw-r--r--  drivers/webp/dsp/upsampling.c      |  43
-rw-r--r--  drivers/webp/dsp/upsampling_neon.c | 265
-rw-r--r--  drivers/webp/dsp/upsampling_sse2.c | 139
-rw-r--r--  drivers/webp/dsp/yuv.c             | 185
-rw-r--r--  drivers/webp/dsp/yuv.h             | 285
15 files changed, 617 insertions(+), 2728 deletions(-)
diff --git a/drivers/webp/dsp/cpu.c b/drivers/webp/dsp/cpu.c
index 7a1f417a55..0228734457 100644
--- a/drivers/webp/dsp/cpu.c
+++ b/drivers/webp/dsp/cpu.c
@@ -1,10 +1,8 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
-// Use of this source code is governed by a BSD-style license
-// that can be found in the COPYING file in the root of the source
-// tree. An additional intellectual property rights grant can be found
-// in the file PATENTS. All contributing project authors may
-// be found in the AUTHORS file in the root of the source tree.
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// -----------------------------------------------------------------------------
//
// CPU detection
@@ -17,6 +15,10 @@
#include <cpu-features.h>
#endif
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
//------------------------------------------------------------------------------
// SSE2 detection.
//
@@ -78,3 +80,6 @@ VP8CPUInfo VP8GetCPUInfo = armCPUInfo;
VP8CPUInfo VP8GetCPUInfo = NULL;
#endif
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
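
Side note on the guard this commit adds throughout: __cplusplus is the standard macro, while c_plusplus is reportedly a pre-standard spelling that some older C++ compilers defined instead, so the sources test both. A minimal sketch of the pattern (webp_example_symbol is a hypothetical name):

    #if defined(__cplusplus) || defined(c_plusplus)
    extern "C" {
    #endif

    void webp_example_symbol(void);  /* gets C linkage when built as C++ */

    #if defined(__cplusplus) || defined(c_plusplus)
    }  /* extern "C" */
    #endif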
diff --git a/drivers/webp/dsp/dec.c b/drivers/webp/dsp/dec.c
index 8b246fad0a..9ae7b6fa76 100644
--- a/drivers/webp/dsp/dec.c
+++ b/drivers/webp/dsp/dec.c
@@ -1,10 +1,8 @@
// Copyright 2010 Google Inc. All Rights Reserved.
//
-// Use of this source code is governed by a BSD-style license
-// that can be found in the COPYING file in the root of the source
-// tree. An additional intellectual property rights grant can be found
-// in the file PATENTS. All contributing project authors may
-// be found in the AUTHORS file in the root of the source tree.
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// -----------------------------------------------------------------------------
//
// Speed-critical decoding functions.
@@ -14,6 +12,10 @@
#include "./dsp.h"
#include "../dec/vp8i.h"
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
//------------------------------------------------------------------------------
// run-time tables (~4k)
@@ -57,14 +59,6 @@ static WEBP_INLINE uint8_t clip_8b(int v) {
#define STORE(x, y, v) \
dst[x + y * BPS] = clip_8b(dst[x + y * BPS] + ((v) >> 3))
-#define STORE2(y, dc, d, c) do { \
- const int DC = (dc); \
- STORE(0, y, DC + (d)); \
- STORE(1, y, DC + (c)); \
- STORE(2, y, DC - (c)); \
- STORE(3, y, DC - (d)); \
-} while (0)
-
static const int kC1 = 20091 + (1 << 16);
static const int kC2 = 35468;
#define MUL(a, b) (((a) * (b)) >> 16)
@@ -107,21 +101,7 @@ static void TransformOne(const int16_t* in, uint8_t* dst) {
dst += BPS;
}
}
-
-// Simplified transform when only in[0], in[1] and in[4] are non-zero
-static void TransformAC3(const int16_t* in, uint8_t* dst) {
- const int a = in[0] + 4;
- const int c4 = MUL(in[4], kC2);
- const int d4 = MUL(in[4], kC1);
- const int c1 = MUL(in[1], kC2);
- const int d1 = MUL(in[1], kC1);
- STORE2(0, a + d4, d1, c1);
- STORE2(1, a + c4, d1, c1);
- STORE2(2, a - c4, d1, c1);
- STORE2(3, a - d4, d1, c1);
-}
#undef MUL
-#undef STORE2
static void TransformTwo(const int16_t* in, uint8_t* dst, int do_two) {
TransformOne(in, dst);
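
Side note on the constants above: kC1 and kC2 are 16.16 fixed-point factors. kC2 = 35468 is sqrt(2)*sin(pi/8) ~ 0.5412 scaled by 65536, and kC1 = 20091 + (1 << 16) folds the integer part into the constant so a single MUL yields ~1.3066*x with no separate add. A compilable sketch; the asserted values are just spot checks:

    #include <assert.h>
    #define MUL(a, b) (((a) * (b)) >> 16)
    int main(void) {
      const int kC1 = 20091 + (1 << 16);
      const int kC2 = 35468;
      assert(MUL(1000, kC2) == 541);   /* ~0.5412 * 1000 */
      assert(MUL(1000, kC1) == 1306);  /* ~1.3066 * 1000 in one macro call */
      return 0;
    }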
@@ -446,16 +426,11 @@ static void HE8uv(uint8_t *dst) { // horizontal
}
// helper for chroma-DC predictions
-static WEBP_INLINE void Put8x8uv(uint8_t value, uint8_t* dst) {
+static WEBP_INLINE void Put8x8uv(uint64_t v, uint8_t* dst) {
int j;
-#ifndef WEBP_REFERENCE_IMPLEMENTATION
- const uint64_t v = (uint64_t)value * 0x0101010101010101ULL;
for (j = 0; j < 8; ++j) {
*(uint64_t*)(dst + j * BPS) = v;
}
-#else
- for (j = 0; j < 8; ++j) memset(dst + j * BPS, value, 8);
-#endif
}
static void DC8uv(uint8_t *dst) { // DC
@@ -464,7 +439,7 @@ static void DC8uv(uint8_t *dst) { // DC
for (i = 0; i < 8; ++i) {
dc0 += dst[i - BPS] + dst[-1 + i * BPS];
}
- Put8x8uv(dc0 >> 4, dst);
+ Put8x8uv((uint64_t)((dc0 >> 4) * 0x0101010101010101ULL), dst);
}
static void DC8uvNoLeft(uint8_t *dst) { // DC with no left samples
@@ -473,7 +448,7 @@ static void DC8uvNoLeft(uint8_t *dst) { // DC with no left samples
for (i = 0; i < 8; ++i) {
dc0 += dst[i - BPS];
}
- Put8x8uv(dc0 >> 3, dst);
+ Put8x8uv((uint64_t)((dc0 >> 3) * 0x0101010101010101ULL), dst);
}
static void DC8uvNoTop(uint8_t *dst) { // DC with no top samples
@@ -482,11 +457,11 @@ static void DC8uvNoTop(uint8_t *dst) { // DC with no top samples
for (i = 0; i < 8; ++i) {
dc0 += dst[-1 + i * BPS];
}
- Put8x8uv(dc0 >> 3, dst);
+ Put8x8uv((uint64_t)((dc0 >> 3) * 0x0101010101010101ULL), dst);
}
static void DC8uvNoTopLeft(uint8_t *dst) { // DC with nothing
- Put8x8uv(0x80, dst);
+ Put8x8uv(0x8080808080808080ULL, dst);
}
//------------------------------------------------------------------------------
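
The rewritten Put8x8uv callers above rely on the byte-replication trick: multiplying an 8-bit value by 0x0101010101010101 broadcasts it into all eight bytes of a uint64_t, so each 8-pixel row can be stored with a single 64-bit write. A minimal sketch:

    #include <assert.h>
    #include <stdint.h>
    int main(void) {
      const uint8_t value = 0x80;
      const uint64_t v = (uint64_t)value * 0x0101010101010101ULL;
      assert(v == 0x8080808080808080ULL);  /* one copy per byte lane */
      return 0;
    }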
@@ -697,7 +672,6 @@ static void HFilter8i(uint8_t* u, uint8_t* v, int stride,
//------------------------------------------------------------------------------
VP8DecIdct2 VP8Transform;
-VP8DecIdct VP8TransformAC3;
VP8DecIdct VP8TransformUV;
VP8DecIdct VP8TransformDC;
VP8DecIdct VP8TransformDCUV;
@@ -725,7 +699,6 @@ void VP8DspInit(void) {
VP8TransformUV = TransformUV;
VP8TransformDC = TransformDC;
VP8TransformDCUV = TransformDCUV;
- VP8TransformAC3 = TransformAC3;
VP8VFilter16 = VFilter16;
VP8HFilter16 = HFilter16;
@@ -754,3 +727,6 @@ void VP8DspInit(void) {
}
}
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
diff --git a/drivers/webp/dsp/dec_neon.c b/drivers/webp/dsp/dec_neon.c
index 9c3d8cc016..ec824b790b 100644
--- a/drivers/webp/dsp/dec_neon.c
+++ b/drivers/webp/dsp/dec_neon.c
@@ -1,10 +1,8 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
-// Use of this source code is governed by a BSD-style license
-// that can be found in the COPYING file in the root of the source
-// tree. An additional intellectual property rights grant can be found
-// in the file PATENTS. All contributing project authors may
-// be found in the AUTHORS file in the root of the source tree.
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// -----------------------------------------------------------------------------
//
// ARM NEON version of dsp functions and loop filtering.
@@ -18,7 +16,11 @@
#include "../dec/vp8i.h"
-#define QRegs "q0", "q1", "q2", "q3", \
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+#define QRegs "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", \
"q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
#define FLIP_SIGN_BIT2(a, b, s) \
@@ -77,7 +79,7 @@
"vld4.8 {" #c1"[6], " #c2"[6], " #c3"[6], " #c4"[6]}," #b1 "," #stride"\n" \
"vld4.8 {" #c1"[7], " #c2"[7], " #c3"[7], " #c4"[7]}," #b2 "," #stride"\n"
-#define STORE8x2(c1, c2, p, stride) \
+#define STORE8x2(c1, c2, p,stride) \
"vst2.8 {" #c1"[0], " #c2"[0]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[1], " #c2"[1]}," #p "," #stride " \n" \
"vst2.8 {" #c1"[2], " #c2"[2]}," #p "," #stride " \n" \
@@ -97,9 +99,9 @@ static void SimpleVFilter16NEON(uint8_t* p, int stride, int thresh) {
"vld1.u8 {q1}, [%[p]], %[stride] \n" // p1
"vld1.u8 {q2}, [%[p]], %[stride] \n" // p0
"vld1.u8 {q3}, [%[p]], %[stride] \n" // q0
- "vld1.u8 {q12}, [%[p]] \n" // q1
+ "vld1.u8 {q4}, [%[p]] \n" // q1
- DO_FILTER2(q1, q2, q3, q12, %[thresh])
+ DO_FILTER2(q1, q2, q3, q4, %[thresh])
"sub %[p], %[p], %[stride], lsl #1 \n" // p -= 2 * stride
@@ -118,18 +120,18 @@ static void SimpleHFilter16NEON(uint8_t* p, int stride, int thresh) {
"add r5, r4, %[stride] \n" // base2 = base1 + stride
LOAD8x4(d2, d3, d4, d5, [r4], [r5], r6)
- LOAD8x4(d24, d25, d26, d27, [r4], [r5], r6)
- "vswp d3, d24 \n" // p1:q1 p0:q3
- "vswp d5, d26 \n" // q0:q2 q1:q4
- "vswp q2, q12 \n" // p1:q1 p0:q2 q0:q3 q1:q4
+ LOAD8x4(d6, d7, d8, d9, [r4], [r5], r6)
+ "vswp d3, d6 \n" // p1:q1 p0:q3
+ "vswp d5, d8 \n" // q0:q2 q1:q4
+ "vswp q2, q3 \n" // p1:q1 p0:q2 q0:q3 q1:q4
- DO_FILTER2(q1, q2, q12, q13, %[thresh])
+ DO_FILTER2(q1, q2, q3, q4, %[thresh])
"sub %[p], %[p], #1 \n" // p - 1
- "vswp d5, d24 \n"
+ "vswp d5, d6 \n"
STORE8x2(d4, d5, [%[p]], %[stride])
- STORE8x2(d24, d25, [%[p]], %[stride])
+ STORE8x2(d6, d7, [%[p]], %[stride])
: [p] "+r"(p)
: [stride] "r"(stride), [thresh] "r"(thresh)
@@ -153,10 +155,7 @@ static void SimpleHFilter16iNEON(uint8_t* p, int stride, int thresh) {
}
}
-//-----------------------------------------------------------------------------
-// Inverse transforms (Paragraph 14.4)
-
-static void TransformOne(const int16_t* in, uint8_t* dst) {
+static void TransformOneNEON(const int16_t *in, uint8_t *dst) {
const int kBPS = BPS;
const int16_t constants[] = {20091, 17734, 0, 0};
/* kC1, kC2. Padded because vld1.16 loads 8 bytes
@@ -305,129 +304,26 @@ static void TransformOne(const int16_t* in, uint8_t* dst) {
);
}
-static void TransformTwo(const int16_t* in, uint8_t* dst, int do_two) {
- TransformOne(in, dst);
+static void TransformTwoNEON(const int16_t* in, uint8_t* dst, int do_two) {
+ TransformOneNEON(in, dst);
if (do_two) {
- TransformOne(in + 16, dst + 4);
+ TransformOneNEON(in + 16, dst + 4);
}
}
-static void TransformDC(const int16_t* in, uint8_t* dst) {
- const int DC = (in[0] + 4) >> 3;
- const int kBPS = BPS;
- __asm__ volatile (
- "vdup.16 q1, %[DC] \n"
-
- "vld1.32 d0[0], [%[dst]], %[kBPS] \n"
- "vld1.32 d1[0], [%[dst]], %[kBPS] \n"
- "vld1.32 d0[1], [%[dst]], %[kBPS] \n"
- "vld1.32 d1[1], [%[dst]], %[kBPS] \n"
-
- "sub %[dst], %[dst], %[kBPS], lsl #2 \n"
-
- // add DC and convert to s16.
- "vaddw.u8 q2, q1, d0 \n"
- "vaddw.u8 q3, q1, d1 \n"
- // convert back to u8 with saturation
- "vqmovun.s16 d0, q2 \n"
- "vqmovun.s16 d1, q3 \n"
-
- "vst1.32 d0[0], [%[dst]], %[kBPS] \n"
- "vst1.32 d1[0], [%[dst]], %[kBPS] \n"
- "vst1.32 d0[1], [%[dst]], %[kBPS] \n"
- "vst1.32 d1[1], [%[dst]] \n"
- : [in] "+r"(in), [dst] "+r"(dst) /* modified registers */
- : [kBPS] "r"(kBPS), /* constants */
- [DC] "r"(DC)
- : "memory", "q0", "q1", "q2", "q3" /* clobbered */
- );
-}
-
-static void TransformWHT(const int16_t* in, int16_t* out) {
- const int kStep = 32; // The store is only incrementing the pointer as if we
- // had stored a single byte.
- __asm__ volatile (
- // part 1
- // load data into q0, q1
- "vld1.16 {q0, q1}, [%[in]] \n"
-
- "vaddl.s16 q2, d0, d3 \n" // a0 = in[0] + in[12]
- "vaddl.s16 q3, d1, d2 \n" // a1 = in[4] + in[8]
- "vsubl.s16 q10, d1, d2 \n" // a2 = in[4] - in[8]
- "vsubl.s16 q11, d0, d3 \n" // a3 = in[0] - in[12]
-
- "vadd.s32 q0, q2, q3 \n" // tmp[0] = a0 + a1
- "vsub.s32 q2, q2, q3 \n" // tmp[8] = a0 - a1
- "vadd.s32 q1, q11, q10 \n" // tmp[4] = a3 + a2
- "vsub.s32 q3, q11, q10 \n" // tmp[12] = a3 - a2
-
- // Transpose
- // q0 = tmp[0, 4, 8, 12], q1 = tmp[2, 6, 10, 14]
- // q2 = tmp[1, 5, 9, 13], q3 = tmp[3, 7, 11, 15]
- "vswp d1, d4 \n" // vtrn.64 q0, q2
- "vswp d3, d6 \n" // vtrn.64 q1, q3
- "vtrn.32 q0, q1 \n"
- "vtrn.32 q2, q3 \n"
-
- "vmov.s32 q10, #3 \n" // dc = 3
- "vadd.s32 q0, q0, q10 \n" // dc = tmp[0] + 3
- "vadd.s32 q12, q0, q3 \n" // a0 = dc + tmp[3]
- "vadd.s32 q13, q1, q2 \n" // a1 = tmp[1] + tmp[2]
- "vsub.s32 q8, q1, q2 \n" // a2 = tmp[1] - tmp[2]
- "vsub.s32 q9, q0, q3 \n" // a3 = dc - tmp[3]
-
- "vadd.s32 q0, q12, q13 \n"
- "vshrn.s32 d0, q0, #3 \n" // (a0 + a1) >> 3
- "vadd.s32 q1, q9, q8 \n"
- "vshrn.s32 d1, q1, #3 \n" // (a3 + a2) >> 3
- "vsub.s32 q2, q12, q13 \n"
- "vshrn.s32 d2, q2, #3 \n" // (a0 - a1) >> 3
- "vsub.s32 q3, q9, q8 \n"
- "vshrn.s32 d3, q3, #3 \n" // (a3 - a2) >> 3
-
- // set the results to output
- "vst1.16 d0[0], [%[out]], %[kStep] \n"
- "vst1.16 d1[0], [%[out]], %[kStep] \n"
- "vst1.16 d2[0], [%[out]], %[kStep] \n"
- "vst1.16 d3[0], [%[out]], %[kStep] \n"
- "vst1.16 d0[1], [%[out]], %[kStep] \n"
- "vst1.16 d1[1], [%[out]], %[kStep] \n"
- "vst1.16 d2[1], [%[out]], %[kStep] \n"
- "vst1.16 d3[1], [%[out]], %[kStep] \n"
- "vst1.16 d0[2], [%[out]], %[kStep] \n"
- "vst1.16 d1[2], [%[out]], %[kStep] \n"
- "vst1.16 d2[2], [%[out]], %[kStep] \n"
- "vst1.16 d3[2], [%[out]], %[kStep] \n"
- "vst1.16 d0[3], [%[out]], %[kStep] \n"
- "vst1.16 d1[3], [%[out]], %[kStep] \n"
- "vst1.16 d2[3], [%[out]], %[kStep] \n"
- "vst1.16 d3[3], [%[out]], %[kStep] \n"
-
- : [out] "+r"(out) // modified registers
- : [in] "r"(in), [kStep] "r"(kStep) // constants
- : "memory", "q0", "q1", "q2", "q3",
- "q8", "q9", "q10", "q11", "q12", "q13" // clobbered
- );
-}
-
-#endif // WEBP_USE_NEON
-
-//------------------------------------------------------------------------------
-// Entry point
-
extern void VP8DspInitNEON(void);
void VP8DspInitNEON(void) {
-#if defined(WEBP_USE_NEON)
- VP8Transform = TransformTwo;
- VP8TransformAC3 = TransformOne; // no special code here
- VP8TransformDC = TransformDC;
- VP8TransformWHT = TransformWHT;
+ VP8Transform = TransformTwoNEON;
VP8SimpleVFilter16 = SimpleVFilter16NEON;
VP8SimpleHFilter16 = SimpleHFilter16NEON;
VP8SimpleVFilter16i = SimpleVFilter16iNEON;
VP8SimpleHFilter16i = SimpleHFilter16iNEON;
-#endif // WEBP_USE_NEON
}
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
+
+#endif // WEBP_USE_NEON
diff --git a/drivers/webp/dsp/dec_sse2.c b/drivers/webp/dsp/dec_sse2.c
index 150c559f13..472b68ecb8 100644
--- a/drivers/webp/dsp/dec_sse2.c
+++ b/drivers/webp/dsp/dec_sse2.c
@@ -1,10 +1,8 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
-// Use of this source code is governed by a BSD-style license
-// that can be found in the COPYING file in the root of the source
-// tree. An additional intellectual property rights grant can be found
-// in the file PATENTS. All contributing project authors may
-// be found in the AUTHORS file in the root of the source tree.
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// -----------------------------------------------------------------------------
//
// SSE2 version of some decoding functions (idct, loop filtering).
@@ -16,13 +14,13 @@
#if defined(WEBP_USE_SSE2)
-// The 3-coeff sparse transform in SSE2 is not really faster than the plain-C
-// one it seems => disable it by default. Uncomment the following to enable:
-// #define USE_TRANSFORM_AC3
-
#include <emmintrin.h>
#include "../dec/vp8i.h"
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
//------------------------------------------------------------------------------
// Transforms (Paragraph 14.4)
@@ -196,21 +194,21 @@ static void TransformSSE2(const int16_t* in, uint8_t* dst, int do_two) {
// Add inverse transform to 'dst' and store.
{
- const __m128i zero = _mm_setzero_si128();
+ const __m128i zero = _mm_set1_epi16(0);
// Load the reference(s).
__m128i dst0, dst1, dst2, dst3;
if (do_two) {
// Load eight bytes/pixels per line.
- dst0 = _mm_loadl_epi64((__m128i*)(dst + 0 * BPS));
- dst1 = _mm_loadl_epi64((__m128i*)(dst + 1 * BPS));
- dst2 = _mm_loadl_epi64((__m128i*)(dst + 2 * BPS));
- dst3 = _mm_loadl_epi64((__m128i*)(dst + 3 * BPS));
+ dst0 = _mm_loadl_epi64((__m128i*)&dst[0 * BPS]);
+ dst1 = _mm_loadl_epi64((__m128i*)&dst[1 * BPS]);
+ dst2 = _mm_loadl_epi64((__m128i*)&dst[2 * BPS]);
+ dst3 = _mm_loadl_epi64((__m128i*)&dst[3 * BPS]);
} else {
// Load four bytes/pixels per line.
- dst0 = _mm_cvtsi32_si128(*(int*)(dst + 0 * BPS));
- dst1 = _mm_cvtsi32_si128(*(int*)(dst + 1 * BPS));
- dst2 = _mm_cvtsi32_si128(*(int*)(dst + 2 * BPS));
- dst3 = _mm_cvtsi32_si128(*(int*)(dst + 3 * BPS));
+ dst0 = _mm_cvtsi32_si128(*(int*)&dst[0 * BPS]);
+ dst1 = _mm_cvtsi32_si128(*(int*)&dst[1 * BPS]);
+ dst2 = _mm_cvtsi32_si128(*(int*)&dst[2 * BPS]);
+ dst3 = _mm_cvtsi32_si128(*(int*)&dst[3 * BPS]);
}
// Convert to 16b.
dst0 = _mm_unpacklo_epi8(dst0, zero);
@@ -230,66 +228,20 @@ static void TransformSSE2(const int16_t* in, uint8_t* dst, int do_two) {
// Store the results.
if (do_two) {
// Store eight bytes/pixels per line.
- _mm_storel_epi64((__m128i*)(dst + 0 * BPS), dst0);
- _mm_storel_epi64((__m128i*)(dst + 1 * BPS), dst1);
- _mm_storel_epi64((__m128i*)(dst + 2 * BPS), dst2);
- _mm_storel_epi64((__m128i*)(dst + 3 * BPS), dst3);
+ _mm_storel_epi64((__m128i*)&dst[0 * BPS], dst0);
+ _mm_storel_epi64((__m128i*)&dst[1 * BPS], dst1);
+ _mm_storel_epi64((__m128i*)&dst[2 * BPS], dst2);
+ _mm_storel_epi64((__m128i*)&dst[3 * BPS], dst3);
} else {
// Store four bytes/pixels per line.
- *(int*)(dst + 0 * BPS) = _mm_cvtsi128_si32(dst0);
- *(int*)(dst + 1 * BPS) = _mm_cvtsi128_si32(dst1);
- *(int*)(dst + 2 * BPS) = _mm_cvtsi128_si32(dst2);
- *(int*)(dst + 3 * BPS) = _mm_cvtsi128_si32(dst3);
+ *((int32_t *)&dst[0 * BPS]) = _mm_cvtsi128_si32(dst0);
+ *((int32_t *)&dst[1 * BPS]) = _mm_cvtsi128_si32(dst1);
+ *((int32_t *)&dst[2 * BPS]) = _mm_cvtsi128_si32(dst2);
+ *((int32_t *)&dst[3 * BPS]) = _mm_cvtsi128_si32(dst3);
}
}
}
-#if defined(USE_TRANSFORM_AC3)
-#define MUL(a, b) (((a) * (b)) >> 16)
-static void TransformAC3SSE2(const int16_t* in, uint8_t* dst) {
- static const int kC1 = 20091 + (1 << 16);
- static const int kC2 = 35468;
- const __m128i A = _mm_set1_epi16(in[0] + 4);
- const __m128i c4 = _mm_set1_epi16(MUL(in[4], kC2));
- const __m128i d4 = _mm_set1_epi16(MUL(in[4], kC1));
- const int c1 = MUL(in[1], kC2);
- const int d1 = MUL(in[1], kC1);
- const __m128i CD = _mm_set_epi16(0, 0, 0, 0, -d1, -c1, c1, d1);
- const __m128i B = _mm_adds_epi16(A, CD);
- const __m128i m0 = _mm_adds_epi16(B, d4);
- const __m128i m1 = _mm_adds_epi16(B, c4);
- const __m128i m2 = _mm_subs_epi16(B, c4);
- const __m128i m3 = _mm_subs_epi16(B, d4);
- const __m128i zero = _mm_setzero_si128();
- // Load the source pixels.
- __m128i dst0 = _mm_cvtsi32_si128(*(int*)(dst + 0 * BPS));
- __m128i dst1 = _mm_cvtsi32_si128(*(int*)(dst + 1 * BPS));
- __m128i dst2 = _mm_cvtsi32_si128(*(int*)(dst + 2 * BPS));
- __m128i dst3 = _mm_cvtsi32_si128(*(int*)(dst + 3 * BPS));
- // Convert to 16b.
- dst0 = _mm_unpacklo_epi8(dst0, zero);
- dst1 = _mm_unpacklo_epi8(dst1, zero);
- dst2 = _mm_unpacklo_epi8(dst2, zero);
- dst3 = _mm_unpacklo_epi8(dst3, zero);
- // Add the inverse transform.
- dst0 = _mm_adds_epi16(dst0, _mm_srai_epi16(m0, 3));
- dst1 = _mm_adds_epi16(dst1, _mm_srai_epi16(m1, 3));
- dst2 = _mm_adds_epi16(dst2, _mm_srai_epi16(m2, 3));
- dst3 = _mm_adds_epi16(dst3, _mm_srai_epi16(m3, 3));
- // Unsigned saturate to 8b.
- dst0 = _mm_packus_epi16(dst0, dst0);
- dst1 = _mm_packus_epi16(dst1, dst1);
- dst2 = _mm_packus_epi16(dst2, dst2);
- dst3 = _mm_packus_epi16(dst3, dst3);
- // Store the results.
- *(int*)(dst + 0 * BPS) = _mm_cvtsi128_si32(dst0);
- *(int*)(dst + 1 * BPS) = _mm_cvtsi128_si32(dst1);
- *(int*)(dst + 2 * BPS) = _mm_cvtsi128_si32(dst2);
- *(int*)(dst + 3 * BPS) = _mm_cvtsi128_si32(dst3);
-}
-#undef MUL
-#endif // USE_TRANSFORM_AC3
-
//------------------------------------------------------------------------------
// Loop Filter (Paragraph 15)
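
The load/add/store hunk above follows the usual SSE2 pixel-accumulate shape: widen 8-bit pixels to 16 bits, add the 16-bit residual, then narrow back with unsigned saturation. A self-contained sketch of that pattern for one 4-pixel row (AddResidual4 is a made-up helper name, not libwebp API):

    #include <emmintrin.h>
    #include <stdint.h>
    static void AddResidual4(uint8_t* dst, __m128i residual /* 16-bit lanes */) {
      const __m128i zero = _mm_setzero_si128();
      __m128i row = _mm_cvtsi32_si128(*(const int*)dst);  /* load 4 pixels */
      row = _mm_unpacklo_epi8(row, zero);                 /* u8 -> s16 */
      row = _mm_adds_epi16(row, residual);                /* saturating add */
      row = _mm_packus_epi16(row, row);                   /* s16 -> u8, clamped */
      *(int*)dst = _mm_cvtsi128_si32(row);                /* store 4 pixels */
    }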
@@ -326,14 +278,14 @@ static void TransformAC3SSE2(const int16_t* in, uint8_t* dst) {
#define GET_NOTHEV(p1, p0, q0, q1, hev_thresh, not_hev) { \
const __m128i zero = _mm_setzero_si128(); \
- const __m128i t_1 = MM_ABS(p1, p0); \
- const __m128i t_2 = MM_ABS(q1, q0); \
+ const __m128i t1 = MM_ABS(p1, p0); \
+ const __m128i t2 = MM_ABS(q1, q0); \
\
const __m128i h = _mm_set1_epi8(hev_thresh); \
- const __m128i t_3 = _mm_subs_epu8(t_1, h); /* abs(p1 - p0) - hev_tresh */ \
- const __m128i t_4 = _mm_subs_epu8(t_2, h); /* abs(q1 - q0) - hev_tresh */ \
+ const __m128i t3 = _mm_subs_epu8(t1, h); /* abs(p1 - p0) - hev_tresh */ \
+ const __m128i t4 = _mm_subs_epu8(t2, h); /* abs(q1 - q0) - hev_tresh */ \
\
- not_hev = _mm_or_si128(t_3, t_4); \
+ not_hev = _mm_or_si128(t3, t4); \
not_hev = _mm_cmpeq_epi8(not_hev, zero); /* not_hev <= t1 && not_hev <= t2 */\
}
@@ -362,13 +314,13 @@ static void TransformAC3SSE2(const int16_t* in, uint8_t* dst) {
// Updates values of 2 pixels at MB edge during complex filtering.
// Update operations:
-// q = q - delta and p = p + delta; where delta = [(a_hi >> 7), (a_lo >> 7)]
+// q = q - a and p = p + a; where a = [(a_hi >> 7), (a_lo >> 7)]
#define UPDATE_2PIXELS(pi, qi, a_lo, a_hi) { \
const __m128i a_lo7 = _mm_srai_epi16(a_lo, 7); \
const __m128i a_hi7 = _mm_srai_epi16(a_hi, 7); \
- const __m128i delta = _mm_packs_epi16(a_lo7, a_hi7); \
- pi = _mm_adds_epi8(pi, delta); \
- qi = _mm_subs_epi8(qi, delta); \
+ const __m128i a = _mm_packs_epi16(a_lo7, a_hi7); \
+ pi = _mm_adds_epi8(pi, a); \
+ qi = _mm_subs_epi8(qi, a); \
}
static void NeedsFilter(const __m128i* p1, const __m128i* p0, const __m128i* q0,
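
For reference, a scalar sketch of what the two macros above compute (illustrative only, not the production code path): GET_NOTHEV checks that both inner gradients stay within hev_thresh, and UPDATE_2PIXELS applies the sign-extended delta (a >> 7) symmetrically to the pixel pair.

    #include <stdlib.h>
    static int NotHev(int p1, int p0, int q0, int q1, int hev_thresh) {
      return (abs(p1 - p0) <= hev_thresh) && (abs(q1 - q0) <= hev_thresh);
    }
    static void Update2Pixels(int* p, int* q, int a) {
      const int delta = a >> 7;  /* arithmetic shift preserves the sign */
      *p += delta;               /* the SSE2 version also saturates to int8 */
      *q -= delta;
    }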
@@ -924,19 +876,10 @@ static void HFilter8iSSE2(uint8_t* u, uint8_t* v, int stride,
Store16x4(u, v, stride, &p1, &p0, &q0, &q1);
}
-#endif // WEBP_USE_SSE2
-
-//------------------------------------------------------------------------------
-// Entry point
-
extern void VP8DspInitSSE2(void);
void VP8DspInitSSE2(void) {
-#if defined(WEBP_USE_SSE2)
VP8Transform = TransformSSE2;
-#if defined(USE_TRANSFORM_AC3)
- VP8TransformAC3 = TransformAC3SSE2;
-#endif
VP8VFilter16 = VFilter16SSE2;
VP8HFilter16 = HFilter16SSE2;
@@ -951,6 +894,10 @@ void VP8DspInitSSE2(void) {
VP8SimpleHFilter16 = SimpleHFilter16SSE2;
VP8SimpleVFilter16i = SimpleVFilter16iSSE2;
VP8SimpleHFilter16i = SimpleHFilter16iSSE2;
-#endif // WEBP_USE_SSE2
}
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
+
+#endif // WEBP_USE_SSE2
diff --git a/drivers/webp/dsp/dsp.h b/drivers/webp/dsp/dsp.h
index 3be783afe7..042c98aad2 100644
--- a/drivers/webp/dsp/dsp.h
+++ b/drivers/webp/dsp/dsp.h
@@ -1,10 +1,8 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
-// Use of this source code is governed by a BSD-style license
-// that can be found in the COPYING file in the root of the source
-// tree. An additional intellectual property rights grant can be found
-// in the file PATENTS. All contributing project authors may
-// be found in the AUTHORS file in the root of the source tree.
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// -----------------------------------------------------------------------------
//
// Speed-critical functions.
@@ -16,15 +14,14 @@
#include "../webp/types.h"
-#ifdef __cplusplus
+#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
//------------------------------------------------------------------------------
// CPU detection
-#if defined(_MSC_VER) && _MSC_VER > 1310 && \
- (defined(_M_X64) || defined(_M_IX86))
+#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))
#define WEBP_MSC_SSE2 // Visual C++ SSE2 targets
#endif
@@ -36,7 +33,7 @@ extern "C" {
#define WEBP_ANDROID_NEON // Android targets that might support NEON
#endif
-#if defined(__ARM_NEON__) || defined(WEBP_ANDROID_NEON)
+#if (defined(__ARM_NEON__) || defined(WEBP_ANDROID_NEON)) && !defined(PSP2_ENABLED)
#define WEBP_USE_NEON
#endif
@@ -52,6 +49,8 @@ extern VP8CPUInfo VP8GetCPUInfo;
//------------------------------------------------------------------------------
// Encoding
+int VP8GetAlpha(const int histo[]);
+
// Transforms
// VP8Idct: Does one of two inverse transforms. If do_two is set, the transforms
// will be done for (ref, in, dst) and (ref + 4, in + 16, dst + 4).
@@ -86,16 +85,10 @@ typedef int (*VP8QuantizeBlock)(int16_t in[16], int16_t out[16],
int n, const struct VP8Matrix* const mtx);
extern VP8QuantizeBlock VP8EncQuantizeBlock;
-// specific to 2nd transform:
-typedef int (*VP8QuantizeBlockWHT)(int16_t in[16], int16_t out[16],
- const struct VP8Matrix* const mtx);
-extern VP8QuantizeBlockWHT VP8EncQuantizeBlockWHT;
-
-// Collect histogram for susceptibility calculation and accumulate in histo[].
-struct VP8Histogram;
-typedef void (*VP8CHisto)(const uint8_t* ref, const uint8_t* pred,
- int start_block, int end_block,
- struct VP8Histogram* const histo);
+// Compute susceptibility based on DCT-coeff histograms:
+// the higher, the "easier" the macroblock is to compress.
+typedef int (*VP8CHisto)(const uint8_t* ref, const uint8_t* pred,
+ int start_block, int end_block);
extern const int VP8DspScan[16 + 4 + 4];
extern VP8CHisto VP8CollectHistogram;
@@ -108,11 +101,10 @@ typedef void (*VP8DecIdct)(const int16_t* coeffs, uint8_t* dst);
// when doing two transforms, coeffs is actually int16_t[2][16].
typedef void (*VP8DecIdct2)(const int16_t* coeffs, uint8_t* dst, int do_two);
extern VP8DecIdct2 VP8Transform;
-extern VP8DecIdct VP8TransformAC3;
extern VP8DecIdct VP8TransformUV;
extern VP8DecIdct VP8TransformDC;
extern VP8DecIdct VP8TransformDCUV;
-extern VP8WHT VP8TransformWHT;
+extern void (*VP8TransformWHT)(const int16_t* in, int16_t* out);
// *dst is the destination block, with stride BPS. Boundary samples are
// assumed accessible when needed.
@@ -153,8 +145,6 @@ void VP8DspInit(void);
#define FANCY_UPSAMPLING // undefined to remove fancy upsampling support
-// Convert a pair of y/u/v lines together to the output rgb/a colorspace.
-// bottom_y can be NULL if only one line of output is needed (at top/bottom).
typedef void (*WebPUpsampleLinePairFunc)(
const uint8_t* top_y, const uint8_t* bottom_y,
const uint8_t* top_u, const uint8_t* top_v,
@@ -169,9 +159,6 @@ extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];
// Initializes SSE2 version of the fancy upsamplers.
void WebPInitUpsamplersSSE2(void);
-// NEON version
-void WebPInitUpsamplersNEON(void);
-
#endif // FANCY_UPSAMPLING
// Point-sampling methods.
@@ -213,11 +200,10 @@ extern void (*WebPApplyAlphaMultiply4444)(
void WebPInitPremultiply(void);
void WebPInitPremultiplySSE2(void); // should not be called directly.
-void WebPInitPremultiplyNEON(void);
//------------------------------------------------------------------------------
-#ifdef __cplusplus
+#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif
diff --git a/drivers/webp/dsp/enc.c b/drivers/webp/dsp/enc.c
index fcc6ec8ea2..02234564be 100644
--- a/drivers/webp/dsp/enc.c
+++ b/drivers/webp/dsp/enc.c
@@ -1,34 +1,47 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
-// Use of this source code is governed by a BSD-style license
-// that can be found in the COPYING file in the root of the source
-// tree. An additional intellectual property rights grant can be found
-// in the file PATENTS. All contributing project authors may
-// be found in the AUTHORS file in the root of the source tree.
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// -----------------------------------------------------------------------------
//
// Speed-critical encoding functions.
//
// Author: Skal (pascal.massimino@gmail.com)
-#include <assert.h>
#include <stdlib.h> // for abs()
-
#include "./dsp.h"
#include "../enc/vp8enci.h"
-static WEBP_INLINE uint8_t clip_8b(int v) {
- return (!(v & ~0xff)) ? v : (v < 0) ? 0 : 255;
-}
-
-static WEBP_INLINE int clip_max(int v, int max) {
- return (v > max) ? max : v;
-}
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
//------------------------------------------------------------------------------
// Compute susceptibility based on DCT-coeff histograms:
// the higher, the "easier" the macroblock is to compress.
+static int ClipAlpha(int alpha) {
+ return alpha < 0 ? 0 : alpha > 255 ? 255 : alpha;
+}
+
+int VP8GetAlpha(const int histo[MAX_COEFF_THRESH + 1]) {
+ int num = 0, den = 0, val = 0;
+ int k;
+ int alpha;
+ // note: changing this loop to avoid the numerous "k + 1" slows things down.
+ for (k = 0; k < MAX_COEFF_THRESH; ++k) {
+ if (histo[k + 1]) {
+ val += histo[k + 1];
+ num += val * (k + 1);
+ den += (k + 1) * (k + 1);
+ }
+ }
+ // we scale the value to a usable [0..255] range
+ alpha = den ? 10 * num / den - 5 : 0;
+ return ClipAlpha(alpha);
+}
+
const int VP8DspScan[16 + 4 + 4] = {
// Luma
0 + 0 * BPS, 4 + 0 * BPS, 8 + 0 * BPS, 12 + 0 * BPS,
@@ -40,23 +53,27 @@ const int VP8DspScan[16 + 4 + 4] = {
8 + 0 * BPS, 12 + 0 * BPS, 8 + 4 * BPS, 12 + 4 * BPS // V
};
-static void CollectHistogram(const uint8_t* ref, const uint8_t* pred,
- int start_block, int end_block,
- VP8Histogram* const histo) {
- int j;
+static int CollectHistogram(const uint8_t* ref, const uint8_t* pred,
+ int start_block, int end_block) {
+ int histo[MAX_COEFF_THRESH + 1] = { 0 };
+ int16_t out[16];
+ int j, k;
for (j = start_block; j < end_block; ++j) {
- int k;
- int16_t out[16];
-
VP8FTransform(ref + VP8DspScan[j], pred + VP8DspScan[j], out);
- // Convert coefficients to bin.
+ // Convert coefficients to bin (within out[]).
+ for (k = 0; k < 16; ++k) {
+ const int v = abs(out[k]) >> 2;
+ out[k] = (v > MAX_COEFF_THRESH) ? MAX_COEFF_THRESH : v;
+ }
+
+ // Use bin to update histogram.
for (k = 0; k < 16; ++k) {
- const int v = abs(out[k]) >> 3; // TODO(skal): add rounding?
- const int clipped_value = clip_max(v, MAX_COEFF_THRESH);
- histo->distribution[clipped_value]++;
+ histo[out[k]]++;
}
}
+
+ return VP8GetAlpha(histo);
}
//------------------------------------------------------------------------------
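
A usage sketch for the restored histogram/alpha pair above (hypothetical values; MAX_COEFF_THRESH is assumed to be 64 in this revision): with sixteen coefficients landing in bin 1 and nothing else, val = 16, num = 16 * 1 = 16, den = 1 * 1 = 1, so alpha = 10 * 16 / 1 - 5 = 155 after clipping.

    #include <stdio.h>
    extern int VP8GetAlpha(const int histo[]);  /* declared in dsp.h above */
    int main(void) {
      int histo[64 + 1] = { 0 };  /* 64 = assumed MAX_COEFF_THRESH */
      histo[1] = 16;              /* sixteen small non-zero coefficients */
      printf("alpha = %d\n", VP8GetAlpha(histo));  /* prints 155 */
      return 0;
    }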
@@ -72,12 +89,15 @@ static void InitTables(void) {
if (!tables_ok) {
int i;
for (i = -255; i <= 255 + 255; ++i) {
- clip1[255 + i] = clip_8b(i);
+ clip1[255 + i] = (i < 0) ? 0 : (i > 255) ? 255 : i;
}
tables_ok = 1;
}
}
+static WEBP_INLINE uint8_t clip_8b(int v) {
+ return (!(v & ~0xff)) ? v : v < 0 ? 0 : 255;
+}
//------------------------------------------------------------------------------
// Transforms (Paragraph 14.4)
@@ -134,25 +154,25 @@ static void FTransform(const uint8_t* src, const uint8_t* ref, int16_t* out) {
int i;
int tmp[16];
for (i = 0; i < 4; ++i, src += BPS, ref += BPS) {
- const int d0 = src[0] - ref[0]; // 9bit dynamic range ([-255,255])
+ const int d0 = src[0] - ref[0];
const int d1 = src[1] - ref[1];
const int d2 = src[2] - ref[2];
const int d3 = src[3] - ref[3];
- const int a0 = (d0 + d3); // 10b [-510,510]
- const int a1 = (d1 + d2);
- const int a2 = (d1 - d2);
- const int a3 = (d0 - d3);
- tmp[0 + i * 4] = (a0 + a1) * 8; // 14b [-8160,8160]
- tmp[1 + i * 4] = (a2 * 2217 + a3 * 5352 + 1812) >> 9; // [-7536,7542]
- tmp[2 + i * 4] = (a0 - a1) * 8;
- tmp[3 + i * 4] = (a3 * 2217 - a2 * 5352 + 937) >> 9;
+ const int a0 = (d0 + d3) << 3;
+ const int a1 = (d1 + d2) << 3;
+ const int a2 = (d1 - d2) << 3;
+ const int a3 = (d0 - d3) << 3;
+ tmp[0 + i * 4] = (a0 + a1);
+ tmp[1 + i * 4] = (a2 * 2217 + a3 * 5352 + 14500) >> 12;
+ tmp[2 + i * 4] = (a0 - a1);
+ tmp[3 + i * 4] = (a3 * 2217 - a2 * 5352 + 7500) >> 12;
}
for (i = 0; i < 4; ++i) {
- const int a0 = (tmp[0 + i] + tmp[12 + i]); // 15b
+ const int a0 = (tmp[0 + i] + tmp[12 + i]);
const int a1 = (tmp[4 + i] + tmp[ 8 + i]);
const int a2 = (tmp[4 + i] - tmp[ 8 + i]);
const int a3 = (tmp[0 + i] - tmp[12 + i]);
- out[0 + i] = (a0 + a1 + 7) >> 4; // 12b
+ out[0 + i] = (a0 + a1 + 7) >> 4;
out[4 + i] = ((a2 * 2217 + a3 * 5352 + 12000) >> 16) + (a3 != 0);
out[8 + i] = (a0 - a1 + 7) >> 4;
out[12+ i] = ((a3 * 2217 - a2 * 5352 + 51000) >> 16);
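
Arithmetic note on the rounding change above (a sketch, assuming exact integer math): with the inputs pre-scaled by << 3, (a2 * 2217 + a3 * 5352 + 14500) >> 12 equals the unscaled (a2 * 2217 + a3 * 5352 + 1812.5) >> 9, since 14500 / 8 = 1812.5. The two revisions therefore differ only in rounding bias (1812.5 vs 1812), not in output scale; the same holds for 7500 / 8 = 937.5 against 937.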
@@ -187,32 +207,31 @@ static void ITransformWHT(const int16_t* in, int16_t* out) {
}
static void FTransformWHT(const int16_t* in, int16_t* out) {
- // input is 12b signed
- int32_t tmp[16];
+ int tmp[16];
int i;
for (i = 0; i < 4; ++i, in += 64) {
- const int a0 = (in[0 * 16] + in[2 * 16]); // 13b
- const int a1 = (in[1 * 16] + in[3 * 16]);
- const int a2 = (in[1 * 16] - in[3 * 16]);
- const int a3 = (in[0 * 16] - in[2 * 16]);
- tmp[0 + i * 4] = a0 + a1; // 14b
+ const int a0 = (in[0 * 16] + in[2 * 16]) << 2;
+ const int a1 = (in[1 * 16] + in[3 * 16]) << 2;
+ const int a2 = (in[1 * 16] - in[3 * 16]) << 2;
+ const int a3 = (in[0 * 16] - in[2 * 16]) << 2;
+ tmp[0 + i * 4] = (a0 + a1) + (a0 != 0);
tmp[1 + i * 4] = a3 + a2;
tmp[2 + i * 4] = a3 - a2;
tmp[3 + i * 4] = a0 - a1;
}
for (i = 0; i < 4; ++i) {
- const int a0 = (tmp[0 + i] + tmp[8 + i]); // 15b
+ const int a0 = (tmp[0 + i] + tmp[8 + i]);
const int a1 = (tmp[4 + i] + tmp[12+ i]);
const int a2 = (tmp[4 + i] - tmp[12+ i]);
const int a3 = (tmp[0 + i] - tmp[8 + i]);
- const int b0 = a0 + a1; // 16b
+ const int b0 = a0 + a1;
const int b1 = a3 + a2;
const int b2 = a3 - a2;
const int b3 = a0 - a1;
- out[ 0 + i] = b0 >> 1; // 15b
- out[ 4 + i] = b1 >> 1;
- out[ 8 + i] = b2 >> 1;
- out[12 + i] = b3 >> 1;
+ out[ 0 + i] = (b0 + (b0 > 0) + 3) >> 3;
+ out[ 4 + i] = (b1 + (b1 > 0) + 3) >> 3;
+ out[ 8 + i] = (b2 + (b2 > 0) + 3) >> 3;
+ out[12 + i] = (b3 + (b3 > 0) + 3) >> 3;
}
}
@@ -570,30 +589,30 @@ static int TTransform(const uint8_t* in, const uint16_t* w) {
int i;
// horizontal pass
for (i = 0; i < 4; ++i, in += BPS) {
- const int a0 = in[0] + in[2];
- const int a1 = in[1] + in[3];
- const int a2 = in[1] - in[3];
- const int a3 = in[0] - in[2];
- tmp[0 + i * 4] = a0 + a1;
+ const int a0 = (in[0] + in[2]) << 2;
+ const int a1 = (in[1] + in[3]) << 2;
+ const int a2 = (in[1] - in[3]) << 2;
+ const int a3 = (in[0] - in[2]) << 2;
+ tmp[0 + i * 4] = a0 + a1 + (a0 != 0);
tmp[1 + i * 4] = a3 + a2;
tmp[2 + i * 4] = a3 - a2;
tmp[3 + i * 4] = a0 - a1;
}
// vertical pass
for (i = 0; i < 4; ++i, ++w) {
- const int a0 = tmp[0 + i] + tmp[8 + i];
- const int a1 = tmp[4 + i] + tmp[12+ i];
- const int a2 = tmp[4 + i] - tmp[12+ i];
- const int a3 = tmp[0 + i] - tmp[8 + i];
+ const int a0 = (tmp[0 + i] + tmp[8 + i]);
+ const int a1 = (tmp[4 + i] + tmp[12+ i]);
+ const int a2 = (tmp[4 + i] - tmp[12+ i]);
+ const int a3 = (tmp[0 + i] - tmp[8 + i]);
const int b0 = a0 + a1;
const int b1 = a3 + a2;
const int b2 = a3 - a2;
const int b3 = a0 - a1;
-
- sum += w[ 0] * abs(b0);
- sum += w[ 4] * abs(b1);
- sum += w[ 8] * abs(b2);
- sum += w[12] * abs(b3);
+ // abs((b + (b<0) + 3) >> 3) = (abs(b) + 3) >> 3
+ sum += w[ 0] * ((abs(b0) + 3) >> 3);
+ sum += w[ 4] * ((abs(b1) + 3) >> 3);
+ sum += w[ 8] * ((abs(b2) + 3) >> 3);
+ sum += w[12] * ((abs(b3) + 3) >> 3);
}
return sum;
}
@@ -602,7 +621,7 @@ static int Disto4x4(const uint8_t* const a, const uint8_t* const b,
const uint16_t* const w) {
const int sum1 = TTransform(a, w);
const int sum2 = TTransform(b, w);
- return abs(sum2 - sum1) >> 5;
+ return (abs(sum2 - sum1) + 8) >> 4;
}
static int Disto16x16(const uint8_t* const a, const uint8_t* const b,
@@ -632,38 +651,13 @@ static int QuantizeBlock(int16_t in[16], int16_t out[16],
for (; n < 16; ++n) {
const int j = kZigzag[n];
const int sign = (in[j] < 0);
- const int coeff = (sign ? -in[j] : in[j]) + mtx->sharpen_[j];
+ int coeff = (sign ? -in[j] : in[j]) + mtx->sharpen_[j];
+ if (coeff > 2047) coeff = 2047;
if (coeff > mtx->zthresh_[j]) {
const int Q = mtx->q_[j];
const int iQ = mtx->iq_[j];
const int B = mtx->bias_[j];
out[n] = QUANTDIV(coeff, iQ, B);
- if (out[n] > MAX_LEVEL) out[n] = MAX_LEVEL;
- if (sign) out[n] = -out[n];
- in[j] = out[n] * Q;
- if (out[n]) last = n;
- } else {
- out[n] = 0;
- in[j] = 0;
- }
- }
- return (last >= 0);
-}
-
-static int QuantizeBlockWHT(int16_t in[16], int16_t out[16],
- const VP8Matrix* const mtx) {
- int n, last = -1;
- for (n = 0; n < 16; ++n) {
- const int j = kZigzag[n];
- const int sign = (in[j] < 0);
- const int coeff = sign ? -in[j] : in[j];
- assert(mtx->sharpen_[j] == 0);
- if (coeff > mtx->zthresh_[j]) {
- const int Q = mtx->q_[j];
- const int iQ = mtx->iq_[j];
- const int B = mtx->bias_[j];
- out[n] = QUANTDIV(coeff, iQ, B);
- if (out[n] > MAX_LEVEL) out[n] = MAX_LEVEL;
if (sign) out[n] = -out[n];
in[j] = out[n] * Q;
if (out[n]) last = n;
@@ -709,11 +703,9 @@ VP8Metric VP8SSE4x4;
VP8WMetric VP8TDisto4x4;
VP8WMetric VP8TDisto16x16;
VP8QuantizeBlock VP8EncQuantizeBlock;
-VP8QuantizeBlockWHT VP8EncQuantizeBlockWHT;
VP8BlockCopy VP8Copy4x4;
extern void VP8EncDspInitSSE2(void);
-extern void VP8EncDspInitNEON(void);
void VP8EncDspInit(void) {
InitTables();
@@ -734,7 +726,6 @@ void VP8EncDspInit(void) {
VP8TDisto4x4 = Disto4x4;
VP8TDisto16x16 = Disto16x16;
VP8EncQuantizeBlock = QuantizeBlock;
- VP8EncQuantizeBlockWHT = QuantizeBlockWHT;
VP8Copy4x4 = Copy4x4;
// If defined, use CPUInfo() to overwrite some pointers with faster versions.
@@ -743,11 +734,10 @@ void VP8EncDspInit(void) {
if (VP8GetCPUInfo(kSSE2)) {
VP8EncDspInitSSE2();
}
-#elif defined(WEBP_USE_NEON)
- if (VP8GetCPUInfo(kNEON)) {
- VP8EncDspInitNEON();
- }
#endif
}
}
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
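
For context, a minimal sketch of the runtime-dispatch pattern VP8EncDspInit uses above: plain-C defaults are installed first, then selectively overwritten when the CPU-info callback reports a capability. All names and types below are simplified stand-ins, not the real libwebp declarations.

    #include <stddef.h>
    typedef void (*TransformFn)(const short* in, unsigned char* dst);
    typedef int (*CpuInfoFn)(int feature);
    static void TransformC(const short* in, unsigned char* dst) {
      (void)in; (void)dst;  /* portable fallback */
    }
    static void TransformSSE2(const short* in, unsigned char* dst) {
      (void)in; (void)dst;  /* faster variant, only valid if SSE2 exists */
    }
    TransformFn Transform = NULL;   /* the hook callers invoke */
    CpuInfoFn GetCpuInfo = NULL;    /* supplied by the platform layer */
    void DspInit(void) {
      Transform = TransformC;                    /* safe default first */
      if (GetCpuInfo != NULL && GetCpuInfo(0)) { /* 0 = hypothetical kSSE2 */
        Transform = TransformSSE2;               /* override when supported */
      }
    }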
diff --git a/drivers/webp/dsp/enc_neon.c b/drivers/webp/dsp/enc_neon.c
deleted file mode 100644
index 52cca18682..0000000000
--- a/drivers/webp/dsp/enc_neon.c
+++ /dev/null
@@ -1,632 +0,0 @@
-// Copyright 2012 Google Inc. All Rights Reserved.
-//
-// Use of this source code is governed by a BSD-style license
-// that can be found in the COPYING file in the root of the source
-// tree. An additional intellectual property rights grant can be found
-// in the file PATENTS. All contributing project authors may
-// be found in the AUTHORS file in the root of the source tree.
-// -----------------------------------------------------------------------------
-//
-// ARM NEON version of speed-critical encoding functions.
-//
-// adapted from libvpx (http://www.webmproject.org/code/)
-
-#include "./dsp.h"
-
-#if defined(WEBP_USE_NEON)
-
-#include "../enc/vp8enci.h"
-
-//------------------------------------------------------------------------------
-// Transforms (Paragraph 14.4)
-
-// Inverse transform.
-// This code is pretty much the same as TransformOneNEON in the decoder, except
-// for subtraction to *ref. See the comments there for algorithmic explanations.
-static void ITransformOne(const uint8_t* ref,
- const int16_t* in, uint8_t* dst) {
- const int kBPS = BPS;
- const int16_t kC1C2[] = { 20091, 17734, 0, 0 }; // kC1 / (kC2 >> 1) / 0 / 0
-
- __asm__ volatile (
- "vld1.16 {q1, q2}, [%[in]] \n"
- "vld1.16 {d0}, [%[kC1C2]] \n"
-
- // d2: in[0]
- // d3: in[8]
- // d4: in[4]
- // d5: in[12]
- "vswp d3, d4 \n"
-
- // q8 = {in[4], in[12]} * kC1 * 2 >> 16
- // q9 = {in[4], in[12]} * kC2 >> 16
- "vqdmulh.s16 q8, q2, d0[0] \n"
- "vqdmulh.s16 q9, q2, d0[1] \n"
-
- // d22 = a = in[0] + in[8]
- // d23 = b = in[0] - in[8]
- "vqadd.s16 d22, d2, d3 \n"
- "vqsub.s16 d23, d2, d3 \n"
-
- // q8 = in[4]/[12] * kC1 >> 16
- "vshr.s16 q8, q8, #1 \n"
-
- // Add {in[4], in[12]} back after the multiplication.
- "vqadd.s16 q8, q2, q8 \n"
-
- // d20 = c = in[4]*kC2 - in[12]*kC1
- // d21 = d = in[4]*kC1 + in[12]*kC2
- "vqsub.s16 d20, d18, d17 \n"
- "vqadd.s16 d21, d19, d16 \n"
-
- // d2 = tmp[0] = a + d
- // d3 = tmp[1] = b + c
- // d4 = tmp[2] = b - c
- // d5 = tmp[3] = a - d
- "vqadd.s16 d2, d22, d21 \n"
- "vqadd.s16 d3, d23, d20 \n"
- "vqsub.s16 d4, d23, d20 \n"
- "vqsub.s16 d5, d22, d21 \n"
-
- "vzip.16 q1, q2 \n"
- "vzip.16 q1, q2 \n"
-
- "vswp d3, d4 \n"
-
- // q8 = {tmp[4], tmp[12]} * kC1 * 2 >> 16
- // q9 = {tmp[4], tmp[12]} * kC2 >> 16
- "vqdmulh.s16 q8, q2, d0[0] \n"
- "vqdmulh.s16 q9, q2, d0[1] \n"
-
- // d22 = a = tmp[0] + tmp[8]
- // d23 = b = tmp[0] - tmp[8]
- "vqadd.s16 d22, d2, d3 \n"
- "vqsub.s16 d23, d2, d3 \n"
-
- "vshr.s16 q8, q8, #1 \n"
- "vqadd.s16 q8, q2, q8 \n"
-
- // d20 = c = in[4]*kC2 - in[12]*kC1
- // d21 = d = in[4]*kC1 + in[12]*kC2
- "vqsub.s16 d20, d18, d17 \n"
- "vqadd.s16 d21, d19, d16 \n"
-
- // d2 = tmp[0] = a + d
- // d3 = tmp[1] = b + c
- // d4 = tmp[2] = b - c
- // d5 = tmp[3] = a - d
- "vqadd.s16 d2, d22, d21 \n"
- "vqadd.s16 d3, d23, d20 \n"
- "vqsub.s16 d4, d23, d20 \n"
- "vqsub.s16 d5, d22, d21 \n"
-
- "vld1.32 d6[0], [%[ref]], %[kBPS] \n"
- "vld1.32 d6[1], [%[ref]], %[kBPS] \n"
- "vld1.32 d7[0], [%[ref]], %[kBPS] \n"
- "vld1.32 d7[1], [%[ref]], %[kBPS] \n"
-
- "sub %[ref], %[ref], %[kBPS], lsl #2 \n"
-
- // (val) + 4 >> 3
- "vrshr.s16 d2, d2, #3 \n"
- "vrshr.s16 d3, d3, #3 \n"
- "vrshr.s16 d4, d4, #3 \n"
- "vrshr.s16 d5, d5, #3 \n"
-
- "vzip.16 q1, q2 \n"
- "vzip.16 q1, q2 \n"
-
- // Must accumulate before saturating
- "vmovl.u8 q8, d6 \n"
- "vmovl.u8 q9, d7 \n"
-
- "vqadd.s16 q1, q1, q8 \n"
- "vqadd.s16 q2, q2, q9 \n"
-
- "vqmovun.s16 d0, q1 \n"
- "vqmovun.s16 d1, q2 \n"
-
- "vst1.32 d0[0], [%[dst]], %[kBPS] \n"
- "vst1.32 d0[1], [%[dst]], %[kBPS] \n"
- "vst1.32 d1[0], [%[dst]], %[kBPS] \n"
- "vst1.32 d1[1], [%[dst]] \n"
-
- : [in] "+r"(in), [dst] "+r"(dst) // modified registers
- : [kBPS] "r"(kBPS), [kC1C2] "r"(kC1C2), [ref] "r"(ref) // constants
- : "memory", "q0", "q1", "q2", "q8", "q9", "q10", "q11" // clobbered
- );
-}
-
-static void ITransform(const uint8_t* ref,
- const int16_t* in, uint8_t* dst, int do_two) {
- ITransformOne(ref, in, dst);
- if (do_two) {
- ITransformOne(ref + 4, in + 16, dst + 4);
- }
-}
-
-// Same code as dec_neon.c
-static void ITransformWHT(const int16_t* in, int16_t* out) {
- const int kStep = 32; // The store is only incrementing the pointer as if we
- // had stored a single byte.
- __asm__ volatile (
- // part 1
- // load data into q0, q1
- "vld1.16 {q0, q1}, [%[in]] \n"
-
- "vaddl.s16 q2, d0, d3 \n" // a0 = in[0] + in[12]
- "vaddl.s16 q3, d1, d2 \n" // a1 = in[4] + in[8]
- "vsubl.s16 q4, d1, d2 \n" // a2 = in[4] - in[8]
- "vsubl.s16 q5, d0, d3 \n" // a3 = in[0] - in[12]
-
- "vadd.s32 q0, q2, q3 \n" // tmp[0] = a0 + a1
- "vsub.s32 q2, q2, q3 \n" // tmp[8] = a0 - a1
- "vadd.s32 q1, q5, q4 \n" // tmp[4] = a3 + a2
- "vsub.s32 q3, q5, q4 \n" // tmp[12] = a3 - a2
-
- // Transpose
- // q0 = tmp[0, 4, 8, 12], q1 = tmp[2, 6, 10, 14]
- // q2 = tmp[1, 5, 9, 13], q3 = tmp[3, 7, 11, 15]
- "vswp d1, d4 \n" // vtrn.64 q0, q2
- "vswp d3, d6 \n" // vtrn.64 q1, q3
- "vtrn.32 q0, q1 \n"
- "vtrn.32 q2, q3 \n"
-
- "vmov.s32 q4, #3 \n" // dc = 3
- "vadd.s32 q0, q0, q4 \n" // dc = tmp[0] + 3
- "vadd.s32 q6, q0, q3 \n" // a0 = dc + tmp[3]
- "vadd.s32 q7, q1, q2 \n" // a1 = tmp[1] + tmp[2]
- "vsub.s32 q8, q1, q2 \n" // a2 = tmp[1] - tmp[2]
- "vsub.s32 q9, q0, q3 \n" // a3 = dc - tmp[3]
-
- "vadd.s32 q0, q6, q7 \n"
- "vshrn.s32 d0, q0, #3 \n" // (a0 + a1) >> 3
- "vadd.s32 q1, q9, q8 \n"
- "vshrn.s32 d1, q1, #3 \n" // (a3 + a2) >> 3
- "vsub.s32 q2, q6, q7 \n"
- "vshrn.s32 d2, q2, #3 \n" // (a0 - a1) >> 3
- "vsub.s32 q3, q9, q8 \n"
- "vshrn.s32 d3, q3, #3 \n" // (a3 - a2) >> 3
-
- // set the results to output
- "vst1.16 d0[0], [%[out]], %[kStep] \n"
- "vst1.16 d1[0], [%[out]], %[kStep] \n"
- "vst1.16 d2[0], [%[out]], %[kStep] \n"
- "vst1.16 d3[0], [%[out]], %[kStep] \n"
- "vst1.16 d0[1], [%[out]], %[kStep] \n"
- "vst1.16 d1[1], [%[out]], %[kStep] \n"
- "vst1.16 d2[1], [%[out]], %[kStep] \n"
- "vst1.16 d3[1], [%[out]], %[kStep] \n"
- "vst1.16 d0[2], [%[out]], %[kStep] \n"
- "vst1.16 d1[2], [%[out]], %[kStep] \n"
- "vst1.16 d2[2], [%[out]], %[kStep] \n"
- "vst1.16 d3[2], [%[out]], %[kStep] \n"
- "vst1.16 d0[3], [%[out]], %[kStep] \n"
- "vst1.16 d1[3], [%[out]], %[kStep] \n"
- "vst1.16 d2[3], [%[out]], %[kStep] \n"
- "vst1.16 d3[3], [%[out]], %[kStep] \n"
-
- : [out] "+r"(out) // modified registers
- : [in] "r"(in), [kStep] "r"(kStep) // constants
- : "memory", "q0", "q1", "q2", "q3", "q4",
- "q5", "q6", "q7", "q8", "q9" // clobbered
- );
-}
-
-// Forward transform.
-
-// adapted from vp8/encoder/arm/neon/shortfdct_neon.asm
-static const int16_t kCoeff16[] = {
- 5352, 5352, 5352, 5352, 2217, 2217, 2217, 2217
-};
-static const int32_t kCoeff32[] = {
- 1812, 1812, 1812, 1812,
- 937, 937, 937, 937,
- 12000, 12000, 12000, 12000,
- 51000, 51000, 51000, 51000
-};
-
-static void FTransform(const uint8_t* src, const uint8_t* ref,
- int16_t* out) {
- const int kBPS = BPS;
- const uint8_t* src_ptr = src;
- const uint8_t* ref_ptr = ref;
- const int16_t* coeff16 = kCoeff16;
- const int32_t* coeff32 = kCoeff32;
-
- __asm__ volatile (
- // load src into q4, q5 in high half
- "vld1.8 {d8}, [%[src_ptr]], %[kBPS] \n"
- "vld1.8 {d10}, [%[src_ptr]], %[kBPS] \n"
- "vld1.8 {d9}, [%[src_ptr]], %[kBPS] \n"
- "vld1.8 {d11}, [%[src_ptr]] \n"
-
- // load ref into q6, q7 in high half
- "vld1.8 {d12}, [%[ref_ptr]], %[kBPS] \n"
- "vld1.8 {d14}, [%[ref_ptr]], %[kBPS] \n"
- "vld1.8 {d13}, [%[ref_ptr]], %[kBPS] \n"
- "vld1.8 {d15}, [%[ref_ptr]] \n"
-
- // Pack the high values in to q4 and q6
- "vtrn.32 q4, q5 \n"
- "vtrn.32 q6, q7 \n"
-
- // d[0-3] = src - ref
- "vsubl.u8 q0, d8, d12 \n"
- "vsubl.u8 q1, d9, d13 \n"
-
- // load coeff16 into q8(d16=5352, d17=2217)
- "vld1.16 {q8}, [%[coeff16]] \n"
-
- // load coeff32 high half into q9 = 1812, q10 = 937
- "vld1.32 {q9, q10}, [%[coeff32]]! \n"
-
- // load coeff32 low half into q11=12000, q12=51000
- "vld1.32 {q11,q12}, [%[coeff32]] \n"
-
- // part 1
- // Transpose. Register dN is the same as dN in C
- "vtrn.32 d0, d2 \n"
- "vtrn.32 d1, d3 \n"
- "vtrn.16 d0, d1 \n"
- "vtrn.16 d2, d3 \n"
-
- "vadd.s16 d4, d0, d3 \n" // a0 = d0 + d3
- "vadd.s16 d5, d1, d2 \n" // a1 = d1 + d2
- "vsub.s16 d6, d1, d2 \n" // a2 = d1 - d2
- "vsub.s16 d7, d0, d3 \n" // a3 = d0 - d3
-
- "vadd.s16 d0, d4, d5 \n" // a0 + a1
- "vshl.s16 d0, d0, #3 \n" // temp[0+i*4] = (a0+a1) << 3
- "vsub.s16 d2, d4, d5 \n" // a0 - a1
- "vshl.s16 d2, d2, #3 \n" // (temp[2+i*4] = (a0-a1) << 3
-
- "vmlal.s16 q9, d7, d16 \n" // a3*5352 + 1812
- "vmlal.s16 q10, d7, d17 \n" // a3*2217 + 937
- "vmlal.s16 q9, d6, d17 \n" // a2*2217 + a3*5352 + 1812
- "vmlsl.s16 q10, d6, d16 \n" // a3*2217 + 937 - a2*5352
-
- // temp[1+i*4] = (d2*2217 + d3*5352 + 1812) >> 9
- // temp[3+i*4] = (d3*2217 + 937 - d2*5352) >> 9
- "vshrn.s32 d1, q9, #9 \n"
- "vshrn.s32 d3, q10, #9 \n"
-
- // part 2
- // transpose d0=ip[0], d1=ip[4], d2=ip[8], d3=ip[12]
- "vtrn.32 d0, d2 \n"
- "vtrn.32 d1, d3 \n"
- "vtrn.16 d0, d1 \n"
- "vtrn.16 d2, d3 \n"
-
- "vmov.s16 d26, #7 \n"
-
- "vadd.s16 d4, d0, d3 \n" // a1 = ip[0] + ip[12]
- "vadd.s16 d5, d1, d2 \n" // b1 = ip[4] + ip[8]
- "vsub.s16 d6, d1, d2 \n" // c1 = ip[4] - ip[8]
- "vadd.s16 d4, d4, d26 \n" // a1 + 7
- "vsub.s16 d7, d0, d3 \n" // d1 = ip[0] - ip[12]
-
- "vadd.s16 d0, d4, d5 \n" // op[0] = a1 + b1 + 7
- "vsub.s16 d2, d4, d5 \n" // op[8] = a1 - b1 + 7
-
- "vmlal.s16 q11, d7, d16 \n" // d1*5352 + 12000
- "vmlal.s16 q12, d7, d17 \n" // d1*2217 + 51000
-
- "vceq.s16 d4, d7, #0 \n"
-
- "vshr.s16 d0, d0, #4 \n"
- "vshr.s16 d2, d2, #4 \n"
-
- "vmlal.s16 q11, d6, d17 \n" // c1*2217 + d1*5352 + 12000
- "vmlsl.s16 q12, d6, d16 \n" // d1*2217 - c1*5352 + 51000
-
- "vmvn d4, d4 \n" // !(d1 == 0)
- // op[4] = (c1*2217 + d1*5352 + 12000)>>16
- "vshrn.s32 d1, q11, #16 \n"
- // op[4] += (d1!=0)
- "vsub.s16 d1, d1, d4 \n"
- // op[12]= (d1*2217 - c1*5352 + 51000)>>16
- "vshrn.s32 d3, q12, #16 \n"
-
- // set result to out array
- "vst1.16 {q0, q1}, [%[out]] \n"
- : [src_ptr] "+r"(src_ptr), [ref_ptr] "+r"(ref_ptr),
- [coeff32] "+r"(coeff32) // modified registers
- : [kBPS] "r"(kBPS), [coeff16] "r"(coeff16),
- [out] "r"(out) // constants
- : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9",
- "q10", "q11", "q12", "q13" // clobbered
- );
-}
-
-static void FTransformWHT(const int16_t* in, int16_t* out) {
- const int kStep = 32;
- __asm__ volatile (
- // d0 = in[0 * 16] , d1 = in[1 * 16]
- // d2 = in[2 * 16] , d3 = in[3 * 16]
- "vld1.16 d0[0], [%[in]], %[kStep] \n"
- "vld1.16 d1[0], [%[in]], %[kStep] \n"
- "vld1.16 d2[0], [%[in]], %[kStep] \n"
- "vld1.16 d3[0], [%[in]], %[kStep] \n"
- "vld1.16 d0[1], [%[in]], %[kStep] \n"
- "vld1.16 d1[1], [%[in]], %[kStep] \n"
- "vld1.16 d2[1], [%[in]], %[kStep] \n"
- "vld1.16 d3[1], [%[in]], %[kStep] \n"
- "vld1.16 d0[2], [%[in]], %[kStep] \n"
- "vld1.16 d1[2], [%[in]], %[kStep] \n"
- "vld1.16 d2[2], [%[in]], %[kStep] \n"
- "vld1.16 d3[2], [%[in]], %[kStep] \n"
- "vld1.16 d0[3], [%[in]], %[kStep] \n"
- "vld1.16 d1[3], [%[in]], %[kStep] \n"
- "vld1.16 d2[3], [%[in]], %[kStep] \n"
- "vld1.16 d3[3], [%[in]], %[kStep] \n"
-
- "vaddl.s16 q2, d0, d2 \n" // a0=(in[0*16]+in[2*16])
- "vaddl.s16 q3, d1, d3 \n" // a1=(in[1*16]+in[3*16])
- "vsubl.s16 q4, d1, d3 \n" // a2=(in[1*16]-in[3*16])
- "vsubl.s16 q5, d0, d2 \n" // a3=(in[0*16]-in[2*16])
-
- "vqadd.s32 q6, q2, q3 \n" // a0 + a1
- "vqadd.s32 q7, q5, q4 \n" // a3 + a2
- "vqsub.s32 q8, q5, q4 \n" // a3 - a2
- "vqsub.s32 q9, q2, q3 \n" // a0 - a1
-
- // Transpose
- // q6 = tmp[0, 1, 2, 3] ; q7 = tmp[ 4, 5, 6, 7]
- // q8 = tmp[8, 9, 10, 11] ; q9 = tmp[12, 13, 14, 15]
- "vswp d13, d16 \n" // vtrn.64 q0, q2
- "vswp d15, d18 \n" // vtrn.64 q1, q3
- "vtrn.32 q6, q7 \n"
- "vtrn.32 q8, q9 \n"
-
- "vqadd.s32 q0, q6, q8 \n" // a0 = tmp[0] + tmp[8]
- "vqadd.s32 q1, q7, q9 \n" // a1 = tmp[4] + tmp[12]
- "vqsub.s32 q2, q7, q9 \n" // a2 = tmp[4] - tmp[12]
- "vqsub.s32 q3, q6, q8 \n" // a3 = tmp[0] - tmp[8]
-
- "vqadd.s32 q4, q0, q1 \n" // b0 = a0 + a1
- "vqadd.s32 q5, q3, q2 \n" // b1 = a3 + a2
- "vqsub.s32 q6, q3, q2 \n" // b2 = a3 - a2
- "vqsub.s32 q7, q0, q1 \n" // b3 = a0 - a1
-
- "vshrn.s32 d18, q4, #1 \n" // b0 >> 1
- "vshrn.s32 d19, q5, #1 \n" // b1 >> 1
- "vshrn.s32 d20, q6, #1 \n" // b2 >> 1
- "vshrn.s32 d21, q7, #1 \n" // b3 >> 1
-
- "vst1.16 {q9, q10}, [%[out]] \n"
-
- : [in] "+r"(in)
- : [kStep] "r"(kStep), [out] "r"(out)
- : "memory", "q0", "q1", "q2", "q3", "q4", "q5",
- "q6", "q7", "q8", "q9", "q10" // clobbered
- ) ;
-}
-
-//------------------------------------------------------------------------------
-// Texture distortion
-//
-// We try to match the spectral content (weighted) between source and
-// reconstructed samples.
-
-// Hadamard transform
-// Returns the weighted sum of the absolute value of transformed coefficients.
-// This uses a TTransform helper function in C
-static int Disto4x4(const uint8_t* const a, const uint8_t* const b,
- const uint16_t* const w) {
- const int kBPS = BPS;
- const uint8_t* A = a;
- const uint8_t* B = b;
- const uint16_t* W = w;
- int sum;
- __asm__ volatile (
- "vld1.32 d0[0], [%[a]], %[kBPS] \n"
- "vld1.32 d0[1], [%[a]], %[kBPS] \n"
- "vld1.32 d2[0], [%[a]], %[kBPS] \n"
- "vld1.32 d2[1], [%[a]] \n"
-
- "vld1.32 d1[0], [%[b]], %[kBPS] \n"
- "vld1.32 d1[1], [%[b]], %[kBPS] \n"
- "vld1.32 d3[0], [%[b]], %[kBPS] \n"
- "vld1.32 d3[1], [%[b]] \n"
-
- // a d0/d2, b d1/d3
- // d0/d1: 01 01 01 01
- // d2/d3: 23 23 23 23
- // But: it goes 01 45 23 67
- // Notice the middle values are transposed
- "vtrn.16 q0, q1 \n"
-
- // {a0, a1} = {in[0] + in[2], in[1] + in[3]}
- "vaddl.u8 q2, d0, d2 \n"
- "vaddl.u8 q10, d1, d3 \n"
- // {a3, a2} = {in[0] - in[2], in[1] - in[3]}
- "vsubl.u8 q3, d0, d2 \n"
- "vsubl.u8 q11, d1, d3 \n"
-
- // tmp[0] = a0 + a1
- "vpaddl.s16 q0, q2 \n"
- "vpaddl.s16 q8, q10 \n"
-
- // tmp[1] = a3 + a2
- "vpaddl.s16 q1, q3 \n"
- "vpaddl.s16 q9, q11 \n"
-
- // No pair subtract
- // q2 = {a0, a3}
- // q3 = {a1, a2}
- "vtrn.16 q2, q3 \n"
- "vtrn.16 q10, q11 \n"
-
- // {tmp[3], tmp[2]} = {a0 - a1, a3 - a2}
- "vsubl.s16 q12, d4, d6 \n"
- "vsubl.s16 q13, d5, d7 \n"
- "vsubl.s16 q14, d20, d22 \n"
- "vsubl.s16 q15, d21, d23 \n"
-
- // separate tmp[3] and tmp[2]
- // q12 = tmp[3]
- // q13 = tmp[2]
- "vtrn.32 q12, q13 \n"
- "vtrn.32 q14, q15 \n"
-
- // Transpose tmp for a
- "vswp d1, d26 \n" // vtrn.64
- "vswp d3, d24 \n" // vtrn.64
- "vtrn.32 q0, q1 \n"
- "vtrn.32 q13, q12 \n"
-
- // Transpose tmp for b
- "vswp d17, d30 \n" // vtrn.64
- "vswp d19, d28 \n" // vtrn.64
- "vtrn.32 q8, q9 \n"
- "vtrn.32 q15, q14 \n"
-
- // The first Q register is a, the second b.
- // q0/8 tmp[0-3]
- // q13/15 tmp[4-7]
- // q1/9 tmp[8-11]
- // q12/14 tmp[12-15]
-
- // These are still in 01 45 23 67 order. We fix it easily in the addition
- // case but the subtraction propagates them.
- "vswp d3, d27 \n"
- "vswp d19, d31 \n"
-
- // a0 = tmp[0] + tmp[8]
- "vadd.s32 q2, q0, q1 \n"
- "vadd.s32 q3, q8, q9 \n"
-
- // a1 = tmp[4] + tmp[12]
- "vadd.s32 q10, q13, q12 \n"
- "vadd.s32 q11, q15, q14 \n"
-
- // a2 = tmp[4] - tmp[12]
- "vsub.s32 q13, q13, q12 \n"
- "vsub.s32 q15, q15, q14 \n"
-
- // a3 = tmp[0] - tmp[8]
- "vsub.s32 q0, q0, q1 \n"
- "vsub.s32 q8, q8, q9 \n"
-
- // b0 = a0 + a1
- "vadd.s32 q1, q2, q10 \n"
- "vadd.s32 q9, q3, q11 \n"
-
- // b1 = a3 + a2
- "vadd.s32 q12, q0, q13 \n"
- "vadd.s32 q14, q8, q15 \n"
-
- // b2 = a3 - a2
- "vsub.s32 q0, q0, q13 \n"
- "vsub.s32 q8, q8, q15 \n"
-
- // b3 = a0 - a1
- "vsub.s32 q2, q2, q10 \n"
- "vsub.s32 q3, q3, q11 \n"
-
- "vld1.64 {q10, q11}, [%[w]] \n"
-
- // abs(b0)
- "vabs.s32 q1, q1 \n"
- "vabs.s32 q9, q9 \n"
- // abs(b1)
- "vabs.s32 q12, q12 \n"
- "vabs.s32 q14, q14 \n"
- // abs(b2)
- "vabs.s32 q0, q0 \n"
- "vabs.s32 q8, q8 \n"
- // abs(b3)
- "vabs.s32 q2, q2 \n"
- "vabs.s32 q3, q3 \n"
-
- // expand w before using.
- "vmovl.u16 q13, d20 \n"
- "vmovl.u16 q15, d21 \n"
-
- // w[0] * abs(b0)
- "vmul.u32 q1, q1, q13 \n"
- "vmul.u32 q9, q9, q13 \n"
-
- // w[4] * abs(b1)
- "vmla.u32 q1, q12, q15 \n"
- "vmla.u32 q9, q14, q15 \n"
-
- // expand w before using.
- "vmovl.u16 q13, d22 \n"
- "vmovl.u16 q15, d23 \n"
-
- // w[8] * abs(b1)
- "vmla.u32 q1, q0, q13 \n"
- "vmla.u32 q9, q8, q13 \n"
-
- // w[12] * abs(b1)
- "vmla.u32 q1, q2, q15 \n"
- "vmla.u32 q9, q3, q15 \n"
-
- // Sum the arrays
- "vpaddl.u32 q1, q1 \n"
- "vpaddl.u32 q9, q9 \n"
- "vadd.u64 d2, d3 \n"
- "vadd.u64 d18, d19 \n"
-
- // Hadamard transform needs 4 bits of extra precision (2 bits in each
- // direction) for dynamic raw. Weights w[] are 16bits at max, so the maximum
- // precision for coeff is 8bit of input + 4bits of Hadamard transform +
- // 16bits for w[] + 2 bits of abs() summation.
- //
- // This uses a maximum of 31 bits (signed). Discarding the top 32 bits is
- // A-OK.
-
- // sum2 - sum1
- "vsub.u32 d0, d2, d18 \n"
- // abs(sum2 - sum1)
- "vabs.s32 d0, d0 \n"
- // abs(sum2 - sum1) >> 5
- "vshr.u32 d0, #5 \n"
-
- // It would be better to move the value straight into r0 but I'm not
- // entirely sure how this works with inline assembly.
- "vmov.32 %[sum], d0[0] \n"
-
- : [sum] "=r"(sum), [a] "+r"(A), [b] "+r"(B), [w] "+r"(W)
- : [kBPS] "r"(kBPS)
- : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9",
- "q10", "q11", "q12", "q13", "q14", "q15" // clobbered
- ) ;
-
- return sum;
-}
-
-static int Disto16x16(const uint8_t* const a, const uint8_t* const b,
- const uint16_t* const w) {
- int D = 0;
- int x, y;
- for (y = 0; y < 16 * BPS; y += 4 * BPS) {
- for (x = 0; x < 16; x += 4) {
- D += Disto4x4(a + x + y, b + x + y, w);
- }
- }
- return D;
-}
-
-#endif // WEBP_USE_NEON
-
-//------------------------------------------------------------------------------
-// Entry point
-
-extern void VP8EncDspInitNEON(void);
-
-void VP8EncDspInitNEON(void) {
-#if defined(WEBP_USE_NEON)
- VP8ITransform = ITransform;
- VP8FTransform = FTransform;
-
- VP8ITransformWHT = ITransformWHT;
- VP8FTransformWHT = FTransformWHT;
-
- VP8TDisto4x4 = Disto4x4;
- VP8TDisto16x16 = Disto16x16;
-#endif // WEBP_USE_NEON
-}
-
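Restating the precision comment inside the deleted Disto4x4 as plain arithmetic: 8 bits of input + 4 bits from the 4x4 Hadamard (2 bits per pass) + 16 bits for w[] + 2 bits of abs() summation = 30 magnitude bits, which fits a signed 32-bit accumulator, so discarding the top halves of the 64-bit pairwise sums loses nothing.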
diff --git a/drivers/webp/dsp/enc_sse2.c b/drivers/webp/dsp/enc_sse2.c
index 540a3cb2db..b046761dc1 100644
--- a/drivers/webp/dsp/enc_sse2.c
+++ b/drivers/webp/dsp/enc_sse2.c
@@ -1,10 +1,8 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
-// Use of this source code is governed by a BSD-style license
-// that can be found in the COPYING file in the root of the source
-// tree. An additional intellectual property rights grant can be found
-// in the file PATENTS. All contributing project authors may
-// be found in the AUTHORS file in the root of the source tree.
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// -----------------------------------------------------------------------------
//
// SSE2 version of speed-critical encoding functions.
@@ -19,48 +17,21 @@
#include "../enc/vp8enci.h"
-//------------------------------------------------------------------------------
-// Quite useful macro for debugging. Left here for convenience.
-
-#if 0
-#include <stdio.h>
-static void PrintReg(const __m128i r, const char* const name, int size) {
- int n;
- union {
- __m128i r;
- uint8_t i8[16];
- uint16_t i16[8];
- uint32_t i32[4];
- uint64_t i64[2];
- } tmp;
- tmp.r = r;
- printf("%s\t: ", name);
- if (size == 8) {
- for (n = 0; n < 16; ++n) printf("%.2x ", tmp.i8[n]);
- } else if (size == 16) {
- for (n = 0; n < 8; ++n) printf("%.4x ", tmp.i16[n]);
- } else if (size == 32) {
- for (n = 0; n < 4; ++n) printf("%.8x ", tmp.i32[n]);
- } else {
- for (n = 0; n < 2; ++n) printf("%.16lx ", tmp.i64[n]);
- }
- printf("\n");
-}
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
#endif
//------------------------------------------------------------------------------
// Compute susceptibility based on DCT-coeff histograms:
// the higher, the "easier" the macroblock is to compress.
-static void CollectHistogramSSE2(const uint8_t* ref, const uint8_t* pred,
- int start_block, int end_block,
- VP8Histogram* const histo) {
+static int CollectHistogramSSE2(const uint8_t* ref, const uint8_t* pred,
+ int start_block, int end_block) {
+ int histo[MAX_COEFF_THRESH + 1] = { 0 };
+ int16_t out[16];
+ int j, k;
const __m128i max_coeff_thresh = _mm_set1_epi16(MAX_COEFF_THRESH);
- int j;
for (j = start_block; j < end_block; ++j) {
- int16_t out[16];
- int k;
-
VP8FTransform(ref + VP8DspScan[j], pred + VP8DspScan[j], out);
// Convert coefficients to bin (within out[]).
@@ -76,9 +47,9 @@ static void CollectHistogramSSE2(const uint8_t* ref, const uint8_t* pred,
const __m128i xor1 = _mm_xor_si128(out1, sign1);
const __m128i abs0 = _mm_sub_epi16(xor0, sign0);
const __m128i abs1 = _mm_sub_epi16(xor1, sign1);
- // v = abs(out) >> 3
- const __m128i v0 = _mm_srai_epi16(abs0, 3);
- const __m128i v1 = _mm_srai_epi16(abs1, 3);
+ // v = abs(out) >> 2
+ const __m128i v0 = _mm_srai_epi16(abs0, 2);
+ const __m128i v1 = _mm_srai_epi16(abs1, 2);
// bin = min(v, MAX_COEFF_THRESH)
const __m128i bin0 = _mm_min_epi16(v0, max_coeff_thresh);
const __m128i bin1 = _mm_min_epi16(v1, max_coeff_thresh);
@@ -87,11 +58,13 @@ static void CollectHistogramSSE2(const uint8_t* ref, const uint8_t* pred,
_mm_storeu_si128((__m128i*)&out[8], bin1);
}
- // Convert coefficients to bin.
+ // Use bin to update histogram.
for (k = 0; k < 16; ++k) {
- histo->distribution[out[k]]++;
+ histo[out[k]]++;
}
}
+
+ return VP8GetAlpha(histo);
}
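
The xor/subtract pair above is the standard branchless abs() for packed 16-bit
lanes. Per coefficient, the binning reduces to this scalar sketch
(illustration only; the threshold is passed in so the snippet stands alone,
whereas the real code uses MAX_COEFF_THRESH from vp8enci.h):

    #include <stdint.h>

    static int CoeffToBin(int16_t v, int max_coeff_thresh) {
      const int16_t sign = v >> 15;                        /* 0x0000 / 0xffff */
      const int16_t abs_v = (int16_t)((v ^ sign) - sign);  /* abs(v) */
      const int bin = abs_v >> 2;                          /* abs(out) >> 2 */
      return (bin > max_coeff_thresh) ? max_coeff_thresh : bin;
    }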
//------------------------------------------------------------------------------
@@ -270,7 +243,7 @@ static void ITransformSSE2(const uint8_t* ref, const int16_t* in, uint8_t* dst,
// Add inverse transform to 'ref' and store.
{
- const __m128i zero = _mm_setzero_si128();
+ const __m128i zero = _mm_set1_epi16(0);
// Load the reference(s).
__m128i ref0, ref1, ref2, ref3;
if (do_two) {
@@ -322,22 +295,16 @@ static void FTransformSSE2(const uint8_t* src, const uint8_t* ref,
int16_t* out) {
const __m128i zero = _mm_setzero_si128();
const __m128i seven = _mm_set1_epi16(7);
- const __m128i k937 = _mm_set1_epi32(937);
- const __m128i k1812 = _mm_set1_epi32(1812);
+ const __m128i k7500 = _mm_set1_epi32(7500);
+ const __m128i k14500 = _mm_set1_epi32(14500);
const __m128i k51000 = _mm_set1_epi32(51000);
const __m128i k12000_plus_one = _mm_set1_epi32(12000 + (1 << 16));
const __m128i k5352_2217 = _mm_set_epi16(5352, 2217, 5352, 2217,
5352, 2217, 5352, 2217);
const __m128i k2217_5352 = _mm_set_epi16(2217, -5352, 2217, -5352,
2217, -5352, 2217, -5352);
- const __m128i k88p = _mm_set_epi16(8, 8, 8, 8, 8, 8, 8, 8);
- const __m128i k88m = _mm_set_epi16(-8, 8, -8, 8, -8, 8, -8, 8);
- const __m128i k5352_2217p = _mm_set_epi16(2217, 5352, 2217, 5352,
- 2217, 5352, 2217, 5352);
- const __m128i k5352_2217m = _mm_set_epi16(-5352, 2217, -5352, 2217,
- -5352, 2217, -5352, 2217);
- __m128i v01, v32;
+ __m128i v01, v32;
// Difference between src and ref and initial transpose.
{
@@ -359,52 +326,73 @@ static void FTransformSSE2(const uint8_t* src, const uint8_t* ref,
const __m128i ref_1 = _mm_unpacklo_epi8(ref1, zero);
const __m128i ref_2 = _mm_unpacklo_epi8(ref2, zero);
const __m128i ref_3 = _mm_unpacklo_epi8(ref3, zero);
- // Compute difference. -> 00 01 02 03 00 00 00 00
+ // Compute difference.
const __m128i diff0 = _mm_sub_epi16(src_0, ref_0);
const __m128i diff1 = _mm_sub_epi16(src_1, ref_1);
const __m128i diff2 = _mm_sub_epi16(src_2, ref_2);
const __m128i diff3 = _mm_sub_epi16(src_3, ref_3);
-
- // Unpack and shuffle
+ // Transpose.
// 00 01 02 03 0 0 0 0
// 10 11 12 13 0 0 0 0
// 20 21 22 23 0 0 0 0
// 30 31 32 33 0 0 0 0
- const __m128i shuf01 = _mm_unpacklo_epi32(diff0, diff1);
- const __m128i shuf23 = _mm_unpacklo_epi32(diff2, diff3);
- // 00 01 10 11 02 03 12 13
- // 20 21 30 31 22 23 32 33
- const __m128i shuf01_p =
- _mm_shufflehi_epi16(shuf01, _MM_SHUFFLE(2, 3, 0, 1));
- const __m128i shuf23_p =
- _mm_shufflehi_epi16(shuf23, _MM_SHUFFLE(2, 3, 0, 1));
- // 00 01 10 11 03 02 13 12
- // 20 21 30 31 23 22 33 32
- const __m128i s01 = _mm_unpacklo_epi64(shuf01_p, shuf23_p);
- const __m128i s32 = _mm_unpackhi_epi64(shuf01_p, shuf23_p);
- // 00 01 10 11 20 21 30 31
- // 03 02 13 12 23 22 33 32
- const __m128i a01 = _mm_add_epi16(s01, s32);
- const __m128i a32 = _mm_sub_epi16(s01, s32);
- // [d0 + d3 | d1 + d2 | ...] = [a0 a1 | a0' a1' | ... ]
- // [d0 - d3 | d1 - d2 | ...] = [a3 a2 | a3' a2' | ... ]
-
- const __m128i tmp0 = _mm_madd_epi16(a01, k88p); // [ (a0 + a1) << 3, ... ]
- const __m128i tmp2 = _mm_madd_epi16(a01, k88m); // [ (a0 - a1) << 3, ... ]
- const __m128i tmp1_1 = _mm_madd_epi16(a32, k5352_2217p);
- const __m128i tmp3_1 = _mm_madd_epi16(a32, k5352_2217m);
- const __m128i tmp1_2 = _mm_add_epi32(tmp1_1, k1812);
- const __m128i tmp3_2 = _mm_add_epi32(tmp3_1, k937);
- const __m128i tmp1 = _mm_srai_epi32(tmp1_2, 9);
- const __m128i tmp3 = _mm_srai_epi32(tmp3_2, 9);
- const __m128i s03 = _mm_packs_epi32(tmp0, tmp2);
- const __m128i s12 = _mm_packs_epi32(tmp1, tmp3);
- const __m128i s_lo = _mm_unpacklo_epi16(s03, s12); // 0 1 0 1 0 1...
- const __m128i s_hi = _mm_unpackhi_epi16(s03, s12); // 2 3 2 3 2 3
- const __m128i v23 = _mm_unpackhi_epi32(s_lo, s_hi);
- v01 = _mm_unpacklo_epi32(s_lo, s_hi);
- v32 = _mm_shuffle_epi32(v23, _MM_SHUFFLE(1, 0, 3, 2)); // 3 2 3 2 3 2..
+ const __m128i transpose0_0 = _mm_unpacklo_epi16(diff0, diff1);
+ const __m128i transpose0_1 = _mm_unpacklo_epi16(diff2, diff3);
+ // 00 10 01 11 02 12 03 13
+ // 20 30 21 31 22 32 23 33
+ const __m128i v23 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
+ v01 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
+ v32 = _mm_shuffle_epi32(v23, _MM_SHUFFLE(1, 0, 3, 2));
+ // a02 a12 a22 a32 a03 a13 a23 a33
+ // a00 a10 a20 a30 a01 a11 a21 a31
+ // a03 a13 a23 a33 a02 a12 a22 a32
+ }
+
+ // First pass and subsequent transpose.
+ {
+ // Same operations are done on the (0,3) and (1,2) pairs.
+ // b0 = (a0 + a3) << 3
+ // b1 = (a1 + a2) << 3
+ // b3 = (a0 - a3) << 3
+ // b2 = (a1 - a2) << 3
+ const __m128i a01 = _mm_add_epi16(v01, v32);
+ const __m128i a32 = _mm_sub_epi16(v01, v32);
+ const __m128i b01 = _mm_slli_epi16(a01, 3);
+ const __m128i b32 = _mm_slli_epi16(a32, 3);
+ const __m128i b11 = _mm_unpackhi_epi64(b01, b01);
+ const __m128i b22 = _mm_unpackhi_epi64(b32, b32);
+
+ // e0 = b0 + b1
+ // e2 = b0 - b1
+ const __m128i e0 = _mm_add_epi16(b01, b11);
+ const __m128i e2 = _mm_sub_epi16(b01, b11);
+ const __m128i e02 = _mm_unpacklo_epi64(e0, e2);
+
+ // e1 = (b3 * 5352 + b2 * 2217 + 14500) >> 12
+ // e3 = (b3 * 2217 - b2 * 5352 + 7500) >> 12
+ const __m128i b23 = _mm_unpacklo_epi16(b22, b32);
+ const __m128i c1 = _mm_madd_epi16(b23, k5352_2217);
+ const __m128i c3 = _mm_madd_epi16(b23, k2217_5352);
+ const __m128i d1 = _mm_add_epi32(c1, k14500);
+ const __m128i d3 = _mm_add_epi32(c3, k7500);
+ const __m128i e1 = _mm_srai_epi32(d1, 12);
+ const __m128i e3 = _mm_srai_epi32(d3, 12);
+ const __m128i e13 = _mm_packs_epi32(e1, e3);
+
+ // Transpose.
+ // 00 01 02 03 20 21 22 23
+ // 10 11 12 13 30 31 32 33
+ const __m128i transpose0_0 = _mm_unpacklo_epi16(e02, e13);
+ const __m128i transpose0_1 = _mm_unpackhi_epi16(e02, e13);
+ // 00 10 01 11 02 12 03 13
+ // 20 30 21 31 22 32 23 33
+ const __m128i v23 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
+ v01 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
+ v32 = _mm_shuffle_epi32(v23, _MM_SHUFFLE(1, 0, 3, 2));
+ // 02 12 22 32 03 13 23 33
+ // 00 10 20 30 01 11 21 31
+ // 03 13 23 33 02 12 22 32
}
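
Written out in scalar form, the first pass above applies the following to each
row of input differences d[0..3] (a sketch of the arithmetic only; the SSE2
code runs all four rows at once and transposes in registers):

    #include <stdint.h>

    static void FTransformPass1Row(const int16_t d[4], int16_t e[4]) {
      const int b0 = (d[0] + d[3]) << 3;
      const int b1 = (d[1] + d[2]) << 3;
      const int b2 = (d[1] - d[2]) << 3;
      const int b3 = (d[0] - d[3]) << 3;
      e[0] = (int16_t)(b0 + b1);
      e[2] = (int16_t)(b0 - b1);
      e[1] = (int16_t)((b3 * 5352 + b2 * 2217 + 14500) >> 12);
      e[3] = (int16_t)((b3 * 2217 - b2 * 5352 +  7500) >> 12);
    }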
// Second pass
@@ -418,12 +406,13 @@ static void FTransformSSE2(const uint8_t* src, const uint8_t* ref,
const __m128i a32 = _mm_sub_epi16(v01, v32);
const __m128i a11 = _mm_unpackhi_epi64(a01, a01);
const __m128i a22 = _mm_unpackhi_epi64(a32, a32);
- const __m128i a01_plus_7 = _mm_add_epi16(a01, seven);
// d0 = (a0 + a1 + 7) >> 4;
// d2 = (a0 - a1 + 7) >> 4;
- const __m128i c0 = _mm_add_epi16(a01_plus_7, a11);
- const __m128i c2 = _mm_sub_epi16(a01_plus_7, a11);
+ const __m128i b0 = _mm_add_epi16(a01, a11);
+ const __m128i b2 = _mm_sub_epi16(a01, a11);
+ const __m128i c0 = _mm_add_epi16(b0, seven);
+ const __m128i c2 = _mm_add_epi16(b2, seven);
const __m128i d0 = _mm_srai_epi16(c0, 4);
const __m128i d2 = _mm_srai_epi16(c2, 4);
@@ -441,7 +430,6 @@ static void FTransformSSE2(const uint8_t* src, const uint8_t* ref,
// f1 = f1 + (a3 != 0);
// The compare will return (0xffff, 0) for (==0, !=0). To turn that into the
// desired (0, 1), we add one earlier through k12000_plus_one.
- // -> f1 = f1 + 1 - (a3 == 0)
const __m128i g1 = _mm_add_epi16(f1, _mm_cmpeq_epi16(a32, zero));
_mm_storel_epi64((__m128i*)&out[ 0], d0);
@@ -451,137 +439,13 @@ static void FTransformSSE2(const uint8_t* src, const uint8_t* ref,
}
}
-static void FTransformWHTSSE2(const int16_t* in, int16_t* out) {
- int32_t tmp[16];
- int i;
- for (i = 0; i < 4; ++i, in += 64) {
- const int a0 = (in[0 * 16] + in[2 * 16]);
- const int a1 = (in[1 * 16] + in[3 * 16]);
- const int a2 = (in[1 * 16] - in[3 * 16]);
- const int a3 = (in[0 * 16] - in[2 * 16]);
- tmp[0 + i * 4] = a0 + a1;
- tmp[1 + i * 4] = a3 + a2;
- tmp[2 + i * 4] = a3 - a2;
- tmp[3 + i * 4] = a0 - a1;
- }
- {
- const __m128i src0 = _mm_loadu_si128((__m128i*)&tmp[0]);
- const __m128i src1 = _mm_loadu_si128((__m128i*)&tmp[4]);
- const __m128i src2 = _mm_loadu_si128((__m128i*)&tmp[8]);
- const __m128i src3 = _mm_loadu_si128((__m128i*)&tmp[12]);
- const __m128i a0 = _mm_add_epi32(src0, src2);
- const __m128i a1 = _mm_add_epi32(src1, src3);
- const __m128i a2 = _mm_sub_epi32(src1, src3);
- const __m128i a3 = _mm_sub_epi32(src0, src2);
- const __m128i b0 = _mm_srai_epi32(_mm_add_epi32(a0, a1), 1);
- const __m128i b1 = _mm_srai_epi32(_mm_add_epi32(a3, a2), 1);
- const __m128i b2 = _mm_srai_epi32(_mm_sub_epi32(a3, a2), 1);
- const __m128i b3 = _mm_srai_epi32(_mm_sub_epi32(a0, a1), 1);
- const __m128i out0 = _mm_packs_epi32(b0, b1);
- const __m128i out1 = _mm_packs_epi32(b2, b3);
- _mm_storeu_si128((__m128i*)&out[0], out0);
- _mm_storeu_si128((__m128i*)&out[8], out1);
- }
-}
-
//------------------------------------------------------------------------------
// Metric
-static int SSE_Nx4SSE2(const uint8_t* a, const uint8_t* b,
- int num_quads, int do_16) {
- const __m128i zero = _mm_setzero_si128();
- __m128i sum1 = zero;
- __m128i sum2 = zero;
-
- while (num_quads-- > 0) {
- // Note: for the !do_16 case, we read 16 pixels instead of 8 but that's ok,
- // thanks to buffer over-allocation to that effect.
- const __m128i a0 = _mm_loadu_si128((__m128i*)&a[BPS * 0]);
- const __m128i a1 = _mm_loadu_si128((__m128i*)&a[BPS * 1]);
- const __m128i a2 = _mm_loadu_si128((__m128i*)&a[BPS * 2]);
- const __m128i a3 = _mm_loadu_si128((__m128i*)&a[BPS * 3]);
- const __m128i b0 = _mm_loadu_si128((__m128i*)&b[BPS * 0]);
- const __m128i b1 = _mm_loadu_si128((__m128i*)&b[BPS * 1]);
- const __m128i b2 = _mm_loadu_si128((__m128i*)&b[BPS * 2]);
- const __m128i b3 = _mm_loadu_si128((__m128i*)&b[BPS * 3]);
-
- // compute clip0(a-b) and clip0(b-a)
- const __m128i a0p = _mm_subs_epu8(a0, b0);
- const __m128i a0m = _mm_subs_epu8(b0, a0);
- const __m128i a1p = _mm_subs_epu8(a1, b1);
- const __m128i a1m = _mm_subs_epu8(b1, a1);
- const __m128i a2p = _mm_subs_epu8(a2, b2);
- const __m128i a2m = _mm_subs_epu8(b2, a2);
- const __m128i a3p = _mm_subs_epu8(a3, b3);
- const __m128i a3m = _mm_subs_epu8(b3, a3);
-
- // compute |a-b| with 8b arithmetic as clip0(a-b) | clip0(b-a)
- const __m128i diff0 = _mm_or_si128(a0p, a0m);
- const __m128i diff1 = _mm_or_si128(a1p, a1m);
- const __m128i diff2 = _mm_or_si128(a2p, a2m);
- const __m128i diff3 = _mm_or_si128(a3p, a3m);
-
- // unpack (only four operations, instead of eight)
- const __m128i low0 = _mm_unpacklo_epi8(diff0, zero);
- const __m128i low1 = _mm_unpacklo_epi8(diff1, zero);
- const __m128i low2 = _mm_unpacklo_epi8(diff2, zero);
- const __m128i low3 = _mm_unpacklo_epi8(diff3, zero);
-
- // multiply with self
- const __m128i low_madd0 = _mm_madd_epi16(low0, low0);
- const __m128i low_madd1 = _mm_madd_epi16(low1, low1);
- const __m128i low_madd2 = _mm_madd_epi16(low2, low2);
- const __m128i low_madd3 = _mm_madd_epi16(low3, low3);
-
- // collect in a cascading way
- const __m128i low_sum0 = _mm_add_epi32(low_madd0, low_madd1);
- const __m128i low_sum1 = _mm_add_epi32(low_madd2, low_madd3);
- sum1 = _mm_add_epi32(sum1, low_sum0);
- sum2 = _mm_add_epi32(sum2, low_sum1);
-
- if (do_16) { // if necessary, process the higher 8 bytes similarly
- const __m128i hi0 = _mm_unpackhi_epi8(diff0, zero);
- const __m128i hi1 = _mm_unpackhi_epi8(diff1, zero);
- const __m128i hi2 = _mm_unpackhi_epi8(diff2, zero);
- const __m128i hi3 = _mm_unpackhi_epi8(diff3, zero);
-
- const __m128i hi_madd0 = _mm_madd_epi16(hi0, hi0);
- const __m128i hi_madd1 = _mm_madd_epi16(hi1, hi1);
- const __m128i hi_madd2 = _mm_madd_epi16(hi2, hi2);
- const __m128i hi_madd3 = _mm_madd_epi16(hi3, hi3);
- const __m128i hi_sum0 = _mm_add_epi32(hi_madd0, hi_madd1);
- const __m128i hi_sum1 = _mm_add_epi32(hi_madd2, hi_madd3);
- sum1 = _mm_add_epi32(sum1, hi_sum0);
- sum2 = _mm_add_epi32(sum2, hi_sum1);
- }
- a += 4 * BPS;
- b += 4 * BPS;
- }
- {
- int32_t tmp[4];
- const __m128i sum = _mm_add_epi32(sum1, sum2);
- _mm_storeu_si128((__m128i*)tmp, sum);
- return (tmp[3] + tmp[2] + tmp[1] + tmp[0]);
- }
-}
-
-static int SSE16x16SSE2(const uint8_t* a, const uint8_t* b) {
- return SSE_Nx4SSE2(a, b, 4, 1);
-}
-
-static int SSE16x8SSE2(const uint8_t* a, const uint8_t* b) {
- return SSE_Nx4SSE2(a, b, 2, 1);
-}
-
-static int SSE8x8SSE2(const uint8_t* a, const uint8_t* b) {
- return SSE_Nx4SSE2(a, b, 2, 0);
-}
-
static int SSE4x4SSE2(const uint8_t* a, const uint8_t* b) {
- const __m128i zero = _mm_setzero_si128();
+ const __m128i zero = _mm_set1_epi16(0);
- // Load values. Note that we read 8 pixels instead of 4,
- // but the a/b buffers are over-allocated to that effect.
+ // Load values.
const __m128i a0 = _mm_loadl_epi64((__m128i*)&a[BPS * 0]);
const __m128i a1 = _mm_loadl_epi64((__m128i*)&a[BPS * 1]);
const __m128i a2 = _mm_loadl_epi64((__m128i*)&a[BPS * 2]);
@@ -619,7 +483,6 @@ static int SSE4x4SSE2(const uint8_t* a, const uint8_t* b) {
const __m128i sum0 = _mm_add_epi32(madd0, madd1);
const __m128i sum1 = _mm_add_epi32(madd2, madd3);
const __m128i sum2 = _mm_add_epi32(sum0, sum1);
-
int32_t tmp[4];
_mm_storeu_si128((__m128i*)tmp, sum2);
return (tmp[3] + tmp[2] + tmp[1] + tmp[0]);
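
The value above is just the sum of squared differences over the 4x4 block;
_mm_madd_epi16 produces and pairwise-adds the d*d products. A scalar sketch
(the stride is a parameter here so the snippet stands alone; the real code
uses BPS):

    #include <stdint.h>

    static int SSE4x4_C(const uint8_t* a, const uint8_t* b, int stride) {
      int x, y, sum = 0;
      for (y = 0; y < 4; ++y) {
        for (x = 0; x < 4; ++x) {
          const int d = a[x + y * stride] - b[x + y * stride];
          sum += d * d;
        }
      }
      return sum;
    }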
@@ -639,8 +502,10 @@ static int TTransformSSE2(const uint8_t* inA, const uint8_t* inB,
int32_t sum[4];
__m128i tmp_0, tmp_1, tmp_2, tmp_3;
const __m128i zero = _mm_setzero_si128();
+ const __m128i one = _mm_set1_epi16(1);
+ const __m128i three = _mm_set1_epi16(3);
  // Load, combine and transpose inputs.
{
const __m128i inA_0 = _mm_loadl_epi64((__m128i*)&inA[BPS * 0]);
const __m128i inA_1 = _mm_loadl_epi64((__m128i*)&inA[BPS * 1]);
@@ -685,14 +550,17 @@ static int TTransformSSE2(const uint8_t* inA, const uint8_t* inB,
// Horizontal pass and subsequent transpose.
{
// Calculate a and b (two 4x4 at once).
- const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2);
- const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3);
- const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3);
- const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2);
- const __m128i b0 = _mm_add_epi16(a0, a1);
+ const __m128i a0 = _mm_slli_epi16(_mm_add_epi16(tmp_0, tmp_2), 2);
+ const __m128i a1 = _mm_slli_epi16(_mm_add_epi16(tmp_1, tmp_3), 2);
+ const __m128i a2 = _mm_slli_epi16(_mm_sub_epi16(tmp_1, tmp_3), 2);
+ const __m128i a3 = _mm_slli_epi16(_mm_sub_epi16(tmp_0, tmp_2), 2);
+ // b0_extra = (a0 != 0);
+    const __m128i b0_extra = _mm_andnot_si128(_mm_cmpeq_epi16(a0, zero), one);
+ const __m128i b0_base = _mm_add_epi16(a0, a1);
const __m128i b1 = _mm_add_epi16(a3, a2);
const __m128i b2 = _mm_sub_epi16(a3, a2);
const __m128i b3 = _mm_sub_epi16(a0, a1);
+ const __m128i b0 = _mm_add_epi16(b0_base, b0_extra);
// a00 a01 a02 a03 b00 b01 b02 b03
// a10 a11 a12 a13 b10 b11 b12 b13
// a20 a21 a22 a23 b20 b21 b22 b23
@@ -767,6 +635,19 @@ static int TTransformSSE2(const uint8_t* inA, const uint8_t* inB,
B_b2 = _mm_sub_epi16(B_b2, sign_B_b2);
}
+ // b = abs(b) + 3
+ A_b0 = _mm_add_epi16(A_b0, three);
+ A_b2 = _mm_add_epi16(A_b2, three);
+ B_b0 = _mm_add_epi16(B_b0, three);
+ B_b2 = _mm_add_epi16(B_b2, three);
+
+ // abs((b + (b<0) + 3) >> 3) = (abs(b) + 3) >> 3
+ // b = (abs(b) + 3) >> 3
+ A_b0 = _mm_srai_epi16(A_b0, 3);
+ A_b2 = _mm_srai_epi16(A_b2, 3);
+ B_b0 = _mm_srai_epi16(B_b0, 3);
+ B_b2 = _mm_srai_epi16(B_b2, 3);
+
// weighted sums
A_b0 = _mm_madd_epi16(A_b0, w_0);
A_b2 = _mm_madd_epi16(A_b2, w_8);
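
The shift identity can be checked on a negative value: for b = -5, arithmetic
shift gives ((-5) + 1 + 3) >> 3 = -1, whose absolute value 1 equals
(abs(-5) + 3) >> 3 = (5 + 3) >> 3 = 1, so the abs() may be taken first and the
bias-and-shift applied afterwards.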
@@ -785,7 +666,7 @@ static int TTransformSSE2(const uint8_t* inA, const uint8_t* inB,
static int Disto4x4SSE2(const uint8_t* const a, const uint8_t* const b,
const uint16_t* const w) {
const int diff_sum = TTransformSSE2(a, b, w);
- return abs(diff_sum) >> 5;
+ return (abs(diff_sum) + 8) >> 4;
}
static int Disto16x16SSE2(const uint8_t* const a, const uint8_t* const b,
@@ -800,6 +681,7 @@ static int Disto16x16SSE2(const uint8_t* const a, const uint8_t* const b,
return D;
}
+
//------------------------------------------------------------------------------
// Quantization
//
@@ -807,8 +689,9 @@ static int Disto16x16SSE2(const uint8_t* const a, const uint8_t* const b,
// Simple quantization
static int QuantizeBlockSSE2(int16_t in[16], int16_t out[16],
int n, const VP8Matrix* const mtx) {
- const __m128i max_coeff_2047 = _mm_set1_epi16(MAX_LEVEL);
- const __m128i zero = _mm_setzero_si128();
+ const __m128i max_coeff_2047 = _mm_set1_epi16(2047);
+ const __m128i zero = _mm_set1_epi16(0);
+ __m128i sign0, sign8;
__m128i coeff0, coeff8;
__m128i out0, out8;
__m128i packed_out;
@@ -826,10 +709,12 @@ static int QuantizeBlockSSE2(int16_t in[16], int16_t out[16],
const __m128i bias8 = _mm_loadu_si128((__m128i*)&mtx->bias_[8]);
const __m128i q0 = _mm_loadu_si128((__m128i*)&mtx->q_[0]);
const __m128i q8 = _mm_loadu_si128((__m128i*)&mtx->q_[8]);
+ const __m128i zthresh0 = _mm_loadu_si128((__m128i*)&mtx->zthresh_[0]);
+ const __m128i zthresh8 = _mm_loadu_si128((__m128i*)&mtx->zthresh_[8]);
// sign(in) = in >> 15 (0x0000 if positive, 0xffff if negative)
- const __m128i sign0 = _mm_srai_epi16(in0, 15);
- const __m128i sign8 = _mm_srai_epi16(in8, 15);
+ sign0 = _mm_srai_epi16(in0, 15);
+ sign8 = _mm_srai_epi16(in8, 15);
// coeff = abs(in) = (in ^ sign) - sign
coeff0 = _mm_xor_si128(in0, sign0);
@@ -841,6 +726,10 @@ static int QuantizeBlockSSE2(int16_t in[16], int16_t out[16],
coeff0 = _mm_add_epi16(coeff0, sharpen0);
coeff8 = _mm_add_epi16(coeff8, sharpen8);
+ // if (coeff > 2047) coeff = 2047
+ coeff0 = _mm_min_epi16(coeff0, max_coeff_2047);
+ coeff8 = _mm_min_epi16(coeff8, max_coeff_2047);
+
// out = (coeff * iQ + B) >> QFIX;
{
// doing calculations with 32b precision (QFIX=17)
@@ -868,14 +757,9 @@ static int QuantizeBlockSSE2(int16_t in[16], int16_t out[16],
out_04 = _mm_srai_epi32(out_04, QFIX);
out_08 = _mm_srai_epi32(out_08, QFIX);
out_12 = _mm_srai_epi32(out_12, QFIX);
-
// pack result as 16b
out0 = _mm_packs_epi32(out_00, out_04);
out8 = _mm_packs_epi32(out_08, out_12);
-
- // if (coeff > 2047) coeff = 2047
- out0 = _mm_min_epi16(out0, max_coeff_2047);
- out8 = _mm_min_epi16(out8, max_coeff_2047);
}
// get sign back (if (sign[j]) out_n = -out_n)
@@ -888,8 +772,17 @@ static int QuantizeBlockSSE2(int16_t in[16], int16_t out[16],
in0 = _mm_mullo_epi16(out0, q0);
in8 = _mm_mullo_epi16(out8, q8);
- _mm_storeu_si128((__m128i*)&in[0], in0);
- _mm_storeu_si128((__m128i*)&in[8], in8);
+ // if (coeff <= mtx->zthresh_) {in=0; out=0;}
+ {
+ __m128i cmp0 = _mm_cmpgt_epi16(coeff0, zthresh0);
+ __m128i cmp8 = _mm_cmpgt_epi16(coeff8, zthresh8);
+ in0 = _mm_and_si128(in0, cmp0);
+ in8 = _mm_and_si128(in8, cmp8);
+ _mm_storeu_si128((__m128i*)&in[0], in0);
+ _mm_storeu_si128((__m128i*)&in[8], in8);
+ out0 = _mm_and_si128(out0, cmp0);
+ out8 = _mm_and_si128(out8, cmp8);
+ }
// zigzag the output before storing it.
//
@@ -926,32 +819,19 @@ static int QuantizeBlockSSE2(int16_t in[16], int16_t out[16],
}
}
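
Per coefficient, the whole routine reduces to the scalar sketch below (QFIX is
17 as noted above; the VP8Matrix field names follow the library headers, but
treat this as an illustration rather than the canonical implementation):

    static int QuantizeOne(int in, int j, const VP8Matrix* const mtx) {
      const int sign = (in < 0);
      int coeff = (sign ? -in : in) + mtx->sharpen_[j];
      if (coeff > 2047) coeff = 2047;                    /* clamp */
      if (coeff > mtx->zthresh_[j]) {                    /* zero-threshold */
        const int out = (coeff * mtx->iq_[j] + mtx->bias_[j]) >> QFIX;
        return sign ? -out : out;                        /* restore sign */
      }
      return 0;
    }

The real code additionally writes the dequantized value out * mtx->q_[j] back
into in[] and stores out[] in zigzag order.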
-static int QuantizeBlockWHTSSE2(int16_t in[16], int16_t out[16],
- const VP8Matrix* const mtx) {
- return QuantizeBlockSSE2(in, out, 0, mtx);
-}
-
-#endif // WEBP_USE_SSE2
-
-//------------------------------------------------------------------------------
-// Entry point
-
extern void VP8EncDspInitSSE2(void);
-
void VP8EncDspInitSSE2(void) {
-#if defined(WEBP_USE_SSE2)
VP8CollectHistogram = CollectHistogramSSE2;
VP8EncQuantizeBlock = QuantizeBlockSSE2;
- VP8EncQuantizeBlockWHT = QuantizeBlockWHTSSE2;
VP8ITransform = ITransformSSE2;
VP8FTransform = FTransformSSE2;
- VP8FTransformWHT = FTransformWHTSSE2;
- VP8SSE16x16 = SSE16x16SSE2;
- VP8SSE16x8 = SSE16x8SSE2;
- VP8SSE8x8 = SSE8x8SSE2;
VP8SSE4x4 = SSE4x4SSE2;
VP8TDisto4x4 = Disto4x4SSE2;
VP8TDisto16x16 = Disto16x16SSE2;
-#endif // WEBP_USE_SSE2
}
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
+
+#endif // WEBP_USE_SSE2
diff --git a/drivers/webp/dsp/lossless.c b/drivers/webp/dsp/lossless.c
index bab76d22de..62a6b7b15a 100644
--- a/drivers/webp/dsp/lossless.c
+++ b/drivers/webp/dsp/lossless.c
@@ -1,10 +1,8 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
-// Use of this source code is governed by a BSD-style license
-// that can be found in the COPYING file in the root of the source
-// tree. An additional intellectual property rights grant can be found
-// in the file PATENTS. All contributing project authors may
-// be found in the AUTHORS file in the root of the source tree.
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// -----------------------------------------------------------------------------
//
// Image transforms and color space conversion methods for lossless decoder.
@@ -13,24 +11,25 @@
// Jyrki Alakuijala (jyrki@google.com)
// Urvang Joshi (urvang@google.com)
-#include "./dsp.h"
-
-#if defined(WEBP_USE_SSE2)
-#include <emmintrin.h>
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
#endif
#include <math.h>
#include <stdlib.h>
#include "./lossless.h"
#include "../dec/vp8li.h"
-#include "./yuv.h"
+#include "../dsp/yuv.h"
+#include "../dsp/dsp.h"
+#include "../enc/histogram.h"
#define MAX_DIFF_COST (1e30f)
// lookup table for small values of log2(int)
#define APPROX_LOG_MAX 4096
#define LOG_2_RECIPROCAL 1.44269504088896338700465094007086
-const float kLog2Table[LOG_LOOKUP_IDX_MAX] = {
+#define LOG_LOOKUP_IDX_MAX 256
+static const float kLog2Table[LOG_LOOKUP_IDX_MAX] = {
0.0000000000000000f, 0.0000000000000000f,
1.0000000000000000f, 1.5849625007211560f,
2.0000000000000000f, 2.3219280948873621f,
@@ -161,200 +160,16 @@ const float kLog2Table[LOG_LOOKUP_IDX_MAX] = {
7.9886846867721654f, 7.9943534368588577f
};
-const float kSLog2Table[LOG_LOOKUP_IDX_MAX] = {
- 0.00000000f, 0.00000000f, 2.00000000f, 4.75488750f,
- 8.00000000f, 11.60964047f, 15.50977500f, 19.65148445f,
- 24.00000000f, 28.52932501f, 33.21928095f, 38.05374781f,
- 43.01955001f, 48.10571634f, 53.30296891f, 58.60335893f,
- 64.00000000f, 69.48686830f, 75.05865003f, 80.71062276f,
- 86.43856190f, 92.23866588f, 98.10749561f, 104.04192499f,
- 110.03910002f, 116.09640474f, 122.21143267f, 128.38196256f,
- 134.60593782f, 140.88144886f, 147.20671787f, 153.58008562f,
- 160.00000000f, 166.46500594f, 172.97373660f, 179.52490559f,
- 186.11730005f, 192.74977453f, 199.42124551f, 206.13068654f,
- 212.87712380f, 219.65963219f, 226.47733176f, 233.32938445f,
- 240.21499122f, 247.13338933f, 254.08384998f, 261.06567603f,
- 268.07820003f, 275.12078236f, 282.19280949f, 289.29369244f,
- 296.42286534f, 303.57978409f, 310.76392512f, 317.97478424f,
- 325.21187564f, 332.47473081f, 339.76289772f, 347.07593991f,
- 354.41343574f, 361.77497759f, 369.16017124f, 376.56863518f,
- 384.00000000f, 391.45390785f, 398.93001188f, 406.42797576f,
- 413.94747321f, 421.48818752f, 429.04981119f, 436.63204548f,
- 444.23460010f, 451.85719280f, 459.49954906f, 467.16140179f,
- 474.84249102f, 482.54256363f, 490.26137307f, 497.99867911f,
- 505.75424759f, 513.52785023f, 521.31926438f, 529.12827280f,
- 536.95466351f, 544.79822957f, 552.65876890f, 560.53608414f,
- 568.42998244f, 576.34027536f, 584.26677867f, 592.20931226f,
- 600.16769996f, 608.14176943f, 616.13135206f, 624.13628279f,
- 632.15640007f, 640.19154569f, 648.24156472f, 656.30630539f,
- 664.38561898f, 672.47935976f, 680.58738488f, 688.70955430f,
- 696.84573069f, 704.99577935f, 713.15956818f, 721.33696754f,
- 729.52785023f, 737.73209140f, 745.94956849f, 754.18016116f,
- 762.42375127f, 770.68022275f, 778.94946161f, 787.23135586f,
- 795.52579543f, 803.83267219f, 812.15187982f, 820.48331383f,
- 828.82687147f, 837.18245171f, 845.54995518f, 853.92928416f,
- 862.32034249f, 870.72303558f, 879.13727036f, 887.56295522f,
- 896.00000000f, 904.44831595f, 912.90781569f, 921.37841320f,
- 929.86002376f, 938.35256392f, 946.85595152f, 955.37010560f,
- 963.89494641f, 972.43039537f, 980.97637504f, 989.53280911f,
- 998.09962237f, 1006.67674069f, 1015.26409097f, 1023.86160116f,
- 1032.46920021f, 1041.08681805f, 1049.71438560f, 1058.35183469f,
- 1066.99909811f, 1075.65610955f, 1084.32280357f, 1092.99911564f,
- 1101.68498204f, 1110.38033993f, 1119.08512727f, 1127.79928282f,
- 1136.52274614f, 1145.25545758f, 1153.99735821f, 1162.74838989f,
- 1171.50849518f, 1180.27761738f, 1189.05570047f, 1197.84268914f,
- 1206.63852876f, 1215.44316535f, 1224.25654560f, 1233.07861684f,
- 1241.90932703f, 1250.74862473f, 1259.59645914f, 1268.45278005f,
- 1277.31753781f, 1286.19068338f, 1295.07216828f, 1303.96194457f,
- 1312.85996488f, 1321.76618236f, 1330.68055071f, 1339.60302413f,
- 1348.53355734f, 1357.47210556f, 1366.41862452f, 1375.37307041f,
- 1384.33539991f, 1393.30557020f, 1402.28353887f, 1411.26926400f,
- 1420.26270412f, 1429.26381818f, 1438.27256558f, 1447.28890615f,
- 1456.31280014f, 1465.34420819f, 1474.38309138f, 1483.42941118f,
- 1492.48312945f, 1501.54420843f, 1510.61261078f, 1519.68829949f,
- 1528.77123795f, 1537.86138993f, 1546.95871952f, 1556.06319119f,
- 1565.17476976f, 1574.29342040f, 1583.41910860f, 1592.55180020f,
- 1601.69146137f, 1610.83805860f, 1619.99155871f, 1629.15192882f,
- 1638.31913637f, 1647.49314911f, 1656.67393509f, 1665.86146266f,
- 1675.05570047f, 1684.25661744f, 1693.46418280f, 1702.67836605f,
- 1711.89913698f, 1721.12646563f, 1730.36032233f, 1739.60067768f,
- 1748.84750254f, 1758.10076802f, 1767.36044551f, 1776.62650662f,
- 1785.89892323f, 1795.17766747f, 1804.46271172f, 1813.75402857f,
- 1823.05159087f, 1832.35537170f, 1841.66534438f, 1850.98148244f,
- 1860.30375965f, 1869.63214999f, 1878.96662767f, 1888.30716711f,
- 1897.65374295f, 1907.00633003f, 1916.36490342f, 1925.72943838f,
- 1935.09991037f, 1944.47629506f, 1953.85856831f, 1963.24670620f,
- 1972.64068498f, 1982.04048108f, 1991.44607117f, 2000.85743204f,
- 2010.27454072f, 2019.69737440f, 2029.12591044f, 2038.56012640f
-};
-
-const VP8LPrefixCode kPrefixEncodeCode[PREFIX_LOOKUP_IDX_MAX] = {
- { 0, 0}, { 0, 0}, { 1, 0}, { 2, 0}, { 3, 0}, { 4, 1}, { 4, 1}, { 5, 1},
- { 5, 1}, { 6, 2}, { 6, 2}, { 6, 2}, { 6, 2}, { 7, 2}, { 7, 2}, { 7, 2},
- { 7, 2}, { 8, 3}, { 8, 3}, { 8, 3}, { 8, 3}, { 8, 3}, { 8, 3}, { 8, 3},
- { 8, 3}, { 9, 3}, { 9, 3}, { 9, 3}, { 9, 3}, { 9, 3}, { 9, 3}, { 9, 3},
- { 9, 3}, {10, 4}, {10, 4}, {10, 4}, {10, 4}, {10, 4}, {10, 4}, {10, 4},
- {10, 4}, {10, 4}, {10, 4}, {10, 4}, {10, 4}, {10, 4}, {10, 4}, {10, 4},
- {10, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4},
- {11, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4},
- {11, 4}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5},
- {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5},
- {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5},
- {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5},
- {12, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5},
- {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5},
- {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5},
- {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5},
- {13, 5}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6},
- {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6},
- {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6},
- {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6},
- {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6},
- {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6},
- {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6},
- {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6},
- {14, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6},
- {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6},
- {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6},
- {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6},
- {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6},
- {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6},
- {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6},
- {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6},
- {15, 6}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
- {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
- {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
- {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
- {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
- {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
- {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
- {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
- {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
- {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
- {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
- {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
- {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
- {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
- {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
- {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7},
- {16, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
- {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
- {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
- {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
- {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
- {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
- {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
- {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
- {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
- {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
- {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
- {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
- {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
- {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
- {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
- {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7},
-};
-
-const uint8_t kPrefixEncodeExtraBitsValue[PREFIX_LOOKUP_IDX_MAX] = {
- 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 2, 3, 0, 1, 2, 3,
- 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
- 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
- 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
- 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
- 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
- 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
- 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
- 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126,
- 127,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
- 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
- 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
- 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
- 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
- 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126
-};
-
-float VP8LFastSLog2Slow(int v) {
- assert(v >= LOG_LOOKUP_IDX_MAX);
- if (v < APPROX_LOG_MAX) {
+float VP8LFastLog2(int v) {
+ if (v < LOG_LOOKUP_IDX_MAX) {
+ return kLog2Table[v];
+ } else if (v < APPROX_LOG_MAX) {
int log_cnt = 0;
- const float v_f = (float)v;
while (v >= LOG_LOOKUP_IDX_MAX) {
++log_cnt;
v = v >> 1;
}
- return v_f * (kLog2Table[v] + log_cnt);
- } else {
- return (float)(LOG_2_RECIPROCAL * v * log((double)v));
- }
-}
-
-float VP8LFastLog2Slow(int v) {
- assert(v >= LOG_LOOKUP_IDX_MAX);
- if (v < APPROX_LOG_MAX) {
- int log_cnt = 0;
- while (v >= LOG_LOOKUP_IDX_MAX) {
- ++log_cnt;
- v = v >> 1;
- }
- return kLog2Table[v] + log_cnt;
+ return kLog2Table[v] + (float)log_cnt;
} else {
return (float)(LOG_2_RECIPROCAL * log((double)v));
}
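
For example, VP8LFastLog2(1000) shifts 1000 down twice to 250 and returns
kLog2Table[250] + 2 ~= 7.966 + 2 = 9.966, which happens to be exact since
log2(4 * 250) = log2(250) + 2; the approximation error comes only from the
low bits dropped by the shifts.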
@@ -424,9 +239,9 @@ static WEBP_INLINE uint32_t ClampedAddSubtractHalf(uint32_t c0, uint32_t c1,
}
static WEBP_INLINE int Sub3(int a, int b, int c) {
- const int pb = b - c;
- const int pa = a - c;
- return abs(pb) - abs(pa);
+ const int pa = b - c;
+ const int pb = a - c;
+ return abs(pa) - abs(pb);
}
static WEBP_INLINE uint32_t Select(uint32_t a, uint32_t b, uint32_t c) {
@@ -435,6 +250,7 @@ static WEBP_INLINE uint32_t Select(uint32_t a, uint32_t b, uint32_t c) {
Sub3((a >> 16) & 0xff, (b >> 16) & 0xff, (c >> 16) & 0xff) +
Sub3((a >> 8) & 0xff, (b >> 8) & 0xff, (c >> 8) & 0xff) +
Sub3((a ) & 0xff, (b ) & 0xff, (c ) & 0xff);
+
return (pa_minus_pb <= 0) ? a : b;
}
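
Summed over all four channels, the decision above expands to this scalar
sketch of Select(): keep candidate a (the pixel above, in Predictor11) when
b (the pixel to the left) is the one closer to the corner pixel c:

    #include <stdint.h>
    #include <stdlib.h>

    static uint32_t Select_C(uint32_t a, uint32_t b, uint32_t c) {
      int sum_bc = 0, sum_ac = 0, shift;
      for (shift = 0; shift < 32; shift += 8) {
        sum_bc += abs((int)((b >> shift) & 0xff) - (int)((c >> shift) & 0xff));
        sum_ac += abs((int)((a >> shift) & 0xff) - (int)((c >> shift) & 0xff));
      }
      return (sum_bc <= sum_ac) ? a : b;  /* same test as the Sub3() sum */
    }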
@@ -489,19 +305,18 @@ static uint32_t Predictor10(uint32_t left, const uint32_t* const top) {
return pred;
}
static uint32_t Predictor11(uint32_t left, const uint32_t* const top) {
- const uint32_t pred = VP8LSelect(top[0], left, top[-1]);
+ const uint32_t pred = Select(top[0], left, top[-1]);
return pred;
}
static uint32_t Predictor12(uint32_t left, const uint32_t* const top) {
- const uint32_t pred = VP8LClampedAddSubtractFull(left, top[0], top[-1]);
+ const uint32_t pred = ClampedAddSubtractFull(left, top[0], top[-1]);
return pred;
}
static uint32_t Predictor13(uint32_t left, const uint32_t* const top) {
- const uint32_t pred = VP8LClampedAddSubtractHalf(left, top[0], top[-1]);
+ const uint32_t pred = ClampedAddSubtractHalf(left, top[0], top[-1]);
return pred;
}
-// TODO(vikasa): Export the predictor array, to allow SSE2 variants.
typedef uint32_t (*PredictorFunc)(uint32_t left, const uint32_t* const top);
static const PredictorFunc kPredictors[16] = {
Predictor0, Predictor1, Predictor2, Predictor3,
@@ -525,36 +340,35 @@ static float PredictionCostSpatial(const int* counts,
return (float)(-0.1 * bits);
}
-// Compute the combined Shannon entropy for distributions {X} and {X+Y}
-static float CombinedShannonEntropy(const int* const X,
- const int* const Y, int n) {
+// Compute the Shannon entropy: -Sum(p*log2(p))
+static float ShannonEntropy(const int* const array, int n) {
int i;
- double retval = 0.;
- int sumX = 0, sumXY = 0;
+ float retval = 0.f;
+ int sum = 0;
for (i = 0; i < n; ++i) {
- const int x = X[i];
- const int xy = X[i] + Y[i];
- if (x != 0) {
- sumX += x;
- retval -= VP8LFastSLog2(x);
- }
- if (xy != 0) {
- sumXY += xy;
- retval -= VP8LFastSLog2(xy);
+ if (array[i] != 0) {
+ sum += array[i];
+ retval -= VP8LFastSLog2(array[i]);
}
}
- retval += VP8LFastSLog2(sumX) + VP8LFastSLog2(sumXY);
- return (float)retval;
+ retval += VP8LFastSLog2(sum);
+ return retval;
}
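
Note that with VP8LFastSLog2(v) = v * log2(v), the value returned here is
sum * log2(sum) - Sum(array[i] * log2(array[i])), i.e. the total bit cost of
coding the whole distribution with an ideal entropy coder rather than the
per-symbol entropy (the two differ by the factor sum).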
static float PredictionCostSpatialHistogram(int accumulated[4][256],
int tile[4][256]) {
int i;
+ int k;
+ int combo[256];
double retval = 0;
for (i = 0; i < 4; ++i) {
- const double kExpValue = 0.94;
- retval += PredictionCostSpatial(tile[i], 1, kExpValue);
- retval += CombinedShannonEntropy(tile[i], accumulated[i], 256);
+ const double exp_val = 0.94;
+ retval += PredictionCostSpatial(&tile[i][0], 1, exp_val);
+ retval += ShannonEntropy(&tile[i][0], 256);
+ for (k = 0; k < 256; ++k) {
+ combo[k] = accumulated[i][k] + tile[i][k];
+ }
+ retval += ShannonEntropy(&combo[0], 256);
}
return (float)retval;
}
@@ -757,9 +571,9 @@ static void PredictorInverseTransform(const VP8LTransform* const transform,
}
}
-static void SubtractGreenFromBlueAndRed(uint32_t* argb_data, int num_pixs) {
- int i = 0;
- for (; i < num_pixs; ++i) {
+void VP8LSubtractGreenFromBlueAndRed(uint32_t* argb_data, int num_pixs) {
+ int i;
+ for (i = 0; i < num_pixs; ++i) {
const uint32_t argb = argb_data[i];
const uint32_t green = (argb >> 8) & 0xff;
const uint32_t new_r = (((argb >> 16) & 0xff) - green) & 0xff;
@@ -770,9 +584,13 @@ static void SubtractGreenFromBlueAndRed(uint32_t* argb_data, int num_pixs) {
// Add green to blue and red channels (i.e. perform the inverse transform of
// 'subtract green').
-static void AddGreenToBlueAndRed(uint32_t* data, const uint32_t* data_end) {
+static void AddGreenToBlueAndRed(const VP8LTransform* const transform,
+ int y_start, int y_end, uint32_t* data) {
+ const int width = transform->xsize_;
+ const uint32_t* const data_end = data + (y_end - y_start) * width;
while (data < data_end) {
const uint32_t argb = *data;
+ // "* 0001001u" is equivalent to "(green << 16) + green)"
const uint32_t green = ((argb >> 8) & 0xff);
uint32_t red_blue = (argb & 0x00ff00ffu);
red_blue += (green << 16) | green;
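
A worked example of the two-lane add: for argb = 0xff204060 (green = 0x40),
red_blue starts as 0x00200060; adding (0x40 << 16) | 0x40 = 0x00400040 yields
0x006000a0, and the following 0x00ff00ffu mask keeps each lane reduced modulo
256; a carry out of the blue byte lands in the cleared green byte, so the red
lane is never corrupted.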
@@ -837,25 +655,6 @@ static WEBP_INLINE uint32_t TransformColor(const Multipliers* const m,
return (argb & 0xff00ff00u) | (new_red << 16) | (new_blue);
}
-static WEBP_INLINE uint8_t TransformColorRed(uint8_t green_to_red,
- uint32_t argb) {
- const uint32_t green = argb >> 8;
- uint32_t new_red = argb >> 16;
- new_red -= ColorTransformDelta(green_to_red, green);
- return (new_red & 0xff);
-}
-
-static WEBP_INLINE uint8_t TransformColorBlue(uint8_t green_to_blue,
- uint8_t red_to_blue,
- uint32_t argb) {
- const uint32_t green = argb >> 8;
- const uint32_t red = argb >> 16;
- uint8_t new_blue = argb;
- new_blue -= ColorTransformDelta(green_to_blue, green);
- new_blue -= ColorTransformDelta(red_to_blue, red);
- return (new_blue & 0xff);
-}
-
static WEBP_INLINE int SkipRepeatedPixels(const uint32_t* const argb,
int ix, int xsize) {
const uint32_t v = argb[ix];
@@ -876,10 +675,14 @@ static WEBP_INLINE int SkipRepeatedPixels(const uint32_t* const argb,
static float PredictionCostCrossColor(const int accumulated[256],
const int counts[256]) {
// Favor low entropy, locally and globally.
- // Favor small absolute values for PredictionCostSpatial
- static const double kExpValue = 2.4;
- return CombinedShannonEntropy(counts, accumulated, 256) +
- PredictionCostSpatial(counts, 3, kExpValue);
+ int i;
+ int combo[256];
+ for (i = 0; i < 256; ++i) {
+ combo[i] = accumulated[i] + counts[i];
+ }
+ return ShannonEntropy(combo, 256) +
+ ShannonEntropy(counts, 256) +
+ PredictionCostSpatial(counts, 3, 2.4); // Favor small absolute values.
}
static Multipliers GetBestColorTransformForTile(
@@ -909,75 +712,85 @@ static Multipliers GetBestColorTransformForTile(
if (all_y_max > ysize) {
all_y_max = ysize;
}
-
for (green_to_red = -64; green_to_red <= 64; green_to_red += halfstep) {
int histo[256] = { 0 };
int all_y;
+ Multipliers tx;
+ MultipliersClear(&tx);
+ tx.green_to_red_ = green_to_red & 0xff;
for (all_y = tile_y_offset; all_y < all_y_max; ++all_y) {
+ uint32_t predict;
int ix = all_y * xsize + tile_x_offset;
int all_x;
for (all_x = tile_x_offset; all_x < all_x_max; ++all_x, ++ix) {
if (SkipRepeatedPixels(argb, ix, xsize)) {
continue;
}
- ++histo[TransformColorRed(green_to_red, argb[ix])]; // red.
+ predict = TransformColor(&tx, argb[ix], 0);
+ ++histo[(predict >> 16) & 0xff]; // red.
}
}
cur_diff = PredictionCostCrossColor(&accumulated_red_histo[0], &histo[0]);
- if ((uint8_t)green_to_red == prevX.green_to_red_) {
+ if (tx.green_to_red_ == prevX.green_to_red_) {
cur_diff -= 3; // favor keeping the areas locally similar
}
- if ((uint8_t)green_to_red == prevY.green_to_red_) {
+ if (tx.green_to_red_ == prevY.green_to_red_) {
cur_diff -= 3; // favor keeping the areas locally similar
}
- if (green_to_red == 0) {
+ if (tx.green_to_red_ == 0) {
cur_diff -= 3;
}
if (cur_diff < best_diff) {
best_diff = cur_diff;
- best_tx.green_to_red_ = green_to_red;
+ best_tx = tx;
}
}
best_diff = MAX_DIFF_COST;
+ green_to_red = best_tx.green_to_red_;
for (green_to_blue = -32; green_to_blue <= 32; green_to_blue += step) {
for (red_to_blue = -32; red_to_blue <= 32; red_to_blue += step) {
int all_y;
int histo[256] = { 0 };
+ Multipliers tx;
+ tx.green_to_red_ = green_to_red;
+ tx.green_to_blue_ = green_to_blue;
+ tx.red_to_blue_ = red_to_blue;
for (all_y = tile_y_offset; all_y < all_y_max; ++all_y) {
+ uint32_t predict;
int all_x;
int ix = all_y * xsize + tile_x_offset;
for (all_x = tile_x_offset; all_x < all_x_max; ++all_x, ++ix) {
if (SkipRepeatedPixels(argb, ix, xsize)) {
continue;
}
- ++histo[TransformColorBlue(green_to_blue, red_to_blue, argb[ix])];
+ predict = TransformColor(&tx, argb[ix], 0);
+ ++histo[predict & 0xff]; // blue.
}
}
cur_diff =
- PredictionCostCrossColor(&accumulated_blue_histo[0], &histo[0]);
- if ((uint8_t)green_to_blue == prevX.green_to_blue_) {
+ PredictionCostCrossColor(&accumulated_blue_histo[0], &histo[0]);
+ if (tx.green_to_blue_ == prevX.green_to_blue_) {
cur_diff -= 3; // favor keeping the areas locally similar
}
- if ((uint8_t)green_to_blue == prevY.green_to_blue_) {
+ if (tx.green_to_blue_ == prevY.green_to_blue_) {
cur_diff -= 3; // favor keeping the areas locally similar
}
- if ((uint8_t)red_to_blue == prevX.red_to_blue_) {
+ if (tx.red_to_blue_ == prevX.red_to_blue_) {
cur_diff -= 3; // favor keeping the areas locally similar
}
- if ((uint8_t)red_to_blue == prevY.red_to_blue_) {
+ if (tx.red_to_blue_ == prevY.red_to_blue_) {
cur_diff -= 3; // favor keeping the areas locally similar
}
- if (green_to_blue == 0) {
+ if (tx.green_to_blue_ == 0) {
cur_diff -= 3;
}
- if (red_to_blue == 0) {
+ if (tx.red_to_blue_ == 0) {
cur_diff -= 3;
}
if (cur_diff < best_diff) {
best_diff = cur_diff;
- best_tx.green_to_blue_ = green_to_blue;
- best_tx.red_to_blue_ = red_to_blue;
+ best_tx = tx;
}
}
}
@@ -1107,79 +920,54 @@ static void ColorSpaceInverseTransform(const VP8LTransform* const transform,
}
// Separate out pixels packed together using pixel-bundling.
-// We define two methods for ARGB data (uint32_t) and alpha-only data (uint8_t).
-#define COLOR_INDEX_INVERSE(FUNC_NAME, TYPE, GET_INDEX, GET_VALUE) \
-void FUNC_NAME(const VP8LTransform* const transform, \
- int y_start, int y_end, const TYPE* src, TYPE* dst) { \
- int y; \
- const int bits_per_pixel = 8 >> transform->bits_; \
- const int width = transform->xsize_; \
- const uint32_t* const color_map = transform->data_; \
- if (bits_per_pixel < 8) { \
- const int pixels_per_byte = 1 << transform->bits_; \
- const int count_mask = pixels_per_byte - 1; \
- const uint32_t bit_mask = (1 << bits_per_pixel) - 1; \
- for (y = y_start; y < y_end; ++y) { \
- uint32_t packed_pixels = 0; \
- int x; \
- for (x = 0; x < width; ++x) { \
- /* We need to load fresh 'packed_pixels' once every */ \
- /* 'pixels_per_byte' increments of x. Fortunately, pixels_per_byte */ \
- /* is a power of 2, so can just use a mask for that, instead of */ \
- /* decrementing a counter. */ \
- if ((x & count_mask) == 0) packed_pixels = GET_INDEX(*src++); \
- *dst++ = GET_VALUE(color_map[packed_pixels & bit_mask]); \
- packed_pixels >>= bits_per_pixel; \
- } \
- } \
- } else { \
- for (y = y_start; y < y_end; ++y) { \
- int x; \
- for (x = 0; x < width; ++x) { \
- *dst++ = GET_VALUE(color_map[GET_INDEX(*src++)]); \
- } \
- } \
- } \
-}
-
-static WEBP_INLINE uint32_t GetARGBIndex(uint32_t idx) {
- return (idx >> 8) & 0xff;
-}
-
-static WEBP_INLINE uint8_t GetAlphaIndex(uint8_t idx) {
- return idx;
-}
-
-static WEBP_INLINE uint32_t GetARGBValue(uint32_t val) {
- return val;
-}
-
-static WEBP_INLINE uint8_t GetAlphaValue(uint32_t val) {
- return (val >> 8) & 0xff;
+static void ColorIndexInverseTransform(
+ const VP8LTransform* const transform,
+ int y_start, int y_end, const uint32_t* src, uint32_t* dst) {
+ int y;
+ const int bits_per_pixel = 8 >> transform->bits_;
+ const int width = transform->xsize_;
+ const uint32_t* const color_map = transform->data_;
+ if (bits_per_pixel < 8) {
+ const int pixels_per_byte = 1 << transform->bits_;
+ const int count_mask = pixels_per_byte - 1;
+ const uint32_t bit_mask = (1 << bits_per_pixel) - 1;
+ for (y = y_start; y < y_end; ++y) {
+ uint32_t packed_pixels = 0;
+ int x;
+ for (x = 0; x < width; ++x) {
+ // We need to load fresh 'packed_pixels' once every 'pixels_per_byte'
+ // increments of x. Fortunately, pixels_per_byte is a power of 2, so
+        // we can just use a mask for that, instead of decrementing a counter.
+ if ((x & count_mask) == 0) packed_pixels = ((*src++) >> 8) & 0xff;
+ *dst++ = color_map[packed_pixels & bit_mask];
+ packed_pixels >>= bits_per_pixel;
+ }
+ }
+ } else {
+ for (y = y_start; y < y_end; ++y) {
+ int x;
+ for (x = 0; x < width; ++x) {
+ *dst++ = color_map[((*src++) >> 8) & 0xff];
+ }
+ }
+ }
}
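
For example, with transform->bits_ == 2 there are four 2-bit indices per
packed byte (bits_per_pixel == 2, count_mask == 3, bit_mask == 3): a packed
green byte of 0x1b == 00011011b expands, least-significant bits first, to
color_map[3], color_map[2], color_map[1] and color_map[0].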
-static COLOR_INDEX_INVERSE(ColorIndexInverseTransform, uint32_t, GetARGBIndex,
- GetARGBValue)
-COLOR_INDEX_INVERSE(VP8LColorIndexInverseTransformAlpha, uint8_t, GetAlphaIndex,
- GetAlphaValue)
-
-#undef COLOR_INDEX_INVERSE
-
void VP8LInverseTransform(const VP8LTransform* const transform,
int row_start, int row_end,
const uint32_t* const in, uint32_t* const out) {
- const int width = transform->xsize_;
assert(row_start < row_end);
assert(row_end <= transform->ysize_);
switch (transform->type_) {
case SUBTRACT_GREEN:
- VP8LAddGreenToBlueAndRed(out, out + (row_end - row_start) * width);
+ AddGreenToBlueAndRed(transform, row_start, row_end, out);
break;
case PREDICTOR_TRANSFORM:
PredictorInverseTransform(transform, row_start, row_end, out);
if (row_end != transform->ysize_) {
// The last predicted row in this iteration will be the top-pred row
// for the first row in next iteration.
+ const int width = transform->xsize_;
memcpy(out - width, out + (row_end - row_start - 1) * width,
width * sizeof(*out));
}
@@ -1194,7 +982,7 @@ void VP8LInverseTransform(const VP8LTransform* const transform,
// Also, note that this is the only transform that applies on
// the effective width of VP8LSubSampleSize(xsize_, bits_). All other
// transforms work on effective width of xsize_.
- const int out_stride = (row_end - row_start) * width;
+ const int out_stride = (row_end - row_start) * transform->xsize_;
const int in_stride = (row_end - row_start) *
VP8LSubSampleSize(transform->xsize_, transform->bits_);
uint32_t* const src = out + out_stride - in_stride;
@@ -1246,15 +1034,8 @@ static void ConvertBGRAToRGBA4444(const uint32_t* src,
const uint32_t* const src_end = src + num_pixels;
while (src < src_end) {
const uint32_t argb = *src++;
- const uint8_t rg = ((argb >> 16) & 0xf0) | ((argb >> 12) & 0xf);
- const uint8_t ba = ((argb >> 0) & 0xf0) | ((argb >> 28) & 0xf);
-#ifdef WEBP_SWAP_16BIT_CSP
- *dst++ = ba;
- *dst++ = rg;
-#else
- *dst++ = rg;
- *dst++ = ba;
-#endif
+ *dst++ = ((argb >> 16) & 0xf0) | ((argb >> 12) & 0xf);
+ *dst++ = ((argb >> 0) & 0xf0) | ((argb >> 28) & 0xf);
}
}
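
For instance, argb = 0x80abcdef (a = 0x80, r = 0xab, g = 0xcd, b = 0xef)
emits ((argb >> 16) & 0xf0) | ((argb >> 12) & 0xf) = 0xac and then
((argb >> 0) & 0xf0) | ((argb >> 28) & 0xf) = 0xe8: the top nibble of each
channel, packed in R-G-B-A order.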
@@ -1263,15 +1044,8 @@ static void ConvertBGRAToRGB565(const uint32_t* src,
const uint32_t* const src_end = src + num_pixels;
while (src < src_end) {
const uint32_t argb = *src++;
- const uint8_t rg = ((argb >> 16) & 0xf8) | ((argb >> 13) & 0x7);
- const uint8_t gb = ((argb >> 5) & 0xe0) | ((argb >> 3) & 0x1f);
-#ifdef WEBP_SWAP_16BIT_CSP
- *dst++ = gb;
- *dst++ = rg;
-#else
- *dst++ = rg;
- *dst++ = gb;
-#endif
+ *dst++ = ((argb >> 16) & 0xf8) | ((argb >> 13) & 0x7);
+ *dst++ = ((argb >> 5) & 0xe0) | ((argb >> 3) & 0x1f);
}
}
@@ -1292,34 +1066,20 @@ static void CopyOrSwap(const uint32_t* src, int num_pixels, uint8_t* dst,
const uint32_t* const src_end = src + num_pixels;
while (src < src_end) {
uint32_t argb = *src++;
-
-#if !defined(__BIG_ENDIAN__)
-#if !defined(WEBP_REFERENCE_IMPLEMENTATION)
-#if defined(__i386__) || defined(__x86_64__)
+#if !defined(__BIG_ENDIAN__) && (defined(__i386__) || defined(__x86_64__))
__asm__ volatile("bswap %0" : "=r"(argb) : "0"(argb));
*(uint32_t*)dst = argb;
-#elif defined(_MSC_VER)
+ dst += sizeof(argb);
+#elif !defined(__BIG_ENDIAN__) && defined(_MSC_VER)
argb = _byteswap_ulong(argb);
*(uint32_t*)dst = argb;
+ dst += sizeof(argb);
#else
- dst[0] = (argb >> 24) & 0xff;
- dst[1] = (argb >> 16) & 0xff;
- dst[2] = (argb >> 8) & 0xff;
- dst[3] = (argb >> 0) & 0xff;
-#endif
-#else // WEBP_REFERENCE_IMPLEMENTATION
- dst[0] = (argb >> 24) & 0xff;
- dst[1] = (argb >> 16) & 0xff;
- dst[2] = (argb >> 8) & 0xff;
- dst[3] = (argb >> 0) & 0xff;
-#endif
-#else // __BIG_ENDIAN__
- dst[0] = (argb >> 0) & 0xff;
- dst[1] = (argb >> 8) & 0xff;
- dst[2] = (argb >> 16) & 0xff;
- dst[3] = (argb >> 24) & 0xff;
+ *dst++ = (argb >> 24) & 0xff;
+ *dst++ = (argb >> 16) & 0xff;
+ *dst++ = (argb >> 8) & 0xff;
+ *dst++ = (argb >> 0) & 0xff;
#endif
- dst += sizeof(argb);
}
} else {
memcpy(dst, src, num_pixels * sizeof(*src));
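
All three swap paths produce the same bytes: on little-endian x86,
argb = 0x11223344 becomes 0x44332211 after bswap and is stored LSB-first as
11 22 33 44, exactly matching the portable per-byte stores of
(argb >> 24) down to (argb >> 0).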
@@ -1371,162 +1131,8 @@ void VP8LConvertFromBGRA(const uint32_t* const in_data, int num_pixels,
}
}
-// Bundles multiple (1, 2, 4 or 8) pixels into a single pixel.
-void VP8LBundleColorMap(const uint8_t* const row, int width,
- int xbits, uint32_t* const dst) {
- int x;
- if (xbits > 0) {
- const int bit_depth = 1 << (3 - xbits);
- const int mask = (1 << xbits) - 1;
- uint32_t code = 0xff000000;
- for (x = 0; x < width; ++x) {
- const int xsub = x & mask;
- if (xsub == 0) {
- code = 0xff000000;
- }
- code |= row[x] << (8 + bit_depth * xsub);
- dst[x >> xbits] = code;
- }
- } else {
- for (x = 0; x < width; ++x) dst[x] = 0xff000000 | (row[x] << 8);
- }
-}
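
A worked example of VP8LBundleColorMap()'s packing: with xbits == 2
(bit_depth == 2, mask == 3), four consecutive indices r0..r3, each below 4,
bundle into one destination pixel as
0xff000000 | (r0 << 8) | (r1 << 10) | (r2 << 12) | (r3 << 14), i.e. back into
the green channel that ColorIndexInverseTransform() unpacks.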
-
-//------------------------------------------------------------------------------
-
-// TODO(vikasa): Move the SSE2 functions to lossless_dsp.c (new file), once
-// color-space conversion methods (ConvertFromBGRA) are also updated for SSE2.
-#if defined(WEBP_USE_SSE2)
-static WEBP_INLINE uint32_t ClampedAddSubtractFullSSE2(uint32_t c0, uint32_t c1,
- uint32_t c2) {
- const __m128i zero = _mm_setzero_si128();
- const __m128i C0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(c0), zero);
- const __m128i C1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(c1), zero);
- const __m128i C2 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(c2), zero);
- const __m128i V1 = _mm_add_epi16(C0, C1);
- const __m128i V2 = _mm_sub_epi16(V1, C2);
- const __m128i b = _mm_packus_epi16(V2, V2);
- const uint32_t output = _mm_cvtsi128_si32(b);
- return output;
-}
-
-static WEBP_INLINE uint32_t ClampedAddSubtractHalfSSE2(uint32_t c0, uint32_t c1,
- uint32_t c2) {
- const uint32_t ave = Average2(c0, c1);
- const __m128i zero = _mm_setzero_si128();
- const __m128i A0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(ave), zero);
- const __m128i B0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(c2), zero);
- const __m128i A1 = _mm_sub_epi16(A0, B0);
- const __m128i BgtA = _mm_cmpgt_epi16(B0, A0);
- const __m128i A2 = _mm_sub_epi16(A1, BgtA);
- const __m128i A3 = _mm_srai_epi16(A2, 1);
- const __m128i A4 = _mm_add_epi16(A0, A3);
- const __m128i A5 = _mm_packus_epi16(A4, A4);
- const uint32_t output = _mm_cvtsi128_si32(A5);
- return output;
-}
-
-static WEBP_INLINE uint32_t SelectSSE2(uint32_t a, uint32_t b, uint32_t c) {
- int pa_minus_pb;
- const __m128i zero = _mm_setzero_si128();
- const __m128i A0 = _mm_cvtsi32_si128(a);
- const __m128i B0 = _mm_cvtsi32_si128(b);
- const __m128i C0 = _mm_cvtsi32_si128(c);
- const __m128i AC0 = _mm_subs_epu8(A0, C0);
- const __m128i CA0 = _mm_subs_epu8(C0, A0);
- const __m128i BC0 = _mm_subs_epu8(B0, C0);
- const __m128i CB0 = _mm_subs_epu8(C0, B0);
- const __m128i AC = _mm_or_si128(AC0, CA0);
- const __m128i BC = _mm_or_si128(BC0, CB0);
- const __m128i pa = _mm_unpacklo_epi8(AC, zero); // |a - c|
- const __m128i pb = _mm_unpacklo_epi8(BC, zero); // |b - c|
- const __m128i diff = _mm_sub_epi16(pb, pa);
- {
- int16_t out[8];
- _mm_storeu_si128((__m128i*)out, diff);
- pa_minus_pb = out[0] + out[1] + out[2] + out[3];
- }
- return (pa_minus_pb <= 0) ? a : b;
-}
-
-static void SubtractGreenFromBlueAndRedSSE2(uint32_t* argb_data, int num_pixs) {
- int i = 0;
- const __m128i mask = _mm_set1_epi32(0x0000ff00);
- for (; i + 4 < num_pixs; i += 4) {
- const __m128i in = _mm_loadu_si128((__m128i*)&argb_data[i]);
- const __m128i in_00g0 = _mm_and_si128(in, mask); // 00g0|00g0|...
- const __m128i in_0g00 = _mm_slli_epi32(in_00g0, 8); // 0g00|0g00|...
- const __m128i in_000g = _mm_srli_epi32(in_00g0, 8); // 000g|000g|...
- const __m128i in_0g0g = _mm_or_si128(in_0g00, in_000g);
- const __m128i out = _mm_sub_epi8(in, in_0g0g);
- _mm_storeu_si128((__m128i*)&argb_data[i], out);
- }
- // fallthrough and finish off with plain-C
- for (; i < num_pixs; ++i) {
- const uint32_t argb = argb_data[i];
- const uint32_t green = (argb >> 8) & 0xff;
- const uint32_t new_r = (((argb >> 16) & 0xff) - green) & 0xff;
- const uint32_t new_b = ((argb & 0xff) - green) & 0xff;
- argb_data[i] = (argb & 0xff00ff00) | (new_r << 16) | new_b;
- }
-}
-
-static void AddGreenToBlueAndRedSSE2(uint32_t* data, const uint32_t* data_end) {
- const __m128i mask = _mm_set1_epi32(0x0000ff00);
- for (; data + 4 < data_end; data += 4) {
- const __m128i in = _mm_loadu_si128((__m128i*)data);
- const __m128i in_00g0 = _mm_and_si128(in, mask); // 00g0|00g0|...
- const __m128i in_0g00 = _mm_slli_epi32(in_00g0, 8); // 0g00|0g00|...
- const __m128i in_000g = _mm_srli_epi32(in_00g0, 8); // 000g|000g|...
- const __m128i in_0g0g = _mm_or_si128(in_0g00, in_000g);
- const __m128i out = _mm_add_epi8(in, in_0g0g);
- _mm_storeu_si128((__m128i*)data, out);
- }
- // fallthrough and finish off with plain-C
- while (data < data_end) {
- const uint32_t argb = *data;
- const uint32_t green = ((argb >> 8) & 0xff);
- uint32_t red_blue = (argb & 0x00ff00ffu);
- red_blue += (green << 16) | green;
- red_blue &= 0x00ff00ffu;
- *data++ = (argb & 0xff00ff00u) | red_blue;
- }
-}
-
-extern void VP8LDspInitSSE2(void);
-
-void VP8LDspInitSSE2(void) {
- VP8LClampedAddSubtractFull = ClampedAddSubtractFullSSE2;
- VP8LClampedAddSubtractHalf = ClampedAddSubtractHalfSSE2;
- VP8LSelect = SelectSSE2;
- VP8LSubtractGreenFromBlueAndRed = SubtractGreenFromBlueAndRedSSE2;
- VP8LAddGreenToBlueAndRed = AddGreenToBlueAndRedSSE2;
-}
-#endif
//------------------------------------------------------------------------------
-VP8LPredClampedAddSubFunc VP8LClampedAddSubtractFull;
-VP8LPredClampedAddSubFunc VP8LClampedAddSubtractHalf;
-VP8LPredSelectFunc VP8LSelect;
-VP8LSubtractGreenFromBlueAndRedFunc VP8LSubtractGreenFromBlueAndRed;
-VP8LAddGreenToBlueAndRedFunc VP8LAddGreenToBlueAndRed;
-
-void VP8LDspInit(void) {
- VP8LClampedAddSubtractFull = ClampedAddSubtractFull;
- VP8LClampedAddSubtractHalf = ClampedAddSubtractHalf;
- VP8LSelect = Select;
- VP8LSubtractGreenFromBlueAndRed = SubtractGreenFromBlueAndRed;
- VP8LAddGreenToBlueAndRed = AddGreenToBlueAndRed;
-
- // If defined, use CPUInfo() to overwrite some pointers with faster versions.
- if (VP8GetCPUInfo != NULL) {
-#if defined(WEBP_USE_SSE2)
- if (VP8GetCPUInfo(kSSE2)) {
- VP8LDspInitSSE2();
- }
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
#endif
- }
-}
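
The removed VP8LDspInit follows the library's usual dispatch idiom: install the plain-C implementations first, then let VP8GetCPUInfo() upgrade individual pointers. A minimal standalone sketch of the idiom (AddFunc, AddC, DspInit are hypothetical names, not libwebp's):

#include <stdio.h>

typedef int (*AddFunc)(int a, int b);

static int AddC(int a, int b) { return a + b; }

static AddFunc add_impl = NULL;

static void DspInit(void) {
  add_impl = AddC;  // safe plain-C default
  // if (VP8GetCPUInfo != NULL && VP8GetCPUInfo(kSSE2)) add_impl = AddSSE2;
}

int main(void) {
  DspInit();                       // once, before any use
  printf("%d\n", add_impl(2, 3));  // 5
  return 0;
}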
-
-//------------------------------------------------------------------------------
-
diff --git a/drivers/webp/dsp/lossless.h b/drivers/webp/dsp/lossless.h
index 0f1d44200b..992516fcdf 100644
--- a/drivers/webp/dsp/lossless.h
+++ b/drivers/webp/dsp/lossless.h
@@ -1,10 +1,8 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
-// Use of this source code is governed by a BSD-style license
-// that can be found in the COPYING file in the root of the source
-// tree. An additional intellectual property rights grant can be found
-// in the file PATENTS. All contributing project authors may
-// be found in the AUTHORS file in the root of the source tree.
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// -----------------------------------------------------------------------------
//
// Image transforms and color space conversion methods for lossless decoder.
@@ -18,31 +16,11 @@
#include "../webp/types.h"
#include "../webp/decode.h"
-#ifdef __cplusplus
+#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
//------------------------------------------------------------------------------
-//
-
-typedef uint32_t (*VP8LPredClampedAddSubFunc)(uint32_t c0, uint32_t c1,
- uint32_t c2);
-typedef uint32_t (*VP8LPredSelectFunc)(uint32_t c0, uint32_t c1, uint32_t c2);
-typedef void (*VP8LSubtractGreenFromBlueAndRedFunc)(uint32_t* argb_data,
- int num_pixs);
-typedef void (*VP8LAddGreenToBlueAndRedFunc)(uint32_t* data_start,
- const uint32_t* data_end);
-
-extern VP8LPredClampedAddSubFunc VP8LClampedAddSubtractFull;
-extern VP8LPredClampedAddSubFunc VP8LClampedAddSubtractHalf;
-extern VP8LPredSelectFunc VP8LSelect;
-extern VP8LSubtractGreenFromBlueAndRedFunc VP8LSubtractGreenFromBlueAndRed;
-extern VP8LAddGreenToBlueAndRedFunc VP8LAddGreenToBlueAndRed;
-
-// Must be called before calling any of the above methods.
-void VP8LDspInit(void);
-
-//------------------------------------------------------------------------------
// Image transforms.
struct VP8LTransform; // Defined in dec/vp8li.h.
@@ -55,12 +33,8 @@ void VP8LInverseTransform(const struct VP8LTransform* const transform,
int row_start, int row_end,
const uint32_t* const in, uint32_t* const out);
-// Similar to the static method ColorIndexInverseTransform() that is part of
-// lossless.c, but used only for alpha decoding. It takes uint8_t (rather than
-// uint32_t) arguments for 'src' and 'dst'.
-void VP8LColorIndexInverseTransformAlpha(
- const struct VP8LTransform* const transform, int y_start, int y_end,
- const uint8_t* src, uint8_t* dst);
+// Subtracts green from blue and red channels.
+void VP8LSubtractGreenFromBlueAndRed(uint32_t* argb_data, int num_pixs);
void VP8LResidualImage(int width, int height, int bits,
uint32_t* const argb, uint32_t* const argb_scratch,
@@ -85,119 +59,10 @@ static WEBP_INLINE uint32_t VP8LSubSampleSize(uint32_t size,
return (size + (1 << sampling_bits) - 1) >> sampling_bits;
}
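
VP8LSubSampleSize above is simply ceil(size / 2^sampling_bits); a standalone spot-check (not part of this patch):

#include <assert.h>

int main(void) {
  const unsigned size = 255, sampling_bits = 4;
  // (255 + 15) >> 4 == 16 == ceil(255 / 16)
  assert(((size + (1u << sampling_bits) - 1) >> sampling_bits) == 16);
  return 0;
}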
-// Faster logarithm for integers. Small values use a look-up table.
-#define LOG_LOOKUP_IDX_MAX 256
-extern const float kLog2Table[LOG_LOOKUP_IDX_MAX];
-extern const float kSLog2Table[LOG_LOOKUP_IDX_MAX];
-float VP8LFastLog2Slow(int v);
-float VP8LFastSLog2Slow(int v);
-static WEBP_INLINE float VP8LFastLog2(int v) {
- return (v < LOG_LOOKUP_IDX_MAX) ? kLog2Table[v] : VP8LFastLog2Slow(v);
-}
+// Faster logarithm for integers, with the property of log2(0) == 0.
+float VP8LFastLog2(int v);
// Fast calculation of v * log2(v) for integer input.
-static WEBP_INLINE float VP8LFastSLog2(int v) {
- return (v < LOG_LOOKUP_IDX_MAX) ? kSLog2Table[v] : VP8LFastSLog2Slow(v);
-}
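
The v * log2(v) form is what entropy estimation needs: the Shannon cost in bits of a histogram with total n and counts c_i is n*log2(n) - sum_i c_i*log2(c_i), with 0*log2(0) taken as 0 (the property called out in the new declaration above). A standalone illustration using plain libm in place of the lookup tables (SLog2 is a hypothetical stand-in):

#include <math.h>
#include <stdio.h>

static float SLog2(int v) { return (v == 0) ? 0.f : v * log2f((float)v); }

int main(void) {
  const int counts[4] = { 8, 4, 2, 2 };  // histogram, total 16
  float bits = SLog2(16);
  int i;
  for (i = 0; i < 4; ++i) bits -= SLog2(counts[i]);
  printf("%.1f bits\n", bits);  // 28.0: 16 symbols at 1.75 bits each
  return 0;
}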
-
-// -----------------------------------------------------------------------------
-// PrefixEncode()
-
-// use GNU builtins where available.
-#if defined(__GNUC__) && \
- ((__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || __GNUC__ >= 4)
-static WEBP_INLINE int BitsLog2Floor(uint32_t n) {
- return 31 ^ __builtin_clz(n);
-}
-#elif defined(_MSC_VER) && _MSC_VER > 1310 && \
- (defined(_M_X64) || defined(_M_IX86))
-#include <intrin.h>
-#pragma intrinsic(_BitScanReverse)
-
-static WEBP_INLINE int BitsLog2Floor(uint32_t n) {
- unsigned long first_set_bit;
- _BitScanReverse(&first_set_bit, n);
- return first_set_bit;
-}
-#else
-// Returns (int)floor(log2(n)). n must be > 0.
-static WEBP_INLINE int BitsLog2Floor(uint32_t n) {
- int log = 0;
- uint32_t value = n;
- int i;
-
- for (i = 4; i >= 0; --i) {
- const int shift = (1 << i);
- const uint32_t x = value >> shift;
- if (x != 0) {
- value = x;
- log += shift;
- }
- }
- return log;
-}
-#endif
-
-static WEBP_INLINE int VP8LBitsLog2Ceiling(uint32_t n) {
- const int log_floor = BitsLog2Floor(n);
- if (n == (n & ~(n - 1))) // zero or a power of two.
- return log_floor;
- else
- return log_floor + 1;
-}
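
A standalone sanity check of the removed helpers (not part of this patch): BitsLog2Floor(36) is 5 since 32 <= 36 < 64, the GCC path computes the same value as 31 ^ __builtin_clz(36) == 31 ^ 26 == 5, and the ceiling variant adds 1 exactly when n is neither zero nor a power of two:

#include <assert.h>
#include <stdint.h>

// floor(log2(n)) by successive halving, as in the portable fallback above.
static int Log2Floor(uint32_t n) {
  int log = 0, i;
  for (i = 4; i >= 0; --i) {
    const int shift = 1 << i;
    if ((n >> shift) != 0) { n >>= shift; log += shift; }
  }
  return log;
}

int main(void) {
  assert(Log2Floor(36) == 5);      // 32 <= 36 < 64
  assert(Log2Floor(32) == 5);      // exact power of two: ceiling stays 5
  assert((36 & (36 - 1)) != 0);    // 36 is not a power of two,
  assert(Log2Floor(36) + 1 == 6);  // so its log2 ceiling is 6
  return 0;
}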
-
-// Splitting of distance and length codes into prefixes and
-// extra bits. The prefixes are encoded with an entropy code
-// while the extra bits are stored just as normal bits.
-static WEBP_INLINE void VP8LPrefixEncodeBitsNoLUT(int distance, int* const code,
- int* const extra_bits) {
- const int highest_bit = BitsLog2Floor(--distance);
- const int second_highest_bit = (distance >> (highest_bit - 1)) & 1;
- *extra_bits = highest_bit - 1;
- *code = 2 * highest_bit + second_highest_bit;
-}
-
-static WEBP_INLINE void VP8LPrefixEncodeNoLUT(int distance, int* const code,
- int* const extra_bits,
- int* const extra_bits_value) {
- const int highest_bit = BitsLog2Floor(--distance);
- const int second_highest_bit = (distance >> (highest_bit - 1)) & 1;
- *extra_bits = highest_bit - 1;
- *extra_bits_value = distance & ((1 << *extra_bits) - 1);
- *code = 2 * highest_bit + second_highest_bit;
-}
-
-#define PREFIX_LOOKUP_IDX_MAX 512
-typedef struct {
- int8_t code_;
- int8_t extra_bits_;
-} VP8LPrefixCode;
-
-// These tables are derived using VP8LPrefixEncodeNoLUT.
-extern const VP8LPrefixCode kPrefixEncodeCode[PREFIX_LOOKUP_IDX_MAX];
-extern const uint8_t kPrefixEncodeExtraBitsValue[PREFIX_LOOKUP_IDX_MAX];
-static WEBP_INLINE void VP8LPrefixEncodeBits(int distance, int* const code,
- int* const extra_bits) {
- if (distance < PREFIX_LOOKUP_IDX_MAX) {
- const VP8LPrefixCode prefix_code = kPrefixEncodeCode[distance];
- *code = prefix_code.code_;
- *extra_bits = prefix_code.extra_bits_;
- } else {
- VP8LPrefixEncodeBitsNoLUT(distance, code, extra_bits);
- }
-}
-
-static WEBP_INLINE void VP8LPrefixEncode(int distance, int* const code,
- int* const extra_bits,
- int* const extra_bits_value) {
- if (distance < PREFIX_LOOKUP_IDX_MAX) {
- const VP8LPrefixCode prefix_code = kPrefixEncodeCode[distance];
- *code = prefix_code.code_;
- *extra_bits = prefix_code.extra_bits_;
- *extra_bits_value = kPrefixEncodeExtraBitsValue[distance];
- } else {
- VP8LPrefixEncodeNoLUT(distance, code, extra_bits, extra_bits_value);
- }
-}
+static WEBP_INLINE float VP8LFastSLog2(int v) { return VP8LFastLog2(v) * v; }
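
A worked example for the removed VP8LPrefixEncodeNoLUT above (standalone sketch): distance 78 decrements to 77 == 0b1001101, so highest_bit == 6 and the second-highest bit is 0, giving code 12 with 5 extra bits whose value is 77 & 0x1f == 13:

#include <assert.h>

static int Floor2(unsigned n) {  // floor(log2(n)), n > 0
  int log = 0;
  while (n >>= 1) ++log;
  return log;
}

int main(void) {
  int distance = 78;
  const int highest_bit = Floor2(--distance);                       // 6
  const int second_highest = (distance >> (highest_bit - 1)) & 1;   // 0
  const int extra_bits = highest_bit - 1;                           // 5
  const int extra_bits_value = distance & ((1 << extra_bits) - 1);  // 13
  const int code = 2 * highest_bit + second_highest;                // 12
  assert(code == 12 && extra_bits == 5 && extra_bits_value == 13);
  return 0;
}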
// In-place difference of each component with mod 256.
static WEBP_INLINE uint32_t VP8LSubPixels(uint32_t a, uint32_t b) {
@@ -208,12 +73,9 @@ static WEBP_INLINE uint32_t VP8LSubPixels(uint32_t a, uint32_t b) {
return (alpha_and_green & 0xff00ff00u) | (red_and_blue & 0x00ff00ffu);
}
-void VP8LBundleColorMap(const uint8_t* const row, int width,
- int xbits, uint32_t* const dst);
-
//------------------------------------------------------------------------------
-#ifdef __cplusplus
+#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif
diff --git a/drivers/webp/dsp/upsampling.c b/drivers/webp/dsp/upsampling.c
index 978e3ce250..4855eb1432 100644
--- a/drivers/webp/dsp/upsampling.c
+++ b/drivers/webp/dsp/upsampling.c
@@ -1,10 +1,8 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
-// Use of this source code is governed by a BSD-style license
-// that can be found in the COPYING file in the root of the source
-// tree. An additional intellectual property rights grant can be found
-// in the file PATENTS. All contributing project authors may
-// be found in the AUTHORS file in the root of the source tree.
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// -----------------------------------------------------------------------------
//
// YUV to RGB upsampling functions.
@@ -14,7 +12,9 @@
#include "./dsp.h"
#include "./yuv.h"
-#include <assert.h>
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
//------------------------------------------------------------------------------
// Fancy upsampler
@@ -32,7 +32,7 @@ WebPUpsampleLinePairFunc WebPUpsamplers[MODE_LAST];
// ([3*a +   b + 9*c + 3*d      a + 3*b + 3*c + 9*d]   [8 8]) / 16
// We process u and v together stashed into 32bit (16bit each).
-#define LOAD_UV(u, v) ((u) | ((v) << 16))
+#define LOAD_UV(u,v) ((u) | ((v) << 16))
#define UPSAMPLE_FUNC(FUNC_NAME, FUNC, XSTEP) \
static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \
@@ -43,12 +43,11 @@ static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \
const int last_pixel_pair = (len - 1) >> 1; \
uint32_t tl_uv = LOAD_UV(top_u[0], top_v[0]); /* top-left sample */ \
uint32_t l_uv = LOAD_UV(cur_u[0], cur_v[0]); /* left-sample */ \
- assert(top_y != NULL); \
- { \
+ if (top_y) { \
const uint32_t uv0 = (3 * tl_uv + l_uv + 0x00020002u) >> 2; \
FUNC(top_y[0], uv0 & 0xff, (uv0 >> 16), top_dst); \
} \
- if (bottom_y != NULL) { \
+ if (bottom_y) { \
const uint32_t uv0 = (3 * l_uv + tl_uv + 0x00020002u) >> 2; \
FUNC(bottom_y[0], uv0 & 0xff, (uv0 >> 16), bottom_dst); \
} \
@@ -59,7 +58,7 @@ static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \
const uint32_t avg = tl_uv + t_uv + l_uv + uv + 0x00080008u; \
const uint32_t diag_12 = (avg + 2 * (t_uv + l_uv)) >> 3; \
const uint32_t diag_03 = (avg + 2 * (tl_uv + uv)) >> 3; \
- { \
+ if (top_y) { \
const uint32_t uv0 = (diag_12 + tl_uv) >> 1; \
const uint32_t uv1 = (diag_03 + t_uv) >> 1; \
FUNC(top_y[2 * x - 1], uv0 & 0xff, (uv0 >> 16), \
@@ -67,7 +66,7 @@ static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \
FUNC(top_y[2 * x - 0], uv1 & 0xff, (uv1 >> 16), \
top_dst + (2 * x - 0) * XSTEP); \
} \
- if (bottom_y != NULL) { \
+ if (bottom_y) { \
const uint32_t uv0 = (diag_03 + l_uv) >> 1; \
const uint32_t uv1 = (diag_12 + uv) >> 1; \
FUNC(bottom_y[2 * x - 1], uv0 & 0xff, (uv0 >> 16), \
@@ -79,12 +78,12 @@ static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \
l_uv = uv; \
} \
if (!(len & 1)) { \
- { \
+ if (top_y) { \
const uint32_t uv0 = (3 * tl_uv + l_uv + 0x00020002u) >> 2; \
FUNC(top_y[len - 1], uv0 & 0xff, (uv0 >> 16), \
top_dst + (len - 1) * XSTEP); \
} \
- if (bottom_y != NULL) { \
+ if (bottom_y) { \
const uint32_t uv0 = (3 * l_uv + tl_uv + 0x00020002u) >> 2; \
FUNC(bottom_y[len - 1], uv0 & 0xff, (uv0 >> 16), \
bottom_dst + (len - 1) * XSTEP); \
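
In scalar terms, the interpolation weights quoted in the first hunk above expand to four half-pel chroma values per 2x2 input quad, each with 9/3/3/1 weights and +8 rounding. A standalone sketch (FancyUpsample2x2 is a hypothetical name):

#include <stdio.h>

// For chroma samples laid out as [a b; c d], compute the four outputs;
// e.g. (9a + 3b + 3c + d + 8) / 16 is the value nearest to sample a.
static void FancyUpsample2x2(int a, int b, int c, int d, int out[4]) {
  out[0] = (9 * a + 3 * b + 3 * c +     d + 8) >> 4;  // nearest a
  out[1] = (3 * a + 9 * b +     c + 3 * d + 8) >> 4;  // nearest b
  out[2] = (3 * a +     b + 9 * c + 3 * d + 8) >> 4;  // nearest c
  out[3] = (    a + 3 * b + 3 * c + 9 * d + 8) >> 4;  // nearest d
}

int main(void) {
  int out[4];
  FancyUpsample2x2(100, 120, 140, 160, out);
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  // 115 125 135 145
  return 0;
}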
@@ -167,8 +166,7 @@ static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bot_y, \
uint8_t* top_dst, uint8_t* bot_dst, int len) { \
const int half_len = len >> 1; \
int x; \
- assert(top_dst != NULL); \
- { \
+ if (top_dst != NULL) { \
for (x = 0; x < half_len; ++x) { \
FUNC(top_y[2 * x + 0], top_u[x], top_v[x], top_dst + 8 * x + 0); \
FUNC(top_y[2 * x + 1], top_u[x], top_v[x], top_dst + 8 * x + 4); \
@@ -330,11 +328,6 @@ void WebPInitUpsamplers(void) {
WebPInitUpsamplersSSE2();
}
#endif
-#if defined(WEBP_USE_NEON)
- if (VP8GetCPUInfo(kNEON)) {
- WebPInitUpsamplersNEON();
- }
-#endif
}
#endif // FANCY_UPSAMPLING
}
@@ -355,12 +348,10 @@ void WebPInitPremultiply(void) {
WebPInitPremultiplySSE2();
}
#endif
-#if defined(WEBP_USE_NEON)
- if (VP8GetCPUInfo(kNEON)) {
- WebPInitPremultiplyNEON();
- }
-#endif
}
#endif // FANCY_UPSAMPLING
}
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
diff --git a/drivers/webp/dsp/upsampling_neon.c b/drivers/webp/dsp/upsampling_neon.c
deleted file mode 100644
index 791222f81e..0000000000
--- a/drivers/webp/dsp/upsampling_neon.c
+++ /dev/null
@@ -1,265 +0,0 @@
-// Copyright 2011 Google Inc. All Rights Reserved.
-//
-// Use of this source code is governed by a BSD-style license
-// that can be found in the COPYING file in the root of the source
-// tree. An additional intellectual property rights grant can be found
-// in the file PATENTS. All contributing project authors may
-// be found in the AUTHORS file in the root of the source tree.
-// -----------------------------------------------------------------------------
-//
-// NEON version of YUV to RGB upsampling functions.
-//
-// Author: mans@mansr.com (Mans Rullgard)
-// Based on SSE code by: somnath@google.com (Somnath Banerjee)
-
-#include "./dsp.h"
-
-#if defined(WEBP_USE_NEON)
-
-#include <assert.h>
-#include <arm_neon.h>
-#include <string.h>
-#include "./yuv.h"
-
-#ifdef FANCY_UPSAMPLING
-
-//-----------------------------------------------------------------------------
-// U/V upsampling
-
-// Loads 9 pixels each from rows r1 and r2 and generates 16 pixels.
-#define UPSAMPLE_16PIXELS(r1, r2, out) { \
- uint8x8_t a = vld1_u8(r1); \
- uint8x8_t b = vld1_u8(r1 + 1); \
- uint8x8_t c = vld1_u8(r2); \
- uint8x8_t d = vld1_u8(r2 + 1); \
- \
- uint16x8_t al = vshll_n_u8(a, 1); \
- uint16x8_t bl = vshll_n_u8(b, 1); \
- uint16x8_t cl = vshll_n_u8(c, 1); \
- uint16x8_t dl = vshll_n_u8(d, 1); \
- \
- uint8x8_t diag1, diag2; \
- uint16x8_t sl; \
- \
- /* a + b + c + d */ \
- sl = vaddl_u8(a, b); \
- sl = vaddw_u8(sl, c); \
- sl = vaddw_u8(sl, d); \
- \
- al = vaddq_u16(sl, al); /* 3a + b + c + d */ \
- bl = vaddq_u16(sl, bl); /* a + 3b + c + d */ \
- \
- al = vaddq_u16(al, dl); /* 3a + b + c + 3d */ \
- bl = vaddq_u16(bl, cl); /* a + 3b + 3c + d */ \
- \
- diag2 = vshrn_n_u16(al, 3); \
- diag1 = vshrn_n_u16(bl, 3); \
- \
- a = vrhadd_u8(a, diag1); \
- b = vrhadd_u8(b, diag2); \
- c = vrhadd_u8(c, diag2); \
- d = vrhadd_u8(d, diag1); \
- \
- { \
- const uint8x8x2_t a_b = {{ a, b }}; \
- const uint8x8x2_t c_d = {{ c, d }}; \
- vst2_u8(out, a_b); \
- vst2_u8(out + 32, c_d); \
- } \
-}
-
-// Turn the macro into a function for reducing code-size when non-critical
-static void Upsample16Pixels(const uint8_t *r1, const uint8_t *r2,
- uint8_t *out) {
- UPSAMPLE_16PIXELS(r1, r2, out);
-}
-
-#define UPSAMPLE_LAST_BLOCK(tb, bb, num_pixels, out) { \
- uint8_t r1[9], r2[9]; \
- memcpy(r1, (tb), (num_pixels)); \
- memcpy(r2, (bb), (num_pixels)); \
- /* replicate last byte */ \
- memset(r1 + (num_pixels), r1[(num_pixels) - 1], 9 - (num_pixels)); \
- memset(r2 + (num_pixels), r2[(num_pixels) - 1], 9 - (num_pixels)); \
- Upsample16Pixels(r1, r2, out); \
-}
-
-//-----------------------------------------------------------------------------
-// YUV->RGB conversion
-
-static const int16_t kCoeffs[4] = { kYScale, kVToR, kUToG, kVToG };
-
-#define v255 vmov_n_u8(255)
-
-#define STORE_Rgb(out, r, g, b) do { \
- const uint8x8x3_t r_g_b = {{ r, g, b }}; \
- vst3_u8(out, r_g_b); \
-} while (0)
-
-#define STORE_Bgr(out, r, g, b) do { \
- const uint8x8x3_t b_g_r = {{ b, g, r }}; \
- vst3_u8(out, b_g_r); \
-} while (0)
-
-#define STORE_Rgba(out, r, g, b) do { \
- const uint8x8x4_t r_g_b_v255 = {{ r, g, b, v255 }}; \
- vst4_u8(out, r_g_b_v255); \
-} while (0)
-
-#define STORE_Bgra(out, r, g, b) do { \
- const uint8x8x4_t b_g_r_v255 = {{ b, g, r, v255 }}; \
- vst4_u8(out, b_g_r_v255); \
-} while (0)
-
-#define CONVERT8(FMT, XSTEP, N, src_y, src_uv, out, cur_x) { \
- int i; \
- for (i = 0; i < N; i += 8) { \
- const int off = ((cur_x) + i) * XSTEP; \
- uint8x8_t y = vld1_u8((src_y) + (cur_x) + i); \
- uint8x8_t u = vld1_u8((src_uv) + i); \
- uint8x8_t v = vld1_u8((src_uv) + i + 16); \
- const int16x8_t yy = vreinterpretq_s16_u16(vsubl_u8(y, u16)); \
- const int16x8_t uu = vreinterpretq_s16_u16(vsubl_u8(u, u128)); \
- const int16x8_t vv = vreinterpretq_s16_u16(vsubl_u8(v, u128)); \
- int32x4_t yl = vmull_lane_s16(vget_low_s16(yy), cf16, 0); \
- int32x4_t yh = vmull_lane_s16(vget_high_s16(yy), cf16, 0); \
- const int32x4_t rl = vmlal_lane_s16(yl, vget_low_s16(vv), cf16, 1);\
- const int32x4_t rh = vmlal_lane_s16(yh, vget_high_s16(vv), cf16, 1);\
- int32x4_t gl = vmlsl_lane_s16(yl, vget_low_s16(uu), cf16, 2); \
- int32x4_t gh = vmlsl_lane_s16(yh, vget_high_s16(uu), cf16, 2); \
- const int32x4_t bl = vmovl_s16(vget_low_s16(uu)); \
- const int32x4_t bh = vmovl_s16(vget_high_s16(uu)); \
- gl = vmlsl_lane_s16(gl, vget_low_s16(vv), cf16, 3); \
- gh = vmlsl_lane_s16(gh, vget_high_s16(vv), cf16, 3); \
- yl = vmlaq_lane_s32(yl, bl, cf32, 0); \
- yh = vmlaq_lane_s32(yh, bh, cf32, 0); \
- /* vrshrn_n_s32() already incorporates the rounding constant */ \
- y = vqmovun_s16(vcombine_s16(vrshrn_n_s32(rl, YUV_FIX2), \
- vrshrn_n_s32(rh, YUV_FIX2))); \
- u = vqmovun_s16(vcombine_s16(vrshrn_n_s32(gl, YUV_FIX2), \
- vrshrn_n_s32(gh, YUV_FIX2))); \
- v = vqmovun_s16(vcombine_s16(vrshrn_n_s32(yl, YUV_FIX2), \
- vrshrn_n_s32(yh, YUV_FIX2))); \
- STORE_ ## FMT(out + off, y, u, v); \
- } \
-}
-
-#define CONVERT1(FUNC, XSTEP, N, src_y, src_uv, rgb, cur_x) { \
- int i; \
- for (i = 0; i < N; i++) { \
- const int off = ((cur_x) + i) * XSTEP; \
- const int y = src_y[(cur_x) + i]; \
- const int u = (src_uv)[i]; \
- const int v = (src_uv)[i + 16]; \
- FUNC(y, u, v, rgb + off); \
- } \
-}
-
-#define CONVERT2RGB_8(FMT, XSTEP, top_y, bottom_y, uv, \
- top_dst, bottom_dst, cur_x, len) { \
- CONVERT8(FMT, XSTEP, len, top_y, uv, top_dst, cur_x) \
- if (bottom_y != NULL) { \
- CONVERT8(FMT, XSTEP, len, bottom_y, (uv) + 32, bottom_dst, cur_x) \
- } \
-}
-
-#define CONVERT2RGB_1(FUNC, XSTEP, top_y, bottom_y, uv, \
- top_dst, bottom_dst, cur_x, len) { \
- CONVERT1(FUNC, XSTEP, len, top_y, uv, top_dst, cur_x); \
- if (bottom_y != NULL) { \
- CONVERT1(FUNC, XSTEP, len, bottom_y, (uv) + 32, bottom_dst, cur_x); \
- } \
-}
-
-#define NEON_UPSAMPLE_FUNC(FUNC_NAME, FMT, XSTEP) \
-static void FUNC_NAME(const uint8_t *top_y, const uint8_t *bottom_y, \
- const uint8_t *top_u, const uint8_t *top_v, \
- const uint8_t *cur_u, const uint8_t *cur_v, \
- uint8_t *top_dst, uint8_t *bottom_dst, int len) { \
- int block; \
- /* 16 byte aligned array to cache reconstructed u and v */ \
- uint8_t uv_buf[2 * 32 + 15]; \
- uint8_t *const r_uv = (uint8_t*)((uintptr_t)(uv_buf + 15) & ~15); \
- const int uv_len = (len + 1) >> 1; \
- /* 9 pixels must be read-able for each block */ \
- const int num_blocks = (uv_len - 1) >> 3; \
- const int leftover = uv_len - num_blocks * 8; \
- const int last_pos = 1 + 16 * num_blocks; \
- \
- const int u_diag = ((top_u[0] + cur_u[0]) >> 1) + 1; \
- const int v_diag = ((top_v[0] + cur_v[0]) >> 1) + 1; \
- \
- const int16x4_t cf16 = vld1_s16(kCoeffs); \
- const int32x2_t cf32 = vmov_n_s32(kUToB); \
- const uint8x8_t u16 = vmov_n_u8(16); \
- const uint8x8_t u128 = vmov_n_u8(128); \
- \
- /* Treat the first pixel in regular way */ \
- assert(top_y != NULL); \
- { \
- const int u0 = (top_u[0] + u_diag) >> 1; \
- const int v0 = (top_v[0] + v_diag) >> 1; \
- VP8YuvTo ## FMT(top_y[0], u0, v0, top_dst); \
- } \
- if (bottom_y != NULL) { \
- const int u0 = (cur_u[0] + u_diag) >> 1; \
- const int v0 = (cur_v[0] + v_diag) >> 1; \
- VP8YuvTo ## FMT(bottom_y[0], u0, v0, bottom_dst); \
- } \
- \
- for (block = 0; block < num_blocks; ++block) { \
- UPSAMPLE_16PIXELS(top_u, cur_u, r_uv); \
- UPSAMPLE_16PIXELS(top_v, cur_v, r_uv + 16); \
- CONVERT2RGB_8(FMT, XSTEP, top_y, bottom_y, r_uv, \
- top_dst, bottom_dst, 16 * block + 1, 16); \
- top_u += 8; \
- cur_u += 8; \
- top_v += 8; \
- cur_v += 8; \
- } \
- \
- UPSAMPLE_LAST_BLOCK(top_u, cur_u, leftover, r_uv); \
- UPSAMPLE_LAST_BLOCK(top_v, cur_v, leftover, r_uv + 16); \
- CONVERT2RGB_1(VP8YuvTo ## FMT, XSTEP, top_y, bottom_y, r_uv, \
- top_dst, bottom_dst, last_pos, len - last_pos); \
-}
-
-// NEON variants of the fancy upsampler.
-NEON_UPSAMPLE_FUNC(UpsampleRgbLinePairNEON, Rgb, 3)
-NEON_UPSAMPLE_FUNC(UpsampleBgrLinePairNEON, Bgr, 3)
-NEON_UPSAMPLE_FUNC(UpsampleRgbaLinePairNEON, Rgba, 4)
-NEON_UPSAMPLE_FUNC(UpsampleBgraLinePairNEON, Bgra, 4)
-
-#endif // FANCY_UPSAMPLING
-
-#endif // WEBP_USE_NEON
-
-//------------------------------------------------------------------------------
-
-#ifdef FANCY_UPSAMPLING
-
-extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];
-
-void WebPInitUpsamplersNEON(void) {
-#if defined(WEBP_USE_NEON)
- WebPUpsamplers[MODE_RGB] = UpsampleRgbLinePairNEON;
- WebPUpsamplers[MODE_RGBA] = UpsampleRgbaLinePairNEON;
- WebPUpsamplers[MODE_BGR] = UpsampleBgrLinePairNEON;
- WebPUpsamplers[MODE_BGRA] = UpsampleBgraLinePairNEON;
-#endif // WEBP_USE_NEON
-}
-
-void WebPInitPremultiplyNEON(void) {
-#if defined(WEBP_USE_NEON)
- WebPUpsamplers[MODE_rgbA] = UpsampleRgbaLinePairNEON;
- WebPUpsamplers[MODE_bgrA] = UpsampleBgraLinePairNEON;
-#endif // WEBP_USE_NEON
-}
-
-#else
-
-// this empty function is to avoid an empty .o
-void WebPInitPremultiplyNEON(void) {}
-
-#endif // FANCY_UPSAMPLING
-
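For orientation, a scalar model of the fixed-point math CONVERT8 performed per pixel (standalone sketch, not part of this patch; it uses the 14-bit BT.601 constants kYScale, kVToR, kUToG, kVToG, kUToB that the yuv.h hunk further below removes, and models vrshrn_n_s32(x, 14) as (x + (1 << 13)) >> 14):

#include <stdint.h>
#include <stdio.h>

static uint8_t Clip255(int v) { return (v < 0) ? 0 : (v > 255) ? 255 : v; }

static void YuvToRgbScalar(int y, int u, int v, uint8_t rgb[3]) {
  const int yy = 19077 * (y - 16);                          // kYScale
  const int r = yy + 26149 * (v - 128);                     // kVToR
  const int g = yy - 6419 * (u - 128) - 13320 * (v - 128);  // kUToG, kVToG
  const int b = yy + 33050 * (u - 128);                     // kUToB
  rgb[0] = Clip255((r + (1 << 13)) >> 14);
  rgb[1] = Clip255((g + (1 << 13)) >> 14);
  rgb[2] = Clip255((b + (1 << 13)) >> 14);
}

int main(void) {
  uint8_t rgb[3];
  YuvToRgbScalar(128, 128, 128, rgb);            // mid-gray input
  printf("%d %d %d\n", rgb[0], rgb[1], rgb[2]);  // 130 130 130
  return 0;
}
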
diff --git a/drivers/webp/dsp/upsampling_sse2.c b/drivers/webp/dsp/upsampling_sse2.c
index 0db0798c6d..8cb275a02b 100644
--- a/drivers/webp/dsp/upsampling_sse2.c
+++ b/drivers/webp/dsp/upsampling_sse2.c
@@ -1,10 +1,8 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
-// Use of this source code is governed by a BSD-style license
-// that can be found in the COPYING file in the root of the source
-// tree. An additional intellectual property rights grant can be found
-// in the file PATENTS. All contributing project authors may
-// be found in the AUTHORS file in the root of the source tree.
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// -----------------------------------------------------------------------------
//
// SSE2 version of YUV to RGB upsampling functions.
@@ -20,6 +18,10 @@
#include <string.h>
#include "./yuv.h"
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
#ifdef FANCY_UPSAMPLING
// We compute (9*a + 3*b + 3*c + d + 8) / 16 as follows
@@ -47,14 +49,14 @@
(out) = _mm_sub_epi8(tmp0, tmp4); /* (k + in + 1) / 2 - lsb_correction */ \
} while (0)
// pack and store two alternating pixel rows
#define PACK_AND_STORE(a, b, da, db, out) do { \
- const __m128i t_a = _mm_avg_epu8(a, da); /* (9a + 3b + 3c + d + 8) / 16 */ \
- const __m128i t_b = _mm_avg_epu8(b, db); /* (3a + 9b + c + 3d + 8) / 16 */ \
- const __m128i t_1 = _mm_unpacklo_epi8(t_a, t_b); \
- const __m128i t_2 = _mm_unpackhi_epi8(t_a, t_b); \
- _mm_store_si128(((__m128i*)(out)) + 0, t_1); \
- _mm_store_si128(((__m128i*)(out)) + 1, t_2); \
+ const __m128i ta = _mm_avg_epu8(a, da); /* (9a + 3b + 3c + d + 8) / 16 */ \
+ const __m128i tb = _mm_avg_epu8(b, db); /* (3a + 9b + c + 3d + 8) / 16 */ \
+ const __m128i t1 = _mm_unpacklo_epi8(ta, tb); \
+ const __m128i t2 = _mm_unpackhi_epi8(ta, tb); \
+ _mm_store_si128(((__m128i*)(out)) + 0, t1); \
+ _mm_store_si128(((__m128i*)(out)) + 1, t2); \
} while (0)
// Loads 17 pixels each from rows r1 and r2 and generates 32 pixels.
@@ -83,8 +85,8 @@
GET_M(ad, s, diag2); /* diag2 = (3a + b + c + 3d) / 8 */ \
\
/* pack the alternate pixels */ \
- PACK_AND_STORE(a, b, diag1, diag2, out + 0); /* store top */ \
- PACK_AND_STORE(c, d, diag2, diag1, out + 2 * 32); /* store bottom */ \
+ PACK_AND_STORE(a, b, diag1, diag2, &(out)[0 * 32]); \
+ PACK_AND_STORE(c, d, diag2, diag1, &(out)[2 * 32]); \
}
// Turn the macro into a function for reducing code-size when non-critical
@@ -104,68 +106,69 @@ static void Upsample32Pixels(const uint8_t r1[], const uint8_t r2[],
Upsample32Pixels(r1, r2, out); \
}
-#define CONVERT2RGB(FUNC, XSTEP, top_y, bottom_y, \
+#define CONVERT2RGB(FUNC, XSTEP, top_y, bottom_y, uv, \
top_dst, bottom_dst, cur_x, num_pixels) { \
int n; \
- for (n = 0; n < (num_pixels); ++n) { \
- FUNC(top_y[(cur_x) + n], r_u[n], r_v[n], \
- top_dst + ((cur_x) + n) * XSTEP); \
+ if (top_y) { \
+ for (n = 0; n < (num_pixels); ++n) { \
+ FUNC(top_y[(cur_x) + n], (uv)[n], (uv)[32 + n], \
+ top_dst + ((cur_x) + n) * XSTEP); \
+ } \
} \
- if (bottom_y != NULL) { \
+ if (bottom_y) { \
for (n = 0; n < (num_pixels); ++n) { \
- FUNC(bottom_y[(cur_x) + n], r_u[64 + n], r_v[64 + n], \
+ FUNC(bottom_y[(cur_x) + n], (uv)[64 + n], (uv)[64 + 32 + n], \
bottom_dst + ((cur_x) + n) * XSTEP); \
} \
} \
}
-#define CONVERT2RGB_32(FUNC, XSTEP, top_y, bottom_y, \
- top_dst, bottom_dst, cur_x) do { \
- FUNC##32(top_y + (cur_x), r_u, r_v, top_dst + (cur_x) * XSTEP); \
- if (bottom_y != NULL) { \
- FUNC##32(bottom_y + (cur_x), r_u + 64, r_v + 64, \
- bottom_dst + (cur_x) * XSTEP); \
- } \
-} while (0)
-
#define SSE2_UPSAMPLE_FUNC(FUNC_NAME, FUNC, XSTEP) \
static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \
const uint8_t* top_u, const uint8_t* top_v, \
const uint8_t* cur_u, const uint8_t* cur_v, \
uint8_t* top_dst, uint8_t* bottom_dst, int len) { \
- int uv_pos, pos; \
- /* 16byte-aligned array to cache reconstructed u and v */ \
+ int b; \
+  /* 16-byte-aligned array to cache reconstructed u and v */ \
uint8_t uv_buf[4 * 32 + 15]; \
- uint8_t* const r_u = (uint8_t*)((uintptr_t)(uv_buf + 15) & ~15); \
- uint8_t* const r_v = r_u + 32; \
+ uint8_t* const r_uv = (uint8_t*)((uintptr_t)(uv_buf + 15) & ~15); \
+ const int uv_len = (len + 1) >> 1; \
+  /* 17 pixels must be readable for each block */ \
+ const int num_blocks = (uv_len - 1) >> 4; \
+ const int leftover = uv_len - num_blocks * 16; \
+ const int last_pos = 1 + 32 * num_blocks; \
\
- assert(top_y != NULL); \
- { /* Treat the first pixel in regular way */ \
- const int u_diag = ((top_u[0] + cur_u[0]) >> 1) + 1; \
- const int v_diag = ((top_v[0] + cur_v[0]) >> 1) + 1; \
- const int u0_t = (top_u[0] + u_diag) >> 1; \
- const int v0_t = (top_v[0] + v_diag) >> 1; \
- FUNC(top_y[0], u0_t, v0_t, top_dst); \
- if (bottom_y != NULL) { \
- const int u0_b = (cur_u[0] + u_diag) >> 1; \
- const int v0_b = (cur_v[0] + v_diag) >> 1; \
- FUNC(bottom_y[0], u0_b, v0_b, bottom_dst); \
- } \
+ const int u_diag = ((top_u[0] + cur_u[0]) >> 1) + 1; \
+ const int v_diag = ((top_v[0] + cur_v[0]) >> 1) + 1; \
+ \
+ assert(len > 0); \
+  /* Treat the first pixel in the regular way */ \
+ if (top_y) { \
+ const int u0 = (top_u[0] + u_diag) >> 1; \
+ const int v0 = (top_v[0] + v_diag) >> 1; \
+ FUNC(top_y[0], u0, v0, top_dst); \
} \
- /* For UPSAMPLE_32PIXELS, 17 u/v values must be read-able for each block */ \
- for (pos = 1, uv_pos = 0; pos + 32 + 1 <= len; pos += 32, uv_pos += 16) { \
- UPSAMPLE_32PIXELS(top_u + uv_pos, cur_u + uv_pos, r_u); \
- UPSAMPLE_32PIXELS(top_v + uv_pos, cur_v + uv_pos, r_v); \
- CONVERT2RGB_32(FUNC, XSTEP, top_y, bottom_y, top_dst, bottom_dst, pos); \
+ if (bottom_y) { \
+ const int u0 = (cur_u[0] + u_diag) >> 1; \
+ const int v0 = (cur_v[0] + v_diag) >> 1; \
+ FUNC(bottom_y[0], u0, v0, bottom_dst); \
} \
- if (len > 1) { \
- const int left_over = ((len + 1) >> 1) - (pos >> 1); \
- assert(left_over > 0); \
- UPSAMPLE_LAST_BLOCK(top_u + uv_pos, cur_u + uv_pos, left_over, r_u); \
- UPSAMPLE_LAST_BLOCK(top_v + uv_pos, cur_v + uv_pos, left_over, r_v); \
- CONVERT2RGB(FUNC, XSTEP, top_y, bottom_y, top_dst, bottom_dst, \
- pos, len - pos); \
+ \
+ for (b = 0; b < num_blocks; ++b) { \
+ UPSAMPLE_32PIXELS(top_u, cur_u, r_uv + 0 * 32); \
+ UPSAMPLE_32PIXELS(top_v, cur_v, r_uv + 1 * 32); \
+ CONVERT2RGB(FUNC, XSTEP, top_y, bottom_y, r_uv, top_dst, bottom_dst, \
+ 32 * b + 1, 32) \
+ top_u += 16; \
+ cur_u += 16; \
+ top_v += 16; \
+ cur_v += 16; \
} \
+ \
+ UPSAMPLE_LAST_BLOCK(top_u, cur_u, leftover, r_uv + 0 * 32); \
+ UPSAMPLE_LAST_BLOCK(top_v, cur_v, leftover, r_uv + 1 * 32); \
+ CONVERT2RGB(FUNC, XSTEP, top_y, bottom_y, r_uv, top_dst, bottom_dst, \
+ last_pos, len - last_pos); \
}
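
The rewritten loop's bookkeeping, checked by hand for len == 100 (standalone sketch, not part of this patch):

#include <assert.h>

int main(void) {
  const int len = 100;                            // luma pixels in the row
  const int uv_len = (len + 1) >> 1;              // 50 chroma samples
  const int num_blocks = (uv_len - 1) >> 4;       // 3 full 32-pixel blocks
  const int leftover = uv_len - num_blocks * 16;  // 2 chroma samples left
  const int last_pos = 1 + 32 * num_blocks;       // 97
  assert(num_blocks == 3 && leftover == 2 && last_pos == 97);
  assert(len - last_pos == 3);  // 3 tail pixels converted one by one
  return 0;
}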
// SSE2 variants of the fancy upsampler.
@@ -179,40 +182,28 @@ SSE2_UPSAMPLE_FUNC(UpsampleBgraLinePairSSE2, VP8YuvToBgra, 4)
#undef UPSAMPLE_32PIXELS
#undef UPSAMPLE_LAST_BLOCK
#undef CONVERT2RGB
-#undef CONVERT2RGB_32
#undef SSE2_UPSAMPLE_FUNC
-#endif // FANCY_UPSAMPLING
-
-#endif // WEBP_USE_SSE2
-
//------------------------------------------------------------------------------
-#ifdef FANCY_UPSAMPLING
-
extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];
void WebPInitUpsamplersSSE2(void) {
-#if defined(WEBP_USE_SSE2)
- VP8YUVInitSSE2();
WebPUpsamplers[MODE_RGB] = UpsampleRgbLinePairSSE2;
WebPUpsamplers[MODE_RGBA] = UpsampleRgbaLinePairSSE2;
WebPUpsamplers[MODE_BGR] = UpsampleBgrLinePairSSE2;
WebPUpsamplers[MODE_BGRA] = UpsampleBgraLinePairSSE2;
-#endif // WEBP_USE_SSE2
}
void WebPInitPremultiplySSE2(void) {
-#if defined(WEBP_USE_SSE2)
WebPUpsamplers[MODE_rgbA] = UpsampleRgbaLinePairSSE2;
WebPUpsamplers[MODE_bgrA] = UpsampleBgraLinePairSSE2;
-#endif // WEBP_USE_SSE2
}
-#else
-
-// this empty function is to avoid an empty .o
-void WebPInitPremultiplySSE2(void) {}
-
#endif // FANCY_UPSAMPLING
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
+
+#endif // WEBP_USE_SSE2
diff --git a/drivers/webp/dsp/yuv.c b/drivers/webp/dsp/yuv.c
index 4f9cafc104..7f05f9a3aa 100644
--- a/drivers/webp/dsp/yuv.c
+++ b/drivers/webp/dsp/yuv.c
@@ -1,10 +1,8 @@
// Copyright 2010 Google Inc. All Rights Reserved.
//
-// Use of this source code is governed by a BSD-style license
-// that can be found in the COPYING file in the root of the source
-// tree. An additional intellectual property rights grant can be found
-// in the file PATENTS. All contributing project authors may
-// be found in the AUTHORS file in the root of the source tree.
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// -----------------------------------------------------------------------------
//
// YUV->RGB conversion function
@@ -13,8 +11,16 @@
#include "./yuv.h"
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
-#if defined(WEBP_YUV_USE_TABLE)
+enum { YUV_HALF = 1 << (YUV_FIX - 1) };
+
+int16_t VP8kVToR[256], VP8kUToB[256];
+int32_t VP8kVToG[256], VP8kUToG[256];
+uint8_t VP8kClip[YUV_RANGE_MAX - YUV_RANGE_MIN];
+uint8_t VP8kClip4Bits[YUV_RANGE_MAX - YUV_RANGE_MIN];
static int done = 0;
@@ -22,17 +28,11 @@ static WEBP_INLINE uint8_t clip(int v, int max_value) {
return v < 0 ? 0 : v > max_value ? max_value : v;
}
-int16_t VP8kVToR[256], VP8kUToB[256];
-int32_t VP8kVToG[256], VP8kUToG[256];
-uint8_t VP8kClip[YUV_RANGE_MAX - YUV_RANGE_MIN];
-uint8_t VP8kClip4Bits[YUV_RANGE_MAX - YUV_RANGE_MIN];
-
void VP8YUVInit(void) {
int i;
if (done) {
return;
}
-#ifndef USE_YUVj
for (i = 0; i < 256; ++i) {
VP8kVToR[i] = (89858 * (i - 128) + YUV_HALF) >> YUV_FIX;
VP8kUToG[i] = -22014 * (i - 128) + YUV_HALF;
@@ -44,164 +44,9 @@ void VP8YUVInit(void) {
VP8kClip[i - YUV_RANGE_MIN] = clip(k, 255);
VP8kClip4Bits[i - YUV_RANGE_MIN] = clip((k + 8) >> 4, 15);
}
-#else
- for (i = 0; i < 256; ++i) {
- VP8kVToR[i] = (91881 * (i - 128) + YUV_HALF) >> YUV_FIX;
- VP8kUToG[i] = -22554 * (i - 128) + YUV_HALF;
- VP8kVToG[i] = -46802 * (i - 128);
- VP8kUToB[i] = (116130 * (i - 128) + YUV_HALF) >> YUV_FIX;
- }
- for (i = YUV_RANGE_MIN; i < YUV_RANGE_MAX; ++i) {
- const int k = i;
- VP8kClip[i - YUV_RANGE_MIN] = clip(k, 255);
- VP8kClip4Bits[i - YUV_RANGE_MIN] = clip((k + 8) >> 4, 15);
- }
-#endif
-
done = 1;
}
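
A spot-check of the tables built above (standalone sketch): 89858 / 65536 ~= 1.371, so VP8kVToR[i] is a fixed-point 1.371 * (i - 128); the remaining 1.164 * Y - 18.624 of the factorized BT.601 formulas (see the comment block removed from yuv.h below) is folded into VP8kClip, which is why that table is indexed over [YUV_RANGE_MIN, YUV_RANGE_MAX) = [-227, 482):

#include <assert.h>

int main(void) {
  const int i = 255;  // largest chroma input
  const int v_to_r = (89858 * (i - 128) + (1 << 15)) >> 16;  // YUV_HALF, YUV_FIX
  assert(v_to_r == 174);  // ~= 1.371 * 127
  return 0;
}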
-#else
-
-void VP8YUVInit(void) {}
-
-#endif // WEBP_YUV_USE_TABLE
-
-//-----------------------------------------------------------------------------
-// SSE2 extras
-
-#if defined(WEBP_USE_SSE2)
-
-#ifdef FANCY_UPSAMPLING
-
-#include <emmintrin.h>
-#include <string.h> // for memcpy
-
-typedef union { // handy struct for converting SSE2 registers
- int32_t i32[4];
- uint8_t u8[16];
- __m128i m;
-} VP8kCstSSE2;
-
-static int done_sse2 = 0;
-static VP8kCstSSE2 VP8kUtoRGBA[256], VP8kVtoRGBA[256], VP8kYtoRGBA[256];
-
-void VP8YUVInitSSE2(void) {
- if (!done_sse2) {
- int i;
- for (i = 0; i < 256; ++i) {
- VP8kYtoRGBA[i].i32[0] =
- VP8kYtoRGBA[i].i32[1] =
- VP8kYtoRGBA[i].i32[2] = (i - 16) * kYScale + YUV_HALF2;
- VP8kYtoRGBA[i].i32[3] = 0xff << YUV_FIX2;
-
- VP8kUtoRGBA[i].i32[0] = 0;
- VP8kUtoRGBA[i].i32[1] = -kUToG * (i - 128);
- VP8kUtoRGBA[i].i32[2] = kUToB * (i - 128);
- VP8kUtoRGBA[i].i32[3] = 0;
-
- VP8kVtoRGBA[i].i32[0] = kVToR * (i - 128);
- VP8kVtoRGBA[i].i32[1] = -kVToG * (i - 128);
- VP8kVtoRGBA[i].i32[2] = 0;
- VP8kVtoRGBA[i].i32[3] = 0;
- }
- done_sse2 = 1;
- }
-}
-
-static WEBP_INLINE __m128i VP8GetRGBA32b(int y, int u, int v) {
- const __m128i u_part = _mm_loadu_si128(&VP8kUtoRGBA[u].m);
- const __m128i v_part = _mm_loadu_si128(&VP8kVtoRGBA[v].m);
- const __m128i y_part = _mm_loadu_si128(&VP8kYtoRGBA[y].m);
- const __m128i uv_part = _mm_add_epi32(u_part, v_part);
- const __m128i rgba1 = _mm_add_epi32(y_part, uv_part);
- const __m128i rgba2 = _mm_srai_epi32(rgba1, YUV_FIX2);
- return rgba2;
-}
-
-static WEBP_INLINE void VP8YuvToRgbSSE2(uint8_t y, uint8_t u, uint8_t v,
- uint8_t* const rgb) {
- const __m128i tmp0 = VP8GetRGBA32b(y, u, v);
- const __m128i tmp1 = _mm_packs_epi32(tmp0, tmp0);
- const __m128i tmp2 = _mm_packus_epi16(tmp1, tmp1);
- // Note: we store 8 bytes at a time, not 3 bytes! -> memory stomp
- _mm_storel_epi64((__m128i*)rgb, tmp2);
-}
-
-static WEBP_INLINE void VP8YuvToBgrSSE2(uint8_t y, uint8_t u, uint8_t v,
- uint8_t* const bgr) {
- const __m128i tmp0 = VP8GetRGBA32b(y, u, v);
- const __m128i tmp1 = _mm_shuffle_epi32(tmp0, _MM_SHUFFLE(3, 0, 1, 2));
- const __m128i tmp2 = _mm_packs_epi32(tmp1, tmp1);
- const __m128i tmp3 = _mm_packus_epi16(tmp2, tmp2);
- // Note: we store 8 bytes at a time, not 3 bytes! -> memory stomp
- _mm_storel_epi64((__m128i*)bgr, tmp3);
-}
-
-void VP8YuvToRgba32(const uint8_t* y, const uint8_t* u, const uint8_t* v,
- uint8_t* dst) {
- int n;
- for (n = 0; n < 32; n += 4) {
- const __m128i tmp0_1 = VP8GetRGBA32b(y[n + 0], u[n + 0], v[n + 0]);
- const __m128i tmp0_2 = VP8GetRGBA32b(y[n + 1], u[n + 1], v[n + 1]);
- const __m128i tmp0_3 = VP8GetRGBA32b(y[n + 2], u[n + 2], v[n + 2]);
- const __m128i tmp0_4 = VP8GetRGBA32b(y[n + 3], u[n + 3], v[n + 3]);
- const __m128i tmp1_1 = _mm_packs_epi32(tmp0_1, tmp0_2);
- const __m128i tmp1_2 = _mm_packs_epi32(tmp0_3, tmp0_4);
- const __m128i tmp2 = _mm_packus_epi16(tmp1_1, tmp1_2);
- _mm_storeu_si128((__m128i*)dst, tmp2);
- dst += 4 * 4;
- }
-}
-
-void VP8YuvToBgra32(const uint8_t* y, const uint8_t* u, const uint8_t* v,
- uint8_t* dst) {
- int n;
- for (n = 0; n < 32; n += 2) {
- const __m128i tmp0_1 = VP8GetRGBA32b(y[n + 0], u[n + 0], v[n + 0]);
- const __m128i tmp0_2 = VP8GetRGBA32b(y[n + 1], u[n + 1], v[n + 1]);
- const __m128i tmp1_1 = _mm_shuffle_epi32(tmp0_1, _MM_SHUFFLE(3, 0, 1, 2));
- const __m128i tmp1_2 = _mm_shuffle_epi32(tmp0_2, _MM_SHUFFLE(3, 0, 1, 2));
- const __m128i tmp2_1 = _mm_packs_epi32(tmp1_1, tmp1_2);
- const __m128i tmp3 = _mm_packus_epi16(tmp2_1, tmp2_1);
- _mm_storel_epi64((__m128i*)dst, tmp3);
- dst += 4 * 2;
- }
-}
-
-void VP8YuvToRgb32(const uint8_t* y, const uint8_t* u, const uint8_t* v,
- uint8_t* dst) {
- int n;
- uint8_t tmp0[2 * 3 + 5 + 15];
- uint8_t* const tmp = (uint8_t*)((uintptr_t)(tmp0 + 15) & ~15); // align
- for (n = 0; n < 30; ++n) { // we directly stomp the *dst memory
- VP8YuvToRgbSSE2(y[n], u[n], v[n], dst + n * 3);
- }
- // Last two pixels are special: we write in a tmp buffer before sending
- // to dst.
- VP8YuvToRgbSSE2(y[n + 0], u[n + 0], v[n + 0], tmp + 0);
- VP8YuvToRgbSSE2(y[n + 1], u[n + 1], v[n + 1], tmp + 3);
- memcpy(dst + n * 3, tmp, 2 * 3);
-}
-
-void VP8YuvToBgr32(const uint8_t* y, const uint8_t* u, const uint8_t* v,
- uint8_t* dst) {
- int n;
- uint8_t tmp0[2 * 3 + 5 + 15];
- uint8_t* const tmp = (uint8_t*)((uintptr_t)(tmp0 + 15) & ~15); // align
- for (n = 0; n < 30; ++n) {
- VP8YuvToBgrSSE2(y[n], u[n], v[n], dst + n * 3);
- }
- VP8YuvToBgrSSE2(y[n + 0], u[n + 0], v[n + 0], tmp + 0);
- VP8YuvToBgrSSE2(y[n + 1], u[n + 1], v[n + 1], tmp + 3);
- memcpy(dst + n * 3, tmp, 2 * 3);
-}
-
-#else
-
-void VP8YUVInitSSE2(void) {}
-
-#endif // FANCY_UPSAMPLING
-
-#endif // WEBP_USE_SSE2
-
+#if defined(__cplusplus) || defined(c_plusplus)
+} // extern "C"
+#endif
diff --git a/drivers/webp/dsp/yuv.h b/drivers/webp/dsp/yuv.h
index dd778f9cbe..a569109c54 100644
--- a/drivers/webp/dsp/yuv.h
+++ b/drivers/webp/dsp/yuv.h
@@ -1,165 +1,36 @@
// Copyright 2010 Google Inc. All Rights Reserved.
//
-// Use of this source code is governed by a BSD-style license
-// that can be found in the COPYING file in the root of the source
-// tree. An additional intellectual property rights grant can be found
-// in the file PATENTS. All contributing project authors may
-// be found in the AUTHORS file in the root of the source tree.
+// This code is licensed under the same terms as WebM:
+// Software License Agreement: http://www.webmproject.org/license/software/
+// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// -----------------------------------------------------------------------------
//
// inline YUV<->RGB conversion function
//
-// The exact naming is Y'CbCr, following the ITU-R BT.601 standard.
-// More information at: http://en.wikipedia.org/wiki/YCbCr
-// Y = 0.2569 * R + 0.5044 * G + 0.0979 * B + 16
-// U = -0.1483 * R - 0.2911 * G + 0.4394 * B + 128
-// V = 0.4394 * R - 0.3679 * G - 0.0715 * B + 128
-// We use 16bit fixed point operations for RGB->YUV conversion (YUV_FIX).
-//
-// For the Y'CbCr to RGB conversion, the BT.601 specification reads:
-// R = 1.164 * (Y-16) + 1.596 * (V-128)
-// G = 1.164 * (Y-16) - 0.813 * (V-128) - 0.391 * (U-128)
-// B = 1.164 * (Y-16) + 2.018 * (U-128)
-// where Y is in the [16,235] range, and U/V in the [16,240] range.
-// In the table-lookup version (WEBP_YUV_USE_TABLE), the common factor
-// "1.164 * (Y-16)" can be handled as an offset in the VP8kClip[] table.
-// So in this case the formulae should read:
-// R = 1.164 * [Y + 1.371 * (V-128) ] - 18.624
-// G = 1.164 * [Y - 0.698 * (V-128) - 0.336 * (U-128)] - 18.624
-// B = 1.164 * [Y + 1.733 * (U-128)] - 18.624
-// once factorized.
-// For YUV->RGB conversion, only 14bit fixed precision is used (YUV_FIX2).
-// That's the maximum possible for a convenient ARM implementation.
-//
// Author: Skal (pascal.massimino@gmail.com)
#ifndef WEBP_DSP_YUV_H_
#define WEBP_DSP_YUV_H_
-#include "./dsp.h"
#include "../dec/decode_vp8.h"
-// Define the following to use the LUT-based code:
-// #define WEBP_YUV_USE_TABLE
-
-#if defined(WEBP_EXPERIMENTAL_FEATURES)
-// Do NOT activate this feature for real compression. This is only experimental!
-// This flag is for comparison purpose against JPEG's "YUVj" natural colorspace.
-// This colorspace is close to Rec.601's Y'CbCr model with the notable
-// difference of allowing larger range for luma/chroma.
-// See http://en.wikipedia.org/wiki/YCbCr#JPEG_conversion paragraph, and its
-// difference with http://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion
-// #define USE_YUVj
-#endif
-
//------------------------------------------------------------------------------
// YUV -> RGB conversion
-#ifdef __cplusplus
+#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
-enum {
- YUV_FIX = 16, // fixed-point precision for RGB->YUV
- YUV_HALF = 1 << (YUV_FIX - 1),
- YUV_MASK = (256 << YUV_FIX) - 1,
- YUV_RANGE_MIN = -227, // min value of r/g/b output
- YUV_RANGE_MAX = 256 + 226, // max value of r/g/b output
-
- YUV_FIX2 = 14, // fixed-point precision for YUV->RGB
- YUV_HALF2 = 1 << (YUV_FIX2 - 1),
- YUV_MASK2 = (256 << YUV_FIX2) - 1
+enum { YUV_FIX = 16, // fixed-point precision
+ YUV_RANGE_MIN = -227, // min value of r/g/b output
+ YUV_RANGE_MAX = 256 + 226 // max value of r/g/b output
};
-
-// These constants are 14b fixed-point version of ITU-R BT.601 constants.
-#define kYScale 19077 // 1.164 = 255 / 219
-#define kVToR 26149 // 1.596 = 255 / 112 * 0.701
-#define kUToG 6419 // 0.391 = 255 / 112 * 0.886 * 0.114 / 0.587
-#define kVToG 13320 // 0.813 = 255 / 112 * 0.701 * 0.299 / 0.587
-#define kUToB 33050 // 2.018 = 255 / 112 * 0.886
-#define kRCst (-kYScale * 16 - kVToR * 128 + YUV_HALF2)
-#define kGCst (-kYScale * 16 + kUToG * 128 + kVToG * 128 + YUV_HALF2)
-#define kBCst (-kYScale * 16 - kUToB * 128 + YUV_HALF2)
-
-//------------------------------------------------------------------------------
-
-#if !defined(WEBP_YUV_USE_TABLE)
-
-// slower on x86 by ~7-8%, but bit-exact with the SSE2 version
-
-static WEBP_INLINE int VP8Clip8(int v) {
- return ((v & ~YUV_MASK2) == 0) ? (v >> YUV_FIX2) : (v < 0) ? 0 : 255;
-}
-
-static WEBP_INLINE int VP8YUVToR(int y, int v) {
- return VP8Clip8(kYScale * y + kVToR * v + kRCst);
-}
-
-static WEBP_INLINE int VP8YUVToG(int y, int u, int v) {
- return VP8Clip8(kYScale * y - kUToG * u - kVToG * v + kGCst);
-}
-
-static WEBP_INLINE int VP8YUVToB(int y, int u) {
- return VP8Clip8(kYScale * y + kUToB * u + kBCst);
-}
-
-static WEBP_INLINE void VP8YuvToRgb(int y, int u, int v,
- uint8_t* const rgb) {
- rgb[0] = VP8YUVToR(y, v);
- rgb[1] = VP8YUVToG(y, u, v);
- rgb[2] = VP8YUVToB(y, u);
-}
-
-static WEBP_INLINE void VP8YuvToBgr(int y, int u, int v,
- uint8_t* const bgr) {
- bgr[0] = VP8YUVToB(y, u);
- bgr[1] = VP8YUVToG(y, u, v);
- bgr[2] = VP8YUVToR(y, v);
-}
-
-static WEBP_INLINE void VP8YuvToRgb565(int y, int u, int v,
- uint8_t* const rgb) {
- const int r = VP8YUVToR(y, v); // 5 usable bits
- const int g = VP8YUVToG(y, u, v); // 6 usable bits
- const int b = VP8YUVToB(y, u); // 5 usable bits
- const int rg = (r & 0xf8) | (g >> 5);
- const int gb = ((g << 3) & 0xe0) | (b >> 3);
-#ifdef WEBP_SWAP_16BIT_CSP
- rgb[0] = gb;
- rgb[1] = rg;
-#else
- rgb[0] = rg;
- rgb[1] = gb;
-#endif
-}
-
-static WEBP_INLINE void VP8YuvToRgba4444(int y, int u, int v,
- uint8_t* const argb) {
- const int r = VP8YUVToR(y, v); // 4 usable bits
- const int g = VP8YUVToG(y, u, v); // 4 usable bits
- const int b = VP8YUVToB(y, u); // 4 usable bits
- const int rg = (r & 0xf0) | (g >> 4);
- const int ba = (b & 0xf0) | 0x0f; // overwrite the lower 4 bits
-#ifdef WEBP_SWAP_16BIT_CSP
- argb[0] = ba;
- argb[1] = rg;
-#else
- argb[0] = rg;
- argb[1] = ba;
-#endif
-}
-
-#else
-
-// Table-based version, not totally equivalent to the SSE2 version.
-// Rounding diff is only +/-1 though.
-
extern int16_t VP8kVToR[256], VP8kUToB[256];
extern int32_t VP8kVToG[256], VP8kUToG[256];
extern uint8_t VP8kClip[YUV_RANGE_MAX - YUV_RANGE_MIN];
extern uint8_t VP8kClip4Bits[YUV_RANGE_MAX - YUV_RANGE_MIN];
-static WEBP_INLINE void VP8YuvToRgb(int y, int u, int v,
+static WEBP_INLINE void VP8YuvToRgb(uint8_t y, uint8_t u, uint8_t v,
uint8_t* const rgb) {
const int r_off = VP8kVToR[v];
const int g_off = (VP8kVToG[v] + VP8kUToG[u]) >> YUV_FIX;
@@ -169,60 +40,42 @@ static WEBP_INLINE void VP8YuvToRgb(int y, int u, int v,
rgb[2] = VP8kClip[y + b_off - YUV_RANGE_MIN];
}
-static WEBP_INLINE void VP8YuvToBgr(int y, int u, int v,
- uint8_t* const bgr) {
+static WEBP_INLINE void VP8YuvToRgb565(uint8_t y, uint8_t u, uint8_t v,
+ uint8_t* const rgb) {
const int r_off = VP8kVToR[v];
const int g_off = (VP8kVToG[v] + VP8kUToG[u]) >> YUV_FIX;
const int b_off = VP8kUToB[u];
- bgr[0] = VP8kClip[y + b_off - YUV_RANGE_MIN];
- bgr[1] = VP8kClip[y + g_off - YUV_RANGE_MIN];
- bgr[2] = VP8kClip[y + r_off - YUV_RANGE_MIN];
+ rgb[0] = ((VP8kClip[y + r_off - YUV_RANGE_MIN] & 0xf8) |
+ (VP8kClip[y + g_off - YUV_RANGE_MIN] >> 5));
+ rgb[1] = (((VP8kClip[y + g_off - YUV_RANGE_MIN] << 3) & 0xe0) |
+ (VP8kClip[y + b_off - YUV_RANGE_MIN] >> 3));
}
-static WEBP_INLINE void VP8YuvToRgb565(int y, int u, int v,
- uint8_t* const rgb) {
- const int r_off = VP8kVToR[v];
- const int g_off = (VP8kVToG[v] + VP8kUToG[u]) >> YUV_FIX;
- const int b_off = VP8kUToB[u];
- const int rg = ((VP8kClip[y + r_off - YUV_RANGE_MIN] & 0xf8) |
- (VP8kClip[y + g_off - YUV_RANGE_MIN] >> 5));
- const int gb = (((VP8kClip[y + g_off - YUV_RANGE_MIN] << 3) & 0xe0) |
- (VP8kClip[y + b_off - YUV_RANGE_MIN] >> 3));
-#ifdef WEBP_SWAP_16BIT_CSP
- rgb[0] = gb;
- rgb[1] = rg;
-#else
- rgb[0] = rg;
- rgb[1] = gb;
-#endif
+static WEBP_INLINE void VP8YuvToArgb(uint8_t y, uint8_t u, uint8_t v,
+ uint8_t* const argb) {
+ argb[0] = 0xff;
+ VP8YuvToRgb(y, u, v, argb + 1);
}
-static WEBP_INLINE void VP8YuvToRgba4444(int y, int u, int v,
+static WEBP_INLINE void VP8YuvToRgba4444(uint8_t y, uint8_t u, uint8_t v,
uint8_t* const argb) {
const int r_off = VP8kVToR[v];
const int g_off = (VP8kVToG[v] + VP8kUToG[u]) >> YUV_FIX;
const int b_off = VP8kUToB[u];
- const int rg = ((VP8kClip4Bits[y + r_off - YUV_RANGE_MIN] << 4) |
- VP8kClip4Bits[y + g_off - YUV_RANGE_MIN]);
- const int ba = (VP8kClip4Bits[y + b_off - YUV_RANGE_MIN] << 4) | 0x0f;
-#ifdef WEBP_SWAP_16BIT_CSP
- argb[0] = ba;
- argb[1] = rg;
-#else
- argb[0] = rg;
- argb[1] = ba;
-#endif
+  // Alpha (the low 4 bits of argb[1]) is forced to 0x0f (fully opaque).
+ argb[0] = ((VP8kClip4Bits[y + r_off - YUV_RANGE_MIN] << 4) |
+ VP8kClip4Bits[y + g_off - YUV_RANGE_MIN]);
+ argb[1] = 0x0f | (VP8kClip4Bits[y + b_off - YUV_RANGE_MIN] << 4);
}
-#endif // WEBP_YUV_USE_TABLE
-
-//-----------------------------------------------------------------------------
-// Alpha handling variants
-
-static WEBP_INLINE void VP8YuvToArgb(uint8_t y, uint8_t u, uint8_t v,
- uint8_t* const argb) {
- argb[0] = 0xff;
- VP8YuvToRgb(y, u, v, argb + 1);
+static WEBP_INLINE void VP8YuvToBgr(uint8_t y, uint8_t u, uint8_t v,
+ uint8_t* const bgr) {
+ const int r_off = VP8kVToR[v];
+ const int g_off = (VP8kVToG[v] + VP8kUToG[u]) >> YUV_FIX;
+ const int b_off = VP8kUToB[u];
+ bgr[0] = VP8kClip[y + b_off - YUV_RANGE_MIN];
+ bgr[1] = VP8kClip[y + g_off - YUV_RANGE_MIN];
+ bgr[2] = VP8kClip[y + r_off - YUV_RANGE_MIN];
}
static WEBP_INLINE void VP8YuvToBgra(uint8_t y, uint8_t u, uint8_t v,
@@ -240,77 +93,35 @@ static WEBP_INLINE void VP8YuvToRgba(uint8_t y, uint8_t u, uint8_t v,
// Must be called before everything, to initialize the tables.
void VP8YUVInit(void);
-//-----------------------------------------------------------------------------
-// SSE2 extra functions (mostly for upsampling_sse2.c)
-
-#if defined(WEBP_USE_SSE2)
-
-#if defined(FANCY_UPSAMPLING)
-// Process 32 pixels and store the result (24b or 32b per pixel) in *dst.
-void VP8YuvToRgba32(const uint8_t* y, const uint8_t* u, const uint8_t* v,
- uint8_t* dst);
-void VP8YuvToRgb32(const uint8_t* y, const uint8_t* u, const uint8_t* v,
- uint8_t* dst);
-void VP8YuvToBgra32(const uint8_t* y, const uint8_t* u, const uint8_t* v,
- uint8_t* dst);
-void VP8YuvToBgr32(const uint8_t* y, const uint8_t* u, const uint8_t* v,
- uint8_t* dst);
-#endif // FANCY_UPSAMPLING
-
-// Must be called to initialize tables before using the functions.
-void VP8YUVInitSSE2(void);
-
-#endif // WEBP_USE_SSE2
-
//------------------------------------------------------------------------------
// RGB -> YUV conversion
+// The exact naming is Y'CbCr, following the ITU-R BT.601 standard.
+// More information at: http://en.wikipedia.org/wiki/YCbCr
+// Y = 0.2569 * R + 0.5044 * G + 0.0979 * B + 16
+// U = -0.1483 * R - 0.2911 * G + 0.4394 * B + 128
+// V = 0.4394 * R - 0.3679 * G - 0.0715 * B + 128
+// We use 16bit fixed point operations.
-// Stub functions that can be called with various rounding values:
-static WEBP_INLINE int VP8ClipUV(int uv, int rounding) {
- uv = (uv + rounding + (128 << (YUV_FIX + 2))) >> (YUV_FIX + 2);
- return ((uv & ~0xff) == 0) ? uv : (uv < 0) ? 0 : 255;
+static WEBP_INLINE int VP8ClipUV(int v) {
+ v = (v + (257 << (YUV_FIX + 2 - 1))) >> (YUV_FIX + 2);
+ return ((v & ~0xff) == 0) ? v : (v < 0) ? 0 : 255;
}
-#ifndef USE_YUVj
-
-static WEBP_INLINE int VP8RGBToY(int r, int g, int b, int rounding) {
+static WEBP_INLINE int VP8RGBToY(int r, int g, int b) {
+ const int kRound = (1 << (YUV_FIX - 1)) + (16 << YUV_FIX);
const int luma = 16839 * r + 33059 * g + 6420 * b;
- return (luma + rounding + (16 << YUV_FIX)) >> YUV_FIX; // no need to clip
+ return (luma + kRound) >> YUV_FIX; // no need to clip
}
-static WEBP_INLINE int VP8RGBToU(int r, int g, int b, int rounding) {
- const int u = -9719 * r - 19081 * g + 28800 * b;
- return VP8ClipUV(u, rounding);
+static WEBP_INLINE int VP8RGBToU(int r, int g, int b) {
+ return VP8ClipUV(-9719 * r - 19081 * g + 28800 * b);
}
-static WEBP_INLINE int VP8RGBToV(int r, int g, int b, int rounding) {
- const int v = +28800 * r - 24116 * g - 4684 * b;
- return VP8ClipUV(v, rounding);
-}
-
-#else
-
-// This JPEG-YUV colorspace, only for comparison!
-// These are also 16bit precision coefficients from Rec.601, but with full
-// [0..255] output range.
-static WEBP_INLINE int VP8RGBToY(int r, int g, int b, int rounding) {
- const int luma = 19595 * r + 38470 * g + 7471 * b;
- return (luma + rounding) >> YUV_FIX; // no need to clip
+static WEBP_INLINE int VP8RGBToV(int r, int g, int b) {
+ return VP8ClipUV(+28800 * r - 24116 * g - 4684 * b);
}
-static WEBP_INLINE int VP8_RGB_TO_U(int r, int g, int b, int rounding) {
- const int u = -11058 * r - 21710 * g + 32768 * b;
- return VP8ClipUV(u, rounding);
-}
-
-static WEBP_INLINE int VP8_RGB_TO_V(int r, int g, int b, int rounding) {
- const int v = 32768 * r - 27439 * g - 5329 * b;
- return VP8ClipUV(v, rounding);
-}
-
-#endif // USE_YUVj
-
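An endpoint check for the new helpers above (standalone sketch, not part of this patch): black maps to Y == 16 and white to Y == 235, the BT.601 luma range. Note also that the rounding constant in VP8ClipUV, 257 << (YUV_FIX + 1), is exactly (128 << (YUV_FIX + 2)) + (1 << (YUV_FIX + 1)), i.e. the +128 chroma bias and the rounding half merged into one addend:

#include <assert.h>

enum { kYuvFix = 16 };  // mirrors YUV_FIX above

static int RGBToY(int r, int g, int b) {
  const int kRound = (1 << (kYuvFix - 1)) + (16 << kYuvFix);
  return (16839 * r + 33059 * g + 6420 * b + kRound) >> kYuvFix;
}

int main(void) {
  assert(RGBToY(0, 0, 0) == 16);         // black
  assert(RGBToY(255, 255, 255) == 235);  // white
  assert((257 << (kYuvFix + 1)) ==
         (128 << (kYuvFix + 2)) + (1 << (kYuvFix + 1)));
  return 0;
}
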
-#ifdef __cplusplus
+#if defined(__cplusplus) || defined(c_plusplus)
} // extern "C"
#endif