Diffstat (limited to 'drivers/webpold/dsp')
-rw-r--r-- | drivers/webpold/dsp/cpu.c | 85
-rw-r--r-- | drivers/webpold/dsp/dec.c | 732
-rw-r--r-- | drivers/webpold/dsp/dec_neon.c | 329
-rw-r--r-- | drivers/webpold/dsp/dec_sse2.c | 903
-rw-r--r-- | drivers/webpold/dsp/dsp.h | 210
-rw-r--r-- | drivers/webpold/dsp/enc.c | 743
-rw-r--r-- | drivers/webpold/dsp/enc_sse2.c | 837
-rw-r--r-- | drivers/webpold/dsp/lossless.c | 1138
-rw-r--r-- | drivers/webpold/dsp/lossless.h | 82
-rw-r--r-- | drivers/webpold/dsp/upsampling.c | 357
-rw-r--r-- | drivers/webpold/dsp/upsampling_sse2.c | 209
-rw-r--r-- | drivers/webpold/dsp/yuv.c | 52
-rw-r--r-- | drivers/webpold/dsp/yuv.h | 128
13 files changed, 5805 insertions, 0 deletions
diff --git a/drivers/webpold/dsp/cpu.c b/drivers/webpold/dsp/cpu.c new file mode 100644 index 0000000000..0228734457 --- /dev/null +++ b/drivers/webpold/dsp/cpu.c @@ -0,0 +1,85 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// +// This code is licensed under the same terms as WebM: +// Software License Agreement: http://www.webmproject.org/license/software/ +// Additional IP Rights Grant: http://www.webmproject.org/license/additional/ +// ----------------------------------------------------------------------------- +// +// CPU detection +// +// Author: Christian Duvivier (cduvivier@google.com) + +#include "./dsp.h" + +#if defined(__ANDROID__) +#include <cpu-features.h> +#endif + +#if defined(__cplusplus) || defined(c_plusplus) +extern "C" { +#endif + +//------------------------------------------------------------------------------ +// SSE2 detection. +// + +// apple/darwin gcc-4.0.1 defines __PIC__, but not __pic__ with -fPIC. +#if (defined(__pic__) || defined(__PIC__)) && defined(__i386__) +static WEBP_INLINE void GetCPUInfo(int cpu_info[4], int info_type) { + __asm__ volatile ( + "mov %%ebx, %%edi\n" + "cpuid\n" + "xchg %%edi, %%ebx\n" + : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3]) + : "a"(info_type)); +} +#elif defined(__i386__) || defined(__x86_64__) +static WEBP_INLINE void GetCPUInfo(int cpu_info[4], int info_type) { + __asm__ volatile ( + "cpuid\n" + : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3]) + : "a"(info_type)); +} +#elif defined(WEBP_MSC_SSE2) +#define GetCPUInfo __cpuid +#endif + +#if defined(__i386__) || defined(__x86_64__) || defined(WEBP_MSC_SSE2) +static int x86CPUInfo(CPUFeature feature) { + int cpu_info[4]; + GetCPUInfo(cpu_info, 1); + if (feature == kSSE2) { + return 0 != (cpu_info[3] & 0x04000000); + } + if (feature == kSSE3) { + return 0 != (cpu_info[2] & 0x00000001); + } + return 0; +} +VP8CPUInfo VP8GetCPUInfo = x86CPUInfo; +#elif defined(WEBP_ANDROID_NEON) +static int AndroidCPUInfo(CPUFeature feature) { + const AndroidCpuFamily cpu_family = android_getCpuFamily(); + const uint64_t cpu_features = android_getCpuFeatures(); + if (feature == kNEON) { + return (cpu_family == ANDROID_CPU_FAMILY_ARM && + 0 != (cpu_features & ANDROID_CPU_ARM_FEATURE_NEON)); + } + return 0; +} +VP8CPUInfo VP8GetCPUInfo = AndroidCPUInfo; +#elif defined(__ARM_NEON__) +// define a dummy function to enable turning off NEON at runtime by setting +// VP8DecGetCPUInfo = NULL +static int armCPUInfo(CPUFeature feature) { + (void)feature; + return 1; +} +VP8CPUInfo VP8GetCPUInfo = armCPUInfo; +#else +VP8CPUInfo VP8GetCPUInfo = NULL; +#endif + +#if defined(__cplusplus) || defined(c_plusplus) +} // extern "C" +#endif diff --git a/drivers/webpold/dsp/dec.c b/drivers/webpold/dsp/dec.c new file mode 100644 index 0000000000..9ae7b6fa76 --- /dev/null +++ b/drivers/webpold/dsp/dec.c @@ -0,0 +1,732 @@ +// Copyright 2010 Google Inc. All Rights Reserved. +// +// This code is licensed under the same terms as WebM: +// Software License Agreement: http://www.webmproject.org/license/software/ +// Additional IP Rights Grant: http://www.webmproject.org/license/additional/ +// ----------------------------------------------------------------------------- +// +// Speed-critical decoding functions. 
+// +// Author: Skal (pascal.massimino@gmail.com) + +#include "./dsp.h" +#include "../dec/vp8i.h" + +#if defined(__cplusplus) || defined(c_plusplus) +extern "C" { +#endif + +//------------------------------------------------------------------------------ +// run-time tables (~4k) + +static uint8_t abs0[255 + 255 + 1]; // abs(i) +static uint8_t abs1[255 + 255 + 1]; // abs(i)>>1 +static int8_t sclip1[1020 + 1020 + 1]; // clips [-1020, 1020] to [-128, 127] +static int8_t sclip2[112 + 112 + 1]; // clips [-112, 112] to [-16, 15] +static uint8_t clip1[255 + 510 + 1]; // clips [-255,510] to [0,255] + +// We declare this variable 'volatile' to prevent instruction reordering +// and make sure it's set to true _last_ (so as to be thread-safe) +static volatile int tables_ok = 0; + +static void DspInitTables(void) { + if (!tables_ok) { + int i; + for (i = -255; i <= 255; ++i) { + abs0[255 + i] = (i < 0) ? -i : i; + abs1[255 + i] = abs0[255 + i] >> 1; + } + for (i = -1020; i <= 1020; ++i) { + sclip1[1020 + i] = (i < -128) ? -128 : (i > 127) ? 127 : i; + } + for (i = -112; i <= 112; ++i) { + sclip2[112 + i] = (i < -16) ? -16 : (i > 15) ? 15 : i; + } + for (i = -255; i <= 255 + 255; ++i) { + clip1[255 + i] = (i < 0) ? 0 : (i > 255) ? 255 : i; + } + tables_ok = 1; + } +} + +static WEBP_INLINE uint8_t clip_8b(int v) { + return (!(v & ~0xff)) ? v : (v < 0) ? 0 : 255; +} + +//------------------------------------------------------------------------------ +// Transforms (Paragraph 14.4) + +#define STORE(x, y, v) \ + dst[x + y * BPS] = clip_8b(dst[x + y * BPS] + ((v) >> 3)) + +static const int kC1 = 20091 + (1 << 16); +static const int kC2 = 35468; +#define MUL(a, b) (((a) * (b)) >> 16) + +static void TransformOne(const int16_t* in, uint8_t* dst) { + int C[4 * 4], *tmp; + int i; + tmp = C; + for (i = 0; i < 4; ++i) { // vertical pass + const int a = in[0] + in[8]; // [-4096, 4094] + const int b = in[0] - in[8]; // [-4095, 4095] + const int c = MUL(in[4], kC2) - MUL(in[12], kC1); // [-3783, 3783] + const int d = MUL(in[4], kC1) + MUL(in[12], kC2); // [-3785, 3781] + tmp[0] = a + d; // [-7881, 7875] + tmp[1] = b + c; // [-7878, 7878] + tmp[2] = b - c; // [-7878, 7878] + tmp[3] = a - d; // [-7877, 7879] + tmp += 4; + in++; + } + // Each pass is expanding the dynamic range by ~3.85 (upper bound). + // The exact value is (2. + (kC1 + kC2) / 65536). + // After the second pass, maximum interval is [-3794, 3794], assuming + // an input in [-2048, 2047] interval. We then need to add a dst value + // in the [0, 255] range. + // In the worst case scenario, the input to clip_8b() can be as large as + // [-60713, 60968]. 
+ tmp = C; + for (i = 0; i < 4; ++i) { // horizontal pass + const int dc = tmp[0] + 4; + const int a = dc + tmp[8]; + const int b = dc - tmp[8]; + const int c = MUL(tmp[4], kC2) - MUL(tmp[12], kC1); + const int d = MUL(tmp[4], kC1) + MUL(tmp[12], kC2); + STORE(0, 0, a + d); + STORE(1, 0, b + c); + STORE(2, 0, b - c); + STORE(3, 0, a - d); + tmp++; + dst += BPS; + } +} +#undef MUL + +static void TransformTwo(const int16_t* in, uint8_t* dst, int do_two) { + TransformOne(in, dst); + if (do_two) { + TransformOne(in + 16, dst + 4); + } +} + +static void TransformUV(const int16_t* in, uint8_t* dst) { + VP8Transform(in + 0 * 16, dst, 1); + VP8Transform(in + 2 * 16, dst + 4 * BPS, 1); +} + +static void TransformDC(const int16_t *in, uint8_t* dst) { + const int DC = in[0] + 4; + int i, j; + for (j = 0; j < 4; ++j) { + for (i = 0; i < 4; ++i) { + STORE(i, j, DC); + } + } +} + +static void TransformDCUV(const int16_t* in, uint8_t* dst) { + if (in[0 * 16]) TransformDC(in + 0 * 16, dst); + if (in[1 * 16]) TransformDC(in + 1 * 16, dst + 4); + if (in[2 * 16]) TransformDC(in + 2 * 16, dst + 4 * BPS); + if (in[3 * 16]) TransformDC(in + 3 * 16, dst + 4 * BPS + 4); +} + +#undef STORE + +//------------------------------------------------------------------------------ +// Paragraph 14.3 + +static void TransformWHT(const int16_t* in, int16_t* out) { + int tmp[16]; + int i; + for (i = 0; i < 4; ++i) { + const int a0 = in[0 + i] + in[12 + i]; + const int a1 = in[4 + i] + in[ 8 + i]; + const int a2 = in[4 + i] - in[ 8 + i]; + const int a3 = in[0 + i] - in[12 + i]; + tmp[0 + i] = a0 + a1; + tmp[8 + i] = a0 - a1; + tmp[4 + i] = a3 + a2; + tmp[12 + i] = a3 - a2; + } + for (i = 0; i < 4; ++i) { + const int dc = tmp[0 + i * 4] + 3; // w/ rounder + const int a0 = dc + tmp[3 + i * 4]; + const int a1 = tmp[1 + i * 4] + tmp[2 + i * 4]; + const int a2 = tmp[1 + i * 4] - tmp[2 + i * 4]; + const int a3 = dc - tmp[3 + i * 4]; + out[ 0] = (a0 + a1) >> 3; + out[16] = (a3 + a2) >> 3; + out[32] = (a0 - a1) >> 3; + out[48] = (a3 - a2) >> 3; + out += 64; + } +} + +void (*VP8TransformWHT)(const int16_t* in, int16_t* out) = TransformWHT; + +//------------------------------------------------------------------------------ +// Intra predictions + +#define DST(x, y) dst[(x) + (y) * BPS] + +static WEBP_INLINE void TrueMotion(uint8_t *dst, int size) { + const uint8_t* top = dst - BPS; + const uint8_t* const clip0 = clip1 + 255 - top[-1]; + int y; + for (y = 0; y < size; ++y) { + const uint8_t* const clip = clip0 + dst[-1]; + int x; + for (x = 0; x < size; ++x) { + dst[x] = clip[top[x]]; + } + dst += BPS; + } +} +static void TM4(uint8_t *dst) { TrueMotion(dst, 4); } +static void TM8uv(uint8_t *dst) { TrueMotion(dst, 8); } +static void TM16(uint8_t *dst) { TrueMotion(dst, 16); } + +//------------------------------------------------------------------------------ +// 16x16 + +static void VE16(uint8_t *dst) { // vertical + int j; + for (j = 0; j < 16; ++j) { + memcpy(dst + j * BPS, dst - BPS, 16); + } +} + +static void HE16(uint8_t *dst) { // horizontal + int j; + for (j = 16; j > 0; --j) { + memset(dst, dst[-1], 16); + dst += BPS; + } +} + +static WEBP_INLINE void Put16(int v, uint8_t* dst) { + int j; + for (j = 0; j < 16; ++j) { + memset(dst + j * BPS, v, 16); + } +} + +static void DC16(uint8_t *dst) { // DC + int DC = 16; + int j; + for (j = 0; j < 16; ++j) { + DC += dst[-1 + j * BPS] + dst[j - BPS]; + } + Put16(DC >> 5, dst); +} + +static void DC16NoTop(uint8_t *dst) { // DC with top samples not available + int DC = 8; + int j; + for (j = 0; 
j < 16; ++j) { + DC += dst[-1 + j * BPS]; + } + Put16(DC >> 4, dst); +} + +static void DC16NoLeft(uint8_t *dst) { // DC with left samples not available + int DC = 8; + int i; + for (i = 0; i < 16; ++i) { + DC += dst[i - BPS]; + } + Put16(DC >> 4, dst); +} + +static void DC16NoTopLeft(uint8_t *dst) { // DC with no top and left samples + Put16(0x80, dst); +} + +//------------------------------------------------------------------------------ +// 4x4 + +#define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2) +#define AVG2(a, b) (((a) + (b) + 1) >> 1) + +static void VE4(uint8_t *dst) { // vertical + const uint8_t* top = dst - BPS; + const uint8_t vals[4] = { + AVG3(top[-1], top[0], top[1]), + AVG3(top[ 0], top[1], top[2]), + AVG3(top[ 1], top[2], top[3]), + AVG3(top[ 2], top[3], top[4]) + }; + int i; + for (i = 0; i < 4; ++i) { + memcpy(dst + i * BPS, vals, sizeof(vals)); + } +} + +static void HE4(uint8_t *dst) { // horizontal + const int A = dst[-1 - BPS]; + const int B = dst[-1]; + const int C = dst[-1 + BPS]; + const int D = dst[-1 + 2 * BPS]; + const int E = dst[-1 + 3 * BPS]; + *(uint32_t*)(dst + 0 * BPS) = 0x01010101U * AVG3(A, B, C); + *(uint32_t*)(dst + 1 * BPS) = 0x01010101U * AVG3(B, C, D); + *(uint32_t*)(dst + 2 * BPS) = 0x01010101U * AVG3(C, D, E); + *(uint32_t*)(dst + 3 * BPS) = 0x01010101U * AVG3(D, E, E); +} + +static void DC4(uint8_t *dst) { // DC + uint32_t dc = 4; + int i; + for (i = 0; i < 4; ++i) dc += dst[i - BPS] + dst[-1 + i * BPS]; + dc >>= 3; + for (i = 0; i < 4; ++i) memset(dst + i * BPS, dc, 4); +} + +static void RD4(uint8_t *dst) { // Down-right + const int I = dst[-1 + 0 * BPS]; + const int J = dst[-1 + 1 * BPS]; + const int K = dst[-1 + 2 * BPS]; + const int L = dst[-1 + 3 * BPS]; + const int X = dst[-1 - BPS]; + const int A = dst[0 - BPS]; + const int B = dst[1 - BPS]; + const int C = dst[2 - BPS]; + const int D = dst[3 - BPS]; + DST(0, 3) = AVG3(J, K, L); + DST(0, 2) = DST(1, 3) = AVG3(I, J, K); + DST(0, 1) = DST(1, 2) = DST(2, 3) = AVG3(X, I, J); + DST(0, 0) = DST(1, 1) = DST(2, 2) = DST(3, 3) = AVG3(A, X, I); + DST(1, 0) = DST(2, 1) = DST(3, 2) = AVG3(B, A, X); + DST(2, 0) = DST(3, 1) = AVG3(C, B, A); + DST(3, 0) = AVG3(D, C, B); +} + +static void LD4(uint8_t *dst) { // Down-Left + const int A = dst[0 - BPS]; + const int B = dst[1 - BPS]; + const int C = dst[2 - BPS]; + const int D = dst[3 - BPS]; + const int E = dst[4 - BPS]; + const int F = dst[5 - BPS]; + const int G = dst[6 - BPS]; + const int H = dst[7 - BPS]; + DST(0, 0) = AVG3(A, B, C); + DST(1, 0) = DST(0, 1) = AVG3(B, C, D); + DST(2, 0) = DST(1, 1) = DST(0, 2) = AVG3(C, D, E); + DST(3, 0) = DST(2, 1) = DST(1, 2) = DST(0, 3) = AVG3(D, E, F); + DST(3, 1) = DST(2, 2) = DST(1, 3) = AVG3(E, F, G); + DST(3, 2) = DST(2, 3) = AVG3(F, G, H); + DST(3, 3) = AVG3(G, H, H); +} + +static void VR4(uint8_t *dst) { // Vertical-Right + const int I = dst[-1 + 0 * BPS]; + const int J = dst[-1 + 1 * BPS]; + const int K = dst[-1 + 2 * BPS]; + const int X = dst[-1 - BPS]; + const int A = dst[0 - BPS]; + const int B = dst[1 - BPS]; + const int C = dst[2 - BPS]; + const int D = dst[3 - BPS]; + DST(0, 0) = DST(1, 2) = AVG2(X, A); + DST(1, 0) = DST(2, 2) = AVG2(A, B); + DST(2, 0) = DST(3, 2) = AVG2(B, C); + DST(3, 0) = AVG2(C, D); + + DST(0, 3) = AVG3(K, J, I); + DST(0, 2) = AVG3(J, I, X); + DST(0, 1) = DST(1, 3) = AVG3(I, X, A); + DST(1, 1) = DST(2, 3) = AVG3(X, A, B); + DST(2, 1) = DST(3, 3) = AVG3(A, B, C); + DST(3, 1) = AVG3(B, C, D); +} + +static void VL4(uint8_t *dst) { // Vertical-Left + const int A = dst[0 - BPS]; + const 
int B = dst[1 - BPS]; + const int C = dst[2 - BPS]; + const int D = dst[3 - BPS]; + const int E = dst[4 - BPS]; + const int F = dst[5 - BPS]; + const int G = dst[6 - BPS]; + const int H = dst[7 - BPS]; + DST(0, 0) = AVG2(A, B); + DST(1, 0) = DST(0, 2) = AVG2(B, C); + DST(2, 0) = DST(1, 2) = AVG2(C, D); + DST(3, 0) = DST(2, 2) = AVG2(D, E); + + DST(0, 1) = AVG3(A, B, C); + DST(1, 1) = DST(0, 3) = AVG3(B, C, D); + DST(2, 1) = DST(1, 3) = AVG3(C, D, E); + DST(3, 1) = DST(2, 3) = AVG3(D, E, F); + DST(3, 2) = AVG3(E, F, G); + DST(3, 3) = AVG3(F, G, H); +} + +static void HU4(uint8_t *dst) { // Horizontal-Up + const int I = dst[-1 + 0 * BPS]; + const int J = dst[-1 + 1 * BPS]; + const int K = dst[-1 + 2 * BPS]; + const int L = dst[-1 + 3 * BPS]; + DST(0, 0) = AVG2(I, J); + DST(2, 0) = DST(0, 1) = AVG2(J, K); + DST(2, 1) = DST(0, 2) = AVG2(K, L); + DST(1, 0) = AVG3(I, J, K); + DST(3, 0) = DST(1, 1) = AVG3(J, K, L); + DST(3, 1) = DST(1, 2) = AVG3(K, L, L); + DST(3, 2) = DST(2, 2) = + DST(0, 3) = DST(1, 3) = DST(2, 3) = DST(3, 3) = L; +} + +static void HD4(uint8_t *dst) { // Horizontal-Down + const int I = dst[-1 + 0 * BPS]; + const int J = dst[-1 + 1 * BPS]; + const int K = dst[-1 + 2 * BPS]; + const int L = dst[-1 + 3 * BPS]; + const int X = dst[-1 - BPS]; + const int A = dst[0 - BPS]; + const int B = dst[1 - BPS]; + const int C = dst[2 - BPS]; + + DST(0, 0) = DST(2, 1) = AVG2(I, X); + DST(0, 1) = DST(2, 2) = AVG2(J, I); + DST(0, 2) = DST(2, 3) = AVG2(K, J); + DST(0, 3) = AVG2(L, K); + + DST(3, 0) = AVG3(A, B, C); + DST(2, 0) = AVG3(X, A, B); + DST(1, 0) = DST(3, 1) = AVG3(I, X, A); + DST(1, 1) = DST(3, 2) = AVG3(J, I, X); + DST(1, 2) = DST(3, 3) = AVG3(K, J, I); + DST(1, 3) = AVG3(L, K, J); +} + +#undef DST +#undef AVG3 +#undef AVG2 + +//------------------------------------------------------------------------------ +// Chroma + +static void VE8uv(uint8_t *dst) { // vertical + int j; + for (j = 0; j < 8; ++j) { + memcpy(dst + j * BPS, dst - BPS, 8); + } +} + +static void HE8uv(uint8_t *dst) { // horizontal + int j; + for (j = 0; j < 8; ++j) { + memset(dst, dst[-1], 8); + dst += BPS; + } +} + +// helper for chroma-DC predictions +static WEBP_INLINE void Put8x8uv(uint64_t v, uint8_t* dst) { + int j; + for (j = 0; j < 8; ++j) { + *(uint64_t*)(dst + j * BPS) = v; + } +} + +static void DC8uv(uint8_t *dst) { // DC + int dc0 = 8; + int i; + for (i = 0; i < 8; ++i) { + dc0 += dst[i - BPS] + dst[-1 + i * BPS]; + } + Put8x8uv((uint64_t)((dc0 >> 4) * 0x0101010101010101ULL), dst); +} + +static void DC8uvNoLeft(uint8_t *dst) { // DC with no left samples + int dc0 = 4; + int i; + for (i = 0; i < 8; ++i) { + dc0 += dst[i - BPS]; + } + Put8x8uv((uint64_t)((dc0 >> 3) * 0x0101010101010101ULL), dst); +} + +static void DC8uvNoTop(uint8_t *dst) { // DC with no top samples + int dc0 = 4; + int i; + for (i = 0; i < 8; ++i) { + dc0 += dst[-1 + i * BPS]; + } + Put8x8uv((uint64_t)((dc0 >> 3) * 0x0101010101010101ULL), dst); +} + +static void DC8uvNoTopLeft(uint8_t *dst) { // DC with nothing + Put8x8uv(0x8080808080808080ULL, dst); +} + +//------------------------------------------------------------------------------ +// default C implementations + +const VP8PredFunc VP8PredLuma4[NUM_BMODES] = { + DC4, TM4, VE4, HE4, RD4, VR4, LD4, VL4, HD4, HU4 +}; + +const VP8PredFunc VP8PredLuma16[NUM_B_DC_MODES] = { + DC16, TM16, VE16, HE16, + DC16NoTop, DC16NoLeft, DC16NoTopLeft +}; + +const VP8PredFunc VP8PredChroma8[NUM_B_DC_MODES] = { + DC8uv, TM8uv, VE8uv, HE8uv, + DC8uvNoTop, DC8uvNoLeft, DC8uvNoTopLeft +}; + 
+//------------------------------------------------------------------------------ +// Edge filtering functions + +// 4 pixels in, 2 pixels out +static WEBP_INLINE void do_filter2(uint8_t* p, int step) { + const int p1 = p[-2*step], p0 = p[-step], q0 = p[0], q1 = p[step]; + const int a = 3 * (q0 - p0) + sclip1[1020 + p1 - q1]; + const int a1 = sclip2[112 + ((a + 4) >> 3)]; + const int a2 = sclip2[112 + ((a + 3) >> 3)]; + p[-step] = clip1[255 + p0 + a2]; + p[ 0] = clip1[255 + q0 - a1]; +} + +// 4 pixels in, 4 pixels out +static WEBP_INLINE void do_filter4(uint8_t* p, int step) { + const int p1 = p[-2*step], p0 = p[-step], q0 = p[0], q1 = p[step]; + const int a = 3 * (q0 - p0); + const int a1 = sclip2[112 + ((a + 4) >> 3)]; + const int a2 = sclip2[112 + ((a + 3) >> 3)]; + const int a3 = (a1 + 1) >> 1; + p[-2*step] = clip1[255 + p1 + a3]; + p[- step] = clip1[255 + p0 + a2]; + p[ 0] = clip1[255 + q0 - a1]; + p[ step] = clip1[255 + q1 - a3]; +} + +// 6 pixels in, 6 pixels out +static WEBP_INLINE void do_filter6(uint8_t* p, int step) { + const int p2 = p[-3*step], p1 = p[-2*step], p0 = p[-step]; + const int q0 = p[0], q1 = p[step], q2 = p[2*step]; + const int a = sclip1[1020 + 3 * (q0 - p0) + sclip1[1020 + p1 - q1]]; + const int a1 = (27 * a + 63) >> 7; // eq. to ((3 * a + 7) * 9) >> 7 + const int a2 = (18 * a + 63) >> 7; // eq. to ((2 * a + 7) * 9) >> 7 + const int a3 = (9 * a + 63) >> 7; // eq. to ((1 * a + 7) * 9) >> 7 + p[-3*step] = clip1[255 + p2 + a3]; + p[-2*step] = clip1[255 + p1 + a2]; + p[- step] = clip1[255 + p0 + a1]; + p[ 0] = clip1[255 + q0 - a1]; + p[ step] = clip1[255 + q1 - a2]; + p[ 2*step] = clip1[255 + q2 - a3]; +} + +static WEBP_INLINE int hev(const uint8_t* p, int step, int thresh) { + const int p1 = p[-2*step], p0 = p[-step], q0 = p[0], q1 = p[step]; + return (abs0[255 + p1 - p0] > thresh) || (abs0[255 + q1 - q0] > thresh); +} + +static WEBP_INLINE int needs_filter(const uint8_t* p, int step, int thresh) { + const int p1 = p[-2*step], p0 = p[-step], q0 = p[0], q1 = p[step]; + return (2 * abs0[255 + p0 - q0] + abs1[255 + p1 - q1]) <= thresh; +} + +static WEBP_INLINE int needs_filter2(const uint8_t* p, + int step, int t, int it) { + const int p3 = p[-4*step], p2 = p[-3*step], p1 = p[-2*step], p0 = p[-step]; + const int q0 = p[0], q1 = p[step], q2 = p[2*step], q3 = p[3*step]; + if ((2 * abs0[255 + p0 - q0] + abs1[255 + p1 - q1]) > t) + return 0; + return abs0[255 + p3 - p2] <= it && abs0[255 + p2 - p1] <= it && + abs0[255 + p1 - p0] <= it && abs0[255 + q3 - q2] <= it && + abs0[255 + q2 - q1] <= it && abs0[255 + q1 - q0] <= it; +} + +//------------------------------------------------------------------------------ +// Simple In-loop filtering (Paragraph 15.2) + +static void SimpleVFilter16(uint8_t* p, int stride, int thresh) { + int i; + for (i = 0; i < 16; ++i) { + if (needs_filter(p + i, stride, thresh)) { + do_filter2(p + i, stride); + } + } +} + +static void SimpleHFilter16(uint8_t* p, int stride, int thresh) { + int i; + for (i = 0; i < 16; ++i) { + if (needs_filter(p + i * stride, 1, thresh)) { + do_filter2(p + i * stride, 1); + } + } +} + +static void SimpleVFilter16i(uint8_t* p, int stride, int thresh) { + int k; + for (k = 3; k > 0; --k) { + p += 4 * stride; + SimpleVFilter16(p, stride, thresh); + } +} + +static void SimpleHFilter16i(uint8_t* p, int stride, int thresh) { + int k; + for (k = 3; k > 0; --k) { + p += 4; + SimpleHFilter16(p, stride, thresh); + } +} + +//------------------------------------------------------------------------------ +// Complex In-loop 
filtering (Paragraph 15.3) + +static WEBP_INLINE void FilterLoop26(uint8_t* p, + int hstride, int vstride, int size, + int thresh, int ithresh, int hev_thresh) { + while (size-- > 0) { + if (needs_filter2(p, hstride, thresh, ithresh)) { + if (hev(p, hstride, hev_thresh)) { + do_filter2(p, hstride); + } else { + do_filter6(p, hstride); + } + } + p += vstride; + } +} + +static WEBP_INLINE void FilterLoop24(uint8_t* p, + int hstride, int vstride, int size, + int thresh, int ithresh, int hev_thresh) { + while (size-- > 0) { + if (needs_filter2(p, hstride, thresh, ithresh)) { + if (hev(p, hstride, hev_thresh)) { + do_filter2(p, hstride); + } else { + do_filter4(p, hstride); + } + } + p += vstride; + } +} + +// on macroblock edges +static void VFilter16(uint8_t* p, int stride, + int thresh, int ithresh, int hev_thresh) { + FilterLoop26(p, stride, 1, 16, thresh, ithresh, hev_thresh); +} + +static void HFilter16(uint8_t* p, int stride, + int thresh, int ithresh, int hev_thresh) { + FilterLoop26(p, 1, stride, 16, thresh, ithresh, hev_thresh); +} + +// on three inner edges +static void VFilter16i(uint8_t* p, int stride, + int thresh, int ithresh, int hev_thresh) { + int k; + for (k = 3; k > 0; --k) { + p += 4 * stride; + FilterLoop24(p, stride, 1, 16, thresh, ithresh, hev_thresh); + } +} + +static void HFilter16i(uint8_t* p, int stride, + int thresh, int ithresh, int hev_thresh) { + int k; + for (k = 3; k > 0; --k) { + p += 4; + FilterLoop24(p, 1, stride, 16, thresh, ithresh, hev_thresh); + } +} + +// 8-pixels wide variant, for chroma filtering +static void VFilter8(uint8_t* u, uint8_t* v, int stride, + int thresh, int ithresh, int hev_thresh) { + FilterLoop26(u, stride, 1, 8, thresh, ithresh, hev_thresh); + FilterLoop26(v, stride, 1, 8, thresh, ithresh, hev_thresh); +} + +static void HFilter8(uint8_t* u, uint8_t* v, int stride, + int thresh, int ithresh, int hev_thresh) { + FilterLoop26(u, 1, stride, 8, thresh, ithresh, hev_thresh); + FilterLoop26(v, 1, stride, 8, thresh, ithresh, hev_thresh); +} + +static void VFilter8i(uint8_t* u, uint8_t* v, int stride, + int thresh, int ithresh, int hev_thresh) { + FilterLoop24(u + 4 * stride, stride, 1, 8, thresh, ithresh, hev_thresh); + FilterLoop24(v + 4 * stride, stride, 1, 8, thresh, ithresh, hev_thresh); +} + +static void HFilter8i(uint8_t* u, uint8_t* v, int stride, + int thresh, int ithresh, int hev_thresh) { + FilterLoop24(u + 4, 1, stride, 8, thresh, ithresh, hev_thresh); + FilterLoop24(v + 4, 1, stride, 8, thresh, ithresh, hev_thresh); +} + +//------------------------------------------------------------------------------ + +VP8DecIdct2 VP8Transform; +VP8DecIdct VP8TransformUV; +VP8DecIdct VP8TransformDC; +VP8DecIdct VP8TransformDCUV; + +VP8LumaFilterFunc VP8VFilter16; +VP8LumaFilterFunc VP8HFilter16; +VP8ChromaFilterFunc VP8VFilter8; +VP8ChromaFilterFunc VP8HFilter8; +VP8LumaFilterFunc VP8VFilter16i; +VP8LumaFilterFunc VP8HFilter16i; +VP8ChromaFilterFunc VP8VFilter8i; +VP8ChromaFilterFunc VP8HFilter8i; +VP8SimpleFilterFunc VP8SimpleVFilter16; +VP8SimpleFilterFunc VP8SimpleHFilter16; +VP8SimpleFilterFunc VP8SimpleVFilter16i; +VP8SimpleFilterFunc VP8SimpleHFilter16i; + +extern void VP8DspInitSSE2(void); +extern void VP8DspInitNEON(void); + +void VP8DspInit(void) { + DspInitTables(); + + VP8Transform = TransformTwo; + VP8TransformUV = TransformUV; + VP8TransformDC = TransformDC; + VP8TransformDCUV = TransformDCUV; + + VP8VFilter16 = VFilter16; + VP8HFilter16 = HFilter16; + VP8VFilter8 = VFilter8; + VP8HFilter8 = HFilter8; + VP8VFilter16i = VFilter16i; + 
VP8HFilter16i = HFilter16i; + VP8VFilter8i = VFilter8i; + VP8HFilter8i = HFilter8i; + VP8SimpleVFilter16 = SimpleVFilter16; + VP8SimpleHFilter16 = SimpleHFilter16; + VP8SimpleVFilter16i = SimpleVFilter16i; + VP8SimpleHFilter16i = SimpleHFilter16i; + + // If defined, use CPUInfo() to overwrite some pointers with faster versions. + if (VP8GetCPUInfo) { +#if defined(WEBP_USE_SSE2) + if (VP8GetCPUInfo(kSSE2)) { + VP8DspInitSSE2(); + } +#elif defined(WEBP_USE_NEON) + if (VP8GetCPUInfo(kNEON)) { + VP8DspInitNEON(); + } +#endif + } +} + +#if defined(__cplusplus) || defined(c_plusplus) +} // extern "C" +#endif diff --git a/drivers/webpold/dsp/dec_neon.c b/drivers/webpold/dsp/dec_neon.c new file mode 100644 index 0000000000..ec824b790b --- /dev/null +++ b/drivers/webpold/dsp/dec_neon.c @@ -0,0 +1,329 @@ +// Copyright 2012 Google Inc. All Rights Reserved. +// +// This code is licensed under the same terms as WebM: +// Software License Agreement: http://www.webmproject.org/license/software/ +// Additional IP Rights Grant: http://www.webmproject.org/license/additional/ +// ----------------------------------------------------------------------------- +// +// ARM NEON version of dsp functions and loop filtering. +// +// Authors: Somnath Banerjee (somnath@google.com) +// Johann Koenig (johannkoenig@google.com) + +#include "./dsp.h" + +#if defined(WEBP_USE_NEON) + +#include "../dec/vp8i.h" + +#if defined(__cplusplus) || defined(c_plusplus) +extern "C" { +#endif + +#define QRegs "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", \ + "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" + +#define FLIP_SIGN_BIT2(a, b, s) \ + "veor " #a "," #a "," #s " \n" \ + "veor " #b "," #b "," #s " \n" \ + +#define FLIP_SIGN_BIT4(a, b, c, d, s) \ + FLIP_SIGN_BIT2(a, b, s) \ + FLIP_SIGN_BIT2(c, d, s) \ + +#define NEEDS_FILTER(p1, p0, q0, q1, thresh, mask) \ + "vabd.u8 q15," #p0 "," #q0 " \n" /* abs(p0 - q0) */ \ + "vabd.u8 q14," #p1 "," #q1 " \n" /* abs(p1 - q1) */ \ + "vqadd.u8 q15, q15, q15 \n" /* abs(p0 - q0) * 2 */ \ + "vshr.u8 q14, q14, #1 \n" /* abs(p1 - q1) / 2 */ \ + "vqadd.u8 q15, q15, q14 \n" /* abs(p0 - q0) * 2 + abs(p1 - q1) / 2 */ \ + "vdup.8 q14, " #thresh " \n" \ + "vcge.u8 " #mask ", q14, q15 \n" /* mask <= thresh */ + +#define GET_BASE_DELTA(p1, p0, q0, q1, o) \ + "vqsub.s8 q15," #q0 "," #p0 " \n" /* (q0 - p0) */ \ + "vqsub.s8 " #o "," #p1 "," #q1 " \n" /* (p1 - q1) */ \ + "vqadd.s8 " #o "," #o ", q15 \n" /* (p1 - q1) + 1 * (p0 - q0) */ \ + "vqadd.s8 " #o "," #o ", q15 \n" /* (p1 - q1) + 2 * (p0 - q0) */ \ + "vqadd.s8 " #o "," #o ", q15 \n" /* (p1 - q1) + 3 * (p0 - q0) */ + +#define DO_SIMPLE_FILTER(p0, q0, fl) \ + "vmov.i8 q15, #0x03 \n" \ + "vqadd.s8 q15, q15, " #fl " \n" /* filter1 = filter + 3 */ \ + "vshr.s8 q15, q15, #3 \n" /* filter1 >> 3 */ \ + "vqadd.s8 " #p0 "," #p0 ", q15 \n" /* p0 += filter1 */ \ + \ + "vmov.i8 q15, #0x04 \n" \ + "vqadd.s8 q15, q15, " #fl " \n" /* filter1 = filter + 4 */ \ + "vshr.s8 q15, q15, #3 \n" /* filter2 >> 3 */ \ + "vqsub.s8 " #q0 "," #q0 ", q15 \n" /* q0 -= filter2 */ + +// Applies filter on 2 pixels (p0 and q0) +#define DO_FILTER2(p1, p0, q0, q1, thresh) \ + NEEDS_FILTER(p1, p0, q0, q1, thresh, q9) /* filter mask in q9 */ \ + "vmov.i8 q10, #0x80 \n" /* sign bit */ \ + FLIP_SIGN_BIT4(p1, p0, q0, q1, q10) /* convert to signed value */ \ + GET_BASE_DELTA(p1, p0, q0, q1, q11) /* get filter level */ \ + "vand q9, q9, q11 \n" /* apply filter mask */ \ + DO_SIMPLE_FILTER(p0, q0, q9) /* apply filter */ \ + FLIP_SIGN_BIT2(p0, q0, q10) + +// Load/Store vertical edge +#define 
LOAD8x4(c1, c2, c3, c4, b1, b2, stride) \ + "vld4.8 {" #c1"[0], " #c2"[0], " #c3"[0], " #c4"[0]}," #b1 "," #stride"\n" \ + "vld4.8 {" #c1"[1], " #c2"[1], " #c3"[1], " #c4"[1]}," #b2 "," #stride"\n" \ + "vld4.8 {" #c1"[2], " #c2"[2], " #c3"[2], " #c4"[2]}," #b1 "," #stride"\n" \ + "vld4.8 {" #c1"[3], " #c2"[3], " #c3"[3], " #c4"[3]}," #b2 "," #stride"\n" \ + "vld4.8 {" #c1"[4], " #c2"[4], " #c3"[4], " #c4"[4]}," #b1 "," #stride"\n" \ + "vld4.8 {" #c1"[5], " #c2"[5], " #c3"[5], " #c4"[5]}," #b2 "," #stride"\n" \ + "vld4.8 {" #c1"[6], " #c2"[6], " #c3"[6], " #c4"[6]}," #b1 "," #stride"\n" \ + "vld4.8 {" #c1"[7], " #c2"[7], " #c3"[7], " #c4"[7]}," #b2 "," #stride"\n" + +#define STORE8x2(c1, c2, p,stride) \ + "vst2.8 {" #c1"[0], " #c2"[0]}," #p "," #stride " \n" \ + "vst2.8 {" #c1"[1], " #c2"[1]}," #p "," #stride " \n" \ + "vst2.8 {" #c1"[2], " #c2"[2]}," #p "," #stride " \n" \ + "vst2.8 {" #c1"[3], " #c2"[3]}," #p "," #stride " \n" \ + "vst2.8 {" #c1"[4], " #c2"[4]}," #p "," #stride " \n" \ + "vst2.8 {" #c1"[5], " #c2"[5]}," #p "," #stride " \n" \ + "vst2.8 {" #c1"[6], " #c2"[6]}," #p "," #stride " \n" \ + "vst2.8 {" #c1"[7], " #c2"[7]}," #p "," #stride " \n" + +//----------------------------------------------------------------------------- +// Simple In-loop filtering (Paragraph 15.2) + +static void SimpleVFilter16NEON(uint8_t* p, int stride, int thresh) { + __asm__ volatile ( + "sub %[p], %[p], %[stride], lsl #1 \n" // p -= 2 * stride + + "vld1.u8 {q1}, [%[p]], %[stride] \n" // p1 + "vld1.u8 {q2}, [%[p]], %[stride] \n" // p0 + "vld1.u8 {q3}, [%[p]], %[stride] \n" // q0 + "vld1.u8 {q4}, [%[p]] \n" // q1 + + DO_FILTER2(q1, q2, q3, q4, %[thresh]) + + "sub %[p], %[p], %[stride], lsl #1 \n" // p -= 2 * stride + + "vst1.u8 {q2}, [%[p]], %[stride] \n" // store op0 + "vst1.u8 {q3}, [%[p]] \n" // store oq0 + : [p] "+r"(p) + : [stride] "r"(stride), [thresh] "r"(thresh) + : "memory", QRegs + ); +} + +static void SimpleHFilter16NEON(uint8_t* p, int stride, int thresh) { + __asm__ volatile ( + "sub r4, %[p], #2 \n" // base1 = p - 2 + "lsl r6, %[stride], #1 \n" // r6 = 2 * stride + "add r5, r4, %[stride] \n" // base2 = base1 + stride + + LOAD8x4(d2, d3, d4, d5, [r4], [r5], r6) + LOAD8x4(d6, d7, d8, d9, [r4], [r5], r6) + "vswp d3, d6 \n" // p1:q1 p0:q3 + "vswp d5, d8 \n" // q0:q2 q1:q4 + "vswp q2, q3 \n" // p1:q1 p0:q2 q0:q3 q1:q4 + + DO_FILTER2(q1, q2, q3, q4, %[thresh]) + + "sub %[p], %[p], #1 \n" // p - 1 + + "vswp d5, d6 \n" + STORE8x2(d4, d5, [%[p]], %[stride]) + STORE8x2(d6, d7, [%[p]], %[stride]) + + : [p] "+r"(p) + : [stride] "r"(stride), [thresh] "r"(thresh) + : "memory", "r4", "r5", "r6", QRegs + ); +} + +static void SimpleVFilter16iNEON(uint8_t* p, int stride, int thresh) { + int k; + for (k = 3; k > 0; --k) { + p += 4 * stride; + SimpleVFilter16NEON(p, stride, thresh); + } +} + +static void SimpleHFilter16iNEON(uint8_t* p, int stride, int thresh) { + int k; + for (k = 3; k > 0; --k) { + p += 4; + SimpleHFilter16NEON(p, stride, thresh); + } +} + +static void TransformOneNEON(const int16_t *in, uint8_t *dst) { + const int kBPS = BPS; + const int16_t constants[] = {20091, 17734, 0, 0}; + /* kC1, kC2. Padded because vld1.16 loads 8 bytes + * Technically these are unsigned but vqdmulh is only available in signed. + * vqdmulh returns high half (effectively >> 16) but also doubles the value, + * changing the >> 16 to >> 15 and requiring an additional >> 1. + * We use this to our advantage with kC2. The canonical value is 35468. 
+ * However, the high bit is set so treating it as signed will give incorrect + * results. We avoid this by down shifting by 1 here to clear the highest bit. + * Combined with the doubling effect of vqdmulh we get >> 16. + * This can not be applied to kC1 because the lowest bit is set. Down shifting + * the constant would reduce precision. + */ + + /* libwebp uses a trick to avoid some extra addition that libvpx does. + * Instead of: + * temp2 = ip[12] + ((ip[12] * cospi8sqrt2minus1) >> 16); + * libwebp adds 1 << 16 to cospi8sqrt2minus1 (kC1). However, this causes the + * same issue with kC1 and vqdmulh that we work around by down shifting kC2 + */ + + /* Adapted from libvpx: vp8/common/arm/neon/shortidct4x4llm_neon.asm */ + __asm__ volatile ( + "vld1.16 {q1, q2}, [%[in]] \n" + "vld1.16 {d0}, [%[constants]] \n" + + /* d2: in[0] + * d3: in[8] + * d4: in[4] + * d5: in[12] + */ + "vswp d3, d4 \n" + + /* q8 = {in[4], in[12]} * kC1 * 2 >> 16 + * q9 = {in[4], in[12]} * kC2 >> 16 + */ + "vqdmulh.s16 q8, q2, d0[0] \n" + "vqdmulh.s16 q9, q2, d0[1] \n" + + /* d22 = a = in[0] + in[8] + * d23 = b = in[0] - in[8] + */ + "vqadd.s16 d22, d2, d3 \n" + "vqsub.s16 d23, d2, d3 \n" + + /* The multiplication should be x * kC1 >> 16 + * However, with vqdmulh we get x * kC1 * 2 >> 16 + * (multiply, double, return high half) + * We avoided this in kC2 by pre-shifting the constant. + * q8 = in[4]/[12] * kC1 >> 16 + */ + "vshr.s16 q8, q8, #1 \n" + + /* Add {in[4], in[12]} back after the multiplication. This is handled by + * adding 1 << 16 to kC1 in the libwebp C code. + */ + "vqadd.s16 q8, q2, q8 \n" + + /* d20 = c = in[4]*kC2 - in[12]*kC1 + * d21 = d = in[4]*kC1 + in[12]*kC2 + */ + "vqsub.s16 d20, d18, d17 \n" + "vqadd.s16 d21, d19, d16 \n" + + /* d2 = tmp[0] = a + d + * d3 = tmp[1] = b + c + * d4 = tmp[2] = b - c + * d5 = tmp[3] = a - d + */ + "vqadd.s16 d2, d22, d21 \n" + "vqadd.s16 d3, d23, d20 \n" + "vqsub.s16 d4, d23, d20 \n" + "vqsub.s16 d5, d22, d21 \n" + + "vzip.16 q1, q2 \n" + "vzip.16 q1, q2 \n" + + "vswp d3, d4 \n" + + /* q8 = {tmp[4], tmp[12]} * kC1 * 2 >> 16 + * q9 = {tmp[4], tmp[12]} * kC2 >> 16 + */ + "vqdmulh.s16 q8, q2, d0[0] \n" + "vqdmulh.s16 q9, q2, d0[1] \n" + + /* d22 = a = tmp[0] + tmp[8] + * d23 = b = tmp[0] - tmp[8] + */ + "vqadd.s16 d22, d2, d3 \n" + "vqsub.s16 d23, d2, d3 \n" + + /* See long winded explanations prior */ + "vshr.s16 q8, q8, #1 \n" + "vqadd.s16 q8, q2, q8 \n" + + /* d20 = c = in[4]*kC2 - in[12]*kC1 + * d21 = d = in[4]*kC1 + in[12]*kC2 + */ + "vqsub.s16 d20, d18, d17 \n" + "vqadd.s16 d21, d19, d16 \n" + + /* d2 = tmp[0] = a + d + * d3 = tmp[1] = b + c + * d4 = tmp[2] = b - c + * d5 = tmp[3] = a - d + */ + "vqadd.s16 d2, d22, d21 \n" + "vqadd.s16 d3, d23, d20 \n" + "vqsub.s16 d4, d23, d20 \n" + "vqsub.s16 d5, d22, d21 \n" + + "vld1.32 d6[0], [%[dst]], %[kBPS] \n" + "vld1.32 d6[1], [%[dst]], %[kBPS] \n" + "vld1.32 d7[0], [%[dst]], %[kBPS] \n" + "vld1.32 d7[1], [%[dst]], %[kBPS] \n" + + "sub %[dst], %[dst], %[kBPS], lsl #2 \n" + + /* (val) + 4 >> 3 */ + "vrshr.s16 d2, d2, #3 \n" + "vrshr.s16 d3, d3, #3 \n" + "vrshr.s16 d4, d4, #3 \n" + "vrshr.s16 d5, d5, #3 \n" + + "vzip.16 q1, q2 \n" + "vzip.16 q1, q2 \n" + + /* Must accumulate before saturating */ + "vmovl.u8 q8, d6 \n" + "vmovl.u8 q9, d7 \n" + + "vqadd.s16 q1, q1, q8 \n" + "vqadd.s16 q2, q2, q9 \n" + + "vqmovun.s16 d0, q1 \n" + "vqmovun.s16 d1, q2 \n" + + "vst1.32 d0[0], [%[dst]], %[kBPS] \n" + "vst1.32 d0[1], [%[dst]], %[kBPS] \n" + "vst1.32 d1[0], [%[dst]], %[kBPS] \n" + "vst1.32 d1[1], [%[dst]] \n" + + : [in] "+r"(in), 
[dst] "+r"(dst) /* modified registers */ + : [kBPS] "r"(kBPS), [constants] "r"(constants) /* constants */ + : "memory", "q0", "q1", "q2", "q8", "q9", "q10", "q11" /* clobbered */ + ); +} + +static void TransformTwoNEON(const int16_t* in, uint8_t* dst, int do_two) { + TransformOneNEON(in, dst); + if (do_two) { + TransformOneNEON(in + 16, dst + 4); + } +} + +extern void VP8DspInitNEON(void); + +void VP8DspInitNEON(void) { + VP8Transform = TransformTwoNEON; + + VP8SimpleVFilter16 = SimpleVFilter16NEON; + VP8SimpleHFilter16 = SimpleHFilter16NEON; + VP8SimpleVFilter16i = SimpleVFilter16iNEON; + VP8SimpleHFilter16i = SimpleHFilter16iNEON; +} + +#if defined(__cplusplus) || defined(c_plusplus) +} // extern "C" +#endif + +#endif // WEBP_USE_NEON diff --git a/drivers/webpold/dsp/dec_sse2.c b/drivers/webpold/dsp/dec_sse2.c new file mode 100644 index 0000000000..472b68ecb8 --- /dev/null +++ b/drivers/webpold/dsp/dec_sse2.c @@ -0,0 +1,903 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// +// This code is licensed under the same terms as WebM: +// Software License Agreement: http://www.webmproject.org/license/software/ +// Additional IP Rights Grant: http://www.webmproject.org/license/additional/ +// ----------------------------------------------------------------------------- +// +// SSE2 version of some decoding functions (idct, loop filtering). +// +// Author: somnath@google.com (Somnath Banerjee) +// cduvivier@google.com (Christian Duvivier) + +#include "./dsp.h" + +#if defined(WEBP_USE_SSE2) + +#include <emmintrin.h> +#include "../dec/vp8i.h" + +#if defined(__cplusplus) || defined(c_plusplus) +extern "C" { +#endif + +//------------------------------------------------------------------------------ +// Transforms (Paragraph 14.4) + +static void TransformSSE2(const int16_t* in, uint8_t* dst, int do_two) { + // This implementation makes use of 16-bit fixed point versions of two + // multiply constants: + // K1 = sqrt(2) * cos (pi/8) ~= 85627 / 2^16 + // K2 = sqrt(2) * sin (pi/8) ~= 35468 / 2^16 + // + // To be able to use signed 16-bit integers, we use the following trick to + // have constants within range: + // - Associated constants are obtained by subtracting the 16-bit fixed point + // version of one: + // k = K - (1 << 16) => K = k + (1 << 16) + // K1 = 85267 => k1 = 20091 + // K2 = 35468 => k2 = -30068 + // - The multiplication of a variable by a constant become the sum of the + // variable and the multiplication of that variable by the associated + // constant: + // (x * K) >> 16 = (x * (k + (1 << 16))) >> 16 = ((x * k ) >> 16) + x + const __m128i k1 = _mm_set1_epi16(20091); + const __m128i k2 = _mm_set1_epi16(-30068); + __m128i T0, T1, T2, T3; + + // Load and concatenate the transform coefficients (we'll do two transforms + // in parallel). In the case of only one transform, the second half of the + // vectors will just contain random value we'll never use nor store. 
+ __m128i in0, in1, in2, in3; + { + in0 = _mm_loadl_epi64((__m128i*)&in[0]); + in1 = _mm_loadl_epi64((__m128i*)&in[4]); + in2 = _mm_loadl_epi64((__m128i*)&in[8]); + in3 = _mm_loadl_epi64((__m128i*)&in[12]); + // a00 a10 a20 a30 x x x x + // a01 a11 a21 a31 x x x x + // a02 a12 a22 a32 x x x x + // a03 a13 a23 a33 x x x x + if (do_two) { + const __m128i inB0 = _mm_loadl_epi64((__m128i*)&in[16]); + const __m128i inB1 = _mm_loadl_epi64((__m128i*)&in[20]); + const __m128i inB2 = _mm_loadl_epi64((__m128i*)&in[24]); + const __m128i inB3 = _mm_loadl_epi64((__m128i*)&in[28]); + in0 = _mm_unpacklo_epi64(in0, inB0); + in1 = _mm_unpacklo_epi64(in1, inB1); + in2 = _mm_unpacklo_epi64(in2, inB2); + in3 = _mm_unpacklo_epi64(in3, inB3); + // a00 a10 a20 a30 b00 b10 b20 b30 + // a01 a11 a21 a31 b01 b11 b21 b31 + // a02 a12 a22 a32 b02 b12 b22 b32 + // a03 a13 a23 a33 b03 b13 b23 b33 + } + } + + // Vertical pass and subsequent transpose. + { + // First pass, c and d calculations are longer because of the "trick" + // multiplications. + const __m128i a = _mm_add_epi16(in0, in2); + const __m128i b = _mm_sub_epi16(in0, in2); + // c = MUL(in1, K2) - MUL(in3, K1) = MUL(in1, k2) - MUL(in3, k1) + in1 - in3 + const __m128i c1 = _mm_mulhi_epi16(in1, k2); + const __m128i c2 = _mm_mulhi_epi16(in3, k1); + const __m128i c3 = _mm_sub_epi16(in1, in3); + const __m128i c4 = _mm_sub_epi16(c1, c2); + const __m128i c = _mm_add_epi16(c3, c4); + // d = MUL(in1, K1) + MUL(in3, K2) = MUL(in1, k1) + MUL(in3, k2) + in1 + in3 + const __m128i d1 = _mm_mulhi_epi16(in1, k1); + const __m128i d2 = _mm_mulhi_epi16(in3, k2); + const __m128i d3 = _mm_add_epi16(in1, in3); + const __m128i d4 = _mm_add_epi16(d1, d2); + const __m128i d = _mm_add_epi16(d3, d4); + + // Second pass. + const __m128i tmp0 = _mm_add_epi16(a, d); + const __m128i tmp1 = _mm_add_epi16(b, c); + const __m128i tmp2 = _mm_sub_epi16(b, c); + const __m128i tmp3 = _mm_sub_epi16(a, d); + + // Transpose the two 4x4. + // a00 a01 a02 a03 b00 b01 b02 b03 + // a10 a11 a12 a13 b10 b11 b12 b13 + // a20 a21 a22 a23 b20 b21 b22 b23 + // a30 a31 a32 a33 b30 b31 b32 b33 + const __m128i transpose0_0 = _mm_unpacklo_epi16(tmp0, tmp1); + const __m128i transpose0_1 = _mm_unpacklo_epi16(tmp2, tmp3); + const __m128i transpose0_2 = _mm_unpackhi_epi16(tmp0, tmp1); + const __m128i transpose0_3 = _mm_unpackhi_epi16(tmp2, tmp3); + // a00 a10 a01 a11 a02 a12 a03 a13 + // a20 a30 a21 a31 a22 a32 a23 a33 + // b00 b10 b01 b11 b02 b12 b03 b13 + // b20 b30 b21 b31 b22 b32 b23 b33 + const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1); + const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3); + const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1); + const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3); + // a00 a10 a20 a30 a01 a11 a21 a31 + // b00 b10 b20 b30 b01 b11 b21 b31 + // a02 a12 a22 a32 a03 a13 a23 a33 + // b02 b12 a22 b32 b03 b13 b23 b33 + T0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1); + T1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1); + T2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3); + T3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3); + // a00 a10 a20 a30 b00 b10 b20 b30 + // a01 a11 a21 a31 b01 b11 b21 b31 + // a02 a12 a22 a32 b02 b12 b22 b32 + // a03 a13 a23 a33 b03 b13 b23 b33 + } + + // Horizontal pass and subsequent transpose. + { + // First pass, c and d calculations are longer because of the "trick" + // multiplications. 
+ const __m128i four = _mm_set1_epi16(4); + const __m128i dc = _mm_add_epi16(T0, four); + const __m128i a = _mm_add_epi16(dc, T2); + const __m128i b = _mm_sub_epi16(dc, T2); + // c = MUL(T1, K2) - MUL(T3, K1) = MUL(T1, k2) - MUL(T3, k1) + T1 - T3 + const __m128i c1 = _mm_mulhi_epi16(T1, k2); + const __m128i c2 = _mm_mulhi_epi16(T3, k1); + const __m128i c3 = _mm_sub_epi16(T1, T3); + const __m128i c4 = _mm_sub_epi16(c1, c2); + const __m128i c = _mm_add_epi16(c3, c4); + // d = MUL(T1, K1) + MUL(T3, K2) = MUL(T1, k1) + MUL(T3, k2) + T1 + T3 + const __m128i d1 = _mm_mulhi_epi16(T1, k1); + const __m128i d2 = _mm_mulhi_epi16(T3, k2); + const __m128i d3 = _mm_add_epi16(T1, T3); + const __m128i d4 = _mm_add_epi16(d1, d2); + const __m128i d = _mm_add_epi16(d3, d4); + + // Second pass. + const __m128i tmp0 = _mm_add_epi16(a, d); + const __m128i tmp1 = _mm_add_epi16(b, c); + const __m128i tmp2 = _mm_sub_epi16(b, c); + const __m128i tmp3 = _mm_sub_epi16(a, d); + const __m128i shifted0 = _mm_srai_epi16(tmp0, 3); + const __m128i shifted1 = _mm_srai_epi16(tmp1, 3); + const __m128i shifted2 = _mm_srai_epi16(tmp2, 3); + const __m128i shifted3 = _mm_srai_epi16(tmp3, 3); + + // Transpose the two 4x4. + // a00 a01 a02 a03 b00 b01 b02 b03 + // a10 a11 a12 a13 b10 b11 b12 b13 + // a20 a21 a22 a23 b20 b21 b22 b23 + // a30 a31 a32 a33 b30 b31 b32 b33 + const __m128i transpose0_0 = _mm_unpacklo_epi16(shifted0, shifted1); + const __m128i transpose0_1 = _mm_unpacklo_epi16(shifted2, shifted3); + const __m128i transpose0_2 = _mm_unpackhi_epi16(shifted0, shifted1); + const __m128i transpose0_3 = _mm_unpackhi_epi16(shifted2, shifted3); + // a00 a10 a01 a11 a02 a12 a03 a13 + // a20 a30 a21 a31 a22 a32 a23 a33 + // b00 b10 b01 b11 b02 b12 b03 b13 + // b20 b30 b21 b31 b22 b32 b23 b33 + const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1); + const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3); + const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1); + const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3); + // a00 a10 a20 a30 a01 a11 a21 a31 + // b00 b10 b20 b30 b01 b11 b21 b31 + // a02 a12 a22 a32 a03 a13 a23 a33 + // b02 b12 a22 b32 b03 b13 b23 b33 + T0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1); + T1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1); + T2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3); + T3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3); + // a00 a10 a20 a30 b00 b10 b20 b30 + // a01 a11 a21 a31 b01 b11 b21 b31 + // a02 a12 a22 a32 b02 b12 b22 b32 + // a03 a13 a23 a33 b03 b13 b23 b33 + } + + // Add inverse transform to 'dst' and store. + { + const __m128i zero = _mm_set1_epi16(0); + // Load the reference(s). + __m128i dst0, dst1, dst2, dst3; + if (do_two) { + // Load eight bytes/pixels per line. + dst0 = _mm_loadl_epi64((__m128i*)&dst[0 * BPS]); + dst1 = _mm_loadl_epi64((__m128i*)&dst[1 * BPS]); + dst2 = _mm_loadl_epi64((__m128i*)&dst[2 * BPS]); + dst3 = _mm_loadl_epi64((__m128i*)&dst[3 * BPS]); + } else { + // Load four bytes/pixels per line. + dst0 = _mm_cvtsi32_si128(*(int*)&dst[0 * BPS]); + dst1 = _mm_cvtsi32_si128(*(int*)&dst[1 * BPS]); + dst2 = _mm_cvtsi32_si128(*(int*)&dst[2 * BPS]); + dst3 = _mm_cvtsi32_si128(*(int*)&dst[3 * BPS]); + } + // Convert to 16b. + dst0 = _mm_unpacklo_epi8(dst0, zero); + dst1 = _mm_unpacklo_epi8(dst1, zero); + dst2 = _mm_unpacklo_epi8(dst2, zero); + dst3 = _mm_unpacklo_epi8(dst3, zero); + // Add the inverse transform(s). 
+ dst0 = _mm_add_epi16(dst0, T0); + dst1 = _mm_add_epi16(dst1, T1); + dst2 = _mm_add_epi16(dst2, T2); + dst3 = _mm_add_epi16(dst3, T3); + // Unsigned saturate to 8b. + dst0 = _mm_packus_epi16(dst0, dst0); + dst1 = _mm_packus_epi16(dst1, dst1); + dst2 = _mm_packus_epi16(dst2, dst2); + dst3 = _mm_packus_epi16(dst3, dst3); + // Store the results. + if (do_two) { + // Store eight bytes/pixels per line. + _mm_storel_epi64((__m128i*)&dst[0 * BPS], dst0); + _mm_storel_epi64((__m128i*)&dst[1 * BPS], dst1); + _mm_storel_epi64((__m128i*)&dst[2 * BPS], dst2); + _mm_storel_epi64((__m128i*)&dst[3 * BPS], dst3); + } else { + // Store four bytes/pixels per line. + *((int32_t *)&dst[0 * BPS]) = _mm_cvtsi128_si32(dst0); + *((int32_t *)&dst[1 * BPS]) = _mm_cvtsi128_si32(dst1); + *((int32_t *)&dst[2 * BPS]) = _mm_cvtsi128_si32(dst2); + *((int32_t *)&dst[3 * BPS]) = _mm_cvtsi128_si32(dst3); + } + } +} + +//------------------------------------------------------------------------------ +// Loop Filter (Paragraph 15) + +// Compute abs(p - q) = subs(p - q) OR subs(q - p) +#define MM_ABS(p, q) _mm_or_si128( \ + _mm_subs_epu8((q), (p)), \ + _mm_subs_epu8((p), (q))) + +// Shift each byte of "a" by N bits while preserving by the sign bit. +// +// It first shifts the lower bytes of the words and then the upper bytes and +// then merges the results together. +#define SIGNED_SHIFT_N(a, N) { \ + __m128i t = a; \ + t = _mm_slli_epi16(t, 8); \ + t = _mm_srai_epi16(t, N); \ + t = _mm_srli_epi16(t, 8); \ + \ + a = _mm_srai_epi16(a, N + 8); \ + a = _mm_slli_epi16(a, 8); \ + \ + a = _mm_or_si128(t, a); \ +} + +#define FLIP_SIGN_BIT2(a, b) { \ + a = _mm_xor_si128(a, sign_bit); \ + b = _mm_xor_si128(b, sign_bit); \ +} + +#define FLIP_SIGN_BIT4(a, b, c, d) { \ + FLIP_SIGN_BIT2(a, b); \ + FLIP_SIGN_BIT2(c, d); \ +} + +#define GET_NOTHEV(p1, p0, q0, q1, hev_thresh, not_hev) { \ + const __m128i zero = _mm_setzero_si128(); \ + const __m128i t1 = MM_ABS(p1, p0); \ + const __m128i t2 = MM_ABS(q1, q0); \ + \ + const __m128i h = _mm_set1_epi8(hev_thresh); \ + const __m128i t3 = _mm_subs_epu8(t1, h); /* abs(p1 - p0) - hev_tresh */ \ + const __m128i t4 = _mm_subs_epu8(t2, h); /* abs(q1 - q0) - hev_tresh */ \ + \ + not_hev = _mm_or_si128(t3, t4); \ + not_hev = _mm_cmpeq_epi8(not_hev, zero); /* not_hev <= t1 && not_hev <= t2 */\ +} + +#define GET_BASE_DELTA(p1, p0, q0, q1, o) { \ + const __m128i qp0 = _mm_subs_epi8(q0, p0); /* q0 - p0 */ \ + o = _mm_subs_epi8(p1, q1); /* p1 - q1 */ \ + o = _mm_adds_epi8(o, qp0); /* p1 - q1 + 1 * (q0 - p0) */ \ + o = _mm_adds_epi8(o, qp0); /* p1 - q1 + 2 * (q0 - p0) */ \ + o = _mm_adds_epi8(o, qp0); /* p1 - q1 + 3 * (q0 - p0) */ \ +} + +#define DO_SIMPLE_FILTER(p0, q0, fl) { \ + const __m128i three = _mm_set1_epi8(3); \ + const __m128i four = _mm_set1_epi8(4); \ + __m128i v3 = _mm_adds_epi8(fl, three); \ + __m128i v4 = _mm_adds_epi8(fl, four); \ + \ + /* Do +4 side */ \ + SIGNED_SHIFT_N(v4, 3); /* v4 >> 3 */ \ + q0 = _mm_subs_epi8(q0, v4); /* q0 -= v4 */ \ + \ + /* Now do +3 side */ \ + SIGNED_SHIFT_N(v3, 3); /* v3 >> 3 */ \ + p0 = _mm_adds_epi8(p0, v3); /* p0 += v3 */ \ +} + +// Updates values of 2 pixels at MB edge during complex filtering. 
+// Update operations: +// q = q - a and p = p + a; where a = [(a_hi >> 7), (a_lo >> 7)] +#define UPDATE_2PIXELS(pi, qi, a_lo, a_hi) { \ + const __m128i a_lo7 = _mm_srai_epi16(a_lo, 7); \ + const __m128i a_hi7 = _mm_srai_epi16(a_hi, 7); \ + const __m128i a = _mm_packs_epi16(a_lo7, a_hi7); \ + pi = _mm_adds_epi8(pi, a); \ + qi = _mm_subs_epi8(qi, a); \ +} + +static void NeedsFilter(const __m128i* p1, const __m128i* p0, const __m128i* q0, + const __m128i* q1, int thresh, __m128i *mask) { + __m128i t1 = MM_ABS(*p1, *q1); // abs(p1 - q1) + *mask = _mm_set1_epi8(0xFE); + t1 = _mm_and_si128(t1, *mask); // set lsb of each byte to zero + t1 = _mm_srli_epi16(t1, 1); // abs(p1 - q1) / 2 + + *mask = MM_ABS(*p0, *q0); // abs(p0 - q0) + *mask = _mm_adds_epu8(*mask, *mask); // abs(p0 - q0) * 2 + *mask = _mm_adds_epu8(*mask, t1); // abs(p0 - q0) * 2 + abs(p1 - q1) / 2 + + t1 = _mm_set1_epi8(thresh); + *mask = _mm_subs_epu8(*mask, t1); // mask <= thresh + *mask = _mm_cmpeq_epi8(*mask, _mm_setzero_si128()); +} + +//------------------------------------------------------------------------------ +// Edge filtering functions + +// Applies filter on 2 pixels (p0 and q0) +static WEBP_INLINE void DoFilter2(const __m128i* p1, __m128i* p0, __m128i* q0, + const __m128i* q1, int thresh) { + __m128i a, mask; + const __m128i sign_bit = _mm_set1_epi8(0x80); + const __m128i p1s = _mm_xor_si128(*p1, sign_bit); + const __m128i q1s = _mm_xor_si128(*q1, sign_bit); + + NeedsFilter(p1, p0, q0, q1, thresh, &mask); + + // convert to signed values + FLIP_SIGN_BIT2(*p0, *q0); + + GET_BASE_DELTA(p1s, *p0, *q0, q1s, a); + a = _mm_and_si128(a, mask); // mask filter values we don't care about + DO_SIMPLE_FILTER(*p0, *q0, a); + + // unoffset + FLIP_SIGN_BIT2(*p0, *q0); +} + +// Applies filter on 4 pixels (p1, p0, q0 and q1) +static WEBP_INLINE void DoFilter4(__m128i* p1, __m128i *p0, + __m128i* q0, __m128i* q1, + const __m128i* mask, int hev_thresh) { + __m128i not_hev; + __m128i t1, t2, t3; + const __m128i sign_bit = _mm_set1_epi8(0x80); + + // compute hev mask + GET_NOTHEV(*p1, *p0, *q0, *q1, hev_thresh, not_hev); + + // convert to signed values + FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1); + + t1 = _mm_subs_epi8(*p1, *q1); // p1 - q1 + t1 = _mm_andnot_si128(not_hev, t1); // hev(p1 - q1) + t2 = _mm_subs_epi8(*q0, *p0); // q0 - p0 + t1 = _mm_adds_epi8(t1, t2); // hev(p1 - q1) + 1 * (q0 - p0) + t1 = _mm_adds_epi8(t1, t2); // hev(p1 - q1) + 2 * (q0 - p0) + t1 = _mm_adds_epi8(t1, t2); // hev(p1 - q1) + 3 * (q0 - p0) + t1 = _mm_and_si128(t1, *mask); // mask filter values we don't care about + + // Do +4 side + t2 = _mm_set1_epi8(4); + t2 = _mm_adds_epi8(t1, t2); // 3 * (q0 - p0) + (p1 - q1) + 4 + SIGNED_SHIFT_N(t2, 3); // (3 * (q0 - p0) + hev(p1 - q1) + 4) >> 3 + t3 = t2; // save t2 + *q0 = _mm_subs_epi8(*q0, t2); // q0 -= t2 + + // Now do +3 side + t2 = _mm_set1_epi8(3); + t2 = _mm_adds_epi8(t1, t2); // +3 instead of +4 + SIGNED_SHIFT_N(t2, 3); // (3 * (q0 - p0) + hev(p1 - q1) + 3) >> 3 + *p0 = _mm_adds_epi8(*p0, t2); // p0 += t2 + + t2 = _mm_set1_epi8(1); + t3 = _mm_adds_epi8(t3, t2); + SIGNED_SHIFT_N(t3, 1); // (3 * (q0 - p0) + hev(p1 - q1) + 4) >> 4 + + t3 = _mm_and_si128(not_hev, t3); // if !hev + *q1 = _mm_subs_epi8(*q1, t3); // q1 -= t3 + *p1 = _mm_adds_epi8(*p1, t3); // p1 += t3 + + // unoffset + FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1); +} + +// Applies filter on 6 pixels (p2, p1, p0, q0, q1 and q2) +static WEBP_INLINE void DoFilter6(__m128i *p2, __m128i* p1, __m128i *p0, + __m128i* q0, __m128i* q1, __m128i *q2, + const __m128i* mask, int 
hev_thresh) { + __m128i a, not_hev; + const __m128i sign_bit = _mm_set1_epi8(0x80); + + // compute hev mask + GET_NOTHEV(*p1, *p0, *q0, *q1, hev_thresh, not_hev); + + // convert to signed values + FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1); + FLIP_SIGN_BIT2(*p2, *q2); + + GET_BASE_DELTA(*p1, *p0, *q0, *q1, a); + + { // do simple filter on pixels with hev + const __m128i m = _mm_andnot_si128(not_hev, *mask); + const __m128i f = _mm_and_si128(a, m); + DO_SIMPLE_FILTER(*p0, *q0, f); + } + { // do strong filter on pixels with not hev + const __m128i zero = _mm_setzero_si128(); + const __m128i nine = _mm_set1_epi16(0x0900); + const __m128i sixty_three = _mm_set1_epi16(63); + + const __m128i m = _mm_and_si128(not_hev, *mask); + const __m128i f = _mm_and_si128(a, m); + const __m128i f_lo = _mm_unpacklo_epi8(zero, f); + const __m128i f_hi = _mm_unpackhi_epi8(zero, f); + + const __m128i f9_lo = _mm_mulhi_epi16(f_lo, nine); // Filter (lo) * 9 + const __m128i f9_hi = _mm_mulhi_epi16(f_hi, nine); // Filter (hi) * 9 + const __m128i f18_lo = _mm_add_epi16(f9_lo, f9_lo); // Filter (lo) * 18 + const __m128i f18_hi = _mm_add_epi16(f9_hi, f9_hi); // Filter (hi) * 18 + + const __m128i a2_lo = _mm_add_epi16(f9_lo, sixty_three); // Filter * 9 + 63 + const __m128i a2_hi = _mm_add_epi16(f9_hi, sixty_three); // Filter * 9 + 63 + + const __m128i a1_lo = _mm_add_epi16(f18_lo, sixty_three); // F... * 18 + 63 + const __m128i a1_hi = _mm_add_epi16(f18_hi, sixty_three); // F... * 18 + 63 + + const __m128i a0_lo = _mm_add_epi16(f18_lo, a2_lo); // Filter * 27 + 63 + const __m128i a0_hi = _mm_add_epi16(f18_hi, a2_hi); // Filter * 27 + 63 + + UPDATE_2PIXELS(*p2, *q2, a2_lo, a2_hi); + UPDATE_2PIXELS(*p1, *q1, a1_lo, a1_hi); + UPDATE_2PIXELS(*p0, *q0, a0_lo, a0_hi); + } + + // unoffset + FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1); + FLIP_SIGN_BIT2(*p2, *q2); +} + +// reads 8 rows across a vertical edge. +// +// TODO(somnath): Investigate _mm_shuffle* also see if it can be broken into +// two Load4x4() to avoid code duplication. 
+static WEBP_INLINE void Load8x4(const uint8_t* b, int stride, + __m128i* p, __m128i* q) { + __m128i t1, t2; + + // Load 0th, 1st, 4th and 5th rows + __m128i r0 = _mm_cvtsi32_si128(*((int*)&b[0 * stride])); // 03 02 01 00 + __m128i r1 = _mm_cvtsi32_si128(*((int*)&b[1 * stride])); // 13 12 11 10 + __m128i r4 = _mm_cvtsi32_si128(*((int*)&b[4 * stride])); // 43 42 41 40 + __m128i r5 = _mm_cvtsi32_si128(*((int*)&b[5 * stride])); // 53 52 51 50 + + r0 = _mm_unpacklo_epi32(r0, r4); // 43 42 41 40 03 02 01 00 + r1 = _mm_unpacklo_epi32(r1, r5); // 53 52 51 50 13 12 11 10 + + // t1 = 53 43 52 42 51 41 50 40 13 03 12 02 11 01 10 00 + t1 = _mm_unpacklo_epi8(r0, r1); + + // Load 2nd, 3rd, 6th and 7th rows + r0 = _mm_cvtsi32_si128(*((int*)&b[2 * stride])); // 23 22 21 22 + r1 = _mm_cvtsi32_si128(*((int*)&b[3 * stride])); // 33 32 31 30 + r4 = _mm_cvtsi32_si128(*((int*)&b[6 * stride])); // 63 62 61 60 + r5 = _mm_cvtsi32_si128(*((int*)&b[7 * stride])); // 73 72 71 70 + + r0 = _mm_unpacklo_epi32(r0, r4); // 63 62 61 60 23 22 21 20 + r1 = _mm_unpacklo_epi32(r1, r5); // 73 72 71 70 33 32 31 30 + + // t2 = 73 63 72 62 71 61 70 60 33 23 32 22 31 21 30 20 + t2 = _mm_unpacklo_epi8(r0, r1); + + // t1 = 33 23 13 03 32 22 12 02 31 21 11 01 30 20 10 00 + // t2 = 73 63 53 43 72 62 52 42 71 61 51 41 70 60 50 40 + r0 = t1; + t1 = _mm_unpacklo_epi16(t1, t2); + t2 = _mm_unpackhi_epi16(r0, t2); + + // *p = 71 61 51 41 31 21 11 01 70 60 50 40 30 20 10 00 + // *q = 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02 + *p = _mm_unpacklo_epi32(t1, t2); + *q = _mm_unpackhi_epi32(t1, t2); +} + +static WEBP_INLINE void Load16x4(const uint8_t* r0, const uint8_t* r8, + int stride, + __m128i* p1, __m128i* p0, + __m128i* q0, __m128i* q1) { + __m128i t1, t2; + // Assume the pixels around the edge (|) are numbered as follows + // 00 01 | 02 03 + // 10 11 | 12 13 + // ... | ... 
+ // e0 e1 | e2 e3 + // f0 f1 | f2 f3 + // + // r0 is pointing to the 0th row (00) + // r8 is pointing to the 8th row (80) + + // Load + // p1 = 71 61 51 41 31 21 11 01 70 60 50 40 30 20 10 00 + // q0 = 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02 + // p0 = f1 e1 d1 c1 b1 a1 91 81 f0 e0 d0 c0 b0 a0 90 80 + // q1 = f3 e3 d3 c3 b3 a3 93 83 f2 e2 d2 c2 b2 a2 92 82 + Load8x4(r0, stride, p1, q0); + Load8x4(r8, stride, p0, q1); + + t1 = *p1; + t2 = *q0; + // p1 = f0 e0 d0 c0 b0 a0 90 80 70 60 50 40 30 20 10 00 + // p0 = f1 e1 d1 c1 b1 a1 91 81 71 61 51 41 31 21 11 01 + // q0 = f2 e2 d2 c2 b2 a2 92 82 72 62 52 42 32 22 12 02 + // q1 = f3 e3 d3 c3 b3 a3 93 83 73 63 53 43 33 23 13 03 + *p1 = _mm_unpacklo_epi64(t1, *p0); + *p0 = _mm_unpackhi_epi64(t1, *p0); + *q0 = _mm_unpacklo_epi64(t2, *q1); + *q1 = _mm_unpackhi_epi64(t2, *q1); +} + +static WEBP_INLINE void Store4x4(__m128i* x, uint8_t* dst, int stride) { + int i; + for (i = 0; i < 4; ++i, dst += stride) { + *((int32_t*)dst) = _mm_cvtsi128_si32(*x); + *x = _mm_srli_si128(*x, 4); + } +} + +// Transpose back and store +static WEBP_INLINE void Store16x4(uint8_t* r0, uint8_t* r8, int stride, + __m128i* p1, __m128i* p0, + __m128i* q0, __m128i* q1) { + __m128i t1; + + // p0 = 71 70 61 60 51 50 41 40 31 30 21 20 11 10 01 00 + // p1 = f1 f0 e1 e0 d1 d0 c1 c0 b1 b0 a1 a0 91 90 81 80 + t1 = *p0; + *p0 = _mm_unpacklo_epi8(*p1, t1); + *p1 = _mm_unpackhi_epi8(*p1, t1); + + // q0 = 73 72 63 62 53 52 43 42 33 32 23 22 13 12 03 02 + // q1 = f3 f2 e3 e2 d3 d2 c3 c2 b3 b2 a3 a2 93 92 83 82 + t1 = *q0; + *q0 = _mm_unpacklo_epi8(t1, *q1); + *q1 = _mm_unpackhi_epi8(t1, *q1); + + // p0 = 33 32 31 30 23 22 21 20 13 12 11 10 03 02 01 00 + // q0 = 73 72 71 70 63 62 61 60 53 52 51 50 43 42 41 40 + t1 = *p0; + *p0 = _mm_unpacklo_epi16(t1, *q0); + *q0 = _mm_unpackhi_epi16(t1, *q0); + + // p1 = b3 b2 b1 b0 a3 a2 a1 a0 93 92 91 90 83 82 81 80 + // q1 = f3 f2 f1 f0 e3 e2 e1 e0 d3 d2 d1 d0 c3 c2 c1 c0 + t1 = *p1; + *p1 = _mm_unpacklo_epi16(t1, *q1); + *q1 = _mm_unpackhi_epi16(t1, *q1); + + Store4x4(p0, r0, stride); + r0 += 4 * stride; + Store4x4(q0, r0, stride); + + Store4x4(p1, r8, stride); + r8 += 4 * stride; + Store4x4(q1, r8, stride); +} + +//------------------------------------------------------------------------------ +// Simple In-loop filtering (Paragraph 15.2) + +static void SimpleVFilter16SSE2(uint8_t* p, int stride, int thresh) { + // Load + __m128i p1 = _mm_loadu_si128((__m128i*)&p[-2 * stride]); + __m128i p0 = _mm_loadu_si128((__m128i*)&p[-stride]); + __m128i q0 = _mm_loadu_si128((__m128i*)&p[0]); + __m128i q1 = _mm_loadu_si128((__m128i*)&p[stride]); + + DoFilter2(&p1, &p0, &q0, &q1, thresh); + + // Store + _mm_storeu_si128((__m128i*)&p[-stride], p0); + _mm_storeu_si128((__m128i*)p, q0); +} + +static void SimpleHFilter16SSE2(uint8_t* p, int stride, int thresh) { + __m128i p1, p0, q0, q1; + + p -= 2; // beginning of p1 + + Load16x4(p, p + 8 * stride, stride, &p1, &p0, &q0, &q1); + DoFilter2(&p1, &p0, &q0, &q1, thresh); + Store16x4(p, p + 8 * stride, stride, &p1, &p0, &q0, &q1); +} + +static void SimpleVFilter16iSSE2(uint8_t* p, int stride, int thresh) { + int k; + for (k = 3; k > 0; --k) { + p += 4 * stride; + SimpleVFilter16SSE2(p, stride, thresh); + } +} + +static void SimpleHFilter16iSSE2(uint8_t* p, int stride, int thresh) { + int k; + for (k = 3; k > 0; --k) { + p += 4; + SimpleHFilter16SSE2(p, stride, thresh); + } +} + +//------------------------------------------------------------------------------ +// Complex In-loop filtering (Paragraph 15.3) + +#define 
MAX_DIFF1(p3, p2, p1, p0, m) { \ + m = MM_ABS(p3, p2); \ + m = _mm_max_epu8(m, MM_ABS(p2, p1)); \ + m = _mm_max_epu8(m, MM_ABS(p1, p0)); \ +} + +#define MAX_DIFF2(p3, p2, p1, p0, m) { \ + m = _mm_max_epu8(m, MM_ABS(p3, p2)); \ + m = _mm_max_epu8(m, MM_ABS(p2, p1)); \ + m = _mm_max_epu8(m, MM_ABS(p1, p0)); \ +} + +#define LOAD_H_EDGES4(p, stride, e1, e2, e3, e4) { \ + e1 = _mm_loadu_si128((__m128i*)&(p)[0 * stride]); \ + e2 = _mm_loadu_si128((__m128i*)&(p)[1 * stride]); \ + e3 = _mm_loadu_si128((__m128i*)&(p)[2 * stride]); \ + e4 = _mm_loadu_si128((__m128i*)&(p)[3 * stride]); \ +} + +#define LOADUV_H_EDGE(p, u, v, stride) { \ + p = _mm_loadl_epi64((__m128i*)&(u)[(stride)]); \ + p = _mm_unpacklo_epi64(p, _mm_loadl_epi64((__m128i*)&(v)[(stride)])); \ +} + +#define LOADUV_H_EDGES4(u, v, stride, e1, e2, e3, e4) { \ + LOADUV_H_EDGE(e1, u, v, 0 * stride); \ + LOADUV_H_EDGE(e2, u, v, 1 * stride); \ + LOADUV_H_EDGE(e3, u, v, 2 * stride); \ + LOADUV_H_EDGE(e4, u, v, 3 * stride); \ +} + +#define STOREUV(p, u, v, stride) { \ + _mm_storel_epi64((__m128i*)&u[(stride)], p); \ + p = _mm_srli_si128(p, 8); \ + _mm_storel_epi64((__m128i*)&v[(stride)], p); \ +} + +#define COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask) { \ + __m128i fl_yes; \ + const __m128i it = _mm_set1_epi8(ithresh); \ + mask = _mm_subs_epu8(mask, it); \ + mask = _mm_cmpeq_epi8(mask, _mm_setzero_si128()); \ + NeedsFilter(&p1, &p0, &q0, &q1, thresh, &fl_yes); \ + mask = _mm_and_si128(mask, fl_yes); \ +} + +// on macroblock edges +static void VFilter16SSE2(uint8_t* p, int stride, + int thresh, int ithresh, int hev_thresh) { + __m128i t1; + __m128i mask; + __m128i p2, p1, p0, q0, q1, q2; + + // Load p3, p2, p1, p0 + LOAD_H_EDGES4(p - 4 * stride, stride, t1, p2, p1, p0); + MAX_DIFF1(t1, p2, p1, p0, mask); + + // Load q0, q1, q2, q3 + LOAD_H_EDGES4(p, stride, q0, q1, q2, t1); + MAX_DIFF2(t1, q2, q1, q0, mask); + + COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask); + DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh); + + // Store + _mm_storeu_si128((__m128i*)&p[-3 * stride], p2); + _mm_storeu_si128((__m128i*)&p[-2 * stride], p1); + _mm_storeu_si128((__m128i*)&p[-1 * stride], p0); + _mm_storeu_si128((__m128i*)&p[0 * stride], q0); + _mm_storeu_si128((__m128i*)&p[1 * stride], q1); + _mm_storeu_si128((__m128i*)&p[2 * stride], q2); +} + +static void HFilter16SSE2(uint8_t* p, int stride, + int thresh, int ithresh, int hev_thresh) { + __m128i mask; + __m128i p3, p2, p1, p0, q0, q1, q2, q3; + + uint8_t* const b = p - 4; + Load16x4(b, b + 8 * stride, stride, &p3, &p2, &p1, &p0); // p3, p2, p1, p0 + MAX_DIFF1(p3, p2, p1, p0, mask); + + Load16x4(p, p + 8 * stride, stride, &q0, &q1, &q2, &q3); // q0, q1, q2, q3 + MAX_DIFF2(q3, q2, q1, q0, mask); + + COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask); + DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh); + + Store16x4(b, b + 8 * stride, stride, &p3, &p2, &p1, &p0); + Store16x4(p, p + 8 * stride, stride, &q0, &q1, &q2, &q3); +} + +// on three inner edges +static void VFilter16iSSE2(uint8_t* p, int stride, + int thresh, int ithresh, int hev_thresh) { + int k; + __m128i mask; + __m128i t1, t2, p1, p0, q0, q1; + + for (k = 3; k > 0; --k) { + // Load p3, p2, p1, p0 + LOAD_H_EDGES4(p, stride, t2, t1, p1, p0); + MAX_DIFF1(t2, t1, p1, p0, mask); + + p += 4 * stride; + + // Load q0, q1, q2, q3 + LOAD_H_EDGES4(p, stride, q0, q1, t1, t2); + MAX_DIFF2(t2, t1, q1, q0, mask); + + COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask); + DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh); + + 
// Store + _mm_storeu_si128((__m128i*)&p[-2 * stride], p1); + _mm_storeu_si128((__m128i*)&p[-1 * stride], p0); + _mm_storeu_si128((__m128i*)&p[0 * stride], q0); + _mm_storeu_si128((__m128i*)&p[1 * stride], q1); + } +} + +static void HFilter16iSSE2(uint8_t* p, int stride, + int thresh, int ithresh, int hev_thresh) { + int k; + uint8_t* b; + __m128i mask; + __m128i t1, t2, p1, p0, q0, q1; + + for (k = 3; k > 0; --k) { + b = p; + Load16x4(b, b + 8 * stride, stride, &t2, &t1, &p1, &p0); // p3, p2, p1, p0 + MAX_DIFF1(t2, t1, p1, p0, mask); + + b += 4; // beginning of q0 + Load16x4(b, b + 8 * stride, stride, &q0, &q1, &t1, &t2); // q0, q1, q2, q3 + MAX_DIFF2(t2, t1, q1, q0, mask); + + COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask); + DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh); + + b -= 2; // beginning of p1 + Store16x4(b, b + 8 * stride, stride, &p1, &p0, &q0, &q1); + + p += 4; + } +} + +// 8-pixels wide variant, for chroma filtering +static void VFilter8SSE2(uint8_t* u, uint8_t* v, int stride, + int thresh, int ithresh, int hev_thresh) { + __m128i mask; + __m128i t1, p2, p1, p0, q0, q1, q2; + + // Load p3, p2, p1, p0 + LOADUV_H_EDGES4(u - 4 * stride, v - 4 * stride, stride, t1, p2, p1, p0); + MAX_DIFF1(t1, p2, p1, p0, mask); + + // Load q0, q1, q2, q3 + LOADUV_H_EDGES4(u, v, stride, q0, q1, q2, t1); + MAX_DIFF2(t1, q2, q1, q0, mask); + + COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask); + DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh); + + // Store + STOREUV(p2, u, v, -3 * stride); + STOREUV(p1, u, v, -2 * stride); + STOREUV(p0, u, v, -1 * stride); + STOREUV(q0, u, v, 0 * stride); + STOREUV(q1, u, v, 1 * stride); + STOREUV(q2, u, v, 2 * stride); +} + +static void HFilter8SSE2(uint8_t* u, uint8_t* v, int stride, + int thresh, int ithresh, int hev_thresh) { + __m128i mask; + __m128i p3, p2, p1, p0, q0, q1, q2, q3; + + uint8_t* const tu = u - 4; + uint8_t* const tv = v - 4; + Load16x4(tu, tv, stride, &p3, &p2, &p1, &p0); // p3, p2, p1, p0 + MAX_DIFF1(p3, p2, p1, p0, mask); + + Load16x4(u, v, stride, &q0, &q1, &q2, &q3); // q0, q1, q2, q3 + MAX_DIFF2(q3, q2, q1, q0, mask); + + COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask); + DoFilter6(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh); + + Store16x4(tu, tv, stride, &p3, &p2, &p1, &p0); + Store16x4(u, v, stride, &q0, &q1, &q2, &q3); +} + +static void VFilter8iSSE2(uint8_t* u, uint8_t* v, int stride, + int thresh, int ithresh, int hev_thresh) { + __m128i mask; + __m128i t1, t2, p1, p0, q0, q1; + + // Load p3, p2, p1, p0 + LOADUV_H_EDGES4(u, v, stride, t2, t1, p1, p0); + MAX_DIFF1(t2, t1, p1, p0, mask); + + u += 4 * stride; + v += 4 * stride; + + // Load q0, q1, q2, q3 + LOADUV_H_EDGES4(u, v, stride, q0, q1, t1, t2); + MAX_DIFF2(t2, t1, q1, q0, mask); + + COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask); + DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh); + + // Store + STOREUV(p1, u, v, -2 * stride); + STOREUV(p0, u, v, -1 * stride); + STOREUV(q0, u, v, 0 * stride); + STOREUV(q1, u, v, 1 * stride); +} + +static void HFilter8iSSE2(uint8_t* u, uint8_t* v, int stride, + int thresh, int ithresh, int hev_thresh) { + __m128i mask; + __m128i t1, t2, p1, p0, q0, q1; + Load16x4(u, v, stride, &t2, &t1, &p1, &p0); // p3, p2, p1, p0 + MAX_DIFF1(t2, t1, p1, p0, mask); + + u += 4; // beginning of q0 + v += 4; + Load16x4(u, v, stride, &q0, &q1, &t1, &t2); // q0, q1, q2, q3 + MAX_DIFF2(t2, t1, q1, q0, mask); + + COMPLEX_FL_MASK(p1, p0, q0, q1, thresh, ithresh, mask); + DoFilter4(&p1, &p0, &q0, &q1, &mask, hev_thresh); + + 
u -= 2; // beginning of p1 + v -= 2; + Store16x4(u, v, stride, &p1, &p0, &q0, &q1); +} + +extern void VP8DspInitSSE2(void); + +void VP8DspInitSSE2(void) { + VP8Transform = TransformSSE2; + + VP8VFilter16 = VFilter16SSE2; + VP8HFilter16 = HFilter16SSE2; + VP8VFilter8 = VFilter8SSE2; + VP8HFilter8 = HFilter8SSE2; + VP8VFilter16i = VFilter16iSSE2; + VP8HFilter16i = HFilter16iSSE2; + VP8VFilter8i = VFilter8iSSE2; + VP8HFilter8i = HFilter8iSSE2; + + VP8SimpleVFilter16 = SimpleVFilter16SSE2; + VP8SimpleHFilter16 = SimpleHFilter16SSE2; + VP8SimpleVFilter16i = SimpleVFilter16iSSE2; + VP8SimpleHFilter16i = SimpleHFilter16iSSE2; +} + +#if defined(__cplusplus) || defined(c_plusplus) +} // extern "C" +#endif + +#endif // WEBP_USE_SSE2 diff --git a/drivers/webpold/dsp/dsp.h b/drivers/webpold/dsp/dsp.h new file mode 100644 index 0000000000..fd686a8532 --- /dev/null +++ b/drivers/webpold/dsp/dsp.h @@ -0,0 +1,210 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// +// This code is licensed under the same terms as WebM: +// Software License Agreement: http://www.webmproject.org/license/software/ +// Additional IP Rights Grant: http://www.webmproject.org/license/additional/ +// ----------------------------------------------------------------------------- +// +// Speed-critical functions. +// +// Author: Skal (pascal.massimino@gmail.com) + +#ifndef WEBP_DSP_DSP_H_ +#define WEBP_DSP_DSP_H_ + +#include "../types.h" + +#if defined(__cplusplus) || defined(c_plusplus) +extern "C" { +#endif + +//------------------------------------------------------------------------------ +// CPU detection + +#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) +#define WEBP_MSC_SSE2 // Visual C++ SSE2 targets +#endif + +#if defined(__SSE2__) || defined(WEBP_MSC_SSE2) +#define WEBP_USE_SSE2 +#endif + +#if defined(__ANDROID__) && defined(__ARM_ARCH_7A__) && defined(__ARM_NEON__) +#define WEBP_ANDROID_NEON // Android targets that might support NEON +#endif + +#if ( (defined(__ARM_NEON__) && !defined(__aarch64__)) || defined(WEBP_ANDROID_NEON)) && !defined(PSP2_ENABLED) +#define WEBP_USE_NEON +#endif + +typedef enum { + kSSE2, + kSSE3, + kNEON +} CPUFeature; +// returns true if the CPU supports the feature. +typedef int (*VP8CPUInfo)(CPUFeature feature); +extern VP8CPUInfo VP8GetCPUInfo; + +//------------------------------------------------------------------------------ +// Encoding + +int VP8GetAlpha(const int histo[]); + +// Transforms +// VP8Idct: Does one of two inverse transforms. If do_two is set, the transforms +// will be done for (ref, in, dst) and (ref + 4, in + 16, dst + 4). +typedef void (*VP8Idct)(const uint8_t* ref, const int16_t* in, uint8_t* dst, + int do_two); +typedef void (*VP8Fdct)(const uint8_t* src, const uint8_t* ref, int16_t* out); +typedef void (*VP8WHT)(const int16_t* in, int16_t* out); +extern VP8Idct VP8ITransform; +extern VP8Fdct VP8FTransform; +extern VP8WHT VP8ITransformWHT; +extern VP8WHT VP8FTransformWHT; +// Predictions +// *dst is the destination block. *top and *left can be NULL. 
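+// (In practice left is NULL for macroblocks in the leftmost column and top is
+// NULL in the top row, where no reconstructed neighbors are available.)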
+typedef void (*VP8IntraPreds)(uint8_t *dst, const uint8_t* left, + const uint8_t* top); +typedef void (*VP8Intra4Preds)(uint8_t *dst, const uint8_t* top); +extern VP8Intra4Preds VP8EncPredLuma4; +extern VP8IntraPreds VP8EncPredLuma16; +extern VP8IntraPreds VP8EncPredChroma8; + +typedef int (*VP8Metric)(const uint8_t* pix, const uint8_t* ref); +extern VP8Metric VP8SSE16x16, VP8SSE16x8, VP8SSE8x8, VP8SSE4x4; +typedef int (*VP8WMetric)(const uint8_t* pix, const uint8_t* ref, + const uint16_t* const weights); +extern VP8WMetric VP8TDisto4x4, VP8TDisto16x16; + +typedef void (*VP8BlockCopy)(const uint8_t* src, uint8_t* dst); +extern VP8BlockCopy VP8Copy4x4; +// Quantization +struct VP8Matrix; // forward declaration +typedef int (*VP8QuantizeBlock)(int16_t in[16], int16_t out[16], + int n, const struct VP8Matrix* const mtx); +extern VP8QuantizeBlock VP8EncQuantizeBlock; + +// Compute susceptibility based on DCT-coeff histograms: +// the higher, the "easier" the macroblock is to compress. +typedef int (*VP8CHisto)(const uint8_t* ref, const uint8_t* pred, + int start_block, int end_block); +extern const int VP8DspScan[16 + 4 + 4]; +extern VP8CHisto VP8CollectHistogram; + +void VP8EncDspInit(void); // must be called before using any of the above + +//------------------------------------------------------------------------------ +// Decoding + +typedef void (*VP8DecIdct)(const int16_t* coeffs, uint8_t* dst); +// when doing two transforms, coeffs is actually int16_t[2][16]. +typedef void (*VP8DecIdct2)(const int16_t* coeffs, uint8_t* dst, int do_two); +extern VP8DecIdct2 VP8Transform; +extern VP8DecIdct VP8TransformUV; +extern VP8DecIdct VP8TransformDC; +extern VP8DecIdct VP8TransformDCUV; +extern void (*VP8TransformWHT)(const int16_t* in, int16_t* out); + +// *dst is the destination block, with stride BPS. Boundary samples are +// assumed accessible when needed. 
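+// (Unlike the encoder-side predictors above, these take no explicit left/top
+// pointers: the decoder keeps the boundary samples at negative offsets such
+// as dst[-1] and dst[-BPS] within the same BPS-strided working buffer.)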
+typedef void (*VP8PredFunc)(uint8_t* dst);
+extern const VP8PredFunc VP8PredLuma16[/* NUM_B_DC_MODES */];
+extern const VP8PredFunc VP8PredChroma8[/* NUM_B_DC_MODES */];
+extern const VP8PredFunc VP8PredLuma4[/* NUM_BMODES */];
+
+// simple filter (only for luma)
+typedef void (*VP8SimpleFilterFunc)(uint8_t* p, int stride, int thresh);
+extern VP8SimpleFilterFunc VP8SimpleVFilter16;
+extern VP8SimpleFilterFunc VP8SimpleHFilter16;
+extern VP8SimpleFilterFunc VP8SimpleVFilter16i;  // filter 3 inner edges
+extern VP8SimpleFilterFunc VP8SimpleHFilter16i;
+
+// regular filter (on both macroblock edges and inner edges)
+typedef void (*VP8LumaFilterFunc)(uint8_t* luma, int stride,
+                                  int thresh, int ithresh, int hev_t);
+typedef void (*VP8ChromaFilterFunc)(uint8_t* u, uint8_t* v, int stride,
+                                    int thresh, int ithresh, int hev_t);
+// on outer edge
+extern VP8LumaFilterFunc VP8VFilter16;
+extern VP8LumaFilterFunc VP8HFilter16;
+extern VP8ChromaFilterFunc VP8VFilter8;
+extern VP8ChromaFilterFunc VP8HFilter8;
+
+// on inner edge
+extern VP8LumaFilterFunc VP8VFilter16i;   // filtering 3 inner edges altogether
+extern VP8LumaFilterFunc VP8HFilter16i;
+extern VP8ChromaFilterFunc VP8VFilter8i;  // filtering u and v altogether
+extern VP8ChromaFilterFunc VP8HFilter8i;
+
+// must be called before anything using the above
+void VP8DspInit(void);
+
+//------------------------------------------------------------------------------
+// WebP I/O
+
+#define FANCY_UPSAMPLING   // undefined to remove fancy upsampling support
+
+typedef void (*WebPUpsampleLinePairFunc)(
+    const uint8_t* top_y, const uint8_t* bottom_y,
+    const uint8_t* top_u, const uint8_t* top_v,
+    const uint8_t* cur_u, const uint8_t* cur_v,
+    uint8_t* top_dst, uint8_t* bottom_dst, int len);
+
+#ifdef FANCY_UPSAMPLING
+
+// Fancy upsampling functions to convert YUV to RGB(A) modes
+extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];
+
+// Initializes SSE2 version of the fancy upsamplers.
+void WebPInitUpsamplersSSE2(void);
+
+#endif    // FANCY_UPSAMPLING
+
+// Point-sampling methods.
+typedef void (*WebPSampleLinePairFunc)(
+    const uint8_t* top_y, const uint8_t* bottom_y,
+    const uint8_t* u, const uint8_t* v,
+    uint8_t* top_dst, uint8_t* bottom_dst, int len);
+
+extern const WebPSampleLinePairFunc WebPSamplers[/* MODE_LAST */];
+
+// General function for converting two lines of ARGB or RGBA.
+// 'alpha_is_last' should be true if 0xff000000 is stored in memory as
+// 0x00, 0x00, 0x00, 0xff (little endian).
+WebPUpsampleLinePairFunc WebPGetLinePairConverter(int alpha_is_last);
+
+// YUV444->RGB converters
+typedef void (*WebPYUV444Converter)(const uint8_t* y,
+                                    const uint8_t* u, const uint8_t* v,
+                                    uint8_t* dst, int len);
+
+extern const WebPYUV444Converter WebPYUV444Converters[/* MODE_LAST */];
+
+// Main function to be called
+void WebPInitUpsamplers(void);
+
+//------------------------------------------------------------------------------
+// Pre-multiply planes with alpha values
+
+// Apply alpha pre-multiply on an rgba, bgra or argb plane of size w * h.
+// alpha_first should be 0 for argb, 1 for rgba or bgra (where alpha is last).
+extern void (*WebPApplyAlphaMultiply)(
+    uint8_t* rgba, int alpha_first, int w, int h, int stride);
+
+// Same, but specifically for RGBA4444 format
+extern void (*WebPApplyAlphaMultiply4444)(
+    uint8_t* rgba4444, int w, int h, int stride);
+
+// To be called first before using the above.
+void WebPInitPremultiply(void);
+
+void WebPInitPremultiplySSE2(void);   // should not be called directly.
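+
+// Typical call sequence for the dispatch tables above (an illustrative
+// sketch only; 'coeffs', 'dst', 'y_row', 'stride' and 'thresh' are
+// placeholder names, not part of this header):
+//
+//   VP8DspInit();                        // install C or SIMD implementations
+//   VP8Transform(coeffs, dst, 0);        // reconstruct one 4x4 block
+//   VP8SimpleVFilter16(y_row, stride, thresh);   // simple loop-filter pass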
+ +//------------------------------------------------------------------------------ + +#if defined(__cplusplus) || defined(c_plusplus) +} // extern "C" +#endif + +#endif /* WEBP_DSP_DSP_H_ */ diff --git a/drivers/webpold/dsp/enc.c b/drivers/webpold/dsp/enc.c new file mode 100644 index 0000000000..02234564be --- /dev/null +++ b/drivers/webpold/dsp/enc.c @@ -0,0 +1,743 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// +// This code is licensed under the same terms as WebM: +// Software License Agreement: http://www.webmproject.org/license/software/ +// Additional IP Rights Grant: http://www.webmproject.org/license/additional/ +// ----------------------------------------------------------------------------- +// +// Speed-critical encoding functions. +// +// Author: Skal (pascal.massimino@gmail.com) + +#include <stdlib.h> // for abs() +#include "./dsp.h" +#include "../enc/vp8enci.h" + +#if defined(__cplusplus) || defined(c_plusplus) +extern "C" { +#endif + +//------------------------------------------------------------------------------ +// Compute susceptibility based on DCT-coeff histograms: +// the higher, the "easier" the macroblock is to compress. + +static int ClipAlpha(int alpha) { + return alpha < 0 ? 0 : alpha > 255 ? 255 : alpha; +} + +int VP8GetAlpha(const int histo[MAX_COEFF_THRESH + 1]) { + int num = 0, den = 0, val = 0; + int k; + int alpha; + // note: changing this loop to avoid the numerous "k + 1" slows things down. + for (k = 0; k < MAX_COEFF_THRESH; ++k) { + if (histo[k + 1]) { + val += histo[k + 1]; + num += val * (k + 1); + den += (k + 1) * (k + 1); + } + } + // we scale the value to a usable [0..255] range + alpha = den ? 10 * num / den - 5 : 0; + return ClipAlpha(alpha); +} + +const int VP8DspScan[16 + 4 + 4] = { + // Luma + 0 + 0 * BPS, 4 + 0 * BPS, 8 + 0 * BPS, 12 + 0 * BPS, + 0 + 4 * BPS, 4 + 4 * BPS, 8 + 4 * BPS, 12 + 4 * BPS, + 0 + 8 * BPS, 4 + 8 * BPS, 8 + 8 * BPS, 12 + 8 * BPS, + 0 + 12 * BPS, 4 + 12 * BPS, 8 + 12 * BPS, 12 + 12 * BPS, + + 0 + 0 * BPS, 4 + 0 * BPS, 0 + 4 * BPS, 4 + 4 * BPS, // U + 8 + 0 * BPS, 12 + 0 * BPS, 8 + 4 * BPS, 12 + 4 * BPS // V +}; + +static int CollectHistogram(const uint8_t* ref, const uint8_t* pred, + int start_block, int end_block) { + int histo[MAX_COEFF_THRESH + 1] = { 0 }; + int16_t out[16]; + int j, k; + for (j = start_block; j < end_block; ++j) { + VP8FTransform(ref + VP8DspScan[j], pred + VP8DspScan[j], out); + + // Convert coefficients to bin (within out[]). + for (k = 0; k < 16; ++k) { + const int v = abs(out[k]) >> 2; + out[k] = (v > MAX_COEFF_THRESH) ? MAX_COEFF_THRESH : v; + } + + // Use bin to update histogram. + for (k = 0; k < 16; ++k) { + histo[out[k]]++; + } + } + + return VP8GetAlpha(histo); +} + +//------------------------------------------------------------------------------ +// run-time tables (~4k) + +static uint8_t clip1[255 + 510 + 1]; // clips [-255,510] to [0,255] + +// We declare this variable 'volatile' to prevent instruction reordering +// and make sure it's set to true _last_ (so as to be thread-safe) +static volatile int tables_ok = 0; + +static void InitTables(void) { + if (!tables_ok) { + int i; + for (i = -255; i <= 255 + 255; ++i) { + clip1[255 + i] = (i < 0) ? 0 : (i > 255) ? 255 : i; + } + tables_ok = 1; + } +} + +static WEBP_INLINE uint8_t clip_8b(int v) { + return (!(v & ~0xff)) ? v : v < 0 ? 
0 : 255; +} + +//------------------------------------------------------------------------------ +// Transforms (Paragraph 14.4) + +#define STORE(x, y, v) \ + dst[(x) + (y) * BPS] = clip_8b(ref[(x) + (y) * BPS] + ((v) >> 3)) + +static const int kC1 = 20091 + (1 << 16); +static const int kC2 = 35468; +#define MUL(a, b) (((a) * (b)) >> 16) + +static WEBP_INLINE void ITransformOne(const uint8_t* ref, const int16_t* in, + uint8_t* dst) { + int C[4 * 4], *tmp; + int i; + tmp = C; + for (i = 0; i < 4; ++i) { // vertical pass + const int a = in[0] + in[8]; + const int b = in[0] - in[8]; + const int c = MUL(in[4], kC2) - MUL(in[12], kC1); + const int d = MUL(in[4], kC1) + MUL(in[12], kC2); + tmp[0] = a + d; + tmp[1] = b + c; + tmp[2] = b - c; + tmp[3] = a - d; + tmp += 4; + in++; + } + + tmp = C; + for (i = 0; i < 4; ++i) { // horizontal pass + const int dc = tmp[0] + 4; + const int a = dc + tmp[8]; + const int b = dc - tmp[8]; + const int c = MUL(tmp[4], kC2) - MUL(tmp[12], kC1); + const int d = MUL(tmp[4], kC1) + MUL(tmp[12], kC2); + STORE(0, i, a + d); + STORE(1, i, b + c); + STORE(2, i, b - c); + STORE(3, i, a - d); + tmp++; + } +} + +static void ITransform(const uint8_t* ref, const int16_t* in, uint8_t* dst, + int do_two) { + ITransformOne(ref, in, dst); + if (do_two) { + ITransformOne(ref + 4, in + 16, dst + 4); + } +} + +static void FTransform(const uint8_t* src, const uint8_t* ref, int16_t* out) { + int i; + int tmp[16]; + for (i = 0; i < 4; ++i, src += BPS, ref += BPS) { + const int d0 = src[0] - ref[0]; + const int d1 = src[1] - ref[1]; + const int d2 = src[2] - ref[2]; + const int d3 = src[3] - ref[3]; + const int a0 = (d0 + d3) << 3; + const int a1 = (d1 + d2) << 3; + const int a2 = (d1 - d2) << 3; + const int a3 = (d0 - d3) << 3; + tmp[0 + i * 4] = (a0 + a1); + tmp[1 + i * 4] = (a2 * 2217 + a3 * 5352 + 14500) >> 12; + tmp[2 + i * 4] = (a0 - a1); + tmp[3 + i * 4] = (a3 * 2217 - a2 * 5352 + 7500) >> 12; + } + for (i = 0; i < 4; ++i) { + const int a0 = (tmp[0 + i] + tmp[12 + i]); + const int a1 = (tmp[4 + i] + tmp[ 8 + i]); + const int a2 = (tmp[4 + i] - tmp[ 8 + i]); + const int a3 = (tmp[0 + i] - tmp[12 + i]); + out[0 + i] = (a0 + a1 + 7) >> 4; + out[4 + i] = ((a2 * 2217 + a3 * 5352 + 12000) >> 16) + (a3 != 0); + out[8 + i] = (a0 - a1 + 7) >> 4; + out[12+ i] = ((a3 * 2217 - a2 * 5352 + 51000) >> 16); + } +} + +static void ITransformWHT(const int16_t* in, int16_t* out) { + int tmp[16]; + int i; + for (i = 0; i < 4; ++i) { + const int a0 = in[0 + i] + in[12 + i]; + const int a1 = in[4 + i] + in[ 8 + i]; + const int a2 = in[4 + i] - in[ 8 + i]; + const int a3 = in[0 + i] - in[12 + i]; + tmp[0 + i] = a0 + a1; + tmp[8 + i] = a0 - a1; + tmp[4 + i] = a3 + a2; + tmp[12 + i] = a3 - a2; + } + for (i = 0; i < 4; ++i) { + const int dc = tmp[0 + i * 4] + 3; // w/ rounder + const int a0 = dc + tmp[3 + i * 4]; + const int a1 = tmp[1 + i * 4] + tmp[2 + i * 4]; + const int a2 = tmp[1 + i * 4] - tmp[2 + i * 4]; + const int a3 = dc - tmp[3 + i * 4]; + out[ 0] = (a0 + a1) >> 3; + out[16] = (a3 + a2) >> 3; + out[32] = (a0 - a1) >> 3; + out[48] = (a3 - a2) >> 3; + out += 64; + } +} + +static void FTransformWHT(const int16_t* in, int16_t* out) { + int tmp[16]; + int i; + for (i = 0; i < 4; ++i, in += 64) { + const int a0 = (in[0 * 16] + in[2 * 16]) << 2; + const int a1 = (in[1 * 16] + in[3 * 16]) << 2; + const int a2 = (in[1 * 16] - in[3 * 16]) << 2; + const int a3 = (in[0 * 16] - in[2 * 16]) << 2; + tmp[0 + i * 4] = (a0 + a1) + (a0 != 0); + tmp[1 + i * 4] = a3 + a2; + tmp[2 + i * 4] = a3 - a2; + tmp[3 + i * 
4] = a0 - a1; + } + for (i = 0; i < 4; ++i) { + const int a0 = (tmp[0 + i] + tmp[8 + i]); + const int a1 = (tmp[4 + i] + tmp[12+ i]); + const int a2 = (tmp[4 + i] - tmp[12+ i]); + const int a3 = (tmp[0 + i] - tmp[8 + i]); + const int b0 = a0 + a1; + const int b1 = a3 + a2; + const int b2 = a3 - a2; + const int b3 = a0 - a1; + out[ 0 + i] = (b0 + (b0 > 0) + 3) >> 3; + out[ 4 + i] = (b1 + (b1 > 0) + 3) >> 3; + out[ 8 + i] = (b2 + (b2 > 0) + 3) >> 3; + out[12 + i] = (b3 + (b3 > 0) + 3) >> 3; + } +} + +#undef MUL +#undef STORE + +//------------------------------------------------------------------------------ +// Intra predictions + +#define DST(x, y) dst[(x) + (y) * BPS] + +static WEBP_INLINE void Fill(uint8_t* dst, int value, int size) { + int j; + for (j = 0; j < size; ++j) { + memset(dst + j * BPS, value, size); + } +} + +static WEBP_INLINE void VerticalPred(uint8_t* dst, + const uint8_t* top, int size) { + int j; + if (top) { + for (j = 0; j < size; ++j) memcpy(dst + j * BPS, top, size); + } else { + Fill(dst, 127, size); + } +} + +static WEBP_INLINE void HorizontalPred(uint8_t* dst, + const uint8_t* left, int size) { + if (left) { + int j; + for (j = 0; j < size; ++j) { + memset(dst + j * BPS, left[j], size); + } + } else { + Fill(dst, 129, size); + } +} + +static WEBP_INLINE void TrueMotion(uint8_t* dst, const uint8_t* left, + const uint8_t* top, int size) { + int y; + if (left) { + if (top) { + const uint8_t* const clip = clip1 + 255 - left[-1]; + for (y = 0; y < size; ++y) { + const uint8_t* const clip_table = clip + left[y]; + int x; + for (x = 0; x < size; ++x) { + dst[x] = clip_table[top[x]]; + } + dst += BPS; + } + } else { + HorizontalPred(dst, left, size); + } + } else { + // true motion without left samples (hence: with default 129 value) + // is equivalent to VE prediction where you just copy the top samples. + // Note that if top samples are not available, the default value is + // then 129, and not 127 as in the VerticalPred case. + if (top) { + VerticalPred(dst, top, size); + } else { + Fill(dst, 129, size); + } + } +} + +static WEBP_INLINE void DCMode(uint8_t* dst, const uint8_t* left, + const uint8_t* top, + int size, int round, int shift) { + int DC = 0; + int j; + if (top) { + for (j = 0; j < size; ++j) DC += top[j]; + if (left) { // top and left present + for (j = 0; j < size; ++j) DC += left[j]; + } else { // top, but no left + DC += DC; + } + DC = (DC + round) >> shift; + } else if (left) { // left but no top + for (j = 0; j < size; ++j) DC += left[j]; + DC += DC; + DC = (DC + round) >> shift; + } else { // no top, no left, nothing. 
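+    // fall back to the mid-range value 0x80 (128), since there are no
+    // neighboring samples to average.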
+ DC = 0x80; + } + Fill(dst, DC, size); +} + +//------------------------------------------------------------------------------ +// Chroma 8x8 prediction (paragraph 12.2) + +static void IntraChromaPreds(uint8_t* dst, const uint8_t* left, + const uint8_t* top) { + // U block + DCMode(C8DC8 + dst, left, top, 8, 8, 4); + VerticalPred(C8VE8 + dst, top, 8); + HorizontalPred(C8HE8 + dst, left, 8); + TrueMotion(C8TM8 + dst, left, top, 8); + // V block + dst += 8; + if (top) top += 8; + if (left) left += 16; + DCMode(C8DC8 + dst, left, top, 8, 8, 4); + VerticalPred(C8VE8 + dst, top, 8); + HorizontalPred(C8HE8 + dst, left, 8); + TrueMotion(C8TM8 + dst, left, top, 8); +} + +//------------------------------------------------------------------------------ +// luma 16x16 prediction (paragraph 12.3) + +static void Intra16Preds(uint8_t* dst, + const uint8_t* left, const uint8_t* top) { + DCMode(I16DC16 + dst, left, top, 16, 16, 5); + VerticalPred(I16VE16 + dst, top, 16); + HorizontalPred(I16HE16 + dst, left, 16); + TrueMotion(I16TM16 + dst, left, top, 16); +} + +//------------------------------------------------------------------------------ +// luma 4x4 prediction + +#define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2) +#define AVG2(a, b) (((a) + (b) + 1) >> 1) + +static void VE4(uint8_t* dst, const uint8_t* top) { // vertical + const uint8_t vals[4] = { + AVG3(top[-1], top[0], top[1]), + AVG3(top[ 0], top[1], top[2]), + AVG3(top[ 1], top[2], top[3]), + AVG3(top[ 2], top[3], top[4]) + }; + int i; + for (i = 0; i < 4; ++i) { + memcpy(dst + i * BPS, vals, 4); + } +} + +static void HE4(uint8_t* dst, const uint8_t* top) { // horizontal + const int X = top[-1]; + const int I = top[-2]; + const int J = top[-3]; + const int K = top[-4]; + const int L = top[-5]; + *(uint32_t*)(dst + 0 * BPS) = 0x01010101U * AVG3(X, I, J); + *(uint32_t*)(dst + 1 * BPS) = 0x01010101U * AVG3(I, J, K); + *(uint32_t*)(dst + 2 * BPS) = 0x01010101U * AVG3(J, K, L); + *(uint32_t*)(dst + 3 * BPS) = 0x01010101U * AVG3(K, L, L); +} + +static void DC4(uint8_t* dst, const uint8_t* top) { + uint32_t dc = 4; + int i; + for (i = 0; i < 4; ++i) dc += top[i] + top[-5 + i]; + Fill(dst, dc >> 3, 4); +} + +static void RD4(uint8_t* dst, const uint8_t* top) { + const int X = top[-1]; + const int I = top[-2]; + const int J = top[-3]; + const int K = top[-4]; + const int L = top[-5]; + const int A = top[0]; + const int B = top[1]; + const int C = top[2]; + const int D = top[3]; + DST(0, 3) = AVG3(J, K, L); + DST(0, 2) = DST(1, 3) = AVG3(I, J, K); + DST(0, 1) = DST(1, 2) = DST(2, 3) = AVG3(X, I, J); + DST(0, 0) = DST(1, 1) = DST(2, 2) = DST(3, 3) = AVG3(A, X, I); + DST(1, 0) = DST(2, 1) = DST(3, 2) = AVG3(B, A, X); + DST(2, 0) = DST(3, 1) = AVG3(C, B, A); + DST(3, 0) = AVG3(D, C, B); +} + +static void LD4(uint8_t* dst, const uint8_t* top) { + const int A = top[0]; + const int B = top[1]; + const int C = top[2]; + const int D = top[3]; + const int E = top[4]; + const int F = top[5]; + const int G = top[6]; + const int H = top[7]; + DST(0, 0) = AVG3(A, B, C); + DST(1, 0) = DST(0, 1) = AVG3(B, C, D); + DST(2, 0) = DST(1, 1) = DST(0, 2) = AVG3(C, D, E); + DST(3, 0) = DST(2, 1) = DST(1, 2) = DST(0, 3) = AVG3(D, E, F); + DST(3, 1) = DST(2, 2) = DST(1, 3) = AVG3(E, F, G); + DST(3, 2) = DST(2, 3) = AVG3(F, G, H); + DST(3, 3) = AVG3(G, H, H); +} + +static void VR4(uint8_t* dst, const uint8_t* top) { + const int X = top[-1]; + const int I = top[-2]; + const int J = top[-3]; + const int K = top[-4]; + const int A = top[0]; + const int B = top[1]; + const int C = 
top[2]; + const int D = top[3]; + DST(0, 0) = DST(1, 2) = AVG2(X, A); + DST(1, 0) = DST(2, 2) = AVG2(A, B); + DST(2, 0) = DST(3, 2) = AVG2(B, C); + DST(3, 0) = AVG2(C, D); + + DST(0, 3) = AVG3(K, J, I); + DST(0, 2) = AVG3(J, I, X); + DST(0, 1) = DST(1, 3) = AVG3(I, X, A); + DST(1, 1) = DST(2, 3) = AVG3(X, A, B); + DST(2, 1) = DST(3, 3) = AVG3(A, B, C); + DST(3, 1) = AVG3(B, C, D); +} + +static void VL4(uint8_t* dst, const uint8_t* top) { + const int A = top[0]; + const int B = top[1]; + const int C = top[2]; + const int D = top[3]; + const int E = top[4]; + const int F = top[5]; + const int G = top[6]; + const int H = top[7]; + DST(0, 0) = AVG2(A, B); + DST(1, 0) = DST(0, 2) = AVG2(B, C); + DST(2, 0) = DST(1, 2) = AVG2(C, D); + DST(3, 0) = DST(2, 2) = AVG2(D, E); + + DST(0, 1) = AVG3(A, B, C); + DST(1, 1) = DST(0, 3) = AVG3(B, C, D); + DST(2, 1) = DST(1, 3) = AVG3(C, D, E); + DST(3, 1) = DST(2, 3) = AVG3(D, E, F); + DST(3, 2) = AVG3(E, F, G); + DST(3, 3) = AVG3(F, G, H); +} + +static void HU4(uint8_t* dst, const uint8_t* top) { + const int I = top[-2]; + const int J = top[-3]; + const int K = top[-4]; + const int L = top[-5]; + DST(0, 0) = AVG2(I, J); + DST(2, 0) = DST(0, 1) = AVG2(J, K); + DST(2, 1) = DST(0, 2) = AVG2(K, L); + DST(1, 0) = AVG3(I, J, K); + DST(3, 0) = DST(1, 1) = AVG3(J, K, L); + DST(3, 1) = DST(1, 2) = AVG3(K, L, L); + DST(3, 2) = DST(2, 2) = + DST(0, 3) = DST(1, 3) = DST(2, 3) = DST(3, 3) = L; +} + +static void HD4(uint8_t* dst, const uint8_t* top) { + const int X = top[-1]; + const int I = top[-2]; + const int J = top[-3]; + const int K = top[-4]; + const int L = top[-5]; + const int A = top[0]; + const int B = top[1]; + const int C = top[2]; + + DST(0, 0) = DST(2, 1) = AVG2(I, X); + DST(0, 1) = DST(2, 2) = AVG2(J, I); + DST(0, 2) = DST(2, 3) = AVG2(K, J); + DST(0, 3) = AVG2(L, K); + + DST(3, 0) = AVG3(A, B, C); + DST(2, 0) = AVG3(X, A, B); + DST(1, 0) = DST(3, 1) = AVG3(I, X, A); + DST(1, 1) = DST(3, 2) = AVG3(J, I, X); + DST(1, 2) = DST(3, 3) = AVG3(K, J, I); + DST(1, 3) = AVG3(L, K, J); +} + +static void TM4(uint8_t* dst, const uint8_t* top) { + int x, y; + const uint8_t* const clip = clip1 + 255 - top[-1]; + for (y = 0; y < 4; ++y) { + const uint8_t* const clip_table = clip + top[-2 - y]; + for (x = 0; x < 4; ++x) { + dst[x] = clip_table[top[x]]; + } + dst += BPS; + } +} + +#undef DST +#undef AVG3 +#undef AVG2 + +// Left samples are top[-5 .. 
-2], top_left is top[-1], top are +// located at top[0..3], and top right is top[4..7] +static void Intra4Preds(uint8_t* dst, const uint8_t* top) { + DC4(I4DC4 + dst, top); + TM4(I4TM4 + dst, top); + VE4(I4VE4 + dst, top); + HE4(I4HE4 + dst, top); + RD4(I4RD4 + dst, top); + VR4(I4VR4 + dst, top); + LD4(I4LD4 + dst, top); + VL4(I4VL4 + dst, top); + HD4(I4HD4 + dst, top); + HU4(I4HU4 + dst, top); +} + +//------------------------------------------------------------------------------ +// Metric + +static WEBP_INLINE int GetSSE(const uint8_t* a, const uint8_t* b, + int w, int h) { + int count = 0; + int y, x; + for (y = 0; y < h; ++y) { + for (x = 0; x < w; ++x) { + const int diff = (int)a[x] - b[x]; + count += diff * diff; + } + a += BPS; + b += BPS; + } + return count; +} + +static int SSE16x16(const uint8_t* a, const uint8_t* b) { + return GetSSE(a, b, 16, 16); +} +static int SSE16x8(const uint8_t* a, const uint8_t* b) { + return GetSSE(a, b, 16, 8); +} +static int SSE8x8(const uint8_t* a, const uint8_t* b) { + return GetSSE(a, b, 8, 8); +} +static int SSE4x4(const uint8_t* a, const uint8_t* b) { + return GetSSE(a, b, 4, 4); +} + +//------------------------------------------------------------------------------ +// Texture distortion +// +// We try to match the spectral content (weighted) between source and +// reconstructed samples. + +// Hadamard transform +// Returns the weighted sum of the absolute value of transformed coefficients. +static int TTransform(const uint8_t* in, const uint16_t* w) { + int sum = 0; + int tmp[16]; + int i; + // horizontal pass + for (i = 0; i < 4; ++i, in += BPS) { + const int a0 = (in[0] + in[2]) << 2; + const int a1 = (in[1] + in[3]) << 2; + const int a2 = (in[1] - in[3]) << 2; + const int a3 = (in[0] - in[2]) << 2; + tmp[0 + i * 4] = a0 + a1 + (a0 != 0); + tmp[1 + i * 4] = a3 + a2; + tmp[2 + i * 4] = a3 - a2; + tmp[3 + i * 4] = a0 - a1; + } + // vertical pass + for (i = 0; i < 4; ++i, ++w) { + const int a0 = (tmp[0 + i] + tmp[8 + i]); + const int a1 = (tmp[4 + i] + tmp[12+ i]); + const int a2 = (tmp[4 + i] - tmp[12+ i]); + const int a3 = (tmp[0 + i] - tmp[8 + i]); + const int b0 = a0 + a1; + const int b1 = a3 + a2; + const int b2 = a3 - a2; + const int b3 = a0 - a1; + // abs((b + (b<0) + 3) >> 3) = (abs(b) + 3) >> 3 + sum += w[ 0] * ((abs(b0) + 3) >> 3); + sum += w[ 4] * ((abs(b1) + 3) >> 3); + sum += w[ 8] * ((abs(b2) + 3) >> 3); + sum += w[12] * ((abs(b3) + 3) >> 3); + } + return sum; +} + +static int Disto4x4(const uint8_t* const a, const uint8_t* const b, + const uint16_t* const w) { + const int sum1 = TTransform(a, w); + const int sum2 = TTransform(b, w); + return (abs(sum2 - sum1) + 8) >> 4; +} + +static int Disto16x16(const uint8_t* const a, const uint8_t* const b, + const uint16_t* const w) { + int D = 0; + int x, y; + for (y = 0; y < 16 * BPS; y += 4 * BPS) { + for (x = 0; x < 16; x += 4) { + D += Disto4x4(a + x + y, b + x + y, w); + } + } + return D; +} + +//------------------------------------------------------------------------------ +// Quantization +// + +static const uint8_t kZigzag[16] = { + 0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15 +}; + +// Simple quantization +static int QuantizeBlock(int16_t in[16], int16_t out[16], + int n, const VP8Matrix* const mtx) { + int last = -1; + for (; n < 16; ++n) { + const int j = kZigzag[n]; + const int sign = (in[j] < 0); + int coeff = (sign ? 
-in[j] : in[j]) + mtx->sharpen_[j]; + if (coeff > 2047) coeff = 2047; + if (coeff > mtx->zthresh_[j]) { + const int Q = mtx->q_[j]; + const int iQ = mtx->iq_[j]; + const int B = mtx->bias_[j]; + out[n] = QUANTDIV(coeff, iQ, B); + if (sign) out[n] = -out[n]; + in[j] = out[n] * Q; + if (out[n]) last = n; + } else { + out[n] = 0; + in[j] = 0; + } + } + return (last >= 0); +} + +//------------------------------------------------------------------------------ +// Block copy + +static WEBP_INLINE void Copy(const uint8_t* src, uint8_t* dst, int size) { + int y; + for (y = 0; y < size; ++y) { + memcpy(dst, src, size); + src += BPS; + dst += BPS; + } +} + +static void Copy4x4(const uint8_t* src, uint8_t* dst) { Copy(src, dst, 4); } + +//------------------------------------------------------------------------------ +// Initialization + +// Speed-critical function pointers. We have to initialize them to the default +// implementations within VP8EncDspInit(). +VP8CHisto VP8CollectHistogram; +VP8Idct VP8ITransform; +VP8Fdct VP8FTransform; +VP8WHT VP8ITransformWHT; +VP8WHT VP8FTransformWHT; +VP8Intra4Preds VP8EncPredLuma4; +VP8IntraPreds VP8EncPredLuma16; +VP8IntraPreds VP8EncPredChroma8; +VP8Metric VP8SSE16x16; +VP8Metric VP8SSE8x8; +VP8Metric VP8SSE16x8; +VP8Metric VP8SSE4x4; +VP8WMetric VP8TDisto4x4; +VP8WMetric VP8TDisto16x16; +VP8QuantizeBlock VP8EncQuantizeBlock; +VP8BlockCopy VP8Copy4x4; + +extern void VP8EncDspInitSSE2(void); + +void VP8EncDspInit(void) { + InitTables(); + + // default C implementations + VP8CollectHistogram = CollectHistogram; + VP8ITransform = ITransform; + VP8FTransform = FTransform; + VP8ITransformWHT = ITransformWHT; + VP8FTransformWHT = FTransformWHT; + VP8EncPredLuma4 = Intra4Preds; + VP8EncPredLuma16 = Intra16Preds; + VP8EncPredChroma8 = IntraChromaPreds; + VP8SSE16x16 = SSE16x16; + VP8SSE8x8 = SSE8x8; + VP8SSE16x8 = SSE16x8; + VP8SSE4x4 = SSE4x4; + VP8TDisto4x4 = Disto4x4; + VP8TDisto16x16 = Disto16x16; + VP8EncQuantizeBlock = QuantizeBlock; + VP8Copy4x4 = Copy4x4; + + // If defined, use CPUInfo() to overwrite some pointers with faster versions. + if (VP8GetCPUInfo) { +#if defined(WEBP_USE_SSE2) + if (VP8GetCPUInfo(kSSE2)) { + VP8EncDspInitSSE2(); + } +#endif + } +} + +#if defined(__cplusplus) || defined(c_plusplus) +} // extern "C" +#endif diff --git a/drivers/webpold/dsp/enc_sse2.c b/drivers/webpold/dsp/enc_sse2.c new file mode 100644 index 0000000000..b046761dc1 --- /dev/null +++ b/drivers/webpold/dsp/enc_sse2.c @@ -0,0 +1,837 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// +// This code is licensed under the same terms as WebM: +// Software License Agreement: http://www.webmproject.org/license/software/ +// Additional IP Rights Grant: http://www.webmproject.org/license/additional/ +// ----------------------------------------------------------------------------- +// +// SSE2 version of speed-critical encoding functions. +// +// Author: Christian Duvivier (cduvivier@google.com) + +#include "./dsp.h" + +#if defined(WEBP_USE_SSE2) +#include <stdlib.h> // for abs() +#include <emmintrin.h> + +#include "../enc/vp8enci.h" + +#if defined(__cplusplus) || defined(c_plusplus) +extern "C" { +#endif + +//------------------------------------------------------------------------------ +// Compute susceptibility based on DCT-coeff histograms: +// the higher, the "easier" the macroblock is to compress. 
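+// (Same binning as the C CollectHistogram() above: |coeff| >> 2, clipped to
+// MAX_COEFF_THRESH, accumulated into histo[] and reduced by VP8GetAlpha().)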
+
+static int CollectHistogramSSE2(const uint8_t* ref, const uint8_t* pred,
+                                int start_block, int end_block) {
+  int histo[MAX_COEFF_THRESH + 1] = { 0 };
+  int16_t out[16];
+  int j, k;
+  const __m128i max_coeff_thresh = _mm_set1_epi16(MAX_COEFF_THRESH);
+  for (j = start_block; j < end_block; ++j) {
+    VP8FTransform(ref + VP8DspScan[j], pred + VP8DspScan[j], out);
+
+    // Convert coefficients to bin (within out[]).
+    {
+      // Load.
+      const __m128i out0 = _mm_loadu_si128((__m128i*)&out[0]);
+      const __m128i out1 = _mm_loadu_si128((__m128i*)&out[8]);
+      // sign(out) = out >> 15  (0x0000 if positive, 0xffff if negative)
+      const __m128i sign0 = _mm_srai_epi16(out0, 15);
+      const __m128i sign1 = _mm_srai_epi16(out1, 15);
+      // abs(out) = (out ^ sign) - sign
+      const __m128i xor0 = _mm_xor_si128(out0, sign0);
+      const __m128i xor1 = _mm_xor_si128(out1, sign1);
+      const __m128i abs0 = _mm_sub_epi16(xor0, sign0);
+      const __m128i abs1 = _mm_sub_epi16(xor1, sign1);
+      // v = abs(out) >> 2
+      const __m128i v0 = _mm_srai_epi16(abs0, 2);
+      const __m128i v1 = _mm_srai_epi16(abs1, 2);
+      // bin = min(v, MAX_COEFF_THRESH)
+      const __m128i bin0 = _mm_min_epi16(v0, max_coeff_thresh);
+      const __m128i bin1 = _mm_min_epi16(v1, max_coeff_thresh);
+      // Store.
+      _mm_storeu_si128((__m128i*)&out[0], bin0);
+      _mm_storeu_si128((__m128i*)&out[8], bin1);
+    }
+
+    // Use bin to update histogram.
+    for (k = 0; k < 16; ++k) {
+      histo[out[k]]++;
+    }
+  }
+
+  return VP8GetAlpha(histo);
+}
+
+//------------------------------------------------------------------------------
+// Transforms (Paragraph 14.4)
+
+// Does one or two inverse transforms.
+static void ITransformSSE2(const uint8_t* ref, const int16_t* in, uint8_t* dst,
+                           int do_two) {
+  // This implementation makes use of 16-bit fixed point versions of two
+  // multiply constants:
+  //    K1 = sqrt(2) * cos (pi/8) ~= 85627 / 2^16
+  //    K2 = sqrt(2) * sin (pi/8) ~= 35468 / 2^16
+  //
+  // To be able to use signed 16-bit integers, we use the following trick to
+  // have constants within range:
+  // - Associated constants are obtained by subtracting the 16-bit fixed point
+  //   version of one:
+  //      k = K - (1 << 16)  =>  K = k + (1 << 16)
+  //      K1 = 85627  =>  k1 =  20091
+  //      K2 = 35468  =>  k2 = -30068
+  // - The multiplication of a variable by a constant becomes the sum of the
+  //   variable and the multiplication of that variable by the associated
+  //   constant:
+  //      (x * K) >> 16 = (x * (k + (1 << 16))) >> 16 = ((x * k ) >> 16) + x
+  const __m128i k1 = _mm_set1_epi16(20091);
+  const __m128i k2 = _mm_set1_epi16(-30068);
+  __m128i T0, T1, T2, T3;
+
+  // Load and concatenate the transform coefficients (we'll do two inverse
+  // transforms in parallel). In the case of only one inverse transform, the
+  // second half of the vectors will just contain random values we'll never
+  // use nor store.
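+  // (Each _mm_loadl_epi64() below fetches 8 bytes, i.e. one row of four
+  // 16-bit coefficients, into the lower half of a register; the do_two case
+  // then packs the second block's rows into the upper halves.)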
+ __m128i in0, in1, in2, in3; + { + in0 = _mm_loadl_epi64((__m128i*)&in[0]); + in1 = _mm_loadl_epi64((__m128i*)&in[4]); + in2 = _mm_loadl_epi64((__m128i*)&in[8]); + in3 = _mm_loadl_epi64((__m128i*)&in[12]); + // a00 a10 a20 a30 x x x x + // a01 a11 a21 a31 x x x x + // a02 a12 a22 a32 x x x x + // a03 a13 a23 a33 x x x x + if (do_two) { + const __m128i inB0 = _mm_loadl_epi64((__m128i*)&in[16]); + const __m128i inB1 = _mm_loadl_epi64((__m128i*)&in[20]); + const __m128i inB2 = _mm_loadl_epi64((__m128i*)&in[24]); + const __m128i inB3 = _mm_loadl_epi64((__m128i*)&in[28]); + in0 = _mm_unpacklo_epi64(in0, inB0); + in1 = _mm_unpacklo_epi64(in1, inB1); + in2 = _mm_unpacklo_epi64(in2, inB2); + in3 = _mm_unpacklo_epi64(in3, inB3); + // a00 a10 a20 a30 b00 b10 b20 b30 + // a01 a11 a21 a31 b01 b11 b21 b31 + // a02 a12 a22 a32 b02 b12 b22 b32 + // a03 a13 a23 a33 b03 b13 b23 b33 + } + } + + // Vertical pass and subsequent transpose. + { + // First pass, c and d calculations are longer because of the "trick" + // multiplications. + const __m128i a = _mm_add_epi16(in0, in2); + const __m128i b = _mm_sub_epi16(in0, in2); + // c = MUL(in1, K2) - MUL(in3, K1) = MUL(in1, k2) - MUL(in3, k1) + in1 - in3 + const __m128i c1 = _mm_mulhi_epi16(in1, k2); + const __m128i c2 = _mm_mulhi_epi16(in3, k1); + const __m128i c3 = _mm_sub_epi16(in1, in3); + const __m128i c4 = _mm_sub_epi16(c1, c2); + const __m128i c = _mm_add_epi16(c3, c4); + // d = MUL(in1, K1) + MUL(in3, K2) = MUL(in1, k1) + MUL(in3, k2) + in1 + in3 + const __m128i d1 = _mm_mulhi_epi16(in1, k1); + const __m128i d2 = _mm_mulhi_epi16(in3, k2); + const __m128i d3 = _mm_add_epi16(in1, in3); + const __m128i d4 = _mm_add_epi16(d1, d2); + const __m128i d = _mm_add_epi16(d3, d4); + + // Second pass. + const __m128i tmp0 = _mm_add_epi16(a, d); + const __m128i tmp1 = _mm_add_epi16(b, c); + const __m128i tmp2 = _mm_sub_epi16(b, c); + const __m128i tmp3 = _mm_sub_epi16(a, d); + + // Transpose the two 4x4. + // a00 a01 a02 a03 b00 b01 b02 b03 + // a10 a11 a12 a13 b10 b11 b12 b13 + // a20 a21 a22 a23 b20 b21 b22 b23 + // a30 a31 a32 a33 b30 b31 b32 b33 + const __m128i transpose0_0 = _mm_unpacklo_epi16(tmp0, tmp1); + const __m128i transpose0_1 = _mm_unpacklo_epi16(tmp2, tmp3); + const __m128i transpose0_2 = _mm_unpackhi_epi16(tmp0, tmp1); + const __m128i transpose0_3 = _mm_unpackhi_epi16(tmp2, tmp3); + // a00 a10 a01 a11 a02 a12 a03 a13 + // a20 a30 a21 a31 a22 a32 a23 a33 + // b00 b10 b01 b11 b02 b12 b03 b13 + // b20 b30 b21 b31 b22 b32 b23 b33 + const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1); + const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3); + const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1); + const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3); + // a00 a10 a20 a30 a01 a11 a21 a31 + // b00 b10 b20 b30 b01 b11 b21 b31 + // a02 a12 a22 a32 a03 a13 a23 a33 + // b02 b12 a22 b32 b03 b13 b23 b33 + T0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1); + T1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1); + T2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3); + T3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3); + // a00 a10 a20 a30 b00 b10 b20 b30 + // a01 a11 a21 a31 b01 b11 b21 b31 + // a02 a12 a22 a32 b02 b12 b22 b32 + // a03 a13 a23 a33 b03 b13 b23 b33 + } + + // Horizontal pass and subsequent transpose. + { + // First pass, c and d calculations are longer because of the "trick" + // multiplications. 
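+    // The '+4' bias folded into the DC term below matches the C version's
+    // 'dc = tmp[0] + 4', so that the final '>> 3' rounds to nearest.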
+ const __m128i four = _mm_set1_epi16(4); + const __m128i dc = _mm_add_epi16(T0, four); + const __m128i a = _mm_add_epi16(dc, T2); + const __m128i b = _mm_sub_epi16(dc, T2); + // c = MUL(T1, K2) - MUL(T3, K1) = MUL(T1, k2) - MUL(T3, k1) + T1 - T3 + const __m128i c1 = _mm_mulhi_epi16(T1, k2); + const __m128i c2 = _mm_mulhi_epi16(T3, k1); + const __m128i c3 = _mm_sub_epi16(T1, T3); + const __m128i c4 = _mm_sub_epi16(c1, c2); + const __m128i c = _mm_add_epi16(c3, c4); + // d = MUL(T1, K1) + MUL(T3, K2) = MUL(T1, k1) + MUL(T3, k2) + T1 + T3 + const __m128i d1 = _mm_mulhi_epi16(T1, k1); + const __m128i d2 = _mm_mulhi_epi16(T3, k2); + const __m128i d3 = _mm_add_epi16(T1, T3); + const __m128i d4 = _mm_add_epi16(d1, d2); + const __m128i d = _mm_add_epi16(d3, d4); + + // Second pass. + const __m128i tmp0 = _mm_add_epi16(a, d); + const __m128i tmp1 = _mm_add_epi16(b, c); + const __m128i tmp2 = _mm_sub_epi16(b, c); + const __m128i tmp3 = _mm_sub_epi16(a, d); + const __m128i shifted0 = _mm_srai_epi16(tmp0, 3); + const __m128i shifted1 = _mm_srai_epi16(tmp1, 3); + const __m128i shifted2 = _mm_srai_epi16(tmp2, 3); + const __m128i shifted3 = _mm_srai_epi16(tmp3, 3); + + // Transpose the two 4x4. + // a00 a01 a02 a03 b00 b01 b02 b03 + // a10 a11 a12 a13 b10 b11 b12 b13 + // a20 a21 a22 a23 b20 b21 b22 b23 + // a30 a31 a32 a33 b30 b31 b32 b33 + const __m128i transpose0_0 = _mm_unpacklo_epi16(shifted0, shifted1); + const __m128i transpose0_1 = _mm_unpacklo_epi16(shifted2, shifted3); + const __m128i transpose0_2 = _mm_unpackhi_epi16(shifted0, shifted1); + const __m128i transpose0_3 = _mm_unpackhi_epi16(shifted2, shifted3); + // a00 a10 a01 a11 a02 a12 a03 a13 + // a20 a30 a21 a31 a22 a32 a23 a33 + // b00 b10 b01 b11 b02 b12 b03 b13 + // b20 b30 b21 b31 b22 b32 b23 b33 + const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1); + const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3); + const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1); + const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3); + // a00 a10 a20 a30 a01 a11 a21 a31 + // b00 b10 b20 b30 b01 b11 b21 b31 + // a02 a12 a22 a32 a03 a13 a23 a33 + // b02 b12 a22 b32 b03 b13 b23 b33 + T0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1); + T1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1); + T2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3); + T3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3); + // a00 a10 a20 a30 b00 b10 b20 b30 + // a01 a11 a21 a31 b01 b11 b21 b31 + // a02 a12 a22 a32 b02 b12 b22 b32 + // a03 a13 a23 a33 b03 b13 b23 b33 + } + + // Add inverse transform to 'ref' and store. + { + const __m128i zero = _mm_set1_epi16(0); + // Load the reference(s). + __m128i ref0, ref1, ref2, ref3; + if (do_two) { + // Load eight bytes/pixels per line. + ref0 = _mm_loadl_epi64((__m128i*)&ref[0 * BPS]); + ref1 = _mm_loadl_epi64((__m128i*)&ref[1 * BPS]); + ref2 = _mm_loadl_epi64((__m128i*)&ref[2 * BPS]); + ref3 = _mm_loadl_epi64((__m128i*)&ref[3 * BPS]); + } else { + // Load four bytes/pixels per line. + ref0 = _mm_cvtsi32_si128(*(int*)&ref[0 * BPS]); + ref1 = _mm_cvtsi32_si128(*(int*)&ref[1 * BPS]); + ref2 = _mm_cvtsi32_si128(*(int*)&ref[2 * BPS]); + ref3 = _mm_cvtsi32_si128(*(int*)&ref[3 * BPS]); + } + // Convert to 16b. + ref0 = _mm_unpacklo_epi8(ref0, zero); + ref1 = _mm_unpacklo_epi8(ref1, zero); + ref2 = _mm_unpacklo_epi8(ref2, zero); + ref3 = _mm_unpacklo_epi8(ref3, zero); + // Add the inverse transform(s). 
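+    // The 16-bit sums may fall outside [0, 255]; the _mm_packus_epi16() calls
+    // below clamp them back to 8 bits with unsigned saturation before storing.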
+ ref0 = _mm_add_epi16(ref0, T0); + ref1 = _mm_add_epi16(ref1, T1); + ref2 = _mm_add_epi16(ref2, T2); + ref3 = _mm_add_epi16(ref3, T3); + // Unsigned saturate to 8b. + ref0 = _mm_packus_epi16(ref0, ref0); + ref1 = _mm_packus_epi16(ref1, ref1); + ref2 = _mm_packus_epi16(ref2, ref2); + ref3 = _mm_packus_epi16(ref3, ref3); + // Store the results. + if (do_two) { + // Store eight bytes/pixels per line. + _mm_storel_epi64((__m128i*)&dst[0 * BPS], ref0); + _mm_storel_epi64((__m128i*)&dst[1 * BPS], ref1); + _mm_storel_epi64((__m128i*)&dst[2 * BPS], ref2); + _mm_storel_epi64((__m128i*)&dst[3 * BPS], ref3); + } else { + // Store four bytes/pixels per line. + *((int32_t *)&dst[0 * BPS]) = _mm_cvtsi128_si32(ref0); + *((int32_t *)&dst[1 * BPS]) = _mm_cvtsi128_si32(ref1); + *((int32_t *)&dst[2 * BPS]) = _mm_cvtsi128_si32(ref2); + *((int32_t *)&dst[3 * BPS]) = _mm_cvtsi128_si32(ref3); + } + } +} + +static void FTransformSSE2(const uint8_t* src, const uint8_t* ref, + int16_t* out) { + const __m128i zero = _mm_setzero_si128(); + const __m128i seven = _mm_set1_epi16(7); + const __m128i k7500 = _mm_set1_epi32(7500); + const __m128i k14500 = _mm_set1_epi32(14500); + const __m128i k51000 = _mm_set1_epi32(51000); + const __m128i k12000_plus_one = _mm_set1_epi32(12000 + (1 << 16)); + const __m128i k5352_2217 = _mm_set_epi16(5352, 2217, 5352, 2217, + 5352, 2217, 5352, 2217); + const __m128i k2217_5352 = _mm_set_epi16(2217, -5352, 2217, -5352, + 2217, -5352, 2217, -5352); + + __m128i v01, v32; + + // Difference between src and ref and initial transpose. + { + // Load src and convert to 16b. + const __m128i src0 = _mm_loadl_epi64((__m128i*)&src[0 * BPS]); + const __m128i src1 = _mm_loadl_epi64((__m128i*)&src[1 * BPS]); + const __m128i src2 = _mm_loadl_epi64((__m128i*)&src[2 * BPS]); + const __m128i src3 = _mm_loadl_epi64((__m128i*)&src[3 * BPS]); + const __m128i src_0 = _mm_unpacklo_epi8(src0, zero); + const __m128i src_1 = _mm_unpacklo_epi8(src1, zero); + const __m128i src_2 = _mm_unpacklo_epi8(src2, zero); + const __m128i src_3 = _mm_unpacklo_epi8(src3, zero); + // Load ref and convert to 16b. + const __m128i ref0 = _mm_loadl_epi64((__m128i*)&ref[0 * BPS]); + const __m128i ref1 = _mm_loadl_epi64((__m128i*)&ref[1 * BPS]); + const __m128i ref2 = _mm_loadl_epi64((__m128i*)&ref[2 * BPS]); + const __m128i ref3 = _mm_loadl_epi64((__m128i*)&ref[3 * BPS]); + const __m128i ref_0 = _mm_unpacklo_epi8(ref0, zero); + const __m128i ref_1 = _mm_unpacklo_epi8(ref1, zero); + const __m128i ref_2 = _mm_unpacklo_epi8(ref2, zero); + const __m128i ref_3 = _mm_unpacklo_epi8(ref3, zero); + // Compute difference. + const __m128i diff0 = _mm_sub_epi16(src_0, ref_0); + const __m128i diff1 = _mm_sub_epi16(src_1, ref_1); + const __m128i diff2 = _mm_sub_epi16(src_2, ref_2); + const __m128i diff3 = _mm_sub_epi16(src_3, ref_3); + + // Transpose. + // 00 01 02 03 0 0 0 0 + // 10 11 12 13 0 0 0 0 + // 20 21 22 23 0 0 0 0 + // 30 31 32 33 0 0 0 0 + const __m128i transpose0_0 = _mm_unpacklo_epi16(diff0, diff1); + const __m128i transpose0_1 = _mm_unpacklo_epi16(diff2, diff3); + // 00 10 01 11 02 12 03 13 + // 20 30 21 31 22 32 23 33 + const __m128i v23 = _mm_unpackhi_epi32(transpose0_0, transpose0_1); + v01 = _mm_unpacklo_epi32(transpose0_0, transpose0_1); + v32 = _mm_shuffle_epi32(v23, _MM_SHUFFLE(1, 0, 3, 2)); + // a02 a12 a22 a32 a03 a13 a23 a33 + // a00 a10 a20 a30 a01 a11 a21 a31 + // a03 a13 a23 a33 a02 a12 a22 a32 + } + + // First pass and subsequent transpose. + { + // Same operations are done on the (0,3) and (1,2) pairs. 
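+    // (v01 holds columns 0 and 1 and v32 holds columns 3 and 2 of the
+    // difference block, so the single add/sub below forms all (0,3) and (1,2)
+    // sums and differences for the four rows at once.)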
+ // b0 = (a0 + a3) << 3 + // b1 = (a1 + a2) << 3 + // b3 = (a0 - a3) << 3 + // b2 = (a1 - a2) << 3 + const __m128i a01 = _mm_add_epi16(v01, v32); + const __m128i a32 = _mm_sub_epi16(v01, v32); + const __m128i b01 = _mm_slli_epi16(a01, 3); + const __m128i b32 = _mm_slli_epi16(a32, 3); + const __m128i b11 = _mm_unpackhi_epi64(b01, b01); + const __m128i b22 = _mm_unpackhi_epi64(b32, b32); + + // e0 = b0 + b1 + // e2 = b0 - b1 + const __m128i e0 = _mm_add_epi16(b01, b11); + const __m128i e2 = _mm_sub_epi16(b01, b11); + const __m128i e02 = _mm_unpacklo_epi64(e0, e2); + + // e1 = (b3 * 5352 + b2 * 2217 + 14500) >> 12 + // e3 = (b3 * 2217 - b2 * 5352 + 7500) >> 12 + const __m128i b23 = _mm_unpacklo_epi16(b22, b32); + const __m128i c1 = _mm_madd_epi16(b23, k5352_2217); + const __m128i c3 = _mm_madd_epi16(b23, k2217_5352); + const __m128i d1 = _mm_add_epi32(c1, k14500); + const __m128i d3 = _mm_add_epi32(c3, k7500); + const __m128i e1 = _mm_srai_epi32(d1, 12); + const __m128i e3 = _mm_srai_epi32(d3, 12); + const __m128i e13 = _mm_packs_epi32(e1, e3); + + // Transpose. + // 00 01 02 03 20 21 22 23 + // 10 11 12 13 30 31 32 33 + const __m128i transpose0_0 = _mm_unpacklo_epi16(e02, e13); + const __m128i transpose0_1 = _mm_unpackhi_epi16(e02, e13); + // 00 10 01 11 02 12 03 13 + // 20 30 21 31 22 32 23 33 + const __m128i v23 = _mm_unpackhi_epi32(transpose0_0, transpose0_1); + v01 = _mm_unpacklo_epi32(transpose0_0, transpose0_1); + v32 = _mm_shuffle_epi32(v23, _MM_SHUFFLE(1, 0, 3, 2)); + // 02 12 22 32 03 13 23 33 + // 00 10 20 30 01 11 21 31 + // 03 13 23 33 02 12 22 32 + } + + // Second pass + { + // Same operations are done on the (0,3) and (1,2) pairs. + // a0 = v0 + v3 + // a1 = v1 + v2 + // a3 = v0 - v3 + // a2 = v1 - v2 + const __m128i a01 = _mm_add_epi16(v01, v32); + const __m128i a32 = _mm_sub_epi16(v01, v32); + const __m128i a11 = _mm_unpackhi_epi64(a01, a01); + const __m128i a22 = _mm_unpackhi_epi64(a32, a32); + + // d0 = (a0 + a1 + 7) >> 4; + // d2 = (a0 - a1 + 7) >> 4; + const __m128i b0 = _mm_add_epi16(a01, a11); + const __m128i b2 = _mm_sub_epi16(a01, a11); + const __m128i c0 = _mm_add_epi16(b0, seven); + const __m128i c2 = _mm_add_epi16(b2, seven); + const __m128i d0 = _mm_srai_epi16(c0, 4); + const __m128i d2 = _mm_srai_epi16(c2, 4); + + // f1 = ((b3 * 5352 + b2 * 2217 + 12000) >> 16) + // f3 = ((b3 * 2217 - b2 * 5352 + 51000) >> 16) + const __m128i b23 = _mm_unpacklo_epi16(a22, a32); + const __m128i c1 = _mm_madd_epi16(b23, k5352_2217); + const __m128i c3 = _mm_madd_epi16(b23, k2217_5352); + const __m128i d1 = _mm_add_epi32(c1, k12000_plus_one); + const __m128i d3 = _mm_add_epi32(c3, k51000); + const __m128i e1 = _mm_srai_epi32(d1, 16); + const __m128i e3 = _mm_srai_epi32(d3, 16); + const __m128i f1 = _mm_packs_epi32(e1, e1); + const __m128i f3 = _mm_packs_epi32(e3, e3); + // f1 = f1 + (a3 != 0); + // The compare will return (0xffff, 0) for (==0, !=0). To turn that into the + // desired (0, 1), we add one earlier through k12000_plus_one. + const __m128i g1 = _mm_add_epi16(f1, _mm_cmpeq_epi16(a32, zero)); + + _mm_storel_epi64((__m128i*)&out[ 0], d0); + _mm_storel_epi64((__m128i*)&out[ 4], g1); + _mm_storel_epi64((__m128i*)&out[ 8], d2); + _mm_storel_epi64((__m128i*)&out[12], f3); + } +} + +//------------------------------------------------------------------------------ +// Metric + +static int SSE4x4SSE2(const uint8_t* a, const uint8_t* b) { + const __m128i zero = _mm_set1_epi16(0); + + // Load values. 
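+  // (Rows are BPS bytes apart; each 8-byte load covers the four pixels of a
+  // row plus four trailing bytes, and only the first four survive the
+  // _mm_unpacklo_epi32() pairing below.)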
+ const __m128i a0 = _mm_loadl_epi64((__m128i*)&a[BPS * 0]); + const __m128i a1 = _mm_loadl_epi64((__m128i*)&a[BPS * 1]); + const __m128i a2 = _mm_loadl_epi64((__m128i*)&a[BPS * 2]); + const __m128i a3 = _mm_loadl_epi64((__m128i*)&a[BPS * 3]); + const __m128i b0 = _mm_loadl_epi64((__m128i*)&b[BPS * 0]); + const __m128i b1 = _mm_loadl_epi64((__m128i*)&b[BPS * 1]); + const __m128i b2 = _mm_loadl_epi64((__m128i*)&b[BPS * 2]); + const __m128i b3 = _mm_loadl_epi64((__m128i*)&b[BPS * 3]); + + // Combine pair of lines and convert to 16b. + const __m128i a01 = _mm_unpacklo_epi32(a0, a1); + const __m128i a23 = _mm_unpacklo_epi32(a2, a3); + const __m128i b01 = _mm_unpacklo_epi32(b0, b1); + const __m128i b23 = _mm_unpacklo_epi32(b2, b3); + const __m128i a01s = _mm_unpacklo_epi8(a01, zero); + const __m128i a23s = _mm_unpacklo_epi8(a23, zero); + const __m128i b01s = _mm_unpacklo_epi8(b01, zero); + const __m128i b23s = _mm_unpacklo_epi8(b23, zero); + + // Compute differences; (a-b)^2 = (abs(a-b))^2 = (sat8(a-b) + sat8(b-a))^2 + // TODO(cduvivier): Dissassemble and figure out why this is fastest. We don't + // need absolute values, there is no need to do calculation + // in 8bit as we are already in 16bit, ... Yet this is what + // benchmarks the fastest! + const __m128i d0 = _mm_subs_epu8(a01s, b01s); + const __m128i d1 = _mm_subs_epu8(b01s, a01s); + const __m128i d2 = _mm_subs_epu8(a23s, b23s); + const __m128i d3 = _mm_subs_epu8(b23s, a23s); + + // Square and add them all together. + const __m128i madd0 = _mm_madd_epi16(d0, d0); + const __m128i madd1 = _mm_madd_epi16(d1, d1); + const __m128i madd2 = _mm_madd_epi16(d2, d2); + const __m128i madd3 = _mm_madd_epi16(d3, d3); + const __m128i sum0 = _mm_add_epi32(madd0, madd1); + const __m128i sum1 = _mm_add_epi32(madd2, madd3); + const __m128i sum2 = _mm_add_epi32(sum0, sum1); + int32_t tmp[4]; + _mm_storeu_si128((__m128i*)tmp, sum2); + return (tmp[3] + tmp[2] + tmp[1] + tmp[0]); +} + +//------------------------------------------------------------------------------ +// Texture distortion +// +// We try to match the spectral content (weighted) between source and +// reconstructed samples. + +// Hadamard transform +// Returns the difference between the weighted sum of the absolute value of +// transformed coefficients. +static int TTransformSSE2(const uint8_t* inA, const uint8_t* inB, + const uint16_t* const w) { + int32_t sum[4]; + __m128i tmp_0, tmp_1, tmp_2, tmp_3; + const __m128i zero = _mm_setzero_si128(); + const __m128i one = _mm_set1_epi16(1); + const __m128i three = _mm_set1_epi16(3); + + // Load, combine and tranpose inputs. + { + const __m128i inA_0 = _mm_loadl_epi64((__m128i*)&inA[BPS * 0]); + const __m128i inA_1 = _mm_loadl_epi64((__m128i*)&inA[BPS * 1]); + const __m128i inA_2 = _mm_loadl_epi64((__m128i*)&inA[BPS * 2]); + const __m128i inA_3 = _mm_loadl_epi64((__m128i*)&inA[BPS * 3]); + const __m128i inB_0 = _mm_loadl_epi64((__m128i*)&inB[BPS * 0]); + const __m128i inB_1 = _mm_loadl_epi64((__m128i*)&inB[BPS * 1]); + const __m128i inB_2 = _mm_loadl_epi64((__m128i*)&inB[BPS * 2]); + const __m128i inB_3 = _mm_loadl_epi64((__m128i*)&inB[BPS * 3]); + + // Combine inA and inB (we'll do two transforms in parallel). 
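+    // Interleaving the bytes of inA and inB lets both 4x4 blocks share every
+    // arithmetic step below; they are only separated again for the final
+    // weighted sums.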
+ const __m128i inAB_0 = _mm_unpacklo_epi8(inA_0, inB_0); + const __m128i inAB_1 = _mm_unpacklo_epi8(inA_1, inB_1); + const __m128i inAB_2 = _mm_unpacklo_epi8(inA_2, inB_2); + const __m128i inAB_3 = _mm_unpacklo_epi8(inA_3, inB_3); + // a00 b00 a01 b01 a02 b03 a03 b03 0 0 0 0 0 0 0 0 + // a10 b10 a11 b11 a12 b12 a13 b13 0 0 0 0 0 0 0 0 + // a20 b20 a21 b21 a22 b22 a23 b23 0 0 0 0 0 0 0 0 + // a30 b30 a31 b31 a32 b32 a33 b33 0 0 0 0 0 0 0 0 + + // Transpose the two 4x4, discarding the filling zeroes. + const __m128i transpose0_0 = _mm_unpacklo_epi8(inAB_0, inAB_2); + const __m128i transpose0_1 = _mm_unpacklo_epi8(inAB_1, inAB_3); + // a00 a20 b00 b20 a01 a21 b01 b21 a02 a22 b02 b22 a03 a23 b03 b23 + // a10 a30 b10 b30 a11 a31 b11 b31 a12 a32 b12 b32 a13 a33 b13 b33 + const __m128i transpose1_0 = _mm_unpacklo_epi8(transpose0_0, transpose0_1); + const __m128i transpose1_1 = _mm_unpackhi_epi8(transpose0_0, transpose0_1); + // a00 a10 a20 a30 b00 b10 b20 b30 a01 a11 a21 a31 b01 b11 b21 b31 + // a02 a12 a22 a32 b02 b12 b22 b32 a03 a13 a23 a33 b03 b13 b23 b33 + + // Convert to 16b. + tmp_0 = _mm_unpacklo_epi8(transpose1_0, zero); + tmp_1 = _mm_unpackhi_epi8(transpose1_0, zero); + tmp_2 = _mm_unpacklo_epi8(transpose1_1, zero); + tmp_3 = _mm_unpackhi_epi8(transpose1_1, zero); + // a00 a10 a20 a30 b00 b10 b20 b30 + // a01 a11 a21 a31 b01 b11 b21 b31 + // a02 a12 a22 a32 b02 b12 b22 b32 + // a03 a13 a23 a33 b03 b13 b23 b33 + } + + // Horizontal pass and subsequent transpose. + { + // Calculate a and b (two 4x4 at once). + const __m128i a0 = _mm_slli_epi16(_mm_add_epi16(tmp_0, tmp_2), 2); + const __m128i a1 = _mm_slli_epi16(_mm_add_epi16(tmp_1, tmp_3), 2); + const __m128i a2 = _mm_slli_epi16(_mm_sub_epi16(tmp_1, tmp_3), 2); + const __m128i a3 = _mm_slli_epi16(_mm_sub_epi16(tmp_0, tmp_2), 2); + // b0_extra = (a0 != 0); + const __m128i b0_extra = _mm_andnot_si128(_mm_cmpeq_epi16 (a0, zero), one); + const __m128i b0_base = _mm_add_epi16(a0, a1); + const __m128i b1 = _mm_add_epi16(a3, a2); + const __m128i b2 = _mm_sub_epi16(a3, a2); + const __m128i b3 = _mm_sub_epi16(a0, a1); + const __m128i b0 = _mm_add_epi16(b0_base, b0_extra); + // a00 a01 a02 a03 b00 b01 b02 b03 + // a10 a11 a12 a13 b10 b11 b12 b13 + // a20 a21 a22 a23 b20 b21 b22 b23 + // a30 a31 a32 a33 b30 b31 b32 b33 + + // Transpose the two 4x4. 
+ const __m128i transpose0_0 = _mm_unpacklo_epi16(b0, b1); + const __m128i transpose0_1 = _mm_unpacklo_epi16(b2, b3); + const __m128i transpose0_2 = _mm_unpackhi_epi16(b0, b1); + const __m128i transpose0_3 = _mm_unpackhi_epi16(b2, b3); + // a00 a10 a01 a11 a02 a12 a03 a13 + // a20 a30 a21 a31 a22 a32 a23 a33 + // b00 b10 b01 b11 b02 b12 b03 b13 + // b20 b30 b21 b31 b22 b32 b23 b33 + const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1); + const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3); + const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1); + const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3); + // a00 a10 a20 a30 a01 a11 a21 a31 + // b00 b10 b20 b30 b01 b11 b21 b31 + // a02 a12 a22 a32 a03 a13 a23 a33 + // b02 b12 a22 b32 b03 b13 b23 b33 + tmp_0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1); + tmp_1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1); + tmp_2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3); + tmp_3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3); + // a00 a10 a20 a30 b00 b10 b20 b30 + // a01 a11 a21 a31 b01 b11 b21 b31 + // a02 a12 a22 a32 b02 b12 b22 b32 + // a03 a13 a23 a33 b03 b13 b23 b33 + } + + // Vertical pass and difference of weighted sums. + { + // Load all inputs. + // TODO(cduvivier): Make variable declarations and allocations aligned so + // we can use _mm_load_si128 instead of _mm_loadu_si128. + const __m128i w_0 = _mm_loadu_si128((__m128i*)&w[0]); + const __m128i w_8 = _mm_loadu_si128((__m128i*)&w[8]); + + // Calculate a and b (two 4x4 at once). + const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2); + const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3); + const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3); + const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2); + const __m128i b0 = _mm_add_epi16(a0, a1); + const __m128i b1 = _mm_add_epi16(a3, a2); + const __m128i b2 = _mm_sub_epi16(a3, a2); + const __m128i b3 = _mm_sub_epi16(a0, a1); + + // Separate the transforms of inA and inB. 
+ __m128i A_b0 = _mm_unpacklo_epi64(b0, b1); + __m128i A_b2 = _mm_unpacklo_epi64(b2, b3); + __m128i B_b0 = _mm_unpackhi_epi64(b0, b1); + __m128i B_b2 = _mm_unpackhi_epi64(b2, b3); + + { + // sign(b) = b >> 15 (0x0000 if positive, 0xffff if negative) + const __m128i sign_A_b0 = _mm_srai_epi16(A_b0, 15); + const __m128i sign_A_b2 = _mm_srai_epi16(A_b2, 15); + const __m128i sign_B_b0 = _mm_srai_epi16(B_b0, 15); + const __m128i sign_B_b2 = _mm_srai_epi16(B_b2, 15); + + // b = abs(b) = (b ^ sign) - sign + A_b0 = _mm_xor_si128(A_b0, sign_A_b0); + A_b2 = _mm_xor_si128(A_b2, sign_A_b2); + B_b0 = _mm_xor_si128(B_b0, sign_B_b0); + B_b2 = _mm_xor_si128(B_b2, sign_B_b2); + A_b0 = _mm_sub_epi16(A_b0, sign_A_b0); + A_b2 = _mm_sub_epi16(A_b2, sign_A_b2); + B_b0 = _mm_sub_epi16(B_b0, sign_B_b0); + B_b2 = _mm_sub_epi16(B_b2, sign_B_b2); + } + + // b = abs(b) + 3 + A_b0 = _mm_add_epi16(A_b0, three); + A_b2 = _mm_add_epi16(A_b2, three); + B_b0 = _mm_add_epi16(B_b0, three); + B_b2 = _mm_add_epi16(B_b2, three); + + // abs((b + (b<0) + 3) >> 3) = (abs(b) + 3) >> 3 + // b = (abs(b) + 3) >> 3 + A_b0 = _mm_srai_epi16(A_b0, 3); + A_b2 = _mm_srai_epi16(A_b2, 3); + B_b0 = _mm_srai_epi16(B_b0, 3); + B_b2 = _mm_srai_epi16(B_b2, 3); + + // weighted sums + A_b0 = _mm_madd_epi16(A_b0, w_0); + A_b2 = _mm_madd_epi16(A_b2, w_8); + B_b0 = _mm_madd_epi16(B_b0, w_0); + B_b2 = _mm_madd_epi16(B_b2, w_8); + A_b0 = _mm_add_epi32(A_b0, A_b2); + B_b0 = _mm_add_epi32(B_b0, B_b2); + + // difference of weighted sums + A_b0 = _mm_sub_epi32(A_b0, B_b0); + _mm_storeu_si128((__m128i*)&sum[0], A_b0); + } + return sum[0] + sum[1] + sum[2] + sum[3]; +} + +static int Disto4x4SSE2(const uint8_t* const a, const uint8_t* const b, + const uint16_t* const w) { + const int diff_sum = TTransformSSE2(a, b, w); + return (abs(diff_sum) + 8) >> 4; +} + +static int Disto16x16SSE2(const uint8_t* const a, const uint8_t* const b, + const uint16_t* const w) { + int D = 0; + int x, y; + for (y = 0; y < 16 * BPS; y += 4 * BPS) { + for (x = 0; x < 16; x += 4) { + D += Disto4x4SSE2(a + x + y, b + x + y, w); + } + } + return D; +} + + +//------------------------------------------------------------------------------ +// Quantization +// + +// Simple quantization +static int QuantizeBlockSSE2(int16_t in[16], int16_t out[16], + int n, const VP8Matrix* const mtx) { + const __m128i max_coeff_2047 = _mm_set1_epi16(2047); + const __m128i zero = _mm_set1_epi16(0); + __m128i sign0, sign8; + __m128i coeff0, coeff8; + __m128i out0, out8; + __m128i packed_out; + + // Load all inputs. + // TODO(cduvivier): Make variable declarations and allocations aligned so that + // we can use _mm_load_si128 instead of _mm_loadu_si128. 
+ __m128i in0 = _mm_loadu_si128((__m128i*)&in[0]); + __m128i in8 = _mm_loadu_si128((__m128i*)&in[8]); + const __m128i sharpen0 = _mm_loadu_si128((__m128i*)&mtx->sharpen_[0]); + const __m128i sharpen8 = _mm_loadu_si128((__m128i*)&mtx->sharpen_[8]); + const __m128i iq0 = _mm_loadu_si128((__m128i*)&mtx->iq_[0]); + const __m128i iq8 = _mm_loadu_si128((__m128i*)&mtx->iq_[8]); + const __m128i bias0 = _mm_loadu_si128((__m128i*)&mtx->bias_[0]); + const __m128i bias8 = _mm_loadu_si128((__m128i*)&mtx->bias_[8]); + const __m128i q0 = _mm_loadu_si128((__m128i*)&mtx->q_[0]); + const __m128i q8 = _mm_loadu_si128((__m128i*)&mtx->q_[8]); + const __m128i zthresh0 = _mm_loadu_si128((__m128i*)&mtx->zthresh_[0]); + const __m128i zthresh8 = _mm_loadu_si128((__m128i*)&mtx->zthresh_[8]); + + // sign(in) = in >> 15 (0x0000 if positive, 0xffff if negative) + sign0 = _mm_srai_epi16(in0, 15); + sign8 = _mm_srai_epi16(in8, 15); + + // coeff = abs(in) = (in ^ sign) - sign + coeff0 = _mm_xor_si128(in0, sign0); + coeff8 = _mm_xor_si128(in8, sign8); + coeff0 = _mm_sub_epi16(coeff0, sign0); + coeff8 = _mm_sub_epi16(coeff8, sign8); + + // coeff = abs(in) + sharpen + coeff0 = _mm_add_epi16(coeff0, sharpen0); + coeff8 = _mm_add_epi16(coeff8, sharpen8); + + // if (coeff > 2047) coeff = 2047 + coeff0 = _mm_min_epi16(coeff0, max_coeff_2047); + coeff8 = _mm_min_epi16(coeff8, max_coeff_2047); + + // out = (coeff * iQ + B) >> QFIX; + { + // doing calculations with 32b precision (QFIX=17) + // out = (coeff * iQ) + __m128i coeff_iQ0H = _mm_mulhi_epu16(coeff0, iq0); + __m128i coeff_iQ0L = _mm_mullo_epi16(coeff0, iq0); + __m128i coeff_iQ8H = _mm_mulhi_epu16(coeff8, iq8); + __m128i coeff_iQ8L = _mm_mullo_epi16(coeff8, iq8); + __m128i out_00 = _mm_unpacklo_epi16(coeff_iQ0L, coeff_iQ0H); + __m128i out_04 = _mm_unpackhi_epi16(coeff_iQ0L, coeff_iQ0H); + __m128i out_08 = _mm_unpacklo_epi16(coeff_iQ8L, coeff_iQ8H); + __m128i out_12 = _mm_unpackhi_epi16(coeff_iQ8L, coeff_iQ8H); + // expand bias from 16b to 32b + __m128i bias_00 = _mm_unpacklo_epi16(bias0, zero); + __m128i bias_04 = _mm_unpackhi_epi16(bias0, zero); + __m128i bias_08 = _mm_unpacklo_epi16(bias8, zero); + __m128i bias_12 = _mm_unpackhi_epi16(bias8, zero); + // out = (coeff * iQ + B) + out_00 = _mm_add_epi32(out_00, bias_00); + out_04 = _mm_add_epi32(out_04, bias_04); + out_08 = _mm_add_epi32(out_08, bias_08); + out_12 = _mm_add_epi32(out_12, bias_12); + // out = (coeff * iQ + B) >> QFIX; + out_00 = _mm_srai_epi32(out_00, QFIX); + out_04 = _mm_srai_epi32(out_04, QFIX); + out_08 = _mm_srai_epi32(out_08, QFIX); + out_12 = _mm_srai_epi32(out_12, QFIX); + // pack result as 16b + out0 = _mm_packs_epi32(out_00, out_04); + out8 = _mm_packs_epi32(out_08, out_12); + } + + // get sign back (if (sign[j]) out_n = -out_n) + out0 = _mm_xor_si128(out0, sign0); + out8 = _mm_xor_si128(out8, sign8); + out0 = _mm_sub_epi16(out0, sign0); + out8 = _mm_sub_epi16(out8, sign8); + + // in = out * Q + in0 = _mm_mullo_epi16(out0, q0); + in8 = _mm_mullo_epi16(out8, q8); + + // if (coeff <= mtx->zthresh_) {in=0; out=0;} + { + __m128i cmp0 = _mm_cmpgt_epi16(coeff0, zthresh0); + __m128i cmp8 = _mm_cmpgt_epi16(coeff8, zthresh8); + in0 = _mm_and_si128(in0, cmp0); + in8 = _mm_and_si128(in8, cmp8); + _mm_storeu_si128((__m128i*)&in[0], in0); + _mm_storeu_si128((__m128i*)&in[8], in8); + out0 = _mm_and_si128(out0, cmp0); + out8 = _mm_and_si128(out8, cmp8); + } + + // zigzag the output before storing it. + // + // The zigzag pattern can almost be reproduced with a small sequence of + // shuffles. 
After it, we only need to swap the 7th (ending up in third + // position instead of twelfth) and 8th values. + { + __m128i outZ0, outZ8; + outZ0 = _mm_shufflehi_epi16(out0, _MM_SHUFFLE(2, 1, 3, 0)); + outZ0 = _mm_shuffle_epi32 (outZ0, _MM_SHUFFLE(3, 1, 2, 0)); + outZ0 = _mm_shufflehi_epi16(outZ0, _MM_SHUFFLE(3, 1, 0, 2)); + outZ8 = _mm_shufflelo_epi16(out8, _MM_SHUFFLE(3, 0, 2, 1)); + outZ8 = _mm_shuffle_epi32 (outZ8, _MM_SHUFFLE(3, 1, 2, 0)); + outZ8 = _mm_shufflelo_epi16(outZ8, _MM_SHUFFLE(1, 3, 2, 0)); + _mm_storeu_si128((__m128i*)&out[0], outZ0); + _mm_storeu_si128((__m128i*)&out[8], outZ8); + packed_out = _mm_packs_epi16(outZ0, outZ8); + } + { + const int16_t outZ_12 = out[12]; + const int16_t outZ_3 = out[3]; + out[3] = outZ_12; + out[12] = outZ_3; + } + + // detect if all 'out' values are zeroes or not + { + int32_t tmp[4]; + _mm_storeu_si128((__m128i*)tmp, packed_out); + if (n) { + tmp[0] &= ~0xff; + } + return (tmp[3] || tmp[2] || tmp[1] || tmp[0]); + } +} + +extern void VP8EncDspInitSSE2(void); +void VP8EncDspInitSSE2(void) { + VP8CollectHistogram = CollectHistogramSSE2; + VP8EncQuantizeBlock = QuantizeBlockSSE2; + VP8ITransform = ITransformSSE2; + VP8FTransform = FTransformSSE2; + VP8SSE4x4 = SSE4x4SSE2; + VP8TDisto4x4 = Disto4x4SSE2; + VP8TDisto16x16 = Disto16x16SSE2; +} + +#if defined(__cplusplus) || defined(c_plusplus) +} // extern "C" +#endif + +#endif // WEBP_USE_SSE2 diff --git a/drivers/webpold/dsp/lossless.c b/drivers/webpold/dsp/lossless.c new file mode 100644 index 0000000000..62a6b7b15a --- /dev/null +++ b/drivers/webpold/dsp/lossless.c @@ -0,0 +1,1138 @@ +// Copyright 2012 Google Inc. All Rights Reserved. +// +// This code is licensed under the same terms as WebM: +// Software License Agreement: http://www.webmproject.org/license/software/ +// Additional IP Rights Grant: http://www.webmproject.org/license/additional/ +// ----------------------------------------------------------------------------- +// +// Image transforms and color space conversion methods for lossless decoder. 
+// +// Authors: Vikas Arora (vikaas.arora@gmail.com) +// Jyrki Alakuijala (jyrki@google.com) +// Urvang Joshi (urvang@google.com) + +#if defined(__cplusplus) || defined(c_plusplus) +extern "C" { +#endif + +#include <math.h> +#include <stdlib.h> +#include "./lossless.h" +#include "../dec/vp8li.h" +#include "../dsp/yuv.h" +#include "../dsp/dsp.h" +#include "../enc/histogram.h" + +#define MAX_DIFF_COST (1e30f) + +// lookup table for small values of log2(int) +#define APPROX_LOG_MAX 4096 +#define LOG_2_RECIPROCAL 1.44269504088896338700465094007086 +#define LOG_LOOKUP_IDX_MAX 256 +static const float kLog2Table[LOG_LOOKUP_IDX_MAX] = { + 0.0000000000000000f, 0.0000000000000000f, + 1.0000000000000000f, 1.5849625007211560f, + 2.0000000000000000f, 2.3219280948873621f, + 2.5849625007211560f, 2.8073549220576041f, + 3.0000000000000000f, 3.1699250014423121f, + 3.3219280948873621f, 3.4594316186372973f, + 3.5849625007211560f, 3.7004397181410921f, + 3.8073549220576041f, 3.9068905956085187f, + 4.0000000000000000f, 4.0874628412503390f, + 4.1699250014423121f, 4.2479275134435852f, + 4.3219280948873626f, 4.3923174227787606f, + 4.4594316186372973f, 4.5235619560570130f, + 4.5849625007211560f, 4.6438561897747243f, + 4.7004397181410917f, 4.7548875021634682f, + 4.8073549220576037f, 4.8579809951275718f, + 4.9068905956085187f, 4.9541963103868749f, + 5.0000000000000000f, 5.0443941193584533f, + 5.0874628412503390f, 5.1292830169449663f, + 5.1699250014423121f, 5.2094533656289501f, + 5.2479275134435852f, 5.2854022188622487f, + 5.3219280948873626f, 5.3575520046180837f, + 5.3923174227787606f, 5.4262647547020979f, + 5.4594316186372973f, 5.4918530963296747f, + 5.5235619560570130f, 5.5545888516776376f, + 5.5849625007211560f, 5.6147098441152083f, + 5.6438561897747243f, 5.6724253419714951f, + 5.7004397181410917f, 5.7279204545631987f, + 5.7548875021634682f, 5.7813597135246599f, + 5.8073549220576037f, 5.8328900141647412f, + 5.8579809951275718f, 5.8826430493618415f, + 5.9068905956085187f, 5.9307373375628866f, + 5.9541963103868749f, 5.9772799234999167f, + 6.0000000000000000f, 6.0223678130284543f, + 6.0443941193584533f, 6.0660891904577720f, + 6.0874628412503390f, 6.1085244567781691f, + 6.1292830169449663f, 6.1497471195046822f, + 6.1699250014423121f, 6.1898245588800175f, + 6.2094533656289501f, 6.2288186904958804f, + 6.2479275134435852f, 6.2667865406949010f, + 6.2854022188622487f, 6.3037807481771030f, + 6.3219280948873626f, 6.3398500028846243f, + 6.3575520046180837f, 6.3750394313469245f, + 6.3923174227787606f, 6.4093909361377017f, + 6.4262647547020979f, 6.4429434958487279f, + 6.4594316186372973f, 6.4757334309663976f, + 6.4918530963296747f, 6.5077946401986963f, + 6.5235619560570130f, 6.5391588111080309f, + 6.5545888516776376f, 6.5698556083309478f, + 6.5849625007211560f, 6.5999128421871278f, + 6.6147098441152083f, 6.6293566200796094f, + 6.6438561897747243f, 6.6582114827517946f, + 6.6724253419714951f, 6.6865005271832185f, + 6.7004397181410917f, 6.7142455176661224f, + 6.7279204545631987f, 6.7414669864011464f, + 6.7548875021634682f, 6.7681843247769259f, + 6.7813597135246599f, 6.7944158663501061f, + 6.8073549220576037f, 6.8201789624151878f, + 6.8328900141647412f, 6.8454900509443747f, + 6.8579809951275718f, 6.8703647195834047f, + 6.8826430493618415f, 6.8948177633079437f, + 6.9068905956085187f, 6.9188632372745946f, + 6.9307373375628866f, 6.9425145053392398f, + 6.9541963103868749f, 6.9657842846620869f, + 6.9772799234999167f, 6.9886846867721654f, + 7.0000000000000000f, 7.0112272554232539f, + 7.0223678130284543f, 7.0334230015374501f, + 
7.0443941193584533f, 7.0552824355011898f, + 7.0660891904577720f, 7.0768155970508308f, + 7.0874628412503390f, 7.0980320829605263f, + 7.1085244567781691f, 7.1189410727235076f, + 7.1292830169449663f, 7.1395513523987936f, + 7.1497471195046822f, 7.1598713367783890f, + 7.1699250014423121f, 7.1799090900149344f, + 7.1898245588800175f, 7.1996723448363644f, + 7.2094533656289501f, 7.2191685204621611f, + 7.2288186904958804f, 7.2384047393250785f, + 7.2479275134435852f, 7.2573878426926521f, + 7.2667865406949010f, 7.2761244052742375f, + 7.2854022188622487f, 7.2946207488916270f, + 7.3037807481771030f, 7.3128829552843557f, + 7.3219280948873626f, 7.3309168781146167f, + 7.3398500028846243f, 7.3487281542310771f, + 7.3575520046180837f, 7.3663222142458160f, + 7.3750394313469245f, 7.3837042924740519f, + 7.3923174227787606f, 7.4008794362821843f, + 7.4093909361377017f, 7.4178525148858982f, + 7.4262647547020979f, 7.4346282276367245f, + 7.4429434958487279f, 7.4512111118323289f, + 7.4594316186372973f, 7.4676055500829976f, + 7.4757334309663976f, 7.4838157772642563f, + 7.4918530963296747f, 7.4998458870832056f, + 7.5077946401986963f, 7.5156998382840427f, + 7.5235619560570130f, 7.5313814605163118f, + 7.5391588111080309f, 7.5468944598876364f, + 7.5545888516776376f, 7.5622424242210728f, + 7.5698556083309478f, 7.5774288280357486f, + 7.5849625007211560f, 7.5924570372680806f, + 7.5999128421871278f, 7.6073303137496104f, + 7.6147098441152083f, 7.6220518194563764f, + 7.6293566200796094f, 7.6366246205436487f, + 7.6438561897747243f, 7.6510516911789281f, + 7.6582114827517946f, 7.6653359171851764f, + 7.6724253419714951f, 7.6794800995054464f, + 7.6865005271832185f, 7.6934869574993252f, + 7.7004397181410917f, 7.7073591320808825f, + 7.7142455176661224f, 7.7210991887071855f, + 7.7279204545631987f, 7.7347096202258383f, + 7.7414669864011464f, 7.7481928495894605f, + 7.7548875021634682f, 7.7615512324444795f, + 7.7681843247769259f, 7.7747870596011736f, + 7.7813597135246599f, 7.7879025593914317f, + 7.7944158663501061f, 7.8008998999203047f, + 7.8073549220576037f, 7.8137811912170374f, + 7.8201789624151878f, 7.8265484872909150f, + 7.8328900141647412f, 7.8392037880969436f, + 7.8454900509443747f, 7.8517490414160571f, + 7.8579809951275718f, 7.8641861446542797f, + 7.8703647195834047f, 7.8765169465649993f, + 7.8826430493618415f, 7.8887432488982591f, + 7.8948177633079437f, 7.9008668079807486f, + 7.9068905956085187f, 7.9128893362299619f, + 7.9188632372745946f, 7.9248125036057812f, + 7.9307373375628866f, 7.9366379390025709f, + 7.9425145053392398f, 7.9483672315846778f, + 7.9541963103868749f, 7.9600019320680805f, + 7.9657842846620869f, 7.9715435539507719f, + 7.9772799234999167f, 7.9829935746943103f, + 7.9886846867721654f, 7.9943534368588577f +}; + +float VP8LFastLog2(int v) { + if (v < LOG_LOOKUP_IDX_MAX) { + return kLog2Table[v]; + } else if (v < APPROX_LOG_MAX) { + int log_cnt = 0; + while (v >= LOG_LOOKUP_IDX_MAX) { + ++log_cnt; + v = v >> 1; + } + return kLog2Table[v] + (float)log_cnt; + } else { + return (float)(LOG_2_RECIPROCAL * log((double)v)); + } +} + +//------------------------------------------------------------------------------ +// Image transforms. + +// In-place sum of each component with mod 256. 
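+// Per byte lane this is simply a[i] = (a[i] + b[i]) & 0xff: the alpha/green
+// and red/blue pairs are summed in separate 32-bit words so that a carry out
+// of one channel cannot corrupt its neighbour, and the final masks drop any
+// stray carry bits.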
+static WEBP_INLINE void AddPixelsEq(uint32_t* a, uint32_t b) { + const uint32_t alpha_and_green = (*a & 0xff00ff00u) + (b & 0xff00ff00u); + const uint32_t red_and_blue = (*a & 0x00ff00ffu) + (b & 0x00ff00ffu); + *a = (alpha_and_green & 0xff00ff00u) | (red_and_blue & 0x00ff00ffu); +} + +static WEBP_INLINE uint32_t Average2(uint32_t a0, uint32_t a1) { + return (((a0 ^ a1) & 0xfefefefeL) >> 1) + (a0 & a1); +} + +static WEBP_INLINE uint32_t Average3(uint32_t a0, uint32_t a1, uint32_t a2) { + return Average2(Average2(a0, a2), a1); +} + +static WEBP_INLINE uint32_t Average4(uint32_t a0, uint32_t a1, + uint32_t a2, uint32_t a3) { + return Average2(Average2(a0, a1), Average2(a2, a3)); +} + +static WEBP_INLINE uint32_t Clip255(uint32_t a) { + if (a < 256) { + return a; + } + // return 0, when a is a negative integer. + // return 255, when a is positive. + return ~a >> 24; +} + +static WEBP_INLINE int AddSubtractComponentFull(int a, int b, int c) { + return Clip255(a + b - c); +} + +static WEBP_INLINE uint32_t ClampedAddSubtractFull(uint32_t c0, uint32_t c1, + uint32_t c2) { + const int a = AddSubtractComponentFull(c0 >> 24, c1 >> 24, c2 >> 24); + const int r = AddSubtractComponentFull((c0 >> 16) & 0xff, + (c1 >> 16) & 0xff, + (c2 >> 16) & 0xff); + const int g = AddSubtractComponentFull((c0 >> 8) & 0xff, + (c1 >> 8) & 0xff, + (c2 >> 8) & 0xff); + const int b = AddSubtractComponentFull(c0 & 0xff, c1 & 0xff, c2 & 0xff); + return (a << 24) | (r << 16) | (g << 8) | b; +} + +static WEBP_INLINE int AddSubtractComponentHalf(int a, int b) { + return Clip255(a + (a - b) / 2); +} + +static WEBP_INLINE uint32_t ClampedAddSubtractHalf(uint32_t c0, uint32_t c1, + uint32_t c2) { + const uint32_t ave = Average2(c0, c1); + const int a = AddSubtractComponentHalf(ave >> 24, c2 >> 24); + const int r = AddSubtractComponentHalf((ave >> 16) & 0xff, (c2 >> 16) & 0xff); + const int g = AddSubtractComponentHalf((ave >> 8) & 0xff, (c2 >> 8) & 0xff); + const int b = AddSubtractComponentHalf((ave >> 0) & 0xff, (c2 >> 0) & 0xff); + return (a << 24) | (r << 16) | (g << 8) | b; +} + +static WEBP_INLINE int Sub3(int a, int b, int c) { + const int pa = b - c; + const int pb = a - c; + return abs(pa) - abs(pb); +} + +static WEBP_INLINE uint32_t Select(uint32_t a, uint32_t b, uint32_t c) { + const int pa_minus_pb = + Sub3((a >> 24) , (b >> 24) , (c >> 24) ) + + Sub3((a >> 16) & 0xff, (b >> 16) & 0xff, (c >> 16) & 0xff) + + Sub3((a >> 8) & 0xff, (b >> 8) & 0xff, (c >> 8) & 0xff) + + Sub3((a ) & 0xff, (b ) & 0xff, (c ) & 0xff); + + return (pa_minus_pb <= 0) ? 
a : b; +} + +//------------------------------------------------------------------------------ +// Predictors + +static uint32_t Predictor0(uint32_t left, const uint32_t* const top) { + (void)top; + (void)left; + return ARGB_BLACK; +} +static uint32_t Predictor1(uint32_t left, const uint32_t* const top) { + (void)top; + return left; +} +static uint32_t Predictor2(uint32_t left, const uint32_t* const top) { + (void)left; + return top[0]; +} +static uint32_t Predictor3(uint32_t left, const uint32_t* const top) { + (void)left; + return top[1]; +} +static uint32_t Predictor4(uint32_t left, const uint32_t* const top) { + (void)left; + return top[-1]; +} +static uint32_t Predictor5(uint32_t left, const uint32_t* const top) { + const uint32_t pred = Average3(left, top[0], top[1]); + return pred; +} +static uint32_t Predictor6(uint32_t left, const uint32_t* const top) { + const uint32_t pred = Average2(left, top[-1]); + return pred; +} +static uint32_t Predictor7(uint32_t left, const uint32_t* const top) { + const uint32_t pred = Average2(left, top[0]); + return pred; +} +static uint32_t Predictor8(uint32_t left, const uint32_t* const top) { + const uint32_t pred = Average2(top[-1], top[0]); + (void)left; + return pred; +} +static uint32_t Predictor9(uint32_t left, const uint32_t* const top) { + const uint32_t pred = Average2(top[0], top[1]); + (void)left; + return pred; +} +static uint32_t Predictor10(uint32_t left, const uint32_t* const top) { + const uint32_t pred = Average4(left, top[-1], top[0], top[1]); + return pred; +} +static uint32_t Predictor11(uint32_t left, const uint32_t* const top) { + const uint32_t pred = Select(top[0], left, top[-1]); + return pred; +} +static uint32_t Predictor12(uint32_t left, const uint32_t* const top) { + const uint32_t pred = ClampedAddSubtractFull(left, top[0], top[-1]); + return pred; +} +static uint32_t Predictor13(uint32_t left, const uint32_t* const top) { + const uint32_t pred = ClampedAddSubtractHalf(left, top[0], top[-1]); + return pred; +} + +typedef uint32_t (*PredictorFunc)(uint32_t left, const uint32_t* const top); +static const PredictorFunc kPredictors[16] = { + Predictor0, Predictor1, Predictor2, Predictor3, + Predictor4, Predictor5, Predictor6, Predictor7, + Predictor8, Predictor9, Predictor10, Predictor11, + Predictor12, Predictor13, + Predictor0, Predictor0 // <- padding security sentinels +}; + +// TODO(vikasa): Replace 256 etc with defines. 
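+// Estimates how cheap a residual histogram is to code: counts of small
+// residual values (and of their mod-256 negatives) are rewarded with
+// exponentially decaying weights, so predictors that leave mostly near-zero
+// differences get a lower cost.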
+static float PredictionCostSpatial(const int* counts, + int weight_0, double exp_val) { + const int significant_symbols = 16; + const double exp_decay_factor = 0.6; + double bits = weight_0 * counts[0]; + int i; + for (i = 1; i < significant_symbols; ++i) { + bits += exp_val * (counts[i] + counts[256 - i]); + exp_val *= exp_decay_factor; + } + return (float)(-0.1 * bits); +} + +// Compute the Shanon's entropy: Sum(p*log2(p)) +static float ShannonEntropy(const int* const array, int n) { + int i; + float retval = 0.f; + int sum = 0; + for (i = 0; i < n; ++i) { + if (array[i] != 0) { + sum += array[i]; + retval -= VP8LFastSLog2(array[i]); + } + } + retval += VP8LFastSLog2(sum); + return retval; +} + +static float PredictionCostSpatialHistogram(int accumulated[4][256], + int tile[4][256]) { + int i; + int k; + int combo[256]; + double retval = 0; + for (i = 0; i < 4; ++i) { + const double exp_val = 0.94; + retval += PredictionCostSpatial(&tile[i][0], 1, exp_val); + retval += ShannonEntropy(&tile[i][0], 256); + for (k = 0; k < 256; ++k) { + combo[k] = accumulated[i][k] + tile[i][k]; + } + retval += ShannonEntropy(&combo[0], 256); + } + return (float)retval; +} + +static int GetBestPredictorForTile(int width, int height, + int tile_x, int tile_y, int bits, + int accumulated[4][256], + const uint32_t* const argb_scratch) { + const int kNumPredModes = 14; + const int col_start = tile_x << bits; + const int row_start = tile_y << bits; + const int tile_size = 1 << bits; + const int ymax = (tile_size <= height - row_start) ? + tile_size : height - row_start; + const int xmax = (tile_size <= width - col_start) ? + tile_size : width - col_start; + int histo[4][256]; + float best_diff = MAX_DIFF_COST; + int best_mode = 0; + + int mode; + for (mode = 0; mode < kNumPredModes; ++mode) { + const uint32_t* current_row = argb_scratch; + const PredictorFunc pred_func = kPredictors[mode]; + float cur_diff; + int y; + memset(&histo[0][0], 0, sizeof(histo)); + for (y = 0; y < ymax; ++y) { + int x; + const int row = row_start + y; + const uint32_t* const upper_row = current_row; + current_row = upper_row + width; + for (x = 0; x < xmax; ++x) { + const int col = col_start + x; + uint32_t predict; + uint32_t predict_diff; + if (row == 0) { + predict = (col == 0) ? ARGB_BLACK : current_row[col - 1]; // Left. + } else if (col == 0) { + predict = upper_row[col]; // Top. + } else { + predict = pred_func(current_row[col - 1], upper_row + col); + } + predict_diff = VP8LSubPixels(current_row[col], predict); + ++histo[0][predict_diff >> 24]; + ++histo[1][((predict_diff >> 16) & 0xff)]; + ++histo[2][((predict_diff >> 8) & 0xff)]; + ++histo[3][(predict_diff & 0xff)]; + } + } + cur_diff = PredictionCostSpatialHistogram(accumulated, histo); + if (cur_diff < best_diff) { + best_diff = cur_diff; + best_mode = mode; + } + } + + return best_mode; +} + +static void CopyTileWithPrediction(int width, int height, + int tile_x, int tile_y, int bits, int mode, + const uint32_t* const argb_scratch, + uint32_t* const argb) { + const int col_start = tile_x << bits; + const int row_start = tile_y << bits; + const int tile_size = 1 << bits; + const int ymax = (tile_size <= height - row_start) ? + tile_size : height - row_start; + const int xmax = (tile_size <= width - col_start) ? 
+ tile_size : width - col_start; + const PredictorFunc pred_func = kPredictors[mode]; + const uint32_t* current_row = argb_scratch; + + int y; + for (y = 0; y < ymax; ++y) { + int x; + const int row = row_start + y; + const uint32_t* const upper_row = current_row; + current_row = upper_row + width; + for (x = 0; x < xmax; ++x) { + const int col = col_start + x; + const int pix = row * width + col; + uint32_t predict; + if (row == 0) { + predict = (col == 0) ? ARGB_BLACK : current_row[col - 1]; // Left. + } else if (col == 0) { + predict = upper_row[col]; // Top. + } else { + predict = pred_func(current_row[col - 1], upper_row + col); + } + argb[pix] = VP8LSubPixels(current_row[col], predict); + } + } +} + +void VP8LResidualImage(int width, int height, int bits, + uint32_t* const argb, uint32_t* const argb_scratch, + uint32_t* const image) { + const int max_tile_size = 1 << bits; + const int tiles_per_row = VP8LSubSampleSize(width, bits); + const int tiles_per_col = VP8LSubSampleSize(height, bits); + uint32_t* const upper_row = argb_scratch; + uint32_t* const current_tile_rows = argb_scratch + width; + int tile_y; + int histo[4][256]; + memset(histo, 0, sizeof(histo)); + for (tile_y = 0; tile_y < tiles_per_col; ++tile_y) { + const int tile_y_offset = tile_y * max_tile_size; + const int this_tile_height = + (tile_y < tiles_per_col - 1) ? max_tile_size : height - tile_y_offset; + int tile_x; + if (tile_y > 0) { + memcpy(upper_row, current_tile_rows + (max_tile_size - 1) * width, + width * sizeof(*upper_row)); + } + memcpy(current_tile_rows, &argb[tile_y_offset * width], + this_tile_height * width * sizeof(*current_tile_rows)); + for (tile_x = 0; tile_x < tiles_per_row; ++tile_x) { + int pred; + int y; + const int tile_x_offset = tile_x * max_tile_size; + int all_x_max = tile_x_offset + max_tile_size; + if (all_x_max > width) { + all_x_max = width; + } + pred = GetBestPredictorForTile(width, height, tile_x, tile_y, bits, histo, + argb_scratch); + image[tile_y * tiles_per_row + tile_x] = 0xff000000u | (pred << 8); + CopyTileWithPrediction(width, height, tile_x, tile_y, bits, pred, + argb_scratch, argb); + for (y = 0; y < max_tile_size; ++y) { + int ix; + int all_x; + int all_y = tile_y_offset + y; + if (all_y >= height) { + break; + } + ix = all_y * width + tile_x_offset; + for (all_x = tile_x_offset; all_x < all_x_max; ++all_x, ++ix) { + const uint32_t a = argb[ix]; + ++histo[0][a >> 24]; + ++histo[1][((a >> 16) & 0xff)]; + ++histo[2][((a >> 8) & 0xff)]; + ++histo[3][(a & 0xff)]; + } + } + } + } +} + +// Inverse prediction. +static void PredictorInverseTransform(const VP8LTransform* const transform, + int y_start, int y_end, uint32_t* data) { + const int width = transform->xsize_; + if (y_start == 0) { // First Row follows the L (mode=1) mode. + int x; + const uint32_t pred0 = Predictor0(data[-1], NULL); + AddPixelsEq(data, pred0); + for (x = 1; x < width; ++x) { + const uint32_t pred1 = Predictor1(data[x - 1], NULL); + AddPixelsEq(data + x, pred1); + } + data += width; + ++y_start; + } + + { + int y = y_start; + const int mask = (1 << transform->bits_) - 1; + const int tiles_per_row = VP8LSubSampleSize(width, transform->bits_); + const uint32_t* pred_mode_base = + transform->data_ + (y >> transform->bits_) * tiles_per_row; + + while (y < y_end) { + int x; + const uint32_t pred2 = Predictor2(data[-1], data - width); + const uint32_t* pred_mode_src = pred_mode_base; + PredictorFunc pred_func; + + // First pixel follows the T (mode=2) mode. + AddPixelsEq(data, pred2); + + // .. 
the rest: + pred_func = kPredictors[((*pred_mode_src++) >> 8) & 0xf]; + for (x = 1; x < width; ++x) { + uint32_t pred; + if ((x & mask) == 0) { // start of tile. Read predictor function. + pred_func = kPredictors[((*pred_mode_src++) >> 8) & 0xf]; + } + pred = pred_func(data[x - 1], data + x - width); + AddPixelsEq(data + x, pred); + } + data += width; + ++y; + if ((y & mask) == 0) { // Use the same mask, since tiles are squares. + pred_mode_base += tiles_per_row; + } + } + } +} + +void VP8LSubtractGreenFromBlueAndRed(uint32_t* argb_data, int num_pixs) { + int i; + for (i = 0; i < num_pixs; ++i) { + const uint32_t argb = argb_data[i]; + const uint32_t green = (argb >> 8) & 0xff; + const uint32_t new_r = (((argb >> 16) & 0xff) - green) & 0xff; + const uint32_t new_b = ((argb & 0xff) - green) & 0xff; + argb_data[i] = (argb & 0xff00ff00) | (new_r << 16) | new_b; + } +} + +// Add green to blue and red channels (i.e. perform the inverse transform of +// 'subtract green'). +static void AddGreenToBlueAndRed(const VP8LTransform* const transform, + int y_start, int y_end, uint32_t* data) { + const int width = transform->xsize_; + const uint32_t* const data_end = data + (y_end - y_start) * width; + while (data < data_end) { + const uint32_t argb = *data; + // "* 0001001u" is equivalent to "(green << 16) + green)" + const uint32_t green = ((argb >> 8) & 0xff); + uint32_t red_blue = (argb & 0x00ff00ffu); + red_blue += (green << 16) | green; + red_blue &= 0x00ff00ffu; + *data++ = (argb & 0xff00ff00u) | red_blue; + } +} + +typedef struct { + // Note: the members are uint8_t, so that any negative values are + // automatically converted to "mod 256" values. + uint8_t green_to_red_; + uint8_t green_to_blue_; + uint8_t red_to_blue_; +} Multipliers; + +static WEBP_INLINE void MultipliersClear(Multipliers* m) { + m->green_to_red_ = 0; + m->green_to_blue_ = 0; + m->red_to_blue_ = 0; +} + +static WEBP_INLINE uint32_t ColorTransformDelta(int8_t color_pred, + int8_t color) { + return (uint32_t)((int)(color_pred) * color) >> 5; +} + +static WEBP_INLINE void ColorCodeToMultipliers(uint32_t color_code, + Multipliers* const m) { + m->green_to_red_ = (color_code >> 0) & 0xff; + m->green_to_blue_ = (color_code >> 8) & 0xff; + m->red_to_blue_ = (color_code >> 16) & 0xff; +} + +static WEBP_INLINE uint32_t MultipliersToColorCode(Multipliers* const m) { + return 0xff000000u | + ((uint32_t)(m->red_to_blue_) << 16) | + ((uint32_t)(m->green_to_blue_) << 8) | + m->green_to_red_; +} + +static WEBP_INLINE uint32_t TransformColor(const Multipliers* const m, + uint32_t argb, int inverse) { + const uint32_t green = argb >> 8; + const uint32_t red = argb >> 16; + uint32_t new_red = red; + uint32_t new_blue = argb; + + if (inverse) { + new_red += ColorTransformDelta(m->green_to_red_, green); + new_red &= 0xff; + new_blue += ColorTransformDelta(m->green_to_blue_, green); + new_blue += ColorTransformDelta(m->red_to_blue_, new_red); + new_blue &= 0xff; + } else { + new_red -= ColorTransformDelta(m->green_to_red_, green); + new_red &= 0xff; + new_blue -= ColorTransformDelta(m->green_to_blue_, green); + new_blue -= ColorTransformDelta(m->red_to_blue_, red); + new_blue &= 0xff; + } + return (argb & 0xff00ff00u) | (new_red << 16) | (new_blue); +} + +static WEBP_INLINE int SkipRepeatedPixels(const uint32_t* const argb, + int ix, int xsize) { + const uint32_t v = argb[ix]; + if (ix >= xsize + 3) { + if (v == argb[ix - xsize] && + argb[ix - 1] == argb[ix - xsize - 1] && + argb[ix - 2] == argb[ix - xsize - 2] && + argb[ix - 3] == argb[ix - xsize 
- 3]) { + return 1; + } + return v == argb[ix - 3] && v == argb[ix - 2] && v == argb[ix - 1]; + } else if (ix >= 3) { + return v == argb[ix - 3] && v == argb[ix - 2] && v == argb[ix - 1]; + } + return 0; +} + +static float PredictionCostCrossColor(const int accumulated[256], + const int counts[256]) { + // Favor low entropy, locally and globally. + int i; + int combo[256]; + for (i = 0; i < 256; ++i) { + combo[i] = accumulated[i] + counts[i]; + } + return ShannonEntropy(combo, 256) + + ShannonEntropy(counts, 256) + + PredictionCostSpatial(counts, 3, 2.4); // Favor small absolute values. +} + +static Multipliers GetBestColorTransformForTile( + int tile_x, int tile_y, int bits, + Multipliers prevX, + Multipliers prevY, + int step, int xsize, int ysize, + int* accumulated_red_histo, + int* accumulated_blue_histo, + const uint32_t* const argb) { + float best_diff = MAX_DIFF_COST; + float cur_diff; + const int halfstep = step / 2; + const int max_tile_size = 1 << bits; + const int tile_y_offset = tile_y * max_tile_size; + const int tile_x_offset = tile_x * max_tile_size; + int green_to_red; + int green_to_blue; + int red_to_blue; + int all_x_max = tile_x_offset + max_tile_size; + int all_y_max = tile_y_offset + max_tile_size; + Multipliers best_tx; + MultipliersClear(&best_tx); + if (all_x_max > xsize) { + all_x_max = xsize; + } + if (all_y_max > ysize) { + all_y_max = ysize; + } + for (green_to_red = -64; green_to_red <= 64; green_to_red += halfstep) { + int histo[256] = { 0 }; + int all_y; + Multipliers tx; + MultipliersClear(&tx); + tx.green_to_red_ = green_to_red & 0xff; + + for (all_y = tile_y_offset; all_y < all_y_max; ++all_y) { + uint32_t predict; + int ix = all_y * xsize + tile_x_offset; + int all_x; + for (all_x = tile_x_offset; all_x < all_x_max; ++all_x, ++ix) { + if (SkipRepeatedPixels(argb, ix, xsize)) { + continue; + } + predict = TransformColor(&tx, argb[ix], 0); + ++histo[(predict >> 16) & 0xff]; // red. + } + } + cur_diff = PredictionCostCrossColor(&accumulated_red_histo[0], &histo[0]); + if (tx.green_to_red_ == prevX.green_to_red_) { + cur_diff -= 3; // favor keeping the areas locally similar + } + if (tx.green_to_red_ == prevY.green_to_red_) { + cur_diff -= 3; // favor keeping the areas locally similar + } + if (tx.green_to_red_ == 0) { + cur_diff -= 3; + } + if (cur_diff < best_diff) { + best_diff = cur_diff; + best_tx = tx; + } + } + best_diff = MAX_DIFF_COST; + green_to_red = best_tx.green_to_red_; + for (green_to_blue = -32; green_to_blue <= 32; green_to_blue += step) { + for (red_to_blue = -32; red_to_blue <= 32; red_to_blue += step) { + int all_y; + int histo[256] = { 0 }; + Multipliers tx; + tx.green_to_red_ = green_to_red; + tx.green_to_blue_ = green_to_blue; + tx.red_to_blue_ = red_to_blue; + for (all_y = tile_y_offset; all_y < all_y_max; ++all_y) { + uint32_t predict; + int all_x; + int ix = all_y * xsize + tile_x_offset; + for (all_x = tile_x_offset; all_x < all_x_max; ++all_x, ++ix) { + if (SkipRepeatedPixels(argb, ix, xsize)) { + continue; + } + predict = TransformColor(&tx, argb[ix], 0); + ++histo[predict & 0xff]; // blue. 
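+          // (only the blue channel is histogrammed in this loop: the
+          // green_to_blue_/red_to_blue_ candidates only affect blue, and
+          // green_to_red_ was already fixed by the search above)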
+ } + } + cur_diff = + PredictionCostCrossColor(&accumulated_blue_histo[0], &histo[0]); + if (tx.green_to_blue_ == prevX.green_to_blue_) { + cur_diff -= 3; // favor keeping the areas locally similar + } + if (tx.green_to_blue_ == prevY.green_to_blue_) { + cur_diff -= 3; // favor keeping the areas locally similar + } + if (tx.red_to_blue_ == prevX.red_to_blue_) { + cur_diff -= 3; // favor keeping the areas locally similar + } + if (tx.red_to_blue_ == prevY.red_to_blue_) { + cur_diff -= 3; // favor keeping the areas locally similar + } + if (tx.green_to_blue_ == 0) { + cur_diff -= 3; + } + if (tx.red_to_blue_ == 0) { + cur_diff -= 3; + } + if (cur_diff < best_diff) { + best_diff = cur_diff; + best_tx = tx; + } + } + } + return best_tx; +} + +static void CopyTileWithColorTransform(int xsize, int ysize, + int tile_x, int tile_y, int bits, + Multipliers color_transform, + uint32_t* const argb) { + int y; + int xscan = 1 << bits; + int yscan = 1 << bits; + tile_x <<= bits; + tile_y <<= bits; + if (xscan > xsize - tile_x) { + xscan = xsize - tile_x; + } + if (yscan > ysize - tile_y) { + yscan = ysize - tile_y; + } + yscan += tile_y; + for (y = tile_y; y < yscan; ++y) { + int ix = y * xsize + tile_x; + const int end_ix = ix + xscan; + for (; ix < end_ix; ++ix) { + argb[ix] = TransformColor(&color_transform, argb[ix], 0); + } + } +} + +void VP8LColorSpaceTransform(int width, int height, int bits, int step, + uint32_t* const argb, uint32_t* image) { + const int max_tile_size = 1 << bits; + int tile_xsize = VP8LSubSampleSize(width, bits); + int tile_ysize = VP8LSubSampleSize(height, bits); + int accumulated_red_histo[256] = { 0 }; + int accumulated_blue_histo[256] = { 0 }; + int tile_y; + int tile_x; + Multipliers prevX; + Multipliers prevY; + MultipliersClear(&prevY); + MultipliersClear(&prevX); + for (tile_y = 0; tile_y < tile_ysize; ++tile_y) { + for (tile_x = 0; tile_x < tile_xsize; ++tile_x) { + Multipliers color_transform; + int all_x_max; + int y; + const int tile_y_offset = tile_y * max_tile_size; + const int tile_x_offset = tile_x * max_tile_size; + if (tile_y != 0) { + ColorCodeToMultipliers(image[tile_y * tile_xsize + tile_x - 1], &prevX); + ColorCodeToMultipliers(image[(tile_y - 1) * tile_xsize + tile_x], + &prevY); + } else if (tile_x != 0) { + ColorCodeToMultipliers(image[tile_y * tile_xsize + tile_x - 1], &prevX); + } + color_transform = + GetBestColorTransformForTile(tile_x, tile_y, bits, + prevX, prevY, + step, width, height, + &accumulated_red_histo[0], + &accumulated_blue_histo[0], + argb); + image[tile_y * tile_xsize + tile_x] = + MultipliersToColorCode(&color_transform); + CopyTileWithColorTransform(width, height, tile_x, tile_y, bits, + color_transform, argb); + + // Gather accumulated histogram data. 
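+      // The red and blue samples of the tile just transformed feed the
+      // accumulated histograms used to score the following tiles; runs of
+      // identical pixels are skipped below because backward references will
+      // code them cheaply regardless of the transform.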
+ all_x_max = tile_x_offset + max_tile_size; + if (all_x_max > width) { + all_x_max = width; + } + for (y = 0; y < max_tile_size; ++y) { + int ix; + int all_x; + int all_y = tile_y_offset + y; + if (all_y >= height) { + break; + } + ix = all_y * width + tile_x_offset; + for (all_x = tile_x_offset; all_x < all_x_max; ++all_x, ++ix) { + if (ix >= 2 && + argb[ix] == argb[ix - 2] && + argb[ix] == argb[ix - 1]) { + continue; // repeated pixels are handled by backward references + } + if (ix >= width + 2 && + argb[ix - 2] == argb[ix - width - 2] && + argb[ix - 1] == argb[ix - width - 1] && + argb[ix] == argb[ix - width]) { + continue; // repeated pixels are handled by backward references + } + ++accumulated_red_histo[(argb[ix] >> 16) & 0xff]; + ++accumulated_blue_histo[argb[ix] & 0xff]; + } + } + } + } +} + +// Color space inverse transform. +static void ColorSpaceInverseTransform(const VP8LTransform* const transform, + int y_start, int y_end, uint32_t* data) { + const int width = transform->xsize_; + const int mask = (1 << transform->bits_) - 1; + const int tiles_per_row = VP8LSubSampleSize(width, transform->bits_); + int y = y_start; + const uint32_t* pred_row = + transform->data_ + (y >> transform->bits_) * tiles_per_row; + + while (y < y_end) { + const uint32_t* pred = pred_row; + Multipliers m = { 0, 0, 0 }; + int x; + + for (x = 0; x < width; ++x) { + if ((x & mask) == 0) ColorCodeToMultipliers(*pred++, &m); + data[x] = TransformColor(&m, data[x], 1); + } + data += width; + ++y; + if ((y & mask) == 0) pred_row += tiles_per_row;; + } +} + +// Separate out pixels packed together using pixel-bundling. +static void ColorIndexInverseTransform( + const VP8LTransform* const transform, + int y_start, int y_end, const uint32_t* src, uint32_t* dst) { + int y; + const int bits_per_pixel = 8 >> transform->bits_; + const int width = transform->xsize_; + const uint32_t* const color_map = transform->data_; + if (bits_per_pixel < 8) { + const int pixels_per_byte = 1 << transform->bits_; + const int count_mask = pixels_per_byte - 1; + const uint32_t bit_mask = (1 << bits_per_pixel) - 1; + for (y = y_start; y < y_end; ++y) { + uint32_t packed_pixels = 0; + int x; + for (x = 0; x < width; ++x) { + // We need to load fresh 'packed_pixels' once every 'pixels_per_byte' + // increments of x. Fortunately, pixels_per_byte is a power of 2, so + // can just use a mask for that, instead of decrementing a counter. + if ((x & count_mask) == 0) packed_pixels = ((*src++) >> 8) & 0xff; + *dst++ = color_map[packed_pixels & bit_mask]; + packed_pixels >>= bits_per_pixel; + } + } + } else { + for (y = y_start; y < y_end; ++y) { + int x; + for (x = 0; x < width; ++x) { + *dst++ = color_map[((*src++) >> 8) & 0xff]; + } + } + } +} + +void VP8LInverseTransform(const VP8LTransform* const transform, + int row_start, int row_end, + const uint32_t* const in, uint32_t* const out) { + assert(row_start < row_end); + assert(row_end <= transform->ysize_); + switch (transform->type_) { + case SUBTRACT_GREEN: + AddGreenToBlueAndRed(transform, row_start, row_end, out); + break; + case PREDICTOR_TRANSFORM: + PredictorInverseTransform(transform, row_start, row_end, out); + if (row_end != transform->ysize_) { + // The last predicted row in this iteration will be the top-pred row + // for the first row in next iteration. 
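+        // (the copy to 'out - width' assumes the caller keeps one spare row
+        // of pixels just above 'out')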
+ const int width = transform->xsize_; + memcpy(out - width, out + (row_end - row_start - 1) * width, + width * sizeof(*out)); + } + break; + case CROSS_COLOR_TRANSFORM: + ColorSpaceInverseTransform(transform, row_start, row_end, out); + break; + case COLOR_INDEXING_TRANSFORM: + if (in == out && transform->bits_ > 0) { + // Move packed pixels to the end of unpacked region, so that unpacking + // can occur seamlessly. + // Also, note that this is the only transform that applies on + // the effective width of VP8LSubSampleSize(xsize_, bits_). All other + // transforms work on effective width of xsize_. + const int out_stride = (row_end - row_start) * transform->xsize_; + const int in_stride = (row_end - row_start) * + VP8LSubSampleSize(transform->xsize_, transform->bits_); + uint32_t* const src = out + out_stride - in_stride; + memmove(src, out, in_stride * sizeof(*src)); + ColorIndexInverseTransform(transform, row_start, row_end, src, out); + } else { + ColorIndexInverseTransform(transform, row_start, row_end, in, out); + } + break; + } +} + +//------------------------------------------------------------------------------ +// Color space conversion. + +static int is_big_endian(void) { + static const union { + uint16_t w; + uint8_t b[2]; + } tmp = { 1 }; + return (tmp.b[0] != 1); +} + +static void ConvertBGRAToRGB(const uint32_t* src, + int num_pixels, uint8_t* dst) { + const uint32_t* const src_end = src + num_pixels; + while (src < src_end) { + const uint32_t argb = *src++; + *dst++ = (argb >> 16) & 0xff; + *dst++ = (argb >> 8) & 0xff; + *dst++ = (argb >> 0) & 0xff; + } +} + +static void ConvertBGRAToRGBA(const uint32_t* src, + int num_pixels, uint8_t* dst) { + const uint32_t* const src_end = src + num_pixels; + while (src < src_end) { + const uint32_t argb = *src++; + *dst++ = (argb >> 16) & 0xff; + *dst++ = (argb >> 8) & 0xff; + *dst++ = (argb >> 0) & 0xff; + *dst++ = (argb >> 24) & 0xff; + } +} + +static void ConvertBGRAToRGBA4444(const uint32_t* src, + int num_pixels, uint8_t* dst) { + const uint32_t* const src_end = src + num_pixels; + while (src < src_end) { + const uint32_t argb = *src++; + *dst++ = ((argb >> 16) & 0xf0) | ((argb >> 12) & 0xf); + *dst++ = ((argb >> 0) & 0xf0) | ((argb >> 28) & 0xf); + } +} + +static void ConvertBGRAToRGB565(const uint32_t* src, + int num_pixels, uint8_t* dst) { + const uint32_t* const src_end = src + num_pixels; + while (src < src_end) { + const uint32_t argb = *src++; + *dst++ = ((argb >> 16) & 0xf8) | ((argb >> 13) & 0x7); + *dst++ = ((argb >> 5) & 0xe0) | ((argb >> 3) & 0x1f); + } +} + +static void ConvertBGRAToBGR(const uint32_t* src, + int num_pixels, uint8_t* dst) { + const uint32_t* const src_end = src + num_pixels; + while (src < src_end) { + const uint32_t argb = *src++; + *dst++ = (argb >> 0) & 0xff; + *dst++ = (argb >> 8) & 0xff; + *dst++ = (argb >> 16) & 0xff; + } +} + +static void CopyOrSwap(const uint32_t* src, int num_pixels, uint8_t* dst, + int swap_on_big_endian) { + if (is_big_endian() == swap_on_big_endian) { + const uint32_t* const src_end = src + num_pixels; + while (src < src_end) { + uint32_t argb = *src++; +#if !defined(__BIG_ENDIAN__) && (defined(__i386__) || defined(__x86_64__)) + __asm__ volatile("bswap %0" : "=r"(argb) : "0"(argb)); + *(uint32_t*)dst = argb; + dst += sizeof(argb); +#elif !defined(__BIG_ENDIAN__) && defined(_MSC_VER) + argb = _byteswap_ulong(argb); + *(uint32_t*)dst = argb; + dst += sizeof(argb); +#else + *dst++ = (argb >> 24) & 0xff; + *dst++ = (argb >> 16) & 0xff; + *dst++ = (argb >> 8) & 0xff; + *dst++ 
= (argb >> 0) & 0xff; +#endif + } + } else { + memcpy(dst, src, num_pixels * sizeof(*src)); + } +} + +void VP8LConvertFromBGRA(const uint32_t* const in_data, int num_pixels, + WEBP_CSP_MODE out_colorspace, uint8_t* const rgba) { + switch (out_colorspace) { + case MODE_RGB: + ConvertBGRAToRGB(in_data, num_pixels, rgba); + break; + case MODE_RGBA: + ConvertBGRAToRGBA(in_data, num_pixels, rgba); + break; + case MODE_rgbA: + ConvertBGRAToRGBA(in_data, num_pixels, rgba); + WebPApplyAlphaMultiply(rgba, 0, num_pixels, 1, 0); + break; + case MODE_BGR: + ConvertBGRAToBGR(in_data, num_pixels, rgba); + break; + case MODE_BGRA: + CopyOrSwap(in_data, num_pixels, rgba, 1); + break; + case MODE_bgrA: + CopyOrSwap(in_data, num_pixels, rgba, 1); + WebPApplyAlphaMultiply(rgba, 0, num_pixels, 1, 0); + break; + case MODE_ARGB: + CopyOrSwap(in_data, num_pixels, rgba, 0); + break; + case MODE_Argb: + CopyOrSwap(in_data, num_pixels, rgba, 0); + WebPApplyAlphaMultiply(rgba, 1, num_pixels, 1, 0); + break; + case MODE_RGBA_4444: + ConvertBGRAToRGBA4444(in_data, num_pixels, rgba); + break; + case MODE_rgbA_4444: + ConvertBGRAToRGBA4444(in_data, num_pixels, rgba); + WebPApplyAlphaMultiply4444(rgba, num_pixels, 1, 0); + break; + case MODE_RGB_565: + ConvertBGRAToRGB565(in_data, num_pixels, rgba); + break; + default: + assert(0); // Code flow should not reach here. + } +} + +//------------------------------------------------------------------------------ + +#if defined(__cplusplus) || defined(c_plusplus) +} // extern "C" +#endif diff --git a/drivers/webpold/dsp/lossless.h b/drivers/webpold/dsp/lossless.h new file mode 100644 index 0000000000..7c7d5555ed --- /dev/null +++ b/drivers/webpold/dsp/lossless.h @@ -0,0 +1,82 @@ +// Copyright 2012 Google Inc. All Rights Reserved. +// +// This code is licensed under the same terms as WebM: +// Software License Agreement: http://www.webmproject.org/license/software/ +// Additional IP Rights Grant: http://www.webmproject.org/license/additional/ +// ----------------------------------------------------------------------------- +// +// Image transforms and color space conversion methods for lossless decoder. +// +// Authors: Vikas Arora (vikaas.arora@gmail.com) +// Jyrki Alakuijala (jyrki@google.com) + +#ifndef WEBP_DSP_LOSSLESS_H_ +#define WEBP_DSP_LOSSLESS_H_ + +#include "../types.h" +#include "../decode.h" + +#if defined(__cplusplus) || defined(c_plusplus) +extern "C" { +#endif + +//------------------------------------------------------------------------------ +// Image transforms. + +struct VP8LTransform; // Defined in dec/vp8li.h. + +// Performs inverse transform of data given transform information, start and end +// rows. Transform will be applied to rows [row_start, row_end[. +// The *in and *out pointers refer to source and destination data respectively +// corresponding to the intermediate row (row_start). +void VP8LInverseTransform(const struct VP8LTransform* const transform, + int row_start, int row_end, + const uint32_t* const in, uint32_t* const out); + +// Subtracts green from blue and red channels. +void VP8LSubtractGreenFromBlueAndRed(uint32_t* argb_data, int num_pixs); + +void VP8LResidualImage(int width, int height, int bits, + uint32_t* const argb, uint32_t* const argb_scratch, + uint32_t* const image); + +void VP8LColorSpaceTransform(int width, int height, int bits, int step, + uint32_t* const argb, uint32_t* image); + +//------------------------------------------------------------------------------ +// Color space conversion. 
+ +// Converts from BGRA to other color spaces. +void VP8LConvertFromBGRA(const uint32_t* const in_data, int num_pixels, + WEBP_CSP_MODE out_colorspace, uint8_t* const rgba); + +//------------------------------------------------------------------------------ +// Misc methods. + +// Computes sampled size of 'size' when sampling using 'sampling bits'. +static WEBP_INLINE uint32_t VP8LSubSampleSize(uint32_t size, + uint32_t sampling_bits) { + return (size + (1 << sampling_bits) - 1) >> sampling_bits; +} + +// Faster logarithm for integers, with the property of log2(0) == 0. +float VP8LFastLog2(int v); +// Fast calculation of v * log2(v) for integer input. +static WEBP_INLINE float VP8LFastSLog2(int v) { return VP8LFastLog2(v) * v; } + +// In-place difference of each component with mod 256. +static WEBP_INLINE uint32_t VP8LSubPixels(uint32_t a, uint32_t b) { + const uint32_t alpha_and_green = + 0x00ff00ffu + (a & 0xff00ff00u) - (b & 0xff00ff00u); + const uint32_t red_and_blue = + 0xff00ff00u + (a & 0x00ff00ffu) - (b & 0x00ff00ffu); + return (alpha_and_green & 0xff00ff00u) | (red_and_blue & 0x00ff00ffu); +} + +//------------------------------------------------------------------------------ + +#if defined(__cplusplus) || defined(c_plusplus) +} // extern "C" +#endif + +#endif // WEBP_DSP_LOSSLESS_H_ diff --git a/drivers/webpold/dsp/upsampling.c b/drivers/webpold/dsp/upsampling.c new file mode 100644 index 0000000000..4855eb1432 --- /dev/null +++ b/drivers/webpold/dsp/upsampling.c @@ -0,0 +1,357 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// +// This code is licensed under the same terms as WebM: +// Software License Agreement: http://www.webmproject.org/license/software/ +// Additional IP Rights Grant: http://www.webmproject.org/license/additional/ +// ----------------------------------------------------------------------------- +// +// YUV to RGB upsampling functions. +// +// Author: somnath@google.com (Somnath Banerjee) + +#include "./dsp.h" +#include "./yuv.h" + +#if defined(__cplusplus) || defined(c_plusplus) +extern "C" { +#endif + +//------------------------------------------------------------------------------ +// Fancy upsampler + +#ifdef FANCY_UPSAMPLING + +// Fancy upsampling functions to convert YUV to RGB +WebPUpsampleLinePairFunc WebPUpsamplers[MODE_LAST]; + +// Given samples laid out in a square as: +// [a b] +// [c d] +// we interpolate u/v as: +// ([9*a + 3*b + 3*c + d 3*a + 9*b + 3*c + d] + [8 8]) / 16 +// ([3*a + b + 9*c + 3*d a + 3*b + 3*c + 9*d] [8 8]) / 16 + +// We process u and v together stashed into 32bit (16bit each). 
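+// Ignoring the rounding offsets, the macro below derives the weighted outputs
+// from two shared diagonal averages: with avg = a + b + c + d,
+//   diag_12 = (avg + 2*(b + c)) / 8,   diag_03 = (avg + 2*(a + d)) / 8,
+// so that e.g. (diag_12 + a) / 2 = (9*a + 3*b + 3*c + d) / 16.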
+#define LOAD_UV(u,v) ((u) | ((v) << 16)) + +#define UPSAMPLE_FUNC(FUNC_NAME, FUNC, XSTEP) \ +static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \ + const uint8_t* top_u, const uint8_t* top_v, \ + const uint8_t* cur_u, const uint8_t* cur_v, \ + uint8_t* top_dst, uint8_t* bottom_dst, int len) { \ + int x; \ + const int last_pixel_pair = (len - 1) >> 1; \ + uint32_t tl_uv = LOAD_UV(top_u[0], top_v[0]); /* top-left sample */ \ + uint32_t l_uv = LOAD_UV(cur_u[0], cur_v[0]); /* left-sample */ \ + if (top_y) { \ + const uint32_t uv0 = (3 * tl_uv + l_uv + 0x00020002u) >> 2; \ + FUNC(top_y[0], uv0 & 0xff, (uv0 >> 16), top_dst); \ + } \ + if (bottom_y) { \ + const uint32_t uv0 = (3 * l_uv + tl_uv + 0x00020002u) >> 2; \ + FUNC(bottom_y[0], uv0 & 0xff, (uv0 >> 16), bottom_dst); \ + } \ + for (x = 1; x <= last_pixel_pair; ++x) { \ + const uint32_t t_uv = LOAD_UV(top_u[x], top_v[x]); /* top sample */ \ + const uint32_t uv = LOAD_UV(cur_u[x], cur_v[x]); /* sample */ \ + /* precompute invariant values associated with first and second diagonals*/\ + const uint32_t avg = tl_uv + t_uv + l_uv + uv + 0x00080008u; \ + const uint32_t diag_12 = (avg + 2 * (t_uv + l_uv)) >> 3; \ + const uint32_t diag_03 = (avg + 2 * (tl_uv + uv)) >> 3; \ + if (top_y) { \ + const uint32_t uv0 = (diag_12 + tl_uv) >> 1; \ + const uint32_t uv1 = (diag_03 + t_uv) >> 1; \ + FUNC(top_y[2 * x - 1], uv0 & 0xff, (uv0 >> 16), \ + top_dst + (2 * x - 1) * XSTEP); \ + FUNC(top_y[2 * x - 0], uv1 & 0xff, (uv1 >> 16), \ + top_dst + (2 * x - 0) * XSTEP); \ + } \ + if (bottom_y) { \ + const uint32_t uv0 = (diag_03 + l_uv) >> 1; \ + const uint32_t uv1 = (diag_12 + uv) >> 1; \ + FUNC(bottom_y[2 * x - 1], uv0 & 0xff, (uv0 >> 16), \ + bottom_dst + (2 * x - 1) * XSTEP); \ + FUNC(bottom_y[2 * x + 0], uv1 & 0xff, (uv1 >> 16), \ + bottom_dst + (2 * x + 0) * XSTEP); \ + } \ + tl_uv = t_uv; \ + l_uv = uv; \ + } \ + if (!(len & 1)) { \ + if (top_y) { \ + const uint32_t uv0 = (3 * tl_uv + l_uv + 0x00020002u) >> 2; \ + FUNC(top_y[len - 1], uv0 & 0xff, (uv0 >> 16), \ + top_dst + (len - 1) * XSTEP); \ + } \ + if (bottom_y) { \ + const uint32_t uv0 = (3 * l_uv + tl_uv + 0x00020002u) >> 2; \ + FUNC(bottom_y[len - 1], uv0 & 0xff, (uv0 >> 16), \ + bottom_dst + (len - 1) * XSTEP); \ + } \ + } \ +} + +// All variants implemented. 
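+// The last macro argument is the destination's per-pixel byte stride: 3 for
+// packed RGB/BGR, 4 for the four-byte RGBA layouts and 2 for the 16-bit
+// RGBA4444 and RGB565 formats.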
+UPSAMPLE_FUNC(UpsampleRgbLinePair, VP8YuvToRgb, 3) +UPSAMPLE_FUNC(UpsampleBgrLinePair, VP8YuvToBgr, 3) +UPSAMPLE_FUNC(UpsampleRgbaLinePair, VP8YuvToRgba, 4) +UPSAMPLE_FUNC(UpsampleBgraLinePair, VP8YuvToBgra, 4) +UPSAMPLE_FUNC(UpsampleArgbLinePair, VP8YuvToArgb, 4) +UPSAMPLE_FUNC(UpsampleRgba4444LinePair, VP8YuvToRgba4444, 2) +UPSAMPLE_FUNC(UpsampleRgb565LinePair, VP8YuvToRgb565, 2) + +#undef LOAD_UV +#undef UPSAMPLE_FUNC + +#endif // FANCY_UPSAMPLING + +//------------------------------------------------------------------------------ +// simple point-sampling + +#define SAMPLE_FUNC(FUNC_NAME, FUNC, XSTEP) \ +static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \ + const uint8_t* u, const uint8_t* v, \ + uint8_t* top_dst, uint8_t* bottom_dst, int len) { \ + int i; \ + for (i = 0; i < len - 1; i += 2) { \ + FUNC(top_y[0], u[0], v[0], top_dst); \ + FUNC(top_y[1], u[0], v[0], top_dst + XSTEP); \ + FUNC(bottom_y[0], u[0], v[0], bottom_dst); \ + FUNC(bottom_y[1], u[0], v[0], bottom_dst + XSTEP); \ + top_y += 2; \ + bottom_y += 2; \ + u++; \ + v++; \ + top_dst += 2 * XSTEP; \ + bottom_dst += 2 * XSTEP; \ + } \ + if (i == len - 1) { /* last one */ \ + FUNC(top_y[0], u[0], v[0], top_dst); \ + FUNC(bottom_y[0], u[0], v[0], bottom_dst); \ + } \ +} + +// All variants implemented. +SAMPLE_FUNC(SampleRgbLinePair, VP8YuvToRgb, 3) +SAMPLE_FUNC(SampleBgrLinePair, VP8YuvToBgr, 3) +SAMPLE_FUNC(SampleRgbaLinePair, VP8YuvToRgba, 4) +SAMPLE_FUNC(SampleBgraLinePair, VP8YuvToBgra, 4) +SAMPLE_FUNC(SampleArgbLinePair, VP8YuvToArgb, 4) +SAMPLE_FUNC(SampleRgba4444LinePair, VP8YuvToRgba4444, 2) +SAMPLE_FUNC(SampleRgb565LinePair, VP8YuvToRgb565, 2) + +#undef SAMPLE_FUNC + +const WebPSampleLinePairFunc WebPSamplers[MODE_LAST] = { + SampleRgbLinePair, // MODE_RGB + SampleRgbaLinePair, // MODE_RGBA + SampleBgrLinePair, // MODE_BGR + SampleBgraLinePair, // MODE_BGRA + SampleArgbLinePair, // MODE_ARGB + SampleRgba4444LinePair, // MODE_RGBA_4444 + SampleRgb565LinePair, // MODE_RGB_565 + SampleRgbaLinePair, // MODE_rgbA + SampleBgraLinePair, // MODE_bgrA + SampleArgbLinePair, // MODE_Argb + SampleRgba4444LinePair // MODE_rgbA_4444 +}; + +//------------------------------------------------------------------------------ + +#if !defined(FANCY_UPSAMPLING) +#define DUAL_SAMPLE_FUNC(FUNC_NAME, FUNC) \ +static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bot_y, \ + const uint8_t* top_u, const uint8_t* top_v, \ + const uint8_t* bot_u, const uint8_t* bot_v, \ + uint8_t* top_dst, uint8_t* bot_dst, int len) { \ + const int half_len = len >> 1; \ + int x; \ + if (top_dst != NULL) { \ + for (x = 0; x < half_len; ++x) { \ + FUNC(top_y[2 * x + 0], top_u[x], top_v[x], top_dst + 8 * x + 0); \ + FUNC(top_y[2 * x + 1], top_u[x], top_v[x], top_dst + 8 * x + 4); \ + } \ + if (len & 1) FUNC(top_y[2 * x + 0], top_u[x], top_v[x], top_dst + 8 * x); \ + } \ + if (bot_dst != NULL) { \ + for (x = 0; x < half_len; ++x) { \ + FUNC(bot_y[2 * x + 0], bot_u[x], bot_v[x], bot_dst + 8 * x + 0); \ + FUNC(bot_y[2 * x + 1], bot_u[x], bot_v[x], bot_dst + 8 * x + 4); \ + } \ + if (len & 1) FUNC(bot_y[2 * x + 0], bot_u[x], bot_v[x], bot_dst + 8 * x); \ + } \ +} + +DUAL_SAMPLE_FUNC(DualLineSamplerBGRA, VP8YuvToBgra) +DUAL_SAMPLE_FUNC(DualLineSamplerARGB, VP8YuvToArgb) +#undef DUAL_SAMPLE_FUNC + +#endif // !FANCY_UPSAMPLING + +WebPUpsampleLinePairFunc WebPGetLinePairConverter(int alpha_is_last) { + WebPInitUpsamplers(); + VP8YUVInit(); +#ifdef FANCY_UPSAMPLING + return WebPUpsamplers[alpha_is_last ? 
MODE_BGRA : MODE_ARGB]; +#else + return (alpha_is_last ? DualLineSamplerBGRA : DualLineSamplerARGB); +#endif +} + +//------------------------------------------------------------------------------ +// YUV444 converter + +#define YUV444_FUNC(FUNC_NAME, FUNC, XSTEP) \ +static void FUNC_NAME(const uint8_t* y, const uint8_t* u, const uint8_t* v, \ + uint8_t* dst, int len) { \ + int i; \ + for (i = 0; i < len; ++i) FUNC(y[i], u[i], v[i], &dst[i * XSTEP]); \ +} + +YUV444_FUNC(Yuv444ToRgb, VP8YuvToRgb, 3) +YUV444_FUNC(Yuv444ToBgr, VP8YuvToBgr, 3) +YUV444_FUNC(Yuv444ToRgba, VP8YuvToRgba, 4) +YUV444_FUNC(Yuv444ToBgra, VP8YuvToBgra, 4) +YUV444_FUNC(Yuv444ToArgb, VP8YuvToArgb, 4) +YUV444_FUNC(Yuv444ToRgba4444, VP8YuvToRgba4444, 2) +YUV444_FUNC(Yuv444ToRgb565, VP8YuvToRgb565, 2) + +#undef YUV444_FUNC + +const WebPYUV444Converter WebPYUV444Converters[MODE_LAST] = { + Yuv444ToRgb, // MODE_RGB + Yuv444ToRgba, // MODE_RGBA + Yuv444ToBgr, // MODE_BGR + Yuv444ToBgra, // MODE_BGRA + Yuv444ToArgb, // MODE_ARGB + Yuv444ToRgba4444, // MODE_RGBA_4444 + Yuv444ToRgb565, // MODE_RGB_565 + Yuv444ToRgba, // MODE_rgbA + Yuv444ToBgra, // MODE_bgrA + Yuv444ToArgb, // MODE_Argb + Yuv444ToRgba4444 // MODE_rgbA_4444 +}; + +//------------------------------------------------------------------------------ +// Premultiplied modes + +// non dithered-modes + +// (x * a * 32897) >> 23 is bit-wise equivalent to (int)(x * a / 255.) +// for all 8bit x or a. For bit-wise equivalence to (int)(x * a / 255. + .5), +// one can use instead: (x * a * 65793 + (1 << 23)) >> 24 +#if 1 // (int)(x * a / 255.) +#define MULTIPLIER(a) ((a) * 32897UL) +#define PREMULTIPLY(x, m) (((x) * (m)) >> 23) +#else // (int)(x * a / 255. + .5) +#define MULTIPLIER(a) ((a) * 65793UL) +#define PREMULTIPLY(x, m) (((x) * (m) + (1UL << 23)) >> 24) +#endif + +static void ApplyAlphaMultiply(uint8_t* rgba, int alpha_first, + int w, int h, int stride) { + while (h-- > 0) { + uint8_t* const rgb = rgba + (alpha_first ? 1 : 0); + const uint8_t* const alpha = rgba + (alpha_first ? 
0 : 3); + int i; + for (i = 0; i < w; ++i) { + const uint32_t a = alpha[4 * i]; + if (a != 0xff) { + const uint32_t mult = MULTIPLIER(a); + rgb[4 * i + 0] = PREMULTIPLY(rgb[4 * i + 0], mult); + rgb[4 * i + 1] = PREMULTIPLY(rgb[4 * i + 1], mult); + rgb[4 * i + 2] = PREMULTIPLY(rgb[4 * i + 2], mult); + } + } + rgba += stride; + } +} +#undef MULTIPLIER +#undef PREMULTIPLY + +// rgbA4444 + +#define MULTIPLIER(a) ((a) * 0x1111) // 0x1111 ~= (1 << 16) / 15 + +static WEBP_INLINE uint8_t dither_hi(uint8_t x) { + return (x & 0xf0) | (x >> 4); +} + +static WEBP_INLINE uint8_t dither_lo(uint8_t x) { + return (x & 0x0f) | (x << 4); +} + +static WEBP_INLINE uint8_t multiply(uint8_t x, uint32_t m) { + return (x * m) >> 16; +} + +static void ApplyAlphaMultiply4444(uint8_t* rgba4444, + int w, int h, int stride) { + while (h-- > 0) { + int i; + for (i = 0; i < w; ++i) { + const uint8_t a = (rgba4444[2 * i + 1] & 0x0f); + const uint32_t mult = MULTIPLIER(a); + const uint8_t r = multiply(dither_hi(rgba4444[2 * i + 0]), mult); + const uint8_t g = multiply(dither_lo(rgba4444[2 * i + 0]), mult); + const uint8_t b = multiply(dither_hi(rgba4444[2 * i + 1]), mult); + rgba4444[2 * i + 0] = (r & 0xf0) | ((g >> 4) & 0x0f); + rgba4444[2 * i + 1] = (b & 0xf0) | a; + } + rgba4444 += stride; + } +} +#undef MULTIPLIER + +void (*WebPApplyAlphaMultiply)(uint8_t*, int, int, int, int) + = ApplyAlphaMultiply; +void (*WebPApplyAlphaMultiply4444)(uint8_t*, int, int, int) + = ApplyAlphaMultiply4444; + +//------------------------------------------------------------------------------ +// Main call + +void WebPInitUpsamplers(void) { +#ifdef FANCY_UPSAMPLING + WebPUpsamplers[MODE_RGB] = UpsampleRgbLinePair; + WebPUpsamplers[MODE_RGBA] = UpsampleRgbaLinePair; + WebPUpsamplers[MODE_BGR] = UpsampleBgrLinePair; + WebPUpsamplers[MODE_BGRA] = UpsampleBgraLinePair; + WebPUpsamplers[MODE_ARGB] = UpsampleArgbLinePair; + WebPUpsamplers[MODE_RGBA_4444] = UpsampleRgba4444LinePair; + WebPUpsamplers[MODE_RGB_565] = UpsampleRgb565LinePair; + + // If defined, use CPUInfo() to overwrite some pointers with faster versions. + if (VP8GetCPUInfo != NULL) { +#if defined(WEBP_USE_SSE2) + if (VP8GetCPUInfo(kSSE2)) { + WebPInitUpsamplersSSE2(); + } +#endif + } +#endif // FANCY_UPSAMPLING +} + +void WebPInitPremultiply(void) { + WebPApplyAlphaMultiply = ApplyAlphaMultiply; + WebPApplyAlphaMultiply4444 = ApplyAlphaMultiply4444; + +#ifdef FANCY_UPSAMPLING + WebPUpsamplers[MODE_rgbA] = UpsampleRgbaLinePair; + WebPUpsamplers[MODE_bgrA] = UpsampleBgraLinePair; + WebPUpsamplers[MODE_Argb] = UpsampleArgbLinePair; + WebPUpsamplers[MODE_rgbA_4444] = UpsampleRgba4444LinePair; + + if (VP8GetCPUInfo != NULL) { +#if defined(WEBP_USE_SSE2) + if (VP8GetCPUInfo(kSSE2)) { + WebPInitPremultiplySSE2(); + } +#endif + } +#endif // FANCY_UPSAMPLING +} + +#if defined(__cplusplus) || defined(c_plusplus) +} // extern "C" +#endif diff --git a/drivers/webpold/dsp/upsampling_sse2.c b/drivers/webpold/dsp/upsampling_sse2.c new file mode 100644 index 0000000000..8cb275a02b --- /dev/null +++ b/drivers/webpold/dsp/upsampling_sse2.c @@ -0,0 +1,209 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// +// This code is licensed under the same terms as WebM: +// Software License Agreement: http://www.webmproject.org/license/software/ +// Additional IP Rights Grant: http://www.webmproject.org/license/additional/ +// ----------------------------------------------------------------------------- +// +// SSE2 version of YUV to RGB upsampling functions. 
+//
+// Author: somnath@google.com (Somnath Banerjee)
+
+#include "./dsp.h"
+
+#if defined(WEBP_USE_SSE2)
+
+#include <assert.h>
+#include <emmintrin.h>
+#include <string.h>
+#include "./yuv.h"
+
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+#ifdef FANCY_UPSAMPLING
+
+// We compute (9*a + 3*b + 3*c + d + 8) / 16 as follows
+// u = (9*a + 3*b + 3*c + d + 8) / 16
+//   = (a + (a + 3*b + 3*c + d) / 8 + 1) / 2
+//   = (a + m + 1) / 2
+// where m = (a + 3*b + 3*c + d) / 8
+//         = ((a + b + c + d) / 2 + b + c) / 4
+//
+// Let's say k = (a + b + c + d) / 4.
+// We can compute k as
+// k = (s + t + 1) / 2 - ((a^d) | (b^c) | (s^t)) & 1
+// where s = (a + d + 1) / 2 and t = (b + c + 1) / 2
+//
+// Then m can be written as
+// m = (k + t + 1) / 2 - (((b^c) & (s^t)) | (k^t)) & 1
+
+// Computes out = (k + in + 1) / 2 - ((ij & (s^t)) | (k^in)) & 1
+#define GET_M(ij, in, out) do {                                                \
+  const __m128i tmp0 = _mm_avg_epu8(k, (in));     /* (k + in + 1) / 2 */       \
+  const __m128i tmp1 = _mm_and_si128((ij), st);   /* (ij) & (s^t) */           \
+  const __m128i tmp2 = _mm_xor_si128(k, (in));    /* (k^in) */                 \
+  const __m128i tmp3 = _mm_or_si128(tmp1, tmp2);  /* ((ij) & (s^t)) | (k^in) */\
+  const __m128i tmp4 = _mm_and_si128(tmp3, one);  /* & 1 -> lsb_correction */  \
+  (out) = _mm_sub_epi8(tmp0, tmp4);    /* (k + in + 1) / 2 - lsb_correction */ \
+} while (0)
+
+// pack and store two alternating pixel rows
+#define PACK_AND_STORE(a, b, da, db, out) do {                                 \
+  const __m128i ta = _mm_avg_epu8(a, da);  /* (9a + 3b + 3c + d + 8) / 16 */   \
+  const __m128i tb = _mm_avg_epu8(b, db);  /* (3a + 9b + c + 3d + 8) / 16 */   \
+  const __m128i t1 = _mm_unpacklo_epi8(ta, tb);                                \
+  const __m128i t2 = _mm_unpackhi_epi8(ta, tb);                                \
+  _mm_store_si128(((__m128i*)(out)) + 0, t1);                                  \
+  _mm_store_si128(((__m128i*)(out)) + 1, t2);                                  \
+} while (0)
+
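The identities in the comment block above are exact for unsigned 8-bit inputs: the rounding averages can overshoot the true quotient by one, and the AND-with-one term subtracts exactly that overshoot. A minimal standalone scalar check of the k, m and final-pixel formulas, assuming nothing beyond the C standard library (avg_round stands in for a single byte lane of _mm_avg_epu8):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

static int avg_round(int x, int y) { return (x + y + 1) >> 1; }  /* one lane of _mm_avg_epu8 */

int main(void) {
  int n;
  srand(1);
  for (n = 0; n < 1000000; ++n) {
    const int a = rand() & 0xff, b = rand() & 0xff;
    const int c = rand() & 0xff, d = rand() & 0xff;
    const int s = avg_round(a, d);
    const int t = avg_round(b, c);
    const int k = avg_round(s, t) - (((a ^ d) | (b ^ c) | (s ^ t)) & 1);
    const int m = avg_round(k, t) - ((((b ^ c) & (s ^ t)) | (k ^ t)) & 1);
    assert(k == (a + b + c + d) / 4);
    assert(m == (a + 3 * b + 3 * c + d) / 8);
    assert(avg_round(a, m) == (9 * a + 3 * b + 3 * c + d + 8) / 16);
  }
  printf("identities hold for %d random (a, b, c, d) tuples\n", n);
  return 0;
}

The UPSAMPLE_32PIXELS macro below performs these same steps sixteen bytes at a time with the SSE2 byte-averaging instructions.

+// Loads 17 pixels each from rows r1 and r2 and generates 32 pixels.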
+#define UPSAMPLE_32PIXELS(r1, r2, out) { \ + const __m128i one = _mm_set1_epi8(1); \ + const __m128i a = _mm_loadu_si128((__m128i*)&(r1)[0]); \ + const __m128i b = _mm_loadu_si128((__m128i*)&(r1)[1]); \ + const __m128i c = _mm_loadu_si128((__m128i*)&(r2)[0]); \ + const __m128i d = _mm_loadu_si128((__m128i*)&(r2)[1]); \ + \ + const __m128i s = _mm_avg_epu8(a, d); /* s = (a + d + 1) / 2 */ \ + const __m128i t = _mm_avg_epu8(b, c); /* t = (b + c + 1) / 2 */ \ + const __m128i st = _mm_xor_si128(s, t); /* st = s^t */ \ + \ + const __m128i ad = _mm_xor_si128(a, d); /* ad = a^d */ \ + const __m128i bc = _mm_xor_si128(b, c); /* bc = b^c */ \ + \ + const __m128i t1 = _mm_or_si128(ad, bc); /* (a^d) | (b^c) */ \ + const __m128i t2 = _mm_or_si128(t1, st); /* (a^d) | (b^c) | (s^t) */ \ + const __m128i t3 = _mm_and_si128(t2, one); /* (a^d) | (b^c) | (s^t) & 1 */ \ + const __m128i t4 = _mm_avg_epu8(s, t); \ + const __m128i k = _mm_sub_epi8(t4, t3); /* k = (a + b + c + d) / 4 */ \ + __m128i diag1, diag2; \ + \ + GET_M(bc, t, diag1); /* diag1 = (a + 3b + 3c + d) / 8 */ \ + GET_M(ad, s, diag2); /* diag2 = (3a + b + c + 3d) / 8 */ \ + \ + /* pack the alternate pixels */ \ + PACK_AND_STORE(a, b, diag1, diag2, &(out)[0 * 32]); \ + PACK_AND_STORE(c, d, diag2, diag1, &(out)[2 * 32]); \ +} + +// Turn the macro into a function for reducing code-size when non-critical +static void Upsample32Pixels(const uint8_t r1[], const uint8_t r2[], + uint8_t* const out) { + UPSAMPLE_32PIXELS(r1, r2, out); +} + +#define UPSAMPLE_LAST_BLOCK(tb, bb, num_pixels, out) { \ + uint8_t r1[17], r2[17]; \ + memcpy(r1, (tb), (num_pixels)); \ + memcpy(r2, (bb), (num_pixels)); \ + /* replicate last byte */ \ + memset(r1 + (num_pixels), r1[(num_pixels) - 1], 17 - (num_pixels)); \ + memset(r2 + (num_pixels), r2[(num_pixels) - 1], 17 - (num_pixels)); \ + /* using the shared function instead of the macro saves ~3k code size */ \ + Upsample32Pixels(r1, r2, out); \ +} + +#define CONVERT2RGB(FUNC, XSTEP, top_y, bottom_y, uv, \ + top_dst, bottom_dst, cur_x, num_pixels) { \ + int n; \ + if (top_y) { \ + for (n = 0; n < (num_pixels); ++n) { \ + FUNC(top_y[(cur_x) + n], (uv)[n], (uv)[32 + n], \ + top_dst + ((cur_x) + n) * XSTEP); \ + } \ + } \ + if (bottom_y) { \ + for (n = 0; n < (num_pixels); ++n) { \ + FUNC(bottom_y[(cur_x) + n], (uv)[64 + n], (uv)[64 + 32 + n], \ + bottom_dst + ((cur_x) + n) * XSTEP); \ + } \ + } \ +} + +#define SSE2_UPSAMPLE_FUNC(FUNC_NAME, FUNC, XSTEP) \ +static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \ + const uint8_t* top_u, const uint8_t* top_v, \ + const uint8_t* cur_u, const uint8_t* cur_v, \ + uint8_t* top_dst, uint8_t* bottom_dst, int len) { \ + int b; \ + /* 16 byte aligned array to cache reconstructed u and v */ \ + uint8_t uv_buf[4 * 32 + 15]; \ + uint8_t* const r_uv = (uint8_t*)((uintptr_t)(uv_buf + 15) & ~15); \ + const int uv_len = (len + 1) >> 1; \ + /* 17 pixels must be read-able for each block */ \ + const int num_blocks = (uv_len - 1) >> 4; \ + const int leftover = uv_len - num_blocks * 16; \ + const int last_pos = 1 + 32 * num_blocks; \ + \ + const int u_diag = ((top_u[0] + cur_u[0]) >> 1) + 1; \ + const int v_diag = ((top_v[0] + cur_v[0]) >> 1) + 1; \ + \ + assert(len > 0); \ + /* Treat the first pixel in regular way */ \ + if (top_y) { \ + const int u0 = (top_u[0] + u_diag) >> 1; \ + const int v0 = (top_v[0] + v_diag) >> 1; \ + FUNC(top_y[0], u0, v0, top_dst); \ + } \ + if (bottom_y) { \ + const int u0 = (cur_u[0] + u_diag) >> 1; \ + const int v0 = (cur_v[0] + v_diag) >> 1; \ + 
FUNC(bottom_y[0], u0, v0, bottom_dst); \ + } \ + \ + for (b = 0; b < num_blocks; ++b) { \ + UPSAMPLE_32PIXELS(top_u, cur_u, r_uv + 0 * 32); \ + UPSAMPLE_32PIXELS(top_v, cur_v, r_uv + 1 * 32); \ + CONVERT2RGB(FUNC, XSTEP, top_y, bottom_y, r_uv, top_dst, bottom_dst, \ + 32 * b + 1, 32) \ + top_u += 16; \ + cur_u += 16; \ + top_v += 16; \ + cur_v += 16; \ + } \ + \ + UPSAMPLE_LAST_BLOCK(top_u, cur_u, leftover, r_uv + 0 * 32); \ + UPSAMPLE_LAST_BLOCK(top_v, cur_v, leftover, r_uv + 1 * 32); \ + CONVERT2RGB(FUNC, XSTEP, top_y, bottom_y, r_uv, top_dst, bottom_dst, \ + last_pos, len - last_pos); \ +} + +// SSE2 variants of the fancy upsampler. +SSE2_UPSAMPLE_FUNC(UpsampleRgbLinePairSSE2, VP8YuvToRgb, 3) +SSE2_UPSAMPLE_FUNC(UpsampleBgrLinePairSSE2, VP8YuvToBgr, 3) +SSE2_UPSAMPLE_FUNC(UpsampleRgbaLinePairSSE2, VP8YuvToRgba, 4) +SSE2_UPSAMPLE_FUNC(UpsampleBgraLinePairSSE2, VP8YuvToBgra, 4) + +#undef GET_M +#undef PACK_AND_STORE +#undef UPSAMPLE_32PIXELS +#undef UPSAMPLE_LAST_BLOCK +#undef CONVERT2RGB +#undef SSE2_UPSAMPLE_FUNC + +//------------------------------------------------------------------------------ + +extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */]; + +void WebPInitUpsamplersSSE2(void) { + WebPUpsamplers[MODE_RGB] = UpsampleRgbLinePairSSE2; + WebPUpsamplers[MODE_RGBA] = UpsampleRgbaLinePairSSE2; + WebPUpsamplers[MODE_BGR] = UpsampleBgrLinePairSSE2; + WebPUpsamplers[MODE_BGRA] = UpsampleBgraLinePairSSE2; +} + +void WebPInitPremultiplySSE2(void) { + WebPUpsamplers[MODE_rgbA] = UpsampleRgbaLinePairSSE2; + WebPUpsamplers[MODE_bgrA] = UpsampleBgraLinePairSSE2; +} + +#endif // FANCY_UPSAMPLING + +#if defined(__cplusplus) || defined(c_plusplus) +} // extern "C" +#endif + +#endif // WEBP_USE_SSE2 diff --git a/drivers/webpold/dsp/yuv.c b/drivers/webpold/dsp/yuv.c new file mode 100644 index 0000000000..7f05f9a3aa --- /dev/null +++ b/drivers/webpold/dsp/yuv.c @@ -0,0 +1,52 @@ +// Copyright 2010 Google Inc. All Rights Reserved. +// +// This code is licensed under the same terms as WebM: +// Software License Agreement: http://www.webmproject.org/license/software/ +// Additional IP Rights Grant: http://www.webmproject.org/license/additional/ +// ----------------------------------------------------------------------------- +// +// YUV->RGB conversion function +// +// Author: Skal (pascal.massimino@gmail.com) + +#include "./yuv.h" + +#if defined(__cplusplus) || defined(c_plusplus) +extern "C" { +#endif + +enum { YUV_HALF = 1 << (YUV_FIX - 1) }; + +int16_t VP8kVToR[256], VP8kUToB[256]; +int32_t VP8kVToG[256], VP8kUToG[256]; +uint8_t VP8kClip[YUV_RANGE_MAX - YUV_RANGE_MIN]; +uint8_t VP8kClip4Bits[YUV_RANGE_MAX - YUV_RANGE_MIN]; + +static int done = 0; + +static WEBP_INLINE uint8_t clip(int v, int max_value) { + return v < 0 ? 0 : v > max_value ? 
max_value : v; +} + +void VP8YUVInit(void) { + int i; + if (done) { + return; + } + for (i = 0; i < 256; ++i) { + VP8kVToR[i] = (89858 * (i - 128) + YUV_HALF) >> YUV_FIX; + VP8kUToG[i] = -22014 * (i - 128) + YUV_HALF; + VP8kVToG[i] = -45773 * (i - 128); + VP8kUToB[i] = (113618 * (i - 128) + YUV_HALF) >> YUV_FIX; + } + for (i = YUV_RANGE_MIN; i < YUV_RANGE_MAX; ++i) { + const int k = ((i - 16) * 76283 + YUV_HALF) >> YUV_FIX; + VP8kClip[i - YUV_RANGE_MIN] = clip(k, 255); + VP8kClip4Bits[i - YUV_RANGE_MIN] = clip((k + 8) >> 4, 15); + } + done = 1; +} + +#if defined(__cplusplus) || defined(c_plusplus) +} // extern "C" +#endif diff --git a/drivers/webpold/dsp/yuv.h b/drivers/webpold/dsp/yuv.h new file mode 100644 index 0000000000..a569109c54 --- /dev/null +++ b/drivers/webpold/dsp/yuv.h @@ -0,0 +1,128 @@ +// Copyright 2010 Google Inc. All Rights Reserved. +// +// This code is licensed under the same terms as WebM: +// Software License Agreement: http://www.webmproject.org/license/software/ +// Additional IP Rights Grant: http://www.webmproject.org/license/additional/ +// ----------------------------------------------------------------------------- +// +// inline YUV<->RGB conversion function +// +// Author: Skal (pascal.massimino@gmail.com) + +#ifndef WEBP_DSP_YUV_H_ +#define WEBP_DSP_YUV_H_ + +#include "../dec/decode_vp8.h" + +//------------------------------------------------------------------------------ +// YUV -> RGB conversion + +#if defined(__cplusplus) || defined(c_plusplus) +extern "C" { +#endif + +enum { YUV_FIX = 16, // fixed-point precision + YUV_RANGE_MIN = -227, // min value of r/g/b output + YUV_RANGE_MAX = 256 + 226 // max value of r/g/b output +}; +extern int16_t VP8kVToR[256], VP8kUToB[256]; +extern int32_t VP8kVToG[256], VP8kUToG[256]; +extern uint8_t VP8kClip[YUV_RANGE_MAX - YUV_RANGE_MIN]; +extern uint8_t VP8kClip4Bits[YUV_RANGE_MAX - YUV_RANGE_MIN]; + +static WEBP_INLINE void VP8YuvToRgb(uint8_t y, uint8_t u, uint8_t v, + uint8_t* const rgb) { + const int r_off = VP8kVToR[v]; + const int g_off = (VP8kVToG[v] + VP8kUToG[u]) >> YUV_FIX; + const int b_off = VP8kUToB[u]; + rgb[0] = VP8kClip[y + r_off - YUV_RANGE_MIN]; + rgb[1] = VP8kClip[y + g_off - YUV_RANGE_MIN]; + rgb[2] = VP8kClip[y + b_off - YUV_RANGE_MIN]; +} + +static WEBP_INLINE void VP8YuvToRgb565(uint8_t y, uint8_t u, uint8_t v, + uint8_t* const rgb) { + const int r_off = VP8kVToR[v]; + const int g_off = (VP8kVToG[v] + VP8kUToG[u]) >> YUV_FIX; + const int b_off = VP8kUToB[u]; + rgb[0] = ((VP8kClip[y + r_off - YUV_RANGE_MIN] & 0xf8) | + (VP8kClip[y + g_off - YUV_RANGE_MIN] >> 5)); + rgb[1] = (((VP8kClip[y + g_off - YUV_RANGE_MIN] << 3) & 0xe0) | + (VP8kClip[y + b_off - YUV_RANGE_MIN] >> 3)); +} + +static WEBP_INLINE void VP8YuvToArgb(uint8_t y, uint8_t u, uint8_t v, + uint8_t* const argb) { + argb[0] = 0xff; + VP8YuvToRgb(y, u, v, argb + 1); +} + +static WEBP_INLINE void VP8YuvToRgba4444(uint8_t y, uint8_t u, uint8_t v, + uint8_t* const argb) { + const int r_off = VP8kVToR[v]; + const int g_off = (VP8kVToG[v] + VP8kUToG[u]) >> YUV_FIX; + const int b_off = VP8kUToB[u]; + // Don't update alpha (last 4 bits of argb[1]) + argb[0] = ((VP8kClip4Bits[y + r_off - YUV_RANGE_MIN] << 4) | + VP8kClip4Bits[y + g_off - YUV_RANGE_MIN]); + argb[1] = 0x0f | (VP8kClip4Bits[y + b_off - YUV_RANGE_MIN] << 4); +} + +static WEBP_INLINE void VP8YuvToBgr(uint8_t y, uint8_t u, uint8_t v, + uint8_t* const bgr) { + const int r_off = VP8kVToR[v]; + const int g_off = (VP8kVToG[v] + VP8kUToG[u]) >> YUV_FIX; + const int b_off = VP8kUToB[u]; + bgr[0] 
= VP8kClip[y + b_off - YUV_RANGE_MIN]; + bgr[1] = VP8kClip[y + g_off - YUV_RANGE_MIN]; + bgr[2] = VP8kClip[y + r_off - YUV_RANGE_MIN]; +} + +static WEBP_INLINE void VP8YuvToBgra(uint8_t y, uint8_t u, uint8_t v, + uint8_t* const bgra) { + VP8YuvToBgr(y, u, v, bgra); + bgra[3] = 0xff; +} + +static WEBP_INLINE void VP8YuvToRgba(uint8_t y, uint8_t u, uint8_t v, + uint8_t* const rgba) { + VP8YuvToRgb(y, u, v, rgba); + rgba[3] = 0xff; +} + +// Must be called before everything, to initialize the tables. +void VP8YUVInit(void); + +//------------------------------------------------------------------------------ +// RGB -> YUV conversion +// The exact naming is Y'CbCr, following the ITU-R BT.601 standard. +// More information at: http://en.wikipedia.org/wiki/YCbCr +// Y = 0.2569 * R + 0.5044 * G + 0.0979 * B + 16 +// U = -0.1483 * R - 0.2911 * G + 0.4394 * B + 128 +// V = 0.4394 * R - 0.3679 * G - 0.0715 * B + 128 +// We use 16bit fixed point operations. + +static WEBP_INLINE int VP8ClipUV(int v) { + v = (v + (257 << (YUV_FIX + 2 - 1))) >> (YUV_FIX + 2); + return ((v & ~0xff) == 0) ? v : (v < 0) ? 0 : 255; +} + +static WEBP_INLINE int VP8RGBToY(int r, int g, int b) { + const int kRound = (1 << (YUV_FIX - 1)) + (16 << YUV_FIX); + const int luma = 16839 * r + 33059 * g + 6420 * b; + return (luma + kRound) >> YUV_FIX; // no need to clip +} + +static WEBP_INLINE int VP8RGBToU(int r, int g, int b) { + return VP8ClipUV(-9719 * r - 19081 * g + 28800 * b); +} + +static WEBP_INLINE int VP8RGBToV(int r, int g, int b) { + return VP8ClipUV(+28800 * r - 24116 * g - 4684 * b); +} + +#if defined(__cplusplus) || defined(c_plusplus) +} // extern "C" +#endif + +#endif /* WEBP_DSP_YUV_H_ */ |
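The luma constants above are the quoted BT.601 coefficients in YUV_FIX (16-bit) fixed point: 16839/65536 ≈ 0.2569, 33059/65536 ≈ 0.5044 and 6420/65536 ≈ 0.0979, with the +16 offset folded into kRound, so black maps to limited-range 16 and white to 235. A small standalone check, with the VP8RGBToY arithmetic copied locally so the snippet builds without the private headers; everything else in it is illustrative only:

#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#define YUV_FIX 16  /* same fixed-point precision as yuv.h */

static int RGBToY(int r, int g, int b) {  /* same arithmetic as VP8RGBToY above */
  const int kRound = (1 << (YUV_FIX - 1)) + (16 << YUV_FIX);
  const int luma = 16839 * r + 33059 * g + 6420 * b;
  return (luma + kRound) >> YUV_FIX;
}

int main(void) {
  int v;
  assert(RGBToY(0, 0, 0) == 16);         /* black -> limited-range 16 */
  assert(RGBToY(255, 255, 255) == 235);  /* white -> limited-range 235 */
  for (v = 0; v < 256; ++v) {            /* gray ramp vs. the floating-point formula */
    const int ref = (int)floor(0.2569 * v + 0.5044 * v + 0.0979 * v + 16.5);
    assert(abs(RGBToY(v, v, v) - ref) <= 1);
  }
  printf("fixed-point luma matches the BT.601 coefficients to within 1\n");
  return 0;
}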