// Copyright 2009 Intel Corporation
// All Rights Reserved
//
// Permission is granted to use, copy, distribute and prepare derivative works of this
// software for any purpose and without fee, provided, that the above copyright notice
// and this statement appear in all copies.  Intel makes no representations about the
// suitability of this software for any purpose.  THIS SOFTWARE IS PROVIDED "AS IS."
// INTEL SPECIFICALLY DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, AND ALL LIABILITY,
// INCLUDING CONSEQUENTIAL AND OTHER INDIRECT DAMAGES, FOR THE USE OF THIS SOFTWARE,
// INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PROPRIETARY RIGHTS, AND INCLUDING THE
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  Intel does not
// assume any responsibility for any errors which may appear in this software nor any
// responsibility to update it.
//
// From:
// https://software.intel.com/sites/default/files/m/d/4/1/d/8/UsingIntelAVXToImplementIDCT-r1_5.pdf
// https://software.intel.com/file/29048
//
// Requires SSE
//
#ifdef _MSC_VER
#include <intrin.h>
#endif
#include <immintrin.h>

#ifdef _MSC_VER
	#define JPGD_SIMD_ALIGN(type, name) __declspec(align(16)) type name
#else
	#define JPGD_SIMD_ALIGN(type, name) type name __attribute__((aligned(16)))
#endif

#define BITS_INV_ACC 4
#define SHIFT_INV_ROW (16 - BITS_INV_ACC)
#define SHIFT_INV_COL (1 + BITS_INV_ACC)
const short IRND_INV_ROW = 1024 * (6 - BITS_INV_ACC);	//1 << (SHIFT_INV_ROW-1)
const short IRND_INV_COL = 16 * (BITS_INV_ACC - 3);		// 1 << (SHIFT_INV_COL-1)
const short IRND_INV_CORR = IRND_INV_COL - 1;			// correction -1.0 and round
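
// Editor's note: the checks below are an illustrative addition (not part of
// the original Intel code) confirming that, with BITS_INV_ACC == 4, the
// rounding biases are exactly the half-units described in the comments above.
// They assume a C++11 (or later) translation unit.
static_assert(IRND_INV_ROW == (1 << (SHIFT_INV_ROW - 1)), "row rounding bias should be half of the row shift unit");
static_assert(IRND_INV_COL == (1 << (SHIFT_INV_COL - 1)), "column rounding bias should be half of the column shift unit");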

JPGD_SIMD_ALIGN(short, shortM128_one_corr[8]) = {1, 1, 1, 1, 1, 1, 1, 1};
JPGD_SIMD_ALIGN(short, shortM128_round_inv_row[8]) = {IRND_INV_ROW, 0, IRND_INV_ROW, 0, IRND_INV_ROW, 0, IRND_INV_ROW, 0};
JPGD_SIMD_ALIGN(short, shortM128_round_inv_col[8]) = {IRND_INV_COL, IRND_INV_COL, IRND_INV_COL, IRND_INV_COL, IRND_INV_COL, IRND_INV_COL, IRND_INV_COL, IRND_INV_COL};
JPGD_SIMD_ALIGN(short, shortM128_round_inv_corr[8])= {IRND_INV_CORR, IRND_INV_CORR, IRND_INV_CORR, IRND_INV_CORR, IRND_INV_CORR, IRND_INV_CORR, IRND_INV_CORR, IRND_INV_CORR};
JPGD_SIMD_ALIGN(short, shortM128_tg_1_16[8]) = {13036, 13036, 13036, 13036, 13036, 13036, 13036, 13036}; // tan(1*pi/16) * (1 << 16), rounded
JPGD_SIMD_ALIGN(short, shortM128_tg_2_16[8]) = {27146, 27146, 27146, 27146, 27146, 27146, 27146, 27146}; // tan(2*pi/16) * (1 << 16), rounded
JPGD_SIMD_ALIGN(short, shortM128_tg_3_16[8]) = {-21746, -21746, -21746, -21746, -21746, -21746, -21746, -21746}; // (tan(3*pi/16) - 1) * (1 << 16), rounded (the factor wrapped to a signed short)
JPGD_SIMD_ALIGN(short, shortM128_cos_4_16[8]) = {-19195, -19195, -19195, -19195, -19195, -19195, -19195, -19195}; // (cos(4*pi/16) - 1) * (1 << 16), rounded (the factor wrapped to a signed short)
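
// Editor's sketch (an interpretation added for clarity, not part of the
// original Intel sources): the constants above are tangent/cosine factors
// scaled by 1 << 16 and consumed by _mm_mulhi_epi16, which keeps only the
// high 16 bits of each 16x16 product, i.e. (a * b) >> 16 per lane. A scalar
// model of that multiply, with a hypothetical name:
static inline short jpgd_idct_mulhi16_model(short a, short b)
{
	// Same result as one lane of _mm_mulhi_epi16: form the full 32-bit
	// product, then keep the high halfword.
	return (short)(((int)a * (int)b) >> 16);
}
// Factors of 0.5 or more (tan(3*pi/16) * 65536 ~= 43790, cos(4*pi/16) * 65536
// ~= 46341) do not fit in a signed short, so those tables store value - 65536.
// Because mulhi(a, c - 65536) == mulhi(a, c) - a, the column pass compensates
// by adding the multiplicand back after the multiply.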

//-----------------------------------------------------------------------------
// Table for rows 0,4 - constants are multiplied by cos_4_16
// w15 w14 w11 w10 w07 w06 w03 w02
// w29 w28 w25 w24 w21 w20 w17 w16
// w31 w30 w27 w26 w23 w22 w19 w18
//movq -> w05 w04 w01 w00
JPGD_SIMD_ALIGN(short, shortM128_tab_i_04[]) = {
	16384, 21407, 16384, 8867,
	16384, -8867, 16384, -21407, // w13 w12 w09 w08
	16384, 8867, -16384, -21407, // w07 w06 w03 w02
	-16384, 21407, 16384, -8867, // w15 w14 w11 w10
	22725, 19266, 19266, -4520, // w21 w20 w17 w16
	12873, -22725, 4520, -12873, // w29 w28 w25 w24
	12873, 4520, -22725, -12873, // w23 w22 w19 w18
	4520, 19266, 19266, -22725}; // w31 w30 w27 w26
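
// Editor's sketch (illustrative, not part of the original file): the table
// above is loaded four weight pairs at a time and fed to _mm_madd_epi16,
// which multiplies adjacent signed 16-bit elements and sums each pair into
// one of four 32-bit lanes, e.g. x0*16384 + x2*21407 for the first lane.
// Those lanes are then combined with the partial sums from the other
// coefficient pairs, biased by IRND_INV_ROW and shifted right by
// SHIFT_INV_ROW. A scalar model of the pairwise step, with a hypothetical
// name:
static inline int jpgd_idct_madd16_model(short a0, short a1, short b0, short b1)
{
	// One 32-bit lane of _mm_madd_epi16: two exact products, summed.
	return (int)a0 * (int)b0 + (int)a1 * (int)b1;
}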

// Table for rows 1,7 - constants are multiplied by cos_1_16
//movq -> w05 w04 w01 w00
JPGD_SIMD_ALIGN(short, shortM128_tab_i_17[]) = {
	22725, 29692, 22725, 12299,
	22725, -12299, 22725, -29692, // w13 w12 w09 w08
	22725, 12299, -22725, -29692, // w07 w06 w03 w02
	-22725, 29692, 22725, -12299, // w15 w14 w11 w10
	31521, 26722, 26722, -6270, // w21 w20 w17 w16
	17855, -31521, 6270, -17855, // w29 w28 w25 w24
	17855, 6270, -31521, -17855, // w23 w22 w19 w18
	6270, 26722, 26722, -31521}; // w31 w30 w27 w26

// Table for rows 2,6 - constants are multiplied by cos_2_16
//movq -> w05 w04 w01 w00
JPGD_SIMD_ALIGN(short, shortM128_tab_i_26[]) = {
	21407, 27969, 21407, 11585,
	21407, -11585, 21407, -27969, // w13 w12 w09 w08
	21407, 11585, -21407, -27969, // w07 w06 w03 w02
	-21407, 27969, 21407, -11585, // w15 w14 w11 w10
	29692, 25172, 25172, -5906,	// w21 w20 w17 w16
	16819, -29692, 5906, -16819, // w29 w28 w25 w24
	16819, 5906, -29692, -16819, // w23 w22 w19 w18
	5906, 25172, 25172, -29692}; // w31 w30 w27 w26
// Table for rows 3,5 - constants are multiplied by cos_3_16
//movq -> w05 w04 w01 w00
JPGD_SIMD_ALIGN(short, shortM128_tab_i_35[]) = {
	19266, 25172, 19266, 10426,
	19266, -10426, 19266, -25172, // w13 w12 w09 w08
	19266, 10426, -19266, -25172, // w07 w06 w03 w02
	-19266, 25172, 19266, -10426, // w15 w14 w11 w10
	26722, 22654, 22654, -5315, // w21 w20 w17 w16
	15137, -26722, 5315, -15137, // w29 w28 w25 w24
	15137, 5315, -26722, -15137, // w23 w22 w19 w18
	5315, 22654, 22654, -26722}; // w31 w30 w27 w26

JPGD_SIMD_ALIGN(short, shortM128_128[8]) = { 128, 128, 128, 128, 128, 128, 128, 128 };
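
// Editor's sketch (a scalar outline of the row pass inside idctSSEShortU8
// below, written against an un-interleaved 8x8 weight matrix; the
// shortM128_tab_i_* tables hold the same weights reordered into madd-friendly
// pairs, so this helper is illustrative only and is never called):
static inline void jpgd_idct_row_model(const short *pWeights8x8, const short *pRowIn, short *pRowOut)
{
	for (int i = 0; i < 8; i++)
	{
		// Fixed-point dot product with round-to-nearest, then a saturating
		// narrow, mirroring _mm_madd_epi16 / _mm_srai_epi32 / _mm_packs_epi32.
		int acc = IRND_INV_ROW;
		for (int j = 0; j < 8; j++)
			acc += (int)pWeights8x8[i * 8 + j] * (int)pRowIn[j];
		acc >>= SHIFT_INV_ROW;
		pRowOut[i] = (short)(acc > 32767 ? 32767 : (acc < -32768 ? -32768 : acc));
	}
}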

void idctSSEShortU8(const short *pInput, uint8_t * pOutputUB)
{
	__m128i r_xmm0, r_xmm4;
	__m128i r_xmm1, r_xmm2, r_xmm3, r_xmm5, r_xmm6, r_xmm7;
	__m128i row0, row1, row2, row3, row4, row5, row6, row7;
	//Row-pass table pointers: even input rows use tab_i_04/tab_i_26, odd rows
	//switch to tab_i_35/tab_i_17 further down.
	short * pTab_i_04 = shortM128_tab_i_04;
	short * pTab_i_26 = shortM128_tab_i_26;

	//Row 1 and Row 3
	r_xmm0 = _mm_load_si128((__m128i *) pInput);
	r_xmm4 = _mm_load_si128((__m128i *) (&pInput[2*8]));

	// *** Work on the data in xmm0
	//low shuffle mask = 0xd8 = 11 01 10 00
	//get short 2 and short 0 into ls 32-bits
	r_xmm0 = _mm_shufflelo_epi16(r_xmm0, 0xd8);

	// copy short 2 and short 0 to all locations
	r_xmm1 = _mm_shuffle_epi32(r_xmm0, 0);
		
	// multiply-add those copies against the first four weight pairs of the table
	r_xmm1 = _mm_madd_epi16(r_xmm1, *((__m128i *) pTab_i_04));

	// shuffle mask = 0x55 = 01 01 01 01
	// copy short 3 and short 1 to all locations
	r_xmm3 = _mm_shuffle_epi32(r_xmm0, 0x55);
		
	// high shuffle mask = 0xd8 = 11 01 10 00
	// get short 6 and short 4 into bit positions 64-95
	// get short 7 and short 5 into bit positions 96-127
	r_xmm0 = _mm_shufflehi_epi16(r_xmm0, 0xd8);
		
	// multiply-add the (short 1, short 3) copies by their weight pairs
	r_xmm3 = _mm_madd_epi16(r_xmm3, *((__m128i *) &pTab_i_04[16]));
		
	// shuffle mask = 0xaa = 10 10 10 10
	// copy short 6 and short 4 to all locations
	r_xmm2 = _mm_shuffle_epi32(r_xmm0, 0xaa);
		
	// shuffle mask = 0xff = 11 11 11 11
	// copy short 7 and short 5 to all locations
	r_xmm0 = _mm_shuffle_epi32(r_xmm0, 0xff);
		
	// multiply-add the (short 4, short 6) copies by their weight pairs
	r_xmm2 = _mm_madd_epi16(r_xmm2, *((__m128i *) &pTab_i_04[8])); 
		
	// *** Work on the data in xmm4
	// high shuffle mask = 0xd8 = 11 01 10 00
	// get short 6 and short 4 into bit positions 64-95
	// get short 7 and short 5 into bit positions 96-127
	r_xmm4 = _mm_shufflehi_epi16(r_xmm4, 0xd8);
		
	// add the row rounding bias to the (short 0, short 2) partial sums
	r_xmm1 = _mm_add_epi32(r_xmm1, *((__m128i *) shortM128_round_inv_row));
	r_xmm4 = _mm_shufflelo_epi16(r_xmm4, 0xd8);
	r_xmm0 = _mm_madd_epi16(r_xmm0, *((__m128i *) &pTab_i_04[24]));
	r_xmm5 = _mm_shuffle_epi32(r_xmm4, 0);
	r_xmm6 = _mm_shuffle_epi32(r_xmm4, 0xaa);
	r_xmm5 = _mm_madd_epi16(r_xmm5, *((__m128i *) &shortM128_tab_i_26[0]));
	r_xmm1 = _mm_add_epi32(r_xmm1, r_xmm2);
	r_xmm2 = r_xmm1;
	r_xmm7 = _mm_shuffle_epi32(r_xmm4, 0x55);
	r_xmm6 = _mm_madd_epi16(r_xmm6, *((__m128i *) &shortM128_tab_i_26[8])); 
	r_xmm0 = _mm_add_epi32(r_xmm0, r_xmm3);
	r_xmm4 = _mm_shuffle_epi32(r_xmm4, 0xff);
	r_xmm2 = _mm_sub_epi32(r_xmm2, r_xmm0);
	r_xmm7 = _mm_madd_epi16(r_xmm7, *((__m128i *) &shortM128_tab_i_26[16])); 
	r_xmm0 = _mm_add_epi32(r_xmm0, r_xmm1);
	r_xmm2 = _mm_srai_epi32(r_xmm2, 12);
	r_xmm5 = _mm_add_epi32(r_xmm5, *((__m128i *) shortM128_round_inv_row));
	r_xmm4 = _mm_madd_epi16(r_xmm4, *((__m128i *) &shortM128_tab_i_26[24]));
	r_xmm5 = _mm_add_epi32(r_xmm5, r_xmm6);
	r_xmm6 = r_xmm5;
	r_xmm0 = _mm_srai_epi32(r_xmm0, 12);
	r_xmm2 = _mm_shuffle_epi32(r_xmm2, 0x1b);
	row0 = _mm_packs_epi32(r_xmm0, r_xmm2);
	r_xmm4 = _mm_add_epi32(r_xmm4, r_xmm7);
	r_xmm6 = _mm_sub_epi32(r_xmm6, r_xmm4);
	r_xmm4 = _mm_add_epi32(r_xmm4, r_xmm5);
	r_xmm6 = _mm_srai_epi32(r_xmm6, 12);
	r_xmm4 = _mm_srai_epi32(r_xmm4, 12);
	r_xmm6 = _mm_shuffle_epi32(r_xmm6, 0x1b);
	row2 = _mm_packs_epi32(r_xmm4, r_xmm6);

	//Row 5 and row 7
	r_xmm0 = _mm_load_si128((__m128i *) (&pInput[4*8]));
	r_xmm4 = _mm_load_si128((__m128i *) (&pInput[6*8]));

	r_xmm0 = _mm_shufflelo_epi16(r_xmm0, 0xd8);
	r_xmm1 = _mm_shuffle_epi32(r_xmm0, 0);
	r_xmm1 = _mm_madd_epi16(r_xmm1, *((__m128i *) pTab_i_04));
	r_xmm3 = _mm_shuffle_epi32(r_xmm0, 0x55);
	r_xmm0 = _mm_shufflehi_epi16(r_xmm0, 0xd8);
	r_xmm3 = _mm_madd_epi16(r_xmm3, *((__m128i *) &pTab_i_04[16]));
	r_xmm2 = _mm_shuffle_epi32(r_xmm0, 0xaa);
	r_xmm0 = _mm_shuffle_epi32(r_xmm0, 0xff);
	r_xmm2 = _mm_madd_epi16(r_xmm2, *((__m128i *) &pTab_i_04[8])); 
	r_xmm4 = _mm_shufflehi_epi16(r_xmm4, 0xd8);
	r_xmm1 = _mm_add_epi32(r_xmm1, *((__m128i *) shortM128_round_inv_row));
	r_xmm4 = _mm_shufflelo_epi16(r_xmm4, 0xd8);
	r_xmm0 = _mm_madd_epi16(r_xmm0, *((__m128i *) &pTab_i_04[24]));
	r_xmm5 = _mm_shuffle_epi32(r_xmm4, 0);
	r_xmm6 = _mm_shuffle_epi32(r_xmm4, 0xaa);
	r_xmm5 = _mm_madd_epi16(r_xmm5, *((__m128i *) &shortM128_tab_i_26[0]));
	r_xmm1 = _mm_add_epi32(r_xmm1, r_xmm2);
	r_xmm2 = r_xmm1;
	r_xmm7 = _mm_shuffle_epi32(r_xmm4, 0x55);
	r_xmm6 = _mm_madd_epi16(r_xmm6, *((__m128i *) &shortM128_tab_i_26[8])); 
	r_xmm0 = _mm_add_epi32(r_xmm0, r_xmm3);
	r_xmm4 = _mm_shuffle_epi32(r_xmm4, 0xff);
	r_xmm2 = _mm_sub_epi32(r_xmm2, r_xmm0);
	r_xmm7 = _mm_madd_epi16(r_xmm7, *((__m128i *) &shortM128_tab_i_26[16])); 
	r_xmm0 = _mm_add_epi32(r_xmm0, r_xmm1);
	r_xmm2 = _mm_srai_epi32(r_xmm2, 12);
	r_xmm5 = _mm_add_epi32(r_xmm5, *((__m128i *) shortM128_round_inv_row));
	r_xmm4 = _mm_madd_epi16(r_xmm4, *((__m128i *) &shortM128_tab_i_26[24]));
	r_xmm5 = _mm_add_epi32(r_xmm5, r_xmm6);
	r_xmm6 = r_xmm5;
	r_xmm0 = _mm_srai_epi32(r_xmm0, 12);
	r_xmm2 = _mm_shuffle_epi32(r_xmm2, 0x1b);
	row4 = _mm_packs_epi32(r_xmm0, r_xmm2);
	r_xmm4 = _mm_add_epi32(r_xmm4, r_xmm7);
	r_xmm6 = _mm_sub_epi32(r_xmm6, r_xmm4);
	r_xmm4 = _mm_add_epi32(r_xmm4, r_xmm5);
	r_xmm6 = _mm_srai_epi32(r_xmm6, 12);
	r_xmm4 = _mm_srai_epi32(r_xmm4, 12);
	r_xmm6 = _mm_shuffle_epi32(r_xmm6, 0x1b);
	row6 = _mm_packs_epi32(r_xmm4, r_xmm6);

	//Row 4 and row 2
	pTab_i_04 = shortM128_tab_i_35;
	pTab_i_26 = shortM128_tab_i_17;
	r_xmm0 = _mm_load_si128((__m128i *) (&pInput[3*8]));
	r_xmm4 = _mm_load_si128((__m128i *) (&pInput[1*8]));

	r_xmm0 = _mm_shufflelo_epi16(r_xmm0, 0xd8);
	r_xmm1 = _mm_shuffle_epi32(r_xmm0, 0);
	r_xmm1 = _mm_madd_epi16(r_xmm1, *((__m128i *) pTab_i_04));
	r_xmm3 = _mm_shuffle_epi32(r_xmm0, 0x55);
	r_xmm0 = _mm_shufflehi_epi16(r_xmm0, 0xd8);
	r_xmm3 = _mm_madd_epi16(r_xmm3, *((__m128i *) &pTab_i_04[16]));
	r_xmm2 = _mm_shuffle_epi32(r_xmm0, 0xaa);
	r_xmm0 = _mm_shuffle_epi32(r_xmm0, 0xff);
	r_xmm2 = _mm_madd_epi16(r_xmm2, *((__m128i *) &pTab_i_04[8])); 
	r_xmm4 = _mm_shufflehi_epi16(r_xmm4, 0xd8);
	r_xmm1 = _mm_add_epi32(r_xmm1, *((__m128i *) shortM128_round_inv_row));
	r_xmm4 = _mm_shufflelo_epi16(r_xmm4, 0xd8);
	r_xmm0 = _mm_madd_epi16(r_xmm0, *((__m128i *) &pTab_i_04[24]));
	r_xmm5 = _mm_shuffle_epi32(r_xmm4, 0);
	r_xmm6 = _mm_shuffle_epi32(r_xmm4, 0xaa);
	r_xmm5 = _mm_madd_epi16(r_xmm5, *((__m128i *) &pTab_i_26[0]));
	r_xmm1 = _mm_add_epi32(r_xmm1, r_xmm2);
	r_xmm2 = r_xmm1;
	r_xmm7 = _mm_shuffle_epi32(r_xmm4, 0x55);
	r_xmm6 = _mm_madd_epi16(r_xmm6, *((__m128i *) &pTab_i_26[8])); 
	r_xmm0 = _mm_add_epi32(r_xmm0, r_xmm3);
	r_xmm4 = _mm_shuffle_epi32(r_xmm4, 0xff);
	r_xmm2 = _mm_sub_epi32(r_xmm2, r_xmm0);
	r_xmm7 = _mm_madd_epi16(r_xmm7, *((__m128i *) &pTab_i_26[16])); 
	r_xmm0 = _mm_add_epi32(r_xmm0, r_xmm1);
	r_xmm2 = _mm_srai_epi32(r_xmm2, 12);
	r_xmm5 = _mm_add_epi32(r_xmm5, *((__m128i *) shortM128_round_inv_row));
	r_xmm4 = _mm_madd_epi16(r_xmm4, *((__m128i *) &pTab_i_26[24]));
	r_xmm5 = _mm_add_epi32(r_xmm5, r_xmm6);
	r_xmm6 = r_xmm5;
	r_xmm0 = _mm_srai_epi32(r_xmm0, 12);
	r_xmm2 = _mm_shuffle_epi32(r_xmm2, 0x1b);
	row3 = _mm_packs_epi32(r_xmm0, r_xmm2);
	r_xmm4 = _mm_add_epi32(r_xmm4, r_xmm7);
	r_xmm6 = _mm_sub_epi32(r_xmm6, r_xmm4);
	r_xmm4 = _mm_add_epi32(r_xmm4, r_xmm5);
	r_xmm6 = _mm_srai_epi32(r_xmm6, 12);
	r_xmm4 = _mm_srai_epi32(r_xmm4, 12);
	r_xmm6 = _mm_shuffle_epi32(r_xmm6, 0x1b);
	row1 = _mm_packs_epi32(r_xmm4, r_xmm6);

	//Row 6 and row 8
	r_xmm0 = _mm_load_si128((__m128i *) (&pInput[5*8]));
	r_xmm4 = _mm_load_si128((__m128i *) (&pInput[7*8]));

	r_xmm0 = _mm_shufflelo_epi16(r_xmm0, 0xd8);
	r_xmm1 = _mm_shuffle_epi32(r_xmm0, 0);
	r_xmm1 = _mm_madd_epi16(r_xmm1, *((__m128i *) pTab_i_04));
	r_xmm3 = _mm_shuffle_epi32(r_xmm0, 0x55);
	r_xmm0 = _mm_shufflehi_epi16(r_xmm0, 0xd8);
	r_xmm3 = _mm_madd_epi16(r_xmm3, *((__m128i *) &pTab_i_04[16]));
	r_xmm2 = _mm_shuffle_epi32(r_xmm0, 0xaa);
	r_xmm0 = _mm_shuffle_epi32(r_xmm0, 0xff);
	r_xmm2 = _mm_madd_epi16(r_xmm2, *((__m128i *) &pTab_i_04[8])); 
	r_xmm4 = _mm_shufflehi_epi16(r_xmm4, 0xd8);
	r_xmm1 = _mm_add_epi32(r_xmm1, *((__m128i *) shortM128_round_inv_row));
	r_xmm4 = _mm_shufflelo_epi16(r_xmm4, 0xd8);
	r_xmm0 = _mm_madd_epi16(r_xmm0, *((__m128i *) &pTab_i_04[24]));
	r_xmm5 = _mm_shuffle_epi32(r_xmm4, 0);
	r_xmm6 = _mm_shuffle_epi32(r_xmm4, 0xaa);
	r_xmm5 = _mm_madd_epi16(r_xmm5, *((__m128i *) &pTab_i_26[0]));
	r_xmm1 = _mm_add_epi32(r_xmm1, r_xmm2);
	r_xmm2 = r_xmm1;
	r_xmm7 = _mm_shuffle_epi32(r_xmm4, 0x55);
	r_xmm6 = _mm_madd_epi16(r_xmm6, *((__m128i *) &pTab_i_26[8])); 
	r_xmm0 = _mm_add_epi32(r_xmm0, r_xmm3);
	r_xmm4 = _mm_shuffle_epi32(r_xmm4, 0xff);
	r_xmm2 = _mm_sub_epi32(r_xmm2, r_xmm0);
	r_xmm7 = _mm_madd_epi16(r_xmm7, *((__m128i *) &pTab_i_26[16])); 
	r_xmm0 = _mm_add_epi32(r_xmm0, r_xmm1);
	r_xmm2 = _mm_srai_epi32(r_xmm2, 12);
	r_xmm5 = _mm_add_epi32(r_xmm5, *((__m128i *) shortM128_round_inv_row));
	r_xmm4 = _mm_madd_epi16(r_xmm4, *((__m128i *) &pTab_i_26[24]));
	r_xmm5 = _mm_add_epi32(r_xmm5, r_xmm6);
	r_xmm6 = r_xmm5;
	r_xmm0 = _mm_srai_epi32(r_xmm0, 12);
	r_xmm2 = _mm_shuffle_epi32(r_xmm2, 0x1b);
	row5 = _mm_packs_epi32(r_xmm0, r_xmm2);
	r_xmm4 = _mm_add_epi32(r_xmm4, r_xmm7);
	r_xmm6 = _mm_sub_epi32(r_xmm6, r_xmm4);
	r_xmm4 = _mm_add_epi32(r_xmm4, r_xmm5);
	r_xmm6 = _mm_srai_epi32(r_xmm6, 12);
	r_xmm4 = _mm_srai_epi32(r_xmm4, 12);
	r_xmm6 = _mm_shuffle_epi32(r_xmm6, 0x1b);
	row7 = _mm_packs_epi32(r_xmm4, r_xmm6);

	r_xmm1 = _mm_load_si128((__m128i *) shortM128_tg_3_16);
	r_xmm2 = row5;
	r_xmm3 = row3;
	r_xmm0 = _mm_mulhi_epi16(row5, r_xmm1);

	r_xmm1 = _mm_mulhi_epi16(r_xmm1, r_xmm3);
	r_xmm5 = _mm_load_si128((__m128i *) shortM128_tg_1_16);
	r_xmm6 = row7;
	r_xmm4 = _mm_mulhi_epi16(row7, r_xmm5);

	r_xmm0 = _mm_adds_epi16(r_xmm0, r_xmm2);
	r_xmm5 = _mm_mulhi_epi16(r_xmm5, row1);
	r_xmm1 = _mm_adds_epi16(r_xmm1, r_xmm3);
	r_xmm7 = row6;

	r_xmm0 = _mm_adds_epi16(r_xmm0, r_xmm3);
	r_xmm3 = _mm_load_si128((__m128i *) shortM128_tg_2_16);
	r_xmm2 = _mm_subs_epi16(r_xmm2, r_xmm1);
	r_xmm7 = _mm_mulhi_epi16(r_xmm7, r_xmm3);
	r_xmm1 = r_xmm0;
	r_xmm3 = _mm_mulhi_epi16(r_xmm3, row2);
	r_xmm5 = _mm_subs_epi16(r_xmm5, r_xmm6);
	r_xmm4 = _mm_adds_epi16(r_xmm4, row1);
	r_xmm0 = _mm_adds_epi16(r_xmm0, r_xmm4);
	r_xmm0 = _mm_adds_epi16(r_xmm0, *((__m128i *) shortM128_one_corr));
	r_xmm4 = _mm_subs_epi16(r_xmm4, r_xmm1);
	r_xmm6 = r_xmm5;
	r_xmm5 = _mm_subs_epi16(r_xmm5, r_xmm2);
	r_xmm5 = _mm_adds_epi16(r_xmm5, *((__m128i *) shortM128_one_corr));
	r_xmm6 = _mm_adds_epi16(r_xmm6, r_xmm2);

	//Intermediate results, needed later
	__m128i temp3, temp7;
	temp7 = r_xmm0;

	r_xmm1 = r_xmm4;
	r_xmm0 = _mm_load_si128((__m128i *) shortM128_cos_4_16);
	r_xmm4 = _mm_adds_epi16(r_xmm4, r_xmm5);
	r_xmm2 = _mm_load_si128((__m128i *) shortM128_cos_4_16);
	r_xmm2 = _mm_mulhi_epi16(r_xmm2, r_xmm4);

	//Intermediate results, needed later
	temp3 = r_xmm6;

	r_xmm1 = _mm_subs_epi16(r_xmm1, r_xmm5);
	r_xmm7 = _mm_adds_epi16(r_xmm7, row2);
	r_xmm3 = _mm_subs_epi16(r_xmm3, row6);
	r_xmm6 = row0;
	r_xmm0 = _mm_mulhi_epi16(r_xmm0, r_xmm1);
	r_xmm5 = row4;
	r_xmm5 = _mm_adds_epi16(r_xmm5, r_xmm6);
	r_xmm6 = _mm_subs_epi16(r_xmm6, row4);
	r_xmm4 = _mm_adds_epi16(r_xmm4, r_xmm2);

	r_xmm4 = _mm_or_si128(r_xmm4, *((__m128i *) shortM128_one_corr));
	r_xmm0 = _mm_adds_epi16(r_xmm0, r_xmm1);
	r_xmm0 = _mm_or_si128(r_xmm0, *((__m128i *) shortM128_one_corr));

	r_xmm2 = r_xmm5;
	r_xmm5 = _mm_adds_epi16(r_xmm5, r_xmm7);
	r_xmm1 = r_xmm6;
	r_xmm5 = _mm_adds_epi16(r_xmm5, *((__m128i *) shortM128_round_inv_col));
	r_xmm2 = _mm_subs_epi16(r_xmm2, r_xmm7);
	r_xmm7 = temp7;
	r_xmm6 = _mm_adds_epi16(r_xmm6, r_xmm3);
	r_xmm6 = _mm_adds_epi16(r_xmm6, *((__m128i *) shortM128_round_inv_col));
	r_xmm7 = _mm_adds_epi16(r_xmm7, r_xmm5);
	r_xmm7 = _mm_srai_epi16(r_xmm7, SHIFT_INV_COL);
	r_xmm1 = _mm_subs_epi16(r_xmm1, r_xmm3);
	r_xmm1 = _mm_adds_epi16(r_xmm1, *((__m128i *) shortM128_round_inv_corr));
	r_xmm3 = r_xmm6;
	r_xmm2 = _mm_adds_epi16(r_xmm2, *((__m128i *) shortM128_round_inv_corr));
	r_xmm6 = _mm_adds_epi16(r_xmm6, r_xmm4);

	//Store results for row 0
	//_mm_store_si128((__m128i *) pOutput, r_xmm7);
	__m128i r0 = r_xmm7;

	r_xmm6 = _mm_srai_epi16(r_xmm6, SHIFT_INV_COL);
	r_xmm7 = r_xmm1;
	r_xmm1 = _mm_adds_epi16(r_xmm1, r_xmm0);

	//Store results for row 1
	//_mm_store_si128((__m128i *) (&pOutput[1*8]), r_xmm6); 
	__m128i r1 = r_xmm6;

	r_xmm1 = _mm_srai_epi16(r_xmm1, SHIFT_INV_COL);
	r_xmm6 = temp3;
	r_xmm7 = _mm_subs_epi16(r_xmm7, r_xmm0);
	r_xmm7 = _mm_srai_epi16(r_xmm7, SHIFT_INV_COL);

	//Store results for row 2
	//_mm_store_si128((__m128i *) (&pOutput[2*8]), r_xmm1); 
	__m128i r2 = r_xmm1;

	r_xmm5 = _mm_subs_epi16(r_xmm5, temp7); 
	r_xmm5 = _mm_srai_epi16(r_xmm5, SHIFT_INV_COL);

	//Store results for row 7
	//_mm_store_si128((__m128i *) (&pOutput[7*8]), r_xmm5); 
	__m128i r7 = r_xmm5;

	r_xmm3 = _mm_subs_epi16(r_xmm3, r_xmm4);
	r_xmm6 = _mm_adds_epi16(r_xmm6, r_xmm2);
	r_xmm2 = _mm_subs_epi16(r_xmm2, temp3); 
	r_xmm6 = _mm_srai_epi16(r_xmm6, SHIFT_INV_COL);
	r_xmm2 = _mm_srai_epi16(r_xmm2, SHIFT_INV_COL);

	//Store results for row 3
	//_mm_store_si128((__m128i *) (&pOutput[3*8]), r_xmm6); 
	__m128i r3 = r_xmm6;

	r_xmm3 = _mm_srai_epi16(r_xmm3, SHIFT_INV_COL);

	//Store results for rows 4, 5, and 6
	//_mm_store_si128((__m128i *) (&pOutput[4*8]), r_xmm2);
	//_mm_store_si128((__m128i *) (&pOutput[5*8]), r_xmm7);
	//_mm_store_si128((__m128i *) (&pOutput[6*8]), r_xmm3);

	__m128i r4 = r_xmm2;
	__m128i r5 = r_xmm7;
	__m128i r6 = r_xmm3;

	r0 = _mm_add_epi16(*(const __m128i *)shortM128_128, r0);
	r1 = _mm_add_epi16(*(const __m128i *)shortM128_128, r1);
	r2 = _mm_add_epi16(*(const __m128i *)shortM128_128, r2);
	r3 = _mm_add_epi16(*(const __m128i *)shortM128_128, r3);
	r4 = _mm_add_epi16(*(const __m128i *)shortM128_128, r4);
	r5 = _mm_add_epi16(*(const __m128i *)shortM128_128, r5);
	r6 = _mm_add_epi16(*(const __m128i *)shortM128_128, r6);
	r7 = _mm_add_epi16(*(const __m128i *)shortM128_128, r7);

	((__m128i *)pOutputUB)[0] = _mm_packus_epi16(r0, r1);
	((__m128i *)pOutputUB)[1] = _mm_packus_epi16(r2, r3);
	((__m128i *)pOutputUB)[2] = _mm_packus_epi16(r4, r5);
	((__m128i *)pOutputUB)[3] = _mm_packus_epi16(r6, r7);
}
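
// Editor's usage sketch (assumptions, not taken from the original sources:
// pInput holds an 8x8 block of dequantized coefficients in row-major order,
// and both buffers are 16-byte aligned because the loads and stores go
// through __m128i pointers):
//
//	JPGD_SIMD_ALIGN(short, coeffs[64]);   // filled by the caller
//	JPGD_SIMD_ALIGN(uint8_t, pixels[64]); // receives the level-shifted samples
//	idctSSEShortU8(coeffs, pixels);       // pixels[r * 8 + c] holds row r, column c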