path: root/thirdparty/embree-aarch64/common/simd/vllong4_avx2.h
// Copyright 2009-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

namespace embree
{ 
  /* 4-wide AVX2 64-bit long long type */
  template<>
  struct vllong<4>
  {
    ALIGNED_STRUCT_(32);
    
    typedef vboold4 Bool;

    enum  { size = 4 }; // number of SIMD elements
    union {             // data
      __m256i v; 
      long long i[4];
    };
    
    ////////////////////////////////////////////////////////////////////////////////
    /// Constructors, Assignment & Cast Operators
    ////////////////////////////////////////////////////////////////////////////////
       
    __forceinline vllong() {}
    __forceinline vllong(const vllong4& t) { v = t.v; }
    __forceinline vllong4& operator =(const vllong4& f) { v = f.v; return *this; }

    __forceinline vllong(const __m256i& t) { v = t; }
    __forceinline operator __m256i() const { return v; }
    __forceinline operator __m256d() const { return _mm256_castsi256_pd(v); }

    __forceinline vllong(long long i) {
      v = _mm256_set1_epi64x(i);
    }
    
    __forceinline vllong(long long a, long long b, long long c, long long d) {
      v = _mm256_set_epi64x(d,c,b,a);      
    }

    ////////////////////////////////////////////////////////////////////////////////
    /// Constants
    ////////////////////////////////////////////////////////////////////////////////
    
    __forceinline vllong(ZeroTy) : v(_mm256_setzero_si256()) {}
    __forceinline vllong(OneTy)  : v(_mm256_set1_epi64x(1)) {}
    __forceinline vllong(StepTy) : v(_mm256_set_epi64x(3,2,1,0)) {}
    __forceinline vllong(ReverseStepTy) : v(_mm256_set_epi64x(0,1,2,3)) {}
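
    // Usage sketch (illustrative, not part of the API): with the usual embree
    // tag constants from constants.h (zero, one, step, reverse_step):
    //   vllong4 s(step);          // <0,1,2,3>
    //   vllong4 r(reverse_step);  // <3,2,1,0>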

    ////////////////////////////////////////////////////////////////////////////////
    /// Loads and Stores
    ////////////////////////////////////////////////////////////////////////////////

    static __forceinline void store_nt(void* __restrict__ ptr, const vllong4& a) {
      // non-temporal (streaming) store; ptr must be 32-byte aligned
      _mm256_stream_ps((float*)ptr,_mm256_castsi256_ps(a));
    }

    static __forceinline vllong4 loadu(const void* addr)
    {
      return _mm256_loadu_si256((__m256i*)addr);
    }

    static __forceinline vllong4 load(const vllong4* addr) {
      return _mm256_load_si256((__m256i*)addr);
    }

    static __forceinline vllong4 load(const long long* addr) {
      return _mm256_load_si256((__m256i*)addr);
    }

    static __forceinline void store(void* ptr, const vllong4& v) {
      _mm256_store_si256((__m256i*)ptr,v);
    }

    static __forceinline void storeu(void* ptr, const vllong4& v) {
      _mm256_storeu_si256((__m256i*)ptr,v);
    }

    static __forceinline void storeu(const vboold4& mask, long long* ptr, const vllong4& f) {
#if defined(__AVX512VL__)
      _mm256_mask_storeu_epi64(ptr,mask,f);
#else
      _mm256_maskstore_pd((double*)ptr,mask,_mm256_castsi256_pd(f));
#endif
    }

    static __forceinline void store(const vboold4& mask, void* ptr, const vllong4& f) {
#if defined(__AVX512VL__)
      _mm256_mask_store_epi64(ptr,mask,f);
#else
      _mm256_maskstore_pd((double*)ptr,mask,_mm256_castsi256_pd(f));
#endif
    }

    static __forceinline vllong4 broadcast64bit(size_t v) {
      return _mm256_set1_epi64x(v);
    }

    static __forceinline size_t extract64bit(const vllong4& v)
    {
      return _mm_cvtsi128_si64(_mm256_castsi256_si128(v));
    }
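
    // Usage sketch (illustrative only): load() requires 32-byte alignment,
    // loadu()/storeu() accept any address, and store_nt() streams past the
    // cache (its pointer must still be 32-byte aligned).
    //   alignas(32) long long buf[4] = {1,2,3,4};
    //   vllong4 x = vllong4::load(buf);   // aligned load
    //   vllong4::storeu(buf, x + 1);      // unaligned store is always safe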


    ////////////////////////////////////////////////////////////////////////////////
    /// Array Access
    ////////////////////////////////////////////////////////////////////////////////
    
    __forceinline       long long& operator [](size_t index)       { assert(index < 4); return i[index]; }
    __forceinline const long long& operator [](size_t index) const { assert(index < 4); return i[index]; }

  };

  ////////////////////////////////////////////////////////////////////////////////
  /// Select
  ////////////////////////////////////////////////////////////////////////////////
  
  __forceinline vllong4 select(const vboold4& m, const vllong4& t, const vllong4& f) {
  #if defined(__AVX512VL__)
    return _mm256_mask_blend_epi64(m, f, t);
  #else
    return _mm256_castpd_si256(_mm256_blendv_pd(_mm256_castsi256_pd(f), _mm256_castsi256_pd(t), m));
  #endif
  }
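
  // Usage sketch (illustrative): per-lane blend, taking t where m is set and
  // f elsewhere, e.g.
  //   vllong4 r = select(a > b, vllong4(1), vllong4(0));  // r[i] = (a[i] > b[i]) ? 1 : 0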

  ////////////////////////////////////////////////////////////////////////////////
  /// Unary Operators
  ////////////////////////////////////////////////////////////////////////////////

#if defined(__AVX512VL__)
  __forceinline vboold4 asBool(const vllong4& a) { return _mm256_movepi64_mask(a); }
#else
  __forceinline vboold4 asBool(const vllong4& a) { return _mm256_castsi256_pd(a); }
#endif

  __forceinline vllong4 operator +(const vllong4& a) { return a; }
  __forceinline vllong4 operator -(const vllong4& a) { return _mm256_sub_epi64(_mm256_setzero_si256(), a); }

  ////////////////////////////////////////////////////////////////////////////////
  /// Binary Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vllong4 operator +(const vllong4& a, const vllong4& b) { return _mm256_add_epi64(a, b); }
  __forceinline vllong4 operator +(const vllong4& a, long long      b) { return a + vllong4(b); }
  __forceinline vllong4 operator +(long long      a, const vllong4& b) { return vllong4(a) + b; }

  __forceinline vllong4 operator -(const vllong4& a, const vllong4& b) { return _mm256_sub_epi64(a, b); }
  __forceinline vllong4 operator -(const vllong4& a, long long      b) { return a - vllong4(b); }
  __forceinline vllong4 operator -(long long      a, const vllong4& b) { return vllong4(a) - b; }

  /* multiplies only the sign-extended low 32 bits of each 64-bit lane
     (_mm256_mul_epi32); the result is exact only when both operands fit in 32 bits */
  __forceinline vllong4 operator *(const vllong4& a, const vllong4& b) { return _mm256_mul_epi32(a, b); }
  __forceinline vllong4 operator *(const vllong4& a, long long      b) { return a * vllong4(b); }
  __forceinline vllong4 operator *(long long      a, const vllong4& b) { return vllong4(a) * b; }
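  //   e.g. vllong4(0x100000000LL) * vllong4(2) == vllong4(0), since only the
  //   low 32 bits of each lane feed the multiply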

  __forceinline vllong4 operator &(const vllong4& a, const vllong4& b) { return _mm256_and_si256(a, b); }
  __forceinline vllong4 operator &(const vllong4& a, long long      b) { return a & vllong4(b); }
  __forceinline vllong4 operator &(long long      a, const vllong4& b) { return vllong4(a) & b; }

  __forceinline vllong4 operator |(const vllong4& a, const vllong4& b) { return _mm256_or_si256(a, b); }
  __forceinline vllong4 operator |(const vllong4& a, long long      b) { return a | vllong4(b); }
  __forceinline vllong4 operator |(long long      a, const vllong4& b) { return vllong4(a) | b; }

  __forceinline vllong4 operator ^(const vllong4& a, const vllong4& b) { return _mm256_xor_si256(a, b); }
  __forceinline vllong4 operator ^(const vllong4& a, long long      b) { return a ^ vllong4(b); }
  __forceinline vllong4 operator ^(long long      a, const vllong4& b) { return vllong4(a) ^ b; }

  __forceinline vllong4 operator <<(const vllong4& a, long long n) { return _mm256_slli_epi64(a, (int)n); }
  //__forceinline vllong4 operator >>(const vllong4& a, long long n) { return _mm256_srai_epi64(a, n); }

  __forceinline vllong4 operator <<(const vllong4& a, const vllong4& n) { return _mm256_sllv_epi64(a, n); }
  //__forceinline vllong4 operator >>(const vllong4& a, const vllong4& n) { return _mm256_srav_epi64(a, n); }
  //__forceinline vllong4 sra(const vllong4& a, long long b) { return _mm256_srai_epi64(a, b); }

  __forceinline vllong4 srl(const vllong4& a, long long b) { return _mm256_srli_epi64(a, (int)b); }
  
  //__forceinline vllong4 min(const vllong4& a, const vllong4& b) { return _mm256_min_epi64(a, b); }
  //__forceinline vllong4 min(const vllong4& a, long long      b) { return min(a,vllong4(b)); }
  //__forceinline vllong4 min(long long      a, const vllong4& b) { return min(vllong4(a),b); }

  //__forceinline vllong4 max(const vllong4& a, const vllong4& b) { return _mm256_max_epi64(a, b); }
  //__forceinline vllong4 max(const vllong4& a, long long      b) { return max(a,vllong4(b)); }
  //__forceinline vllong4 max(long long      a, const vllong4& b) { return max(vllong4(a),b); }
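
  // The shift operators and min/max commented out above are disabled because
  // _mm256_srai_epi64, _mm256_srav_epi64, _mm256_min_epi64 and
  // _mm256_max_epi64 require AVX-512VL and do not exist in plain AVX2.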

#if defined(__AVX512VL__)
  __forceinline vllong4 mask_and(const vboold4& m, const vllong4& c, const vllong4& a, const vllong4& b) { return _mm256_mask_and_epi64(c,m,a,b); }
  __forceinline vllong4 mask_or (const vboold4& m, const vllong4& c, const vllong4& a, const vllong4& b) { return _mm256_mask_or_epi64(c,m,a,b); }
#else
  __forceinline vllong4 mask_and(const vboold4& m, const vllong4& c, const vllong4& a, const vllong4& b) { return select(m, a & b, c); }
  __forceinline vllong4 mask_or (const vboold4& m, const vllong4& c, const vllong4& a, const vllong4& b) { return select(m, a | b, c); }
#endif
  
  ////////////////////////////////////////////////////////////////////////////////
  /// Assignment Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vllong4& operator +=(vllong4& a, const vllong4& b) { return a = a + b; }
  __forceinline vllong4& operator +=(vllong4& a, long long      b) { return a = a + b; }
  
  __forceinline vllong4& operator -=(vllong4& a, const vllong4& b) { return a = a - b; }
  __forceinline vllong4& operator -=(vllong4& a, long long      b) { return a = a - b; }

  __forceinline vllong4& operator *=(vllong4& a, const vllong4& b) { return a = a * b; }
  __forceinline vllong4& operator *=(vllong4& a, long long      b) { return a = a * b; }
  
  __forceinline vllong4& operator &=(vllong4& a, const vllong4& b) { return a = a & b; }
  __forceinline vllong4& operator &=(vllong4& a, long long      b) { return a = a & b; }
  
  __forceinline vllong4& operator |=(vllong4& a, const vllong4& b) { return a = a | b; }
  __forceinline vllong4& operator |=(vllong4& a, long long      b) { return a = a | b; }
  
  __forceinline vllong4& operator <<=(vllong4& a, long long      b) { return a = a << b; }
  //__forceinline vllong4& operator >>=(vllong4& a, long long      b) { return a = a >> b; }

  ////////////////////////////////////////////////////////////////////////////////
  /// Comparison Operators
  ////////////////////////////////////////////////////////////////////////////////

#if defined(__AVX512VL__)
  __forceinline vboold4 operator ==(const vllong4& a, const vllong4& b) { return _mm256_cmp_epi64_mask(a,b,_MM_CMPINT_EQ); }
  __forceinline vboold4 operator !=(const vllong4& a, const vllong4& b) { return _mm256_cmp_epi64_mask(a,b,_MM_CMPINT_NE); }
  __forceinline vboold4 operator < (const vllong4& a, const vllong4& b) { return _mm256_cmp_epi64_mask(a,b,_MM_CMPINT_LT); }
  __forceinline vboold4 operator >=(const vllong4& a, const vllong4& b) { return _mm256_cmp_epi64_mask(a,b,_MM_CMPINT_GE); }
  __forceinline vboold4 operator > (const vllong4& a, const vllong4& b) { return _mm256_cmp_epi64_mask(a,b,_MM_CMPINT_GT); }
  __forceinline vboold4 operator <=(const vllong4& a, const vllong4& b) { return _mm256_cmp_epi64_mask(a,b,_MM_CMPINT_LE); }
#else
  __forceinline vboold4 operator ==(const vllong4& a, const vllong4& b) { return _mm256_cmpeq_epi64(a,b); }
  __forceinline vboold4 operator !=(const vllong4& a, const vllong4& b) { return !(a == b); }
  __forceinline vboold4 operator > (const vllong4& a, const vllong4& b) { return _mm256_cmpgt_epi64(a,b); }
  __forceinline vboold4 operator < (const vllong4& a, const vllong4& b) { return _mm256_cmpgt_epi64(b,a); }
  __forceinline vboold4 operator >=(const vllong4& a, const vllong4& b) { return !(a < b); }
  __forceinline vboold4 operator <=(const vllong4& a, const vllong4& b) { return !(a > b); }
#endif

  __forceinline vboold4 operator ==(const vllong4& a, long long      b) { return a == vllong4(b); }
  __forceinline vboold4 operator ==(long long      a, const vllong4& b) { return vllong4(a) == b; }

  __forceinline vboold4 operator !=(const vllong4& a, long long      b) { return a != vllong4(b); }
  __forceinline vboold4 operator !=(long long      a, const vllong4& b) { return vllong4(a) != b; }

  __forceinline vboold4 operator > (const vllong4& a, long long      b) { return a >  vllong4(b); }
  __forceinline vboold4 operator > (long long      a, const vllong4& b) { return vllong4(a) >  b; }

  __forceinline vboold4 operator < (const vllong4& a, long long      b) { return a <  vllong4(b); }
  __forceinline vboold4 operator < (long long      a, const vllong4& b) { return vllong4(a) <  b; }

  __forceinline vboold4 operator >=(const vllong4& a, long long      b) { return a >= vllong4(b); }
  __forceinline vboold4 operator >=(long long      a, const vllong4& b) { return vllong4(a) >= b; }

  __forceinline vboold4 operator <=(const vllong4& a, long long      b) { return a <= vllong4(b); }
  __forceinline vboold4 operator <=(long long      a, const vllong4& b) { return vllong4(a) <= b; }

  __forceinline vboold4 eq(const vllong4& a, const vllong4& b) { return a == b; }
  __forceinline vboold4 ne(const vllong4& a, const vllong4& b) { return a != b; }
  __forceinline vboold4 lt(const vllong4& a, const vllong4& b) { return a <  b; }
  __forceinline vboold4 ge(const vllong4& a, const vllong4& b) { return a >= b; }
  __forceinline vboold4 gt(const vllong4& a, const vllong4& b) { return a >  b; }
  __forceinline vboold4 le(const vllong4& a, const vllong4& b) { return a <= b; }

#if defined(__AVX512VL__)
  __forceinline vboold4 eq(const vboold4& mask, const vllong4& a, const vllong4& b) { return _mm256_mask_cmp_epi64_mask(mask, a, b, _MM_CMPINT_EQ); }
  __forceinline vboold4 ne(const vboold4& mask, const vllong4& a, const vllong4& b) { return _mm256_mask_cmp_epi64_mask(mask, a, b, _MM_CMPINT_NE); }
  __forceinline vboold4 lt(const vboold4& mask, const vllong4& a, const vllong4& b) { return _mm256_mask_cmp_epi64_mask(mask, a, b, _MM_CMPINT_LT); }
  __forceinline vboold4 ge(const vboold4& mask, const vllong4& a, const vllong4& b) { return _mm256_mask_cmp_epi64_mask(mask, a, b, _MM_CMPINT_GE); }
  __forceinline vboold4 gt(const vboold4& mask, const vllong4& a, const vllong4& b) { return _mm256_mask_cmp_epi64_mask(mask, a, b, _MM_CMPINT_GT); }
  __forceinline vboold4 le(const vboold4& mask, const vllong4& a, const vllong4& b) { return _mm256_mask_cmp_epi64_mask(mask, a, b, _MM_CMPINT_LE); }
#else
  __forceinline vboold4 eq(const vboold4& mask, const vllong4& a, const vllong4& b) { return mask & (a == b); }
  __forceinline vboold4 ne(const vboold4& mask, const vllong4& a, const vllong4& b) { return mask & (a != b); }
  __forceinline vboold4 lt(const vboold4& mask, const vllong4& a, const vllong4& b) { return mask & (a <  b); }
  __forceinline vboold4 ge(const vboold4& mask, const vllong4& a, const vllong4& b) { return mask & (a >= b); }
  __forceinline vboold4 gt(const vboold4& mask, const vllong4& a, const vllong4& b) { return mask & (a >  b); }
  __forceinline vboold4 le(const vboold4& mask, const vllong4& a, const vllong4& b) { return mask & (a <= b); }
#endif

  __forceinline void xchg(const vboold4& m, vllong4& a, vllong4& b) {
    const vllong4 c = a; a = select(m,b,a); b = select(m,c,b);
  }
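
  // xchg conditionally swaps: lanes where m is set exchange values between
  // a and b; lanes where m is clear are left untouched.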

  __forceinline vboold4 test(const vllong4& a, const vllong4& b) {
#if defined(__AVX512VL__)
    return _mm256_test_epi64_mask(a,b);  // per-lane mask: bit i set if (a[i] & b[i]) != 0
#else
    // caution: _mm256_testz_si256 is a scalar test (1 iff (a & b) == 0 across
    // the whole register) broadcast into the mask, so its semantics differ
    // from the per-lane AVX-512 path above
    return _mm256_testz_si256(a,b);
#endif
  }

  ////////////////////////////////////////////////////////////////////////////////
  // Movement/Shifting/Shuffling Functions
  ////////////////////////////////////////////////////////////////////////////////

  template<int i0, int i1>
  __forceinline vllong4 shuffle(const vllong4& v) {
    return _mm256_castpd_si256(_mm256_permute_pd(_mm256_castsi256_pd(v), (i1 << 3) | (i0 << 2) | (i1 << 1) | i0));
  }

  template<int i>
  __forceinline vllong4 shuffle(const vllong4& v) {
    return shuffle<i, i>(v);
  }

  template<int i0, int i1>
  __forceinline vllong4 shuffle2(const vllong4& v) {
    return _mm256_castpd_si256(_mm256_permute2f128_pd(_mm256_castsi256_pd(v), _mm256_castsi256_pd(v), (i1 << 4) | i0));
  }

  __forceinline long long toScalar(const vllong4& v) {
    return _mm_cvtsi128_si64(_mm256_castsi256_si128(v));
  }
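
  // Usage sketch (illustrative): shuffle<i0,i1> permutes within each 128-bit
  // half; shuffle2<i0,i1> selects the halves themselves.
  //   vllong4 v(0,1,2,3);
  //   shuffle<1,0>(v);   // <1,0,3,2>
  //   shuffle2<1,0>(v);  // <2,3,0,1>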

#if defined(__AVX512VL__)
  __forceinline vllong4 permute(const vllong4& a, const __m256i& index) {
    // workaround for GCC 7.x, which lacks the _mm256_permutexvar_epi64
    // intrinsic: permutex2var with both sources set to a yields the same
    // permutation
#if defined(__GNUC__) && !defined(__INTEL_COMPILER) && !defined(__clang__)
    return _mm256_permutex2var_epi64(a,index,a);
#else
    return _mm256_permutexvar_epi64(index,a);
#endif
  }

  __forceinline vllong4 permutex2var(const vllong4& index, const vllong4& a, const vllong4& b) {
    return _mm256_permutex2var_epi64(a,index,b);
  }

#endif

  ////////////////////////////////////////////////////////////////////////////////
  /// Reductions
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vllong4 vreduce_and2(const vllong4& x) { return x & shuffle<1,0>(x); }
  __forceinline vllong4 vreduce_and (const vllong4& y) { const vllong4 x = vreduce_and2(y); return x & shuffle2<1,0>(x); }

  __forceinline vllong4 vreduce_or2(const vllong4& x) { return x | shuffle<1,0>(x); }
  __forceinline vllong4 vreduce_or (const vllong4& y) { const vllong4 x = vreduce_or2(y); return x | shuffle2<1,0>(x); }

  __forceinline vllong4 vreduce_add2(const vllong4& x) { return x + shuffle<1,0>(x); }
  __forceinline vllong4 vreduce_add (const vllong4& y) { const vllong4 x = vreduce_add2(y); return x + shuffle2<1,0>(x); }

  __forceinline long long reduce_add(const vllong4& a) { return toScalar(vreduce_add(a)); }
  __forceinline long long reduce_or (const vllong4& a) { return toScalar(vreduce_or(a)); }
  __forceinline long long reduce_and(const vllong4& a) { return toScalar(vreduce_and(a)); }
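
  // Usage sketch (illustrative): each reduction folds pairwise within the
  // 128-bit halves, then across them, e.g.
  //   reduce_add(vllong4(1,2,3,4));  // == 10
  //   reduce_or (vllong4(1,2,4,8));  // == 15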
  
  ////////////////////////////////////////////////////////////////////////////////
  /// Output Operators
  ////////////////////////////////////////////////////////////////////////////////
  
  __forceinline embree_ostream operator <<(embree_ostream cout, const vllong4& v)
  {
    cout << "<" << v[0];
    for (size_t i=1; i<4; i++) cout << ", " << v[i];
    cout << ">";
    return cout;
  }
}